From 53a128b2a3a469b06ae28ff013397b8d015433c8 Mon Sep 17 00:00:00 2001 From: wu champion Date: Mon, 22 Mar 2021 17:02:53 +0800 Subject: [PATCH 01/22] fix case,add delete file when processing over --- tests/pytest/query/query1970YearsAf.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index d2eead241f..441f835e89 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -18,7 +18,6 @@ import json import subprocess import datetime - from util.log import * from util.sql import * from util.cases import * @@ -71,7 +70,7 @@ class TDTestCase: "update": 0 } - # 设置创建的超级表格式 + # set stable schema stable1 = { "name": "stb2", "child_table_exists": "no", @@ -83,7 +82,7 @@ class TDTestCase: "insert_rows": 5000, "multi_thread_write_one_tbl": "no", "number_of_tbl_in_one_sql": 0, - "rows_per_tbl": 1000, + "rows_per_tbl": 1, "max_sql_len": 65480, "disorder_ratio": 0, "disorder_range": 1000, @@ -117,7 +116,7 @@ class TDTestCase: ] } - # 创建不同的超级表格式并添加至super_tables + # create different stables like stable1 and add to list super_tables super_tables = [] super_tables.append(stable1) database = { @@ -235,7 +234,7 @@ class TDTestCase: tdLog.info("==========step2:query join") self.sqlsquery() - # 进行数据落盘后检查 + # after wal and sync, check again tdSql.query("show dnodes") index = tdSql.getData(0, 0) tdDnodes.stop(index) @@ -246,8 +245,9 @@ class TDTestCase: def stop(self): tdSql.close() + rm_cmd = f"rm -f /tmp/insert* > /dev/null 2>&1" + _ = subprocess.check_output(rm_cmd, shell=True).decode("utf-8") tdLog.success(f"{__file__} successfully executed") - tdCases.addLinux(__file__, TDTestCase()) tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file From 7c749e24c8814f03eb147048158efa1ae7b7bfd9 Mon Sep 17 00:00:00 2001 From: wu champion Date: Fri, 19 Mar 2021 18:35:26 +0800 Subject: [PATCH 02/22] [TD-1380] fix the case of query1970YearsAf.py --- tests/pytest/query/query1970YearsAf.py | 253 +++++++++++++++++++++++++ 1 file changed, 253 insertions(+) create mode 100644 tests/pytest/query/query1970YearsAf.py diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py new file mode 100644 index 0000000000..9902f6908b --- /dev/null +++ b/tests/pytest/query/query1970YearsAf.py @@ -0,0 +1,253 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import taos +import sys +import os +import json +import subprocess +import datetime + + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.dnodes import TDDnode + +class TDTestCase: + + def __init__(self): + self.path = "" + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def getcfgPath(self, path): + binPath = os.path.dirname(os.path.realpath(__file__)) + binPath = binPath + "/../../../debug/" + tdLog.debug(f"binPath {binPath}") + binPath = os.path.realpath(binPath) + tdLog.debug(f"binPath real path {binPath}") + if path == "": + self.path = os.path.abspath(binPath + "../../") + else: + self.path = os.path.realpath(path) + return self.path + + def getCfgDir(self): + self.getcfgPath(self.path) + self.cfgDir = f"{self.path}/sim/psim/cfg" + return self.cfgDir + + def creatcfg(self): + dbinfo = { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp": 2, + "walLevel": 1, + "cachelast": 0, + "quorum": 1, + "fsync": 3000, + "update": 0 + } + + # 设置创建的超级表格式 + stable1 = { + "name": "stb2", + "child_table_exists": "no", + "childtable_count": 10, + "childtable_prefix": "t", + "auto_create_table": "no", + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 5000, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "rows_per_tbl": 1000, + "max_sql_len": 65480, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 20000, + "start_timestamp": "1969-12-31 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [ + {"type": "INT", "count": 2}, + {"type": "DOUBLE", "count": 2}, + {"type": "BIGINT", "count": 2}, + {"type": "FLOAT", "count": 2}, + {"type": "SMALLINT", "count": 2}, + {"type": "TINYINT", "count": 2}, + {"type": "BOOL", "count": 2}, + {"type": "NCHAR", "len": 3, "count": 1}, + {"type": "BINARY", "len": 8, "count": 1} + + ], + "tags": [ + {"type": "INT", "count": 2}, + {"type": "DOUBLE", "count": 2}, + {"type": "BIGINT", "count": 2}, + {"type": "FLOAT", "count": 2}, + {"type": "SMALLINT", "count": 2}, + {"type": "TINYINT", "count": 2}, + {"type": "BOOL", "count": 2}, + {"type": "NCHAR", "len": 3, "count": 1}, + {"type": "BINARY", "len": 8, "count": 1} + ] + } + + # 创建不同的超级表格式并添加至super_tables + super_tables = [] + super_tables.append(stable1) + database = { + "dbinfo": dbinfo, + "super_tables": super_tables + } + + cfgdir = self.getCfgDir() + create_table = { + "filetype": "insert", + "cfgdir": cfgdir, + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "/tmp/insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "num_of_records_per_req": 100, + "databases": [database] + } + return create_table + + def createinsertfile(self): + create_table = self.creatcfg() + date = datetime.datetime.now().strftime("%Y%m%d%H%M") + file_create_table = f"/tmp/insert_{date}.json" + + with open(file_create_table, 'w') as f: + json.dump(create_table, f) + return 
file_create_table + + def inserttable(self, filepath): + create_table_cmd = f"taosdemo -f {filepath} > /tmp/1.log 2>&1" + _ = subprocess.check_output(create_table_cmd, shell=True).decode("utf-8") + + def sqlsquery(self): + # stable query + tdSql.query( + "select * from stb2 where stb2.ts < '1970-01-01 00:00:00.000' " + ) + tdSql.checkRows(43200) + + tdSql.query( + "select * from stb2 where stb2.ts >= '1970-01-01 00:00:00.000' " + ) + tdSql.checkRows(6800) + + tdSql.query( + "select * from stb2 where stb2.ts > '1969-12-31 23:00:00.000' and stb2.ts <'1970-01-01 01:00:00.000' " + ) + tdSql.checkRows(3590) + + # child-table query + tdSql.query( + "select * from t0 where t0.ts < '1970-01-01 00:00:00.000' " + ) + tdSql.checkRows(4320) + + tdSql.query( + "select * from t1 where t1.ts >= '1970-01-01 00:00:00.000' " + ) + tdSql.checkRows(680) + + tdSql.query( + "select * from t9 where t9.ts > '1969-12-31 22:00:00.000' and t9.ts <'1970-01-01 02:00:00.000' " + ) + tdSql.checkRows(719) + + tdSql.query( + "select * from t0,t1 where t0.ts=t1.ts and t1.ts >= '1970-01-01 00:00:00.000' " + ) + tdSql.checkRows(680) + + tdSql.query( + "select diff(col1) from t0 where t0.ts >= '1970-01-01 00:00:00.000' " + ) + tdSql.checkRows(679) + + tdSql.query( + "select t0,col1 from stb2 where stb2.ts < '1970-01-01 00:00:00.000' order by ts" + ) + tdSql.checkRows(43200) + + # query with timestamp in 'where ...' + tdSql.query( + "select * from stb2 where stb2.ts > -28800000 " + ) + tdSql.checkRows(6790) + + tdSql.query( + "select * from stb2 where stb2.ts > -28800000 and stb2.ts < '1970-01-01 08:00:00.000' " + ) + tdSql.checkRows(6790) + + tdSql.query( + "select * from stb2 where stb2.ts < -28800000 and stb2.ts > '1969-12-31 22:00:00.000' " + ) + tdSql.checkRows(3590) + + def run(self): + s = 'reset query cache' + tdSql.execute(s) + s = 'create database if not exists db' + tdSql.execute(s) + s = 'use db' + tdSql.execute(s) + + tdLog.info("==========step1:create table stable and child table,then insert data automatically") + insertfile = self.createinsertfile() + self.inserttable(insertfile) + + tdLog.info("==========step2:query join") + self.sqlsquery() + + # 进行数据落盘后检查 + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + + tdLog.info("==========step3: query join again") + self.sqlsquery() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file From dff5c11f290453a349bdb885e00128586700bece Mon Sep 17 00:00:00 2001 From: wu champion Date: Sat, 20 Mar 2021 16:10:45 +0800 Subject: [PATCH 03/22] [test] add test case and bug case --- tests/pytest/fulltest.sh | 4 ++ tests/pytest/query/bug3351.py | 74 ++++++++++++++++++++++++++ tests/pytest/query/query1970YearsAf.py | 4 +- 3 files changed, 80 insertions(+), 2 deletions(-) create mode 100644 tests/pytest/query/bug3351.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 5037f2c399..aa8e679e81 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -196,6 +196,10 @@ python3 ./test.py -f query/bug2119.py python3 ./test.py -f query/isNullTest.py python3 ./test.py -f query/queryWithTaosdKilled.py python3 ./test.py -f query/floatCompare.py +python3 ./test.py -f query/query1970YearsAf.py +python3 ./test.py -f query/bug3351.py + + #stream python3 ./test.py -f stream/metric_1.py diff --git a/tests/pytest/query/bug3351.py 
b/tests/pytest/query/bug3351.py new file mode 100644 index 0000000000..288d071a69 --- /dev/null +++ b/tests/pytest/query/bug3351.py @@ -0,0 +1,74 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 36500") + tdSql.execute("use db") + tdLog.printNoPrefix("==========step1:create table && insert data") + + tdSql.execute( + "create table stb1 (ts timestamp, c1 int) TAGS(t1 int)" + ) + tdSql.execute("create table t0 using stb1 tags(1)") + tdSql.execute("insert into t0 values (-865000000, 1)") + tdSql.execute("insert into t0 values (-864000000, 2)") + tdSql.execute("insert into t0 values (-863000000, 3)") + tdSql.execute("insert into t0 values (-15230000, 4)") + tdSql.execute("insert into t0 values (-15220000, 5)") + tdSql.execute("insert into t0 values (-15210000, 6)") + + tdLog.printNoPrefix("==========step2:query") + # bug1:when ts > -864000000, return 0 rows; + # bug2:when ts = -15220000, return 0 rows. + tdSql.query('select * from t0 where ts < -864000000') + tdSql.checkRows(1) + tdSql.query('select * from t0 where ts <= -864000000') + tdSql.checkRows(2) + tdSql.query('select * from t0 where ts = -864000000') + tdSql.checkRows(1) + tdSql.query('select * from t0 where ts > -864000000') + tdSql.checkRows(4) + tdSql.query('select * from t0 where ts >= -864000000') + tdSql.checkRows(5) + tdSql.query('select * from t0 where ts < -15220000') + tdSql.checkRows(4) + tdSql.query('select * from t0 where ts <= -15220000') + tdSql.checkRows(5) + tdSql.query('select * from t0 where ts = -15220000') + tdSql.checkRows(1) + tdSql.query('select * from t0 where ts > -15220000') + tdSql.checkRows(1) + tdSql.query('select * from t0 where ts >= -15220000') + tdSql.checkRows(2) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index 9902f6908b..d2eead241f 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -88,7 +88,7 @@ class TDTestCase: "disorder_ratio": 0, "disorder_range": 1000, "timestamp_step": 20000, - "start_timestamp": "1969-12-31 00:00:00.000", + "start_timestamp": "1969-12-30 23:59:40.000", "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", @@ -153,7 +153,7 @@ class TDTestCase: return file_create_table def inserttable(self, filepath): - create_table_cmd = f"taosdemo -f {filepath} > /tmp/1.log 2>&1" + create_table_cmd = f"taosdemo -f {filepath} > /dev/null 2>&1" _ = subprocess.check_output(create_table_cmd, shell=True).decode("utf-8") def sqlsquery(self): 
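
For reference, the row counts asserted throughout query1970YearsAf.py all follow from the taosdemo config created above: 10 child tables with 5000 rows each, a 20000 ms timestamp_step, and the "1969-12-31 00:00:00.000" start_timestamp (the value the later patches in this series settle on; moving the start by even 20 seconds shifts the counts, which is presumably why the "1969-12-30 23:59:40.000" variant introduced here gets reverted). Below is a minimal sketch deriving the expected numbers with the plain Python stdlib; it is not part of the test suite. It uses UTC throughout, which is safe because the test parses both the start timestamp and the string query bounds in the same timezone, so the offset cancels (only the bare numeric bounds such as -28800000 additionally assume a UTC+8 test environment):

    from datetime import datetime, timezone

    TABLES, ROWS, STEP_MS = 10, 5000, 20000
    # aware pre-epoch datetimes give portable negative timestamps
    start_ms = int(datetime(1969, 12, 31, tzinfo=timezone.utc).timestamp() * 1000)

    def per_table(pred):
        # count the sampled timestamps (ms) of one child table matching pred
        return sum(pred(start_ms + k * STEP_MS) for k in range(ROWS))

    HOUR_MS = 3600 * 1000
    print(TABLES * per_table(lambda ts: ts < 0))    # 43200, rows before the epoch
    print(TABLES * per_table(lambda ts: ts >= 0))   # 6800, rows at/after the epoch
    print(TABLES * per_table(lambda ts: -HOUR_MS < ts < HOUR_MS))   # 3590
    print(per_table(lambda ts: -2 * HOUR_MS < ts < 2 * HOUR_MS))    # 719, the t9 check
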
From ef248d09fd248775dfea4a88136354b42dd54f1e Mon Sep 17 00:00:00 2001 From: wu champion Date: Mon, 22 Mar 2021 17:02:53 +0800 Subject: [PATCH 04/22] fix case,add delete file when processing over --- tests/pytest/query/query1970YearsAf.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index d2eead241f..441f835e89 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -18,7 +18,6 @@ import json import subprocess import datetime - from util.log import * from util.sql import * from util.cases import * @@ -71,7 +70,7 @@ class TDTestCase: "update": 0 } - # 设置创建的超级表格式 + # set stable schema stable1 = { "name": "stb2", "child_table_exists": "no", @@ -83,7 +82,7 @@ class TDTestCase: "insert_rows": 5000, "multi_thread_write_one_tbl": "no", "number_of_tbl_in_one_sql": 0, - "rows_per_tbl": 1000, + "rows_per_tbl": 1, "max_sql_len": 65480, "disorder_ratio": 0, "disorder_range": 1000, @@ -117,7 +116,7 @@ class TDTestCase: ] } - # 创建不同的超级表格式并添加至super_tables + # create different stables like stable1 and add to list super_tables super_tables = [] super_tables.append(stable1) database = { @@ -235,7 +234,7 @@ class TDTestCase: tdLog.info("==========step2:query join") self.sqlsquery() - # 进行数据落盘后检查 + # after wal and sync, check again tdSql.query("show dnodes") index = tdSql.getData(0, 0) tdDnodes.stop(index) @@ -246,8 +245,9 @@ class TDTestCase: def stop(self): tdSql.close() + rm_cmd = f"rm -f /tmp/insert* > /dev/null 2>&1" + _ = subprocess.check_output(rm_cmd, shell=True).decode("utf-8") tdLog.success(f"{__file__} successfully executed") - tdCases.addLinux(__file__, TDTestCase()) tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file From 2de96282490234d89195ad401d001bc71ab71bb5 Mon Sep 17 00:00:00 2001 From: wu champion Date: Tue, 23 Mar 2021 13:40:29 +0800 Subject: [PATCH 05/22] fix the case --- tests/pytest/query/query1970YearsAf.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index 441f835e89..9cc8948c2f 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -243,10 +243,11 @@ class TDTestCase: tdLog.info("==========step3: query join again") self.sqlsquery() - def stop(self): - tdSql.close() rm_cmd = f"rm -f /tmp/insert* > /dev/null 2>&1" _ = subprocess.check_output(rm_cmd, shell=True).decode("utf-8") + + def stop(self): + tdSql.close() tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) From 965d553964aea78aeb2e59b3bad9811ea08a86f5 Mon Sep 17 00:00:00 2001 From: wu champion Date: Tue, 23 Mar 2021 13:59:29 +0800 Subject: [PATCH 06/22] fix the case:add delete tmp file step --- tests/pytest/query/query1970YearsAf.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index 9cc8948c2f..ea83d8669d 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -87,7 +87,7 @@ class TDTestCase: "disorder_ratio": 0, "disorder_range": 1000, "timestamp_step": 20000, - "start_timestamp": "1969-12-30 23:59:40.000", + "start_timestamp": "1969-12-31 00:00:00.000", "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", @@ -172,7 +172,7 @@ class TDTestCase: ) tdSql.checkRows(3590) - # child-table query + # 
child-tables query tdSql.query( "select * from t0 where t0.ts < '1970-01-01 00:00:00.000' " ) @@ -243,6 +243,7 @@ class TDTestCase: tdLog.info("==========step3: query join again") self.sqlsquery() + # delete temporary file rm_cmd = f"rm -f /tmp/insert* > /dev/null 2>&1" _ = subprocess.check_output(rm_cmd, shell=True).decode("utf-8") From 5f27c38a0b0753d1ff9bcab9fbf64dd378d923ab Mon Sep 17 00:00:00 2001 From: wu champion <72908628+wu-champion@users.noreply.github.com> Date: Tue, 23 Mar 2021 14:10:25 +0800 Subject: [PATCH 07/22] Update query1970YearsAf.py --- tests/pytest/query/query1970YearsAf.py | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index 307180a29c..b3f3ff7a82 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -87,11 +87,7 @@ class TDTestCase: "disorder_ratio": 0, "disorder_range": 1000, "timestamp_step": 20000, -<<<<<<< HEAD "start_timestamp": "1969-12-31 00:00:00.000", -======= - "start_timestamp": "1969-12-30 23:59:40.000", ->>>>>>> 53a128b2a3a469b06ae28ff013397b8d015433c8 "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", @@ -176,11 +172,7 @@ class TDTestCase: ) tdSql.checkRows(3590) -<<<<<<< HEAD # child-tables query -======= - # child-table query ->>>>>>> 53a128b2a3a469b06ae28ff013397b8d015433c8 tdSql.query( "select * from t0 where t0.ts < '1970-01-01 00:00:00.000' " ) @@ -251,20 +243,13 @@ class TDTestCase: tdLog.info("==========step3: query join again") self.sqlsquery() -<<<<<<< HEAD # delete temporary file rm_cmd = f"rm -f /tmp/insert* > /dev/null 2>&1" _ = subprocess.check_output(rm_cmd, shell=True).decode("utf-8") def stop(self): tdSql.close() -======= - def stop(self): - tdSql.close() - rm_cmd = f"rm -f /tmp/insert* > /dev/null 2>&1" - _ = subprocess.check_output(rm_cmd, shell=True).decode("utf-8") ->>>>>>> 53a128b2a3a469b06ae28ff013397b8d015433c8 tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) From d2a0fc5b931459022819c94c6942e72940df4f85 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 30 Mar 2021 16:36:16 +0800 Subject: [PATCH 08/22] [TD-3314]: new staging directory for deleting vnodes --- src/dnode/src/dnodeMain.c | 12 +++ src/vnode/inc/vnodeBackup.h | 32 +++++++ src/vnode/src/vnodeBackup.c | 172 ++++++++++++++++++++++++++++++++++++ src/vnode/src/vnodeMain.c | 15 ++-- src/vnode/src/vnodeMgmt.c | 2 + 5 files changed, 224 insertions(+), 9 deletions(-) create mode 100644 src/vnode/inc/vnodeBackup.h create mode 100644 src/vnode/src/vnodeBackup.c diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 2cd5c637e5..ea0ef4655d 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -237,6 +237,18 @@ static int32_t dnodeInitStorage() { return -1; } + TDIR *tdir = tfsOpendir("vnode_bak/.staging"); + if (tfsReaddir(tdir) != NULL) { + dError("vnode_bak/.staging dir not empty, fix it first."); + tfsClosedir(tdir); + return -1; + } + + if (tfsMkdir("vnode_bak/.staging") < 0) { + dError("failed to create vnode_bak/.staging dir since %s", tstrerror(terrno)); + return -1; + } + dnodeCheckDataDirOpenned(tsDnodeDir); dInfo("dnode storage is initialized at %s", tsDnodeDir); diff --git a/src/vnode/inc/vnodeBackup.h b/src/vnode/inc/vnodeBackup.h new file mode 100644 index 0000000000..0a6b26546c --- 
/dev/null +++ b/src/vnode/inc/vnodeBackup.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_VNODE_BACKUP_H +#define TDENGINE_VNODE_BACKUP_H + +#ifdef __cplusplus +extern "C" { +#endif +#include "vnodeInt.h" + +int32_t vnodeInitBackup(); +void vnodeCleanupBackup(); +int32_t vnodeBackup(int32_t vgId); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/vnode/src/vnodeBackup.c b/src/vnode/src/vnodeBackup.c new file mode 100644 index 0000000000..a0a975be2b --- /dev/null +++ b/src/vnode/src/vnodeBackup.c @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "taoserror.h" +#include "taosmsg.h" +#include "tutil.h" +#include "tqueue.h" +#include "tglobal.h" +#include "tfs.h" +#include "vnodeBackup.h" +#include "vnodeMain.h" + +typedef struct { + int32_t vgId; +} SVBackupMsg; + +typedef struct { + pthread_t thread; + int32_t workerId; +} SVBackupWorker; + +typedef struct { + int32_t num; + SVBackupWorker *worker; +} SVBackupWorkerPool; + +static SVBackupWorkerPool tsVBackupPool; +static taos_qset tsVBackupQset; +static taos_queue tsVBackupQueue; + +static void vnodeProcessBackupMsg(SVBackupMsg *pMsg) { + int32_t vgId = pMsg->vgId; + char newDir[TSDB_FILENAME_LEN] = {0}; + char stagingDir[TSDB_FILENAME_LEN] = {0}; + + sprintf(newDir, "%s/vnode%d", "vnode_bak", vgId); + sprintf(stagingDir, "%s/.staging/vnode%d", "vnode_bak", vgId); + + if (tsEnableVnodeBak) { + tfsRmdir(newDir); + tfsRename(stagingDir, newDir); + } else { + vInfo("vgId:%d, vnode backup not enabled", vgId); + + tfsRmdir(stagingDir); + } +} + +static void *vnodeBackupFunc(void *param) { + while (1) { + SVBackupMsg *pMsg = NULL; + if (taosReadQitemFromQset(tsVBackupQset, NULL, (void **)&pMsg, NULL) == 0) { + vDebug("qset:%p, vbackup got no message from qset, exiting", tsVBackupQset); + break; + } + + vTrace("vgId:%d, will be processed in vbackup queue", pMsg->vgId); + vnodeProcessBackupMsg(pMsg); + + vTrace("vgId:%d, disposed in vbackup worker", pMsg->vgId); + taosFreeQitem(pMsg); + } + + return NULL; +} + +static int32_t vnodeStartBackup() { + tsVBackupQueue = taosOpenQueue(); + if (tsVBackupQueue == NULL) return TSDB_CODE_DND_OUT_OF_MEMORY; + + taosAddIntoQset(tsVBackupQset, tsVBackupQueue, NULL); + + for (int32_t i = 0; i < tsVBackupPool.num; ++i) { + SVBackupWorker *pWorker = tsVBackupPool.worker + i; + pWorker->workerId = i; + + pthread_attr_t thAttr; + 
pthread_attr_init(&thAttr); + pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&pWorker->thread, &thAttr, vnodeBackupFunc, pWorker) != 0) { + vError("failed to create thread to process vbackup queue, reason:%s", strerror(errno)); + } + + pthread_attr_destroy(&thAttr); + + vDebug("vbackup:%d is launched, total:%d", pWorker->workerId, tsVBackupPool.num); + } + + vDebug("vbackup queue:%p is allocated", tsVBackupQueue); + + return TSDB_CODE_SUCCESS; +} + +static int32_t vnodeWriteIntoBackupWorker(int32_t vgId) { + SVBackupMsg *pMsg = taosAllocateQitem(sizeof(SVBackupMsg)); + if (pMsg == NULL) return TSDB_CODE_VND_OUT_OF_MEMORY; + + pMsg->vgId = vgId; + + int32_t code = taosWriteQitem(tsVBackupQueue, TAOS_QTYPE_RPC, pMsg); + if (code == 0) code = TSDB_CODE_DND_ACTION_IN_PROGRESS; + + return code; +} + +int32_t vnodeBackup(int32_t vgId) { + vTrace("vgId:%d, will backup", vgId); + return vnodeWriteIntoBackupWorker(vgId); +} + +int32_t vnodeInitBackup() { + tsVBackupQset = taosOpenQset(); + + tsVBackupPool.num = 1; + tsVBackupPool.worker = calloc(sizeof(SVBackupWorker), tsVBackupPool.num); + + if (tsVBackupPool.worker == NULL) return -1; + for (int32_t i = 0; i < tsVBackupPool.num; ++i) { + SVBackupWorker *pWorker = tsVBackupPool.worker + i; + pWorker->workerId = i; + vDebug("vbackup:%d is created", i); + } + + vDebug("vbackup is initialized, num:%d qset:%p", tsVBackupPool.num, tsVBackupQset); + + return vnodeStartBackup(); +} + +void vnodeCleanupBackup() { + for (int32_t i = 0; i < tsVBackupPool.num; ++i) { + SVBackupWorker *pWorker = tsVBackupPool.worker + i; + if (taosCheckPthreadValid(pWorker->thread)) { + taosQsetThreadResume(tsVBackupQset); + } + vDebug("vbackup:%d is closed", i); + } + + for (int32_t i = 0; i < tsVBackupPool.num; ++i) { + SVBackupWorker *pWorker = tsVBackupPool.worker + i; + vDebug("vbackup:%d start to join", i); + if (taosCheckPthreadValid(pWorker->thread)) { + pthread_join(pWorker->thread, NULL); + } + vDebug("vbackup:%d join success", i); + } + + vDebug("vbackup is closed, qset:%p", tsVBackupQset); + + taosCloseQset(tsVBackupQset); + tsVBackupQset = NULL; + + tfree(tsVBackupPool.worker); + + vDebug("vbackup queue:%p is freed", tsVBackupQueue); + taosCloseQueue(tsVBackupQueue); + tsVBackupQueue = NULL; +} diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 441e951250..7a14cc7f5b 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -27,6 +27,7 @@ #include "vnodeVersion.h" #include "vnodeMgmt.h" #include "vnodeWorker.h" +#include "vnodeBackup.h" #include "vnodeMain.h" static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno); @@ -448,18 +449,14 @@ void vnodeDestroy(SVnodeObj *pVnode) { if (pVnode->dropped) { char rootDir[TSDB_FILENAME_LEN] = {0}; - char newDir[TSDB_FILENAME_LEN] = {0}; + char stagingDir[TSDB_FILENAME_LEN] = {0}; sprintf(rootDir, "%s/vnode%d", "vnode", vgId); - sprintf(newDir, "%s/vnode%d", "vnode_bak", vgId); + sprintf(stagingDir, "%s/.staging/vnode%d", "vnode_bak", vgId); - if (0 == tsEnableVnodeBak) { - vInfo("vgId:%d, vnode backup not enabled", pVnode->vgId); - } else { - tfsRmdir(newDir); - tfsRename(rootDir, newDir); - } + tfsRename(rootDir, stagingDir); + + vnodeBackup(vgId); - tfsRmdir(rootDir); dnodeSendStatusMsgToMnode(); } diff --git a/src/vnode/src/vnodeMgmt.c b/src/vnode/src/vnodeMgmt.c index 71d9bc07f5..32f9532138 100644 --- a/src/vnode/src/vnodeMgmt.c +++ b/src/vnode/src/vnodeMgmt.c @@ -17,6 +17,7 @@ #include "os.h" #include "dnode.h" #include 
"vnodeStatus.h" +#include "vnodeBackup.h" #include "vnodeWorker.h" #include "vnodeRead.h" #include "vnodeWrite.h" @@ -29,6 +30,7 @@ static void vnodeCleanupHash(void); static void vnodeIncRef(void *ptNode); static SStep tsVnodeSteps[] = { + {"vnode-backup", vnodeInitBackup, vnodeCleanupBackup}, {"vnode-worker", vnodeInitMWorker, vnodeCleanupMWorker}, {"vnode-write", vnodeInitWrite, vnodeCleanupWrite}, {"vnode-read", vnodeInitRead, vnodeCleanupRead}, From 61d7d9ab41b52e1327c40987facff6c1f547e91d Mon Sep 17 00:00:00 2001 From: nianhongdong Date: Wed, 31 Mar 2021 13:46:01 +0800 Subject: [PATCH 09/22] Update JDBCDemo.java MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 71和76行测试示例的数据库和表名没有动态拼接,测试用在前面改数据库名和表名,会导致运行报错 --- .../JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java index 07a21e79d1..da865b3ffd 100644 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java +++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java @@ -68,12 +68,12 @@ public class JDBCDemo { } private void insert() { - final String sql = "insert into test.weather (ts, temperature, humidity) values(now, 20.5, 34)"; + final String sql = "insert into " + dbName + "." + tbName + " (ts, temperature, humidity) values(now, 20.5, 34)"; exuete(sql); } private void select() { - final String sql = "select * from test.weather"; + final String sql = "select * from "+ dbName + "." + tbName; executeQuery(sql); } From 536a9e9f3fcb7792ee3bc5943bf342d6d03a8cb5 Mon Sep 17 00:00:00 2001 From: slguan Date: Wed, 31 Mar 2021 16:55:31 +0800 Subject: [PATCH 10/22] remove FResetVersion function --- src/inc/tsync.h | 4 ---- src/sync/inc/syncInt.h | 1 - src/sync/src/syncMain.c | 1 - src/sync/src/syncRestore.c | 1 - src/vnode/inc/vnodeSync.h | 1 - src/vnode/src/vnodeMain.c | 1 - src/vnode/src/vnodeSync.c | 19 ++----------------- 7 files changed, 2 insertions(+), 26 deletions(-) diff --git a/src/inc/tsync.h b/src/inc/tsync.h index 99dfd3a6a3..d1b68e3f5a 100644 --- a/src/inc/tsync.h +++ b/src/inc/tsync.h @@ -79,9 +79,6 @@ typedef void (*FStopSyncFile)(int32_t vgId, uint64_t fversion); // get file version typedef int32_t (*FGetVersion)(int32_t vgId, uint64_t *fver, uint64_t *vver); -// reset version -typedef int32_t (*FResetVersion)(int32_t vgId, uint64_t fver); - typedef int32_t (*FSendFile)(void *tsdb, SOCKET socketFd); typedef int32_t (*FRecvFile)(void *tsdb, SOCKET socketFd); @@ -99,7 +96,6 @@ typedef struct { FStartSyncFile startSyncFileFp; FStopSyncFile stopSyncFileFp; FGetVersion getVersionFp; - FResetVersion resetVersionFp; FSendFile sendFileFp; FRecvFile recvFileFp; } SSyncInfo; diff --git a/src/sync/inc/syncInt.h b/src/sync/inc/syncInt.h index 2b87938474..91613ae351 100644 --- a/src/sync/inc/syncInt.h +++ b/src/sync/inc/syncInt.h @@ -117,7 +117,6 @@ typedef struct SSyncNode { FStartSyncFile startSyncFileFp; FStopSyncFile stopSyncFileFp; FGetVersion getVersionFp; - FResetVersion resetVersionFp; FSendFile sendFileFp; FRecvFile recvFileFp; pthread_mutex_t mutex; diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 76d4379c5f..d21743d40a 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -182,7 +182,6 @@ int64_t syncStart(const SSyncInfo *pInfo) { pNode->startSyncFileFp = 
pInfo->startSyncFileFp; pNode->stopSyncFileFp = pInfo->stopSyncFileFp; pNode->getVersionFp = pInfo->getVersionFp; - pNode->resetVersionFp = pInfo->resetVersionFp; pNode->sendFileFp = pInfo->sendFileFp; pNode->recvFileFp = pInfo->recvFileFp; diff --git a/src/sync/src/syncRestore.c b/src/sync/src/syncRestore.c index 22d0a27581..c0d66316cd 100644 --- a/src/sync/src/syncRestore.c +++ b/src/sync/src/syncRestore.c @@ -238,7 +238,6 @@ static int32_t syncRestoreDataStepByStep(SSyncPeer *pPeer) { (*pNode->stopSyncFileFp)(pNode->vgId, fversion); nodeVersion = fversion; - if (pNode->resetVersionFp) (*pNode->resetVersionFp)(pNode->vgId, fversion); sInfo("%s, start to restore wal, fver:%" PRIu64, pPeer->id, nodeVersion); uint64_t wver = 0; diff --git a/src/vnode/inc/vnodeSync.h b/src/vnode/inc/vnodeSync.h index 75d7ffbabd..28fb63dd6a 100644 --- a/src/vnode/inc/vnodeSync.h +++ b/src/vnode/inc/vnodeSync.h @@ -30,7 +30,6 @@ void vnodeStopSyncFile(int32_t vgId, uint64_t fversion); void vnodeConfirmForard(int32_t vgId, void *wparam, int32_t code); int32_t vnodeWriteToCache(int32_t vgId, void *wparam, int32_t qtype, void *rparam); int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver); -int32_t vnodeResetVersion(int32_t vgId, uint64_t fver); void vnodeConfirmForward(void *pVnode, uint64_t version, int32_t code, bool force); diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 441e951250..96a9decf4b 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -364,7 +364,6 @@ int32_t vnodeOpen(int32_t vgId) { syncInfo.startSyncFileFp = vnodeStartSyncFile; syncInfo.stopSyncFileFp = vnodeStopSyncFile; syncInfo.getVersionFp = vnodeGetVersion; - syncInfo.resetVersionFp = vnodeResetVersion; syncInfo.sendFileFp = tsdbSyncSend; syncInfo.recvFileFp = tsdbSyncRecv; syncInfo.pTsdb = pVnode->tsdb; diff --git a/src/vnode/src/vnodeSync.c b/src/vnode/src/vnodeSync.c index 929dd15926..aa4cf0fc15 100644 --- a/src/vnode/src/vnodeSync.c +++ b/src/vnode/src/vnodeSync.c @@ -107,8 +107,9 @@ void vnodeStopSyncFile(int32_t vgId, uint64_t fversion) { pVnode->fversion = fversion; pVnode->version = fversion; vnodeSaveVersion(pVnode); + walResetVersion(pVnode->wal, fversion); - vDebug("vgId:%d, datafile is synced, fver:%" PRIu64 " vver:%" PRIu64, vgId, fversion, fversion); + vInfo("vgId:%d, datafile is synced, fver:%" PRIu64 " vver:%" PRIu64, vgId, fversion, fversion); vnodeSetReadyStatus(pVnode); vnodeRelease(pVnode); @@ -158,22 +159,6 @@ int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver) { return code; } -int32_t vnodeResetVersion(int32_t vgId, uint64_t fver) { - SVnodeObj *pVnode = vnodeAcquire(vgId); - if (pVnode == NULL) { - vError("vgId:%d, vnode not found while reset version", vgId); - return -1; - } - - pVnode->fversion = fver; - pVnode->version = fver; - walResetVersion(pVnode->wal, fver); - vInfo("vgId:%d, version reset to %" PRIu64, vgId, fver); - - vnodeRelease(pVnode); - return 0; -} - void vnodeConfirmForward(void *vparam, uint64_t version, int32_t code, bool force) { SVnodeObj *pVnode = vparam; syncConfirmForward(pVnode->sync, version, code, force); From 0bbfc5a532d94ed49a35f2164ee84c9cd7aeeaa1 Mon Sep 17 00:00:00 2001 From: slguan Date: Wed, 31 Mar 2021 18:32:56 +0800 Subject: [PATCH 11/22] Bxiao request changed from 100 to 1000 --- src/inc/taosdef.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index 252e37a5d9..024bc198df 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -259,7 +259,7 @@ 
do { \ #define TSDB_MIN_TABLES 4 #define TSDB_MAX_TABLES 10000000 #define TSDB_DEFAULT_TABLES 1000000 -#define TSDB_TABLES_STEP 100 +#define TSDB_TABLES_STEP 1000 #define TSDB_MIN_DAYS_PER_FILE 1 #define TSDB_MAX_DAYS_PER_FILE 3650 From 6cb5af8072ae1d4a196c7d666d459654146aa8e7 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 31 Mar 2021 21:40:40 +0800 Subject: [PATCH 12/22] Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5636) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 240 ++++++++++-------- tests/pytest/fulltest.sh | 24 +- ...taosdemoTest2.py => taosdemoTestTblAlt.py} | 50 +++- 3 files changed, 185 insertions(+), 129 deletions(-) rename tests/pytest/tools/{taosdemoTest2.py => taosdemoTestTblAlt.py} (63%) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 3725712339..9f367b41f8 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -124,7 +124,7 @@ typedef enum enum_INSERT_MODE { typedef enum enumQUERY_TYPE { NO_INSERT_TYPE, - INSERT_TYPE, + INSERT_TYPE, QUERY_TYPE_BUT } QUERY_TYPE; @@ -229,7 +229,7 @@ typedef struct SColumn_S { typedef struct SSuperTable_S { char sTblName[MAX_TB_NAME_SIZE+1]; int childTblCount; - bool childTblExists; // 0: no, 1: yes + bool childTblExists; // 0: no, 1: yes int batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table char childTblPrefix[MAX_TB_NAME_SIZE]; @@ -239,15 +239,15 @@ typedef struct SSuperTable_S { int childTblOffset; int multiThreadWriteOneTbl; // 0: no, 1: yes - int interlaceRows; // + int interlaceRows; // int disorderRatio; // 0: no disorder, >0: x% int disorderRange; // ms or us by database precision - int maxSqlLen; // + int maxSqlLen; // int insertInterval; // insert interval, will override global insert interval int64_t insertRows; // 0: no limit int timeStampStep; - char startTimestamp[MAX_TB_NAME_SIZE]; // + char startTimestamp[MAX_TB_NAME_SIZE]; // char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json char sampleFile[MAX_FILE_NAME_LEN+1]; char tagsFile[MAX_FILE_NAME_LEN+1]; @@ -539,7 +539,7 @@ SArguments g_args = { true, // insert_only false, // debug_print false, // verbose_print - false, // performance statistic print + false, // performance statistic print false, // answer_yes; "./output.txt", // output_file 0, // mode : sync or async @@ -641,7 +641,7 @@ static void printHelp() { "The password to use when connecting to the server. Default is 'taosdata'."); printf("%s%s%s%s\n", indent, "-c", indent, "Configuration directory. Default is '/etc/taos/'."); -#endif +#endif printf("%s%s%s%s\n", indent, "-h", indent, "The host to connect to TDengine. Default is localhost."); printf("%s%s%s%s\n", indent, "-p", indent, @@ -684,7 +684,7 @@ static void printHelp() { "Print debug info."); printf("%s%s%s%s\n", indent, "-V, --version", indent, "Print version info."); -/* printf("%s%s%s%s\n", indent, "-D", indent, +/* printf("%s%s%s%s\n", indent, "-D", indent, "if elete database if exists. 
0: no, 1: yes, default is 1"); */ } @@ -749,7 +749,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { && strcasecmp(argv[i], "SMALLINT") && strcasecmp(argv[i], "BIGINT") && strcasecmp(argv[i], "DOUBLE") - && strcasecmp(argv[i], "BINARY") + && strcasecmp(argv[i], "BINARY") && strcasecmp(argv[i], "NCHAR")) { printHelp(); ERROR_EXIT( "Invalid data_type!\n"); @@ -1762,7 +1762,7 @@ static void printfQuerySystemInfo(TAOS * taos) { } for (int i = 0; i < dbCount; i++) { - // printf database info + // printf database info printfDbInfoForQueryToFile(filename, dbInfos[i], i); // show db.vgroups @@ -2098,7 +2098,7 @@ static int calcRowLen(SSuperTable* superTbls) { lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; } else if (strcasecmp(dataType, "FLOAT") == 0) { lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { + } else if (strcasecmp(dataType, "DOUBLE") == 0) { lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42; } else { printf("get error tag type : %s\n", dataType); @@ -2262,7 +2262,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, /* if (TBL_ALREADY_EXISTS == superTbls->childTblExists) { - //get all child table name use cmd: select tbname from superTblName; + //get all child table name use cmd: select tbname from superTblName; int childTblCount = 10000; superTbls->childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); if (superTbls->childTblName == NULL) { @@ -2289,7 +2289,7 @@ static int createSuperTable(TAOS * taos, char* dbName, int lenOfOneRow = 0; for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { char* dataType = superTbls->columns[colIndex].dataType; - + if (strcasecmp(dataType, "BINARY") == 0) { len += snprintf(cols + len, STRING_LEN - len, ", col%d %s(%d)", colIndex, "BINARY", @@ -2386,7 +2386,7 @@ static int createSuperTable(TAOS * taos, char* dbName, len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "FLOAT"); lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { + } else if (strcasecmp(dataType, "DOUBLE") == 0) { len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "DOUBLE"); lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42; @@ -2638,7 +2638,7 @@ static void* createTable(void *sarg) lastPrintTime = currentPrintTime; } } - + if (0 != len) { verbosePrint("%s() %d buffer: %s\n", __func__, __LINE__, buffer); if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE, false)) { @@ -2703,7 +2703,7 @@ static int startMultiThreadCreateChildTable( t_info->minDelay = INT16_MAX; pthread_create(pids + i, NULL, createTable, t_info); } - + for (int i = 0; i < threads; i++) { pthread_join(pids[i], NULL); } @@ -2920,7 +2920,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( cJSON* stbInfo, SSuperTable* superTbls) { bool ret = false; - // columns + // columns cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns"); if (columns && columns->type != cJSON_Array) { printf("ERROR: failed to read json, columns not found\n"); @@ -2958,7 +2958,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( count = 1; } - // column info + // column info memset(&columnCase, 0, sizeof(StrColumn)); cJSON *dataType = cJSON_GetObjectItem(column, "type"); if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { @@ -2989,7 +2989,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( count = 1; index = 0; - // tags + // tags cJSON *tags = 
cJSON_GetObjectItem(stbInfo, "tags"); if (!tags || tags->type != cJSON_Array) { debugPrint("%s() LN%d, failed to read json, tags not found\n", __func__, __LINE__); @@ -3018,7 +3018,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( count = 1; } - // column info + // column info memset(&columnCase, 0, sizeof(StrColumn)); cJSON *dataType = cJSON_GetObjectItem(tag, "type"); if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { @@ -3166,7 +3166,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (numRecPerReq && numRecPerReq->type == cJSON_Number) { g_args.num_of_RPR = numRecPerReq->valueint; } else if (!numRecPerReq) { - g_args.num_of_RPR = 100; + g_args.num_of_RPR = 0xffff; } else { errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", __func__, __LINE__); goto PARSE_OVER; @@ -3209,7 +3209,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* dbinfos = cJSON_GetArrayItem(dbs, i); if (dbinfos == NULL) continue; - // dbinfo + // dbinfo cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo"); if (!dbinfo || dbinfo->type != cJSON_Object) { printf("ERROR: failed to read json, dbinfo not found\n"); @@ -3615,7 +3615,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } g_Dbs.db[i].superTbls[j].maxSqlLen = len; } else if (!maxSqlLen) { - g_Dbs.db[i].superTbls[j].maxSqlLen = TSDB_MAX_SQL_LEN; + g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len; } else { printf("ERROR: failed to read json, maxSqlLen not found\n"); goto PARSE_OVER; @@ -3748,7 +3748,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* user = cJSON_GetObjectItem(root, "user"); if (user && user->type == cJSON_String && user->valuestring != NULL) { - tstrncpy(g_queryInfo.user, user->valuestring, MAX_USERNAME_SIZE); + tstrncpy(g_queryInfo.user, user->valuestring, MAX_USERNAME_SIZE); } else if (!user) { tstrncpy(g_queryInfo.user, "root", MAX_USERNAME_SIZE); ; } @@ -3805,7 +3805,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { goto PARSE_OVER; } - // super_table_query + // super_table_query cJSON *specifiedQuery = cJSON_GetObjectItem(root, "specified_table_query"); if (!specifiedQuery) { g_queryInfo.specifiedQueryInfo.concurrent = 0; @@ -3930,7 +3930,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } } - // sub_table_query + // sub_table_query cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query"); if (!superQuery) { g_queryInfo.superQueryInfo.threadCnt = 0; @@ -3996,7 +3996,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* subinterval = cJSON_GetObjectItem(superQuery, "interval"); if (subinterval && subinterval->type == cJSON_Number) { g_queryInfo.superQueryInfo.subscribeInterval = subinterval->valueint; - } else if (!subinterval) { + } else if (!subinterval) { //printf("failed to read json, subscribe interval no found\n"); //goto PARSE_OVER; g_queryInfo.superQueryInfo.subscribeInterval = 10000; @@ -4200,71 +4200,77 @@ static int getRowDataFromSample(char* dataBuf, int maxLen, int64_t timestamp, dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); (*sampleUsePos)++; - + return dataLen; } -static int generateRowData(char* dataBuf, int maxLen, int64_t timestamp, SSuperTable* stbInfo) { - int dataLen = 0; - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp); +static int generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo) { + int dataLen = 0; + char *pstr = recBuf; + int maxLen = MAX_DATA_SIZE; + + dataLen += snprintf(pstr + dataLen, 
maxLen - dataLen, "(%" PRId64 ", ", timestamp); + for (int i = 0; i < stbInfo->columnCount; i++) { if ((0 == strncasecmp(stbInfo->columns[i].dataType, "binary", 6)) || (0 == strncasecmp(stbInfo->columns[i].dataType, "nchar", 5))) { if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { errorPrint( "binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); - return (-1); + return -1; } char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); if (NULL == buf) { errorPrint( "calloc failed! size:%d\n", stbInfo->columns[i].dataLen); - return (-1); + return -1; } rand_string(buf, stbInfo->columns[i].dataLen); - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "\'%s\', ", buf); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\', ", buf); tmfree(buf); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "int", 3)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_int()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bigint", 6)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "float", 5)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%f, ", rand_float()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "double", 6)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%f, ", rand_double()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "smallint", 8)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_smallint()); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_smallint()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "tinyint", 7)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_tinyint()); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_tinyint()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bool", 4)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_bool()); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_bool()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "timestamp", 9)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); } else { errorPrint( "No support data type: %s\n", stbInfo->columns[i].dataType); - return (-1); + return -1; } } dataLen -= 2; - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, ")"); - return dataLen; + verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf); + + return strlen(recBuf); } -static int32_t generateData(char *res, char **data_type, +static int32_t generateData(char *recBuf, char **data_type, int num_of_cols, int64_t timestamp, int lenOfBinary) { - memset(res, 0, MAX_DATA_SIZE); - char *pstr = res; + memset(recBuf, 0, MAX_DATA_SIZE); + char *pstr = recBuf; pstr += sprintf(pstr, "(%" PRId64, timestamp); int c = 0; @@ -4285,7 +4291,7 @@ static int32_t generateData(char *res, char **data_type, } else if (strcasecmp(data_type[i % c], "smallint") == 0) { pstr += sprintf(pstr, ", %d", rand_smallint()); } else if 
(strcasecmp(data_type[i % c], "int") == 0) { - pstr += sprintf(pstr, ", %d", rand_int()); + pstr += sprintf(pstr, ", %d", rand_int()); } else if (strcasecmp(data_type[i % c], "bigint") == 0) { pstr += sprintf(pstr, ", %" PRId64, rand_bigint()); } else if (strcasecmp(data_type[i % c], "float") == 0) { @@ -4308,7 +4314,7 @@ static int32_t generateData(char *res, char **data_type, free(s); } - if (pstr - res > MAX_DATA_SIZE) { + if (strlen(recBuf) > MAX_DATA_SIZE) { perror("column length too long, abort"); exit(-1); } @@ -4316,7 +4322,7 @@ static int32_t generateData(char *res, char **data_type, pstr += sprintf(pstr, ")"); - return (int32_t)(pstr - res); + return (int32_t)strlen(recBuf); } static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { @@ -4325,9 +4331,9 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { sampleDataBuf = calloc( superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); if (sampleDataBuf == NULL) { - errorPrint("%s() LN%d, Failed to calloc %d Bytes, reason:%s\n", + errorPrint("%s() LN%d, Failed to calloc %d Bytes, reason:%s\n", __func__, __LINE__, - superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, + superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, strerror(errno)); return -1; } @@ -4396,7 +4402,7 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int tableSeq) static int generateDataTail(char *tableName, int32_t tableSeq, threadInfo* pThreadInfo, SSuperTable* superTblInfo, - int batch, char* buffer, int64_t insertRows, + int batch, char* buffer, int remainderBufLen, int64_t insertRows, int64_t startFrom, uint64_t startTime, int *pSamplePos, int *dataLen) { int len = 0; int ncols_per_record = 1; // count first col ts @@ -4413,18 +4419,19 @@ static int generateDataTail(char *tableName, int32_t tableSeq, int k = 0; for (k = 0; k < batch;) { - if (superTblInfo) { - int retLen = 0; + char data[MAX_DATA_SIZE]; + int retLen = 0; - if (0 == strncasecmp(superTblInfo->dataSource, + if (superTblInfo) { + if (0 == strncasecmp(superTblInfo->dataSource, "sample", strlen("sample"))) { retLen = getRowDataFromSample( - buffer + len, - superTblInfo->maxSqlLen - len, + data, + remainderBufLen, startTime + superTblInfo->timeStampStep * k, superTblInfo, pSamplePos); - } else if (0 == strncasecmp(superTblInfo->dataSource, + } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { int rand_num = rand_tinyint() % 100; if (0 != superTblInfo->disorderRatio @@ -4433,60 +4440,56 @@ static int generateDataTail(char *tableName, int32_t tableSeq, + superTblInfo->timeStampStep * k - taosRandom() % superTblInfo->disorderRange; retLen = generateRowData( - buffer + len, - superTblInfo->maxSqlLen - len, + data, d, superTblInfo); } else { retLen = generateRowData( - buffer + len, - superTblInfo->maxSqlLen - len, + data, startTime + superTblInfo->timeStampStep * k, superTblInfo); - } - } + } + } - if (retLen < 0) { - return -1; - } + if (retLen > remainderBufLen) { + break; + } - len += retLen; - - if (len >= (superTblInfo->maxSqlLen - 256)) { // reserve for overwrite - k++; - break; - } + buffer += sprintf(buffer, " %s", data); + k++; + len += retLen; + remainderBufLen -= retLen; } else { int rand_num = taosRandom() % 100; - char data[MAX_DATA_SIZE]; - char **data_type = g_args.datatype; - int lenOfBinary = g_args.len_of_binary; + char **data_type = g_args.datatype; + int lenOfBinary = g_args.len_of_binary; if ((g_args.disorderRatio != 0) && (rand_num < g_args.disorderRange)) { - int64_t d = startTime + 
             DEFAULT_TIMESTAMP_STEP * k
+        int64_t d = startTime + DEFAULT_TIMESTAMP_STEP * k
+                - taosRandom() % 1000000 + rand_num;
-        len = generateData(data, data_type,
+        retLen = generateData(data, data_type,
                 ncols_per_record, d, lenOfBinary);
       } else {
-        len = generateData(data, data_type,
+        retLen = generateData(data, data_type,
                 ncols_per_record,
                 startTime + DEFAULT_TIMESTAMP_STEP * k,
                 lenOfBinary);
       }
+
+      if (len > remainderBufLen)
+        break;
+
       buffer += sprintf(buffer, " %s", data);
-      if (strlen(buffer) >= (g_args.max_sql_len - 256)) { // too long
-        k++;
-        break;
-      }
+      k++;
+      len += retLen;
+      remainderBufLen -= retLen;
     }
 
     verbosePrint("%s() LN%d len=%d k=%d \nbuffer=%s\n",
             __func__, __LINE__, len, k, buffer);
 
-    k++;
     startFrom ++;
 
     if (startFrom >= insertRows) {
@@ -4570,20 +4573,25 @@ static int generateProgressiveDataBuffer(char *pTblName,
 
   assert(buffer != NULL);
 
-  memset(buffer, 0, superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len);
+  int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
+  int remainderBufLen = maxSqlLen;
+
+  memset(buffer, 0, maxSqlLen);
 
   char *pstr = buffer;
 
   int headLen = generateSQLHead(pTblName, tableSeq, pThreadInfo, superTblInfo,
           buffer);
   pstr += headLen;
+  remainderBufLen -= headLen;
 
   int k;
   int dataLen;
   k = generateDataTail(pTblName, tableSeq, pThreadInfo, superTblInfo,
-          g_args.num_of_RPR, pstr, insertRows, startFrom,
+          g_args.num_of_RPR, pstr, remainderBufLen, insertRows, startFrom,
           startTime, pSamplePos, &dataLen);
+
   return k;
 }
 
@@ -4656,13 +4664,18 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
   int generatedRecPerTbl = 0;
   bool flagSleep = true;
   int sleepTimeTotal = 0;
+
+  int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
+  int remainderBufLen;
+
   while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) {
     if ((flagSleep) && (insert_interval)) {
         st = taosGetTimestampUs();
         flagSleep = false;
     }
     // generate data
-    memset(buffer, 0, superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len);
+    memset(buffer, 0, maxSqlLen);
+    remainderBufLen = maxSqlLen;
 
     char *pstr = buffer;
     int recOfBatch = 0;
@@ -4685,6 +4698,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
             pThreadInfo->threadID, __func__, __LINE__, i, buffer);
 
       pstr += headLen;
+      remainderBufLen -= headLen;
+
       int dataLen = 0;
 
       verbosePrint("[%d] %s() LN%d i=%d batchPerTblTimes=%d batchPerTbl = %d\n",
@@ -4698,13 +4713,20 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
       } else {
         startTime = 1500000000000;
       }
-      generateDataTail(
+      int generated = generateDataTail(
         tableName, tableSeq, pThreadInfo, superTblInfo,
-        batchPerTbl, pstr, insertRows, 0,
+        batchPerTbl, pstr, remainderBufLen, insertRows, 0,
         startTime,
         &(pThreadInfo->samplePos), &dataLen);
+      if (generated < 0) {
+        debugPrint("[%d] %s() LN%d, generated data is %d\n",
+                  pThreadInfo->threadID, __func__, __LINE__, generated);
+        goto free_and_statistics_interlace;
+      }
       pstr += dataLen;
+      remainderBufLen -= dataLen;
+
       recOfBatch += batchPerTbl;
       startTime += batchPerTbl * superTblInfo->timeStampStep;
       pThreadInfo->totalInsertRows += batchPerTbl;
@@ -4796,9 +4818,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
 
 free_and_statistics_interlace:
   tmfree(buffer);
 
-  printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", 
-          pThreadInfo->threadID, 
-          pThreadInfo->totalInsertRows, 
+  printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n",
+          pThreadInfo->threadID,
+          pThreadInfo->totalInsertRows,
           pThreadInfo->totalAffectedRows);
   return NULL;
 }
 
@@ -4929,16 +4951,16 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
 
 free_and_statistics_2:
   tmfree(buffer);
 
-  printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", 
-          pThreadInfo->threadID, 
-          pThreadInfo->totalInsertRows, 
+  printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n",
+          pThreadInfo->threadID,
+          pThreadInfo->totalInsertRows,
           pThreadInfo->totalAffectedRows);
   return NULL;
 }
 
 static void* syncWrite(void *sarg) {
 
-  threadInfo *winfo = (threadInfo *)sarg; 
+  threadInfo *winfo = (threadInfo *)sarg;
   SSuperTable* superTblInfo = winfo->superTblInfo;
 
   int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows;
@@ -4953,7 +4975,7 @@ static void* syncWrite(void *sarg) {
 }
 
 static void callBack(void *param, TAOS_RES *res, int code) {
-  threadInfo* winfo = (threadInfo*)param; 
+  threadInfo* winfo = (threadInfo*)param;
   SSuperTable* superTblInfo = winfo->superTblInfo;
 
   int insert_interval =
@@ -4966,7 +4988,7 @@ static void callBack(void *param, TAOS_RES *res, int code) {
   }
 
   char *buffer = calloc(1, winfo->superTblInfo->maxSqlLen);
-  char *data = calloc(1, MAX_DATA_SIZE);
+  char data[MAX_DATA_SIZE];
   char *pstr = buffer;
   pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix,
           winfo->start_table_from);
@@ -4978,7 +5000,6 @@ static void callBack(void *param, TAOS_RES *res, int code) {
   if (winfo->start_table_from > winfo->end_table_to) {
     tsem_post(&winfo->lock_sem);
     free(buffer);
-    free(data);
     taos_free_result(res);
     return;
   }
@@ -4988,11 +5009,9 @@ static void callBack(void *param, TAOS_RES *res, int code) {
     if (0 != winfo->superTblInfo->disorderRatio
             && rand_num < winfo->superTblInfo->disorderRatio) {
       int64_t d = winfo->lastTs - taosRandom() % 1000000 + rand_num;
-      //generateData(data, datatype, ncols_per_record, d, len_of_binary);
-      generateRowData(data, MAX_DATA_SIZE, d, winfo->superTblInfo);
+      generateRowData(data, d, winfo->superTblInfo);
     } else {
-      //generateData(data, datatype, ncols_per_record, start_time += 1000, len_of_binary);
-      generateRowData(data, MAX_DATA_SIZE, winfo->lastTs += 1000, winfo->superTblInfo);
+      generateRowData(data, winfo->lastTs += 1000, winfo->superTblInfo);
     }
     pstr += sprintf(pstr, "%s", data);
     winfo->counter++;
@@ -5007,7 +5026,6 @@ static void callBack(void *param, TAOS_RES *res, int code) {
   }
   taos_query_a(winfo->taos, buffer, callBack, winfo);
   free(buffer);
-  free(data);
   taos_free_result(res);
 }
 
@@ -5373,7 +5391,7 @@ static void *readTable(void *sarg) {
 }
 
 static void *readMetric(void *sarg) {
-#if 1  
+#if 1
   threadInfo *rinfo = (threadInfo *)sarg;
   TAOS *taos = rinfo->taos;
   char command[BUFFER_SIZE] = "\0";
@@ -5524,7 +5542,7 @@ static int insertTestProcess() {
 
   //int64_t totalInsertRows = 0;
   //int64_t totalAffectedRows = 0;
-  //for (int i = 0; i < g_Dbs.dbCount; i++) {    
+  //for (int i = 0; i < g_Dbs.dbCount; i++) {
   //  for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
   //    totalInsertRows+= g_Dbs.db[i].superTbls[j].totalInsertRows;
   //    totalAffectedRows += g_Dbs.db[i].superTbls[j].totalAffectedRows;
@@ -5921,7 +5939,7 @@ static void *subSubscribeProcess(void *sarg) {
       sprintf(tmpFile, "%s-%d",
               g_queryInfo.superQueryInfo.result[i], winfo->threadID);
     }
-    tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, tmpFile); 
+    tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, tmpFile);
     if (NULL == tsub[i]) {
       taos_close(winfo->taos);
       return NULL;
@@ -6109,7 +6127,7 @@ static int subscribeTestProcess() {
       && (g_queryInfo.superQueryInfo.threadCnt > 0)) {
     pidsOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt *
             sizeof(pthread_t));
-    infosOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * 
+    infosOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt *
             sizeof(threadInfo));
     if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
       errorPrint("%s() LN%d, malloc failed for create threads\n",
@@ -6256,7 +6274,7 @@ static void setParaFromArg(){
     g_Dbs.db[0].superTbls[0].timeStampStep = DEFAULT_TIMESTAMP_STEP;
 
     g_Dbs.db[0].superTbls[0].insertRows = g_args.num_of_DPT;
-    g_Dbs.db[0].superTbls[0].maxSqlLen = TSDB_PAYLOAD_SIZE;
+    g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len;
 
     g_Dbs.db[0].superTbls[0].columnCount = 0;
     for (int i = 0; i < MAX_NUM_DATATYPE; i++) {
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index c3282ce6f7..8e15d1b5ec 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -139,6 +139,18 @@ python3 ./test.py -f import_merge/importInsertThenImport.py
 python3 ./test.py -f import_merge/importCSV.py
 #======================p1-end===============
 #======================p2-start===============
+# tools
+python3 test.py -f tools/taosdumpTest.py
+
+python3 test.py -f tools/taosdemoTest.py
+python3 test.py -f tools/taosdemoTestWithoutMetric.py
+python3 test.py -f tools/taosdemoTestWithJson.py
+python3 test.py -f tools/taosdemoTestLimitOffset.py
+python3 test.py -f tools/taosdemoTestTblAlt.py
+python3 test.py -f tools/taosdemoTestSampleData.py
+python3 test.py -f tools/taosdemoTestInterlace.py
+python3 test.py -f tools/taosdemoTestQuery.py
+
 # update
 python3 ./test.py -f update/allow_update.py
 python3 ./test.py -f update/allow_update-0.py
@@ -247,18 +259,6 @@ python3 test.py -f subscribe/supertable.py
 
 #======================p3-end===============
 #======================p4-start===============
-# tools
-python3 test.py -f tools/taosdumpTest.py
-
-python3 test.py -f tools/taosdemoTest.py
-python3 test.py -f tools/taosdemoTestWithoutMetric.py
-python3 test.py -f tools/taosdemoTestWithJson.py
-python3 test.py -f tools/taosdemoTestLimitOffset.py
-python3 test.py -f tools/taosdemoTest2.py
-python3 test.py -f tools/taosdemoTestSampleData.py
-python3 test.py -f tools/taosdemoTestInterlace.py
-python3 test.py -f tools/taosdemoTestQuery.py
-
 python3 ./test.py -f update/merge_commit_data-0.py
 # wal
 python3 ./test.py -f wal/addOldWalTest.py
diff --git a/tests/pytest/tools/taosdemoTest2.py b/tests/pytest/tools/taosdemoTestTblAlt.py
similarity index 63%
rename from tests/pytest/tools/taosdemoTest2.py
rename to tests/pytest/tools/taosdemoTestTblAlt.py
index 74b05faf8b..bb367817cf 100644
--- a/tests/pytest/tools/taosdemoTest2.py
+++ b/tests/pytest/tools/taosdemoTestTblAlt.py
@@ -29,10 +29,33 @@ class TDTestCase:
         self.numberOfTables = 10
         self.numberOfRecords = 1000000
 
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root) - len("/build/bin")]
+                    break
+        return buildPath
+
     def insertDataAndAlterTable(self, threadID):
+        buildPath = self.getBuildPath()
+        if (buildPath == ""):
+            tdLog.exit("taosd not found!")
+        else:
+            tdLog.info("taosd found in %s" % buildPath)
+        binPath = buildPath + "/build/bin/"
+
         if(threadID == 0):
-            os.system("taosdemo -y -t %d -n %d" %
-                      (self.numberOfTables, self.numberOfRecords))
+            os.system("%staosdemo -y -t %d -n %d" %
+                      (binPath, self.numberOfTables, self.numberOfRecords))
         if(threadID == 1):
             time.sleep(2)
             print("use test")
@@ -47,7 +70,13 @@ class TDTestCase:
 
         # check if all the tables have heen created
         while True:
-            tdSql.query("show tables")
+            try:
+                tdSql.query("show tables")
+            except Exception as e:
+                tdLog.info("show tables test failed")
+                time.sleep(1)
+                continue
+
             rows = tdSql.queryRows
             print("number of tables: %d" % rows)
             if(rows == self.numberOfTables):
@@ -56,16 +85,23 @@ class TDTestCase:
         # check if there are any records in the last created table
         while True:
             print("query started")
-            tdSql.query("select * from test.t9")
+            try:
+                tdSql.query("select * from test.t9")
+            except Exception as e:
+                tdLog.info("select * test failed")
+                time.sleep(2)
+                continue
+
             rows = tdSql.queryRows
             print("number of records: %d" % rows)
            if(rows > 0):
                 break
             time.sleep(1)
 
+        print("alter table test.meters add column col10 int")
         tdSql.execute("alter table test.meters add column col10 int")
-        print("insert into test.t0 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)")
-        tdSql.execute("insert into test.t0 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)")
+        print("insert into test.t9 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)")
+        tdSql.execute("insert into test.t9 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)")
 
     def run(self):
         tdSql.prepare()
@@ -78,6 +114,8 @@ class TDTestCase:
         t1.join()
         t2.join()
 
+        time.sleep(3)
+
         tdSql.query("select count(*) from test.meters")
         tdSql.checkData(0, 0, self.numberOfRecords * self.numberOfTables + 1)
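
The taosdemo change above replaces the after-the-fact `strlen(buffer) >= (g_args.max_sql_len - 256)` check with a running byte budget (`remainderBufLen`) that is checked before each row is appended and debited afterwards. Below is a minimal, self-contained sketch of that pattern; `build_batch()` and its inline row generator are hypothetical stand-ins for `generateDataTail()`/`generateData()`, not the taosdemo functions themselves:

```c
#include <stdio.h>
#include <string.h>

/* Append generated rows into `buf` while an explicit byte budget remains.
 * Returns the number of rows that fit. */
static int build_batch(char *buf, int buf_len, int nrows) {
    int remainder = buf_len;
    int written = 0;

    for (int k = 0; k < nrows; k++) {
        char row[64];
        int row_len = snprintf(row, sizeof(row), " (now+%da, %d)", k, k * 10);

        if (row_len >= remainder)   /* check *before* copying, not after */
            break;

        memcpy(buf + (buf_len - remainder), row, (size_t)row_len + 1);
        remainder -= row_len;
        written++;
    }
    return written;
}

int main(void) {
    char sql[256] = "insert into t0 values";
    int head = (int)strlen(sql);
    int n = build_batch(sql + head, (int)sizeof(sql) - head, 1000);
    printf("%d rows fit:\n%s\n", n, sql);
    return 0;
}
```

The design point the patch is converging on: an oversized row ends the batch cleanly instead of overflowing the SQL buffer, and the caller learns how many rows actually fit.
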
From ac7baae88b67587184692a148fe97edac6121b4b Mon Sep 17 00:00:00 2001
From: liuyq-617
Date: Thu, 1 Apr 2021 10:14:25 +0800
Subject: [PATCH 13/22] fix travis output

---
 Jenkinsfile       | 4 +++-
 tests/test-all.sh | 5 -----
 2 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 433b46067a..bf2454c903 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -42,11 +42,12 @@ def pre_test(){
     killall -9 taosd ||echo "no taosd running"
     killall -9 gdb || echo "no gdb running"
     cd ${WKC}
-    git checkout develop
     git reset --hard HEAD~10 >/dev/null
+    git checkout develop
     git pull >/dev/null
     git fetch origin +refs/pull/${CHANGE_ID}/merge
     git checkout -qf FETCH_HEAD
+    git clean -dfx
     find ${WKC}/tests/pytest -name \'*\'.sql -exec rm -rf {} \\;
     cd ${WK}
     git reset --hard HEAD~10
@@ -55,6 +56,7 @@ def pre_test(){
     cd ${WK}
     export TZ=Asia/Harbin
     date
+    git clean -dfx
     rm -rf ${WK}/debug
     mkdir debug
     cd debug
diff --git a/tests/test-all.sh b/tests/test-all.sh
index 883c1495e9..7bde698d95 100755
--- a/tests/test-all.sh
+++ b/tests/test-all.sh
@@ -25,9 +25,6 @@ function stopTaosd {
 function dohavecore(){
   corefile=`find $corepath -mmin 1`
   core_file=`echo $corefile|cut -d " " -f2`
-  echo "corefile:$core_file"
-  echo "corepath:$corepath"
-  ls -l $corepath
   proc=`echo $corefile|cut -d "_" -f3`
   if [ -n "$corefile" ];then
     echo 'taosd or taos has generated core'
@@ -82,7 +79,6 @@ function runSimCaseOneByOne {
 #        fi
       end_time=`date +%s`
       echo execution time of $case was `expr $end_time - $start_time`s. | tee -a out.log
-      dohavecore 0
     fi
   done < $1
 }
@@ -159,7 +155,6 @@ function runPyCaseOneByOne {
       else
         $line > /dev/null 2>&1
       fi
-      dohavecore 0
     fi
   done < $1
 }
From 58d76f3d26ec60a8ba6790a2f1fed4beb76bd8e5 Mon Sep 17 00:00:00 2001
From: liuyq-617
Date: Thu, 1 Apr 2021 10:41:37 +0800
Subject: [PATCH 14/22] fix

---
 tests/test-all.sh | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/tests/test-all.sh b/tests/test-all.sh
index 7bde698d95..3c8aed7d18 100755
--- a/tests/test-all.sh
+++ b/tests/test-all.sh
@@ -28,23 +28,21 @@ function dohavecore(){
   proc=`echo $corefile|cut -d "_" -f3`
   if [ -n "$corefile" ];then
     echo 'taosd or taos has generated core'
+    rm case.log
     if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]] && [[ $1 == 1 ]]; then
       cd ../../../
       tar -zcPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz debug/build/bin/taosd debug/build/bin/tsim debug/build/lib/libtaos*so*
       if [[ $2 == 1 ]];then
         cp -r sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S"`
-        rm -rf sim/case.log
       else
         cd community
         cp -r sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
-        rm -rf sim/case.log
       fi
     else
       cd ../../
       if [[ $1 == 1 ]];then
         tar -zcPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz debug/build/bin/taosd debug/build/bin/tsim debug/build/lib/libtaos*so*
         cp -r sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
-        rm -rf sim/case.log
       fi
     fi
     if [[ $1 == 1 ]];then
@@ -95,26 +93,25 @@ function runSimCaseOneByOnefq {
       date +%F\ %T | tee -a out.log
       if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then
         echo -n $case
-        ./test.sh -f $case > ../../../sim/case.log 2>&1 && \
+        ./test.sh -f $case > case.log 2>&1 && \
         ( grep -q 'script.*'$case'.*failed.*, err.*lineNum' ../../../sim/tsim/log/taoslog0.0 && echo -e "${RED} failed${NC}" | tee -a out.log || echo -e "${GREEN} success${NC}" | tee -a out.log )|| \
        ( grep -q 'script.*success.*m$' ../../../sim/tsim/log/taoslog0.0 && echo -e "${GREEN} success${NC}" | tee -a out.log ) || \
-        ( echo -e "${RED} failed${NC}" | tee -a out.log && echo '=====================log=====================' && cat ../../../sim/case.log )
+        ( echo -e "${RED} failed${NC}" | tee -a out.log && echo '=====================log=====================' && cat case.log )
       else
         echo -n $case
         ./test.sh -f $case > ../../sim/case.log 2>&1 && \
         ( grep -q 'script.*'$case'.*failed.*, err.*lineNum' ../../sim/tsim/log/taoslog0.0 && echo -e "${RED} failed${NC}" | tee -a out.log || echo -e "${GREEN} success${NC}" | tee -a out.log )|| \
         ( grep -q 'script.*success.*m$' ../../sim/tsim/log/taoslog0.0 && echo -e "${GREEN} success${NC}" | tee -a out.log ) || \
-        ( echo -e "${RED} failed${NC}" | tee -a out.log && echo '=====================log=====================' && cat ../../sim/case.log )
+        ( echo -e "${RED} failed${NC}" | tee -a out.log && echo '=====================log=====================' && cat case.log )
       fi
 
       out_log=`tail -1 out.log `
       if [[ $out_log =~ 'failed' ]];then
+        rm case.log
         if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then
           cp -r ../../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S"`
-          rm -rf ../../../sim/case.log
         else
           cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
-          rm -rf ../../sim/case.log
         fi
         dohavecore $2 1
         if [[ $2 == 1 ]];then
@@ -180,7 +177,7 @@ function runPyCaseOneByOnefq() {
       start_time=`date +%s`
       date +%F\ %T | tee -a pytest-out.log
       echo -n $case
-      $line > ../../sim/case.log 2>&1 && \
+      $line > case.log 2>&1 && \
       echo -e "${GREEN} success${NC}" | tee -a pytest-out.log || \
       echo -e "${RED} failed${NC}" | tee -a pytest-out.log
       end_time=`date +%s`
       out_log=`tail -1 pytest-out.log `
       if [[ $out_log =~ 'failed' ]];then
         cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
         echo '=====================log===================== '
-        cat ../../sim/case.log
-        rm -rf ../../sim/case.log
+        cat case.log
+        rm -rf case.log
         dohavecore $2 2
         if [[ $2 == 1 ]];then
           exit 8
From 73889873fc6786bf12dda6e3daf3837bf525c797 Mon Sep 17 00:00:00 2001
From: liuyq-617
Date: Thu, 1 Apr 2021 10:54:46 +0800
Subject: [PATCH 15/22] git clean

---
 Jenkinsfile | 2 --
 1 file changed, 2 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index bf2454c903..bac0cce33b 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -48,7 +48,6 @@ def pre_test(){
     git fetch origin +refs/pull/${CHANGE_ID}/merge
     git checkout -qf FETCH_HEAD
     git clean -dfx
-    find ${WKC}/tests/pytest -name \'*\'.sql -exec rm -rf {} \\;
     cd ${WK}
     git reset --hard HEAD~10
     git checkout develop
@@ -57,7 +56,6 @@ def pre_test(){
     cd ${WK}
     export TZ=Asia/Harbin
     date
     git clean -dfx
-    rm -rf ${WK}/debug
     mkdir debug
     cd debug
From 3e073149888f54a264d20dae63476b256e1f9419 Mon Sep 17 00:00:00 2001
From: Elias Soong
Date: Thu, 1 Apr 2021 14:17:44 +0800
Subject: [PATCH 16/22] [TD-2518] : timestamp add support for time before year
 1970.

---
 documentation20/cn/12.taos-sql/docs.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index b4fa2b160a..dfda8997c2 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -43,7 +43,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
 
 | | 类型 | Bytes | 说明 |
 | ---- | :-------: | ------ | ------------------------------------------------------------ |
-| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。 |
+| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。(从 2.0.18 版本开始,已经去除了这一时间范围限制) |
 | 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31 用作 NULL |
 | 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用于 NULL |
 | 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
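
The documentation change in PATCH 16 records that, from version 2.0.18 on, timestamps earlier than 1970-01-01 00:00:00.000 (UTC/GMT) are accepted. Internally such a timestamp is nothing special: it is simply a negative offset from the Unix epoch. A small sketch, assuming a platform that provides the non-standard `timegm()`:

```c
#include <stdio.h>
#include <time.h>

int main(void) {
    /* 1969-12-31 00:00:00 UTC, one day before the epoch */
    struct tm t = {0};
    t.tm_year = 69;   /* years since 1900 */
    t.tm_mon  = 11;   /* December */
    t.tm_mday = 31;

    /* timegm() is a BSD/GNU extension; portable code needs a replacement.
     * Dates before 1970 yield negative epoch values. */
    long long ms = (long long)timegm(&t) * 1000LL;
    printf("epoch ms: %lld\n", ms);   /* prints -86400000 */
    return 0;
}
```
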
From 4aebd5bdf6604fb692d43d61d54ca4a6ba702ed7 Mon Sep 17 00:00:00 2001
From: Minglei Jin
Date: Thu, 1 Apr 2021 15:13:54 +0800
Subject: [PATCH 17/22] [TD-3612]: fix vnode write msg double free issue

---
 src/dnode/src/dnodeVWrite.c | 2 +-
 src/vnode/src/vnodeWrite.c  | 6 +++++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/dnode/src/dnodeVWrite.c b/src/dnode/src/dnodeVWrite.c
index 87b31e4604..84fd260d91 100644
--- a/src/dnode/src/dnodeVWrite.c
+++ b/src/dnode/src/dnodeVWrite.c
@@ -205,7 +205,7 @@ static void *dnodeProcessVWriteQueue(void *wparam) {
                pWrite->rpcMsg.ahandle, taosMsg[pWrite->pHead.msgType], qtypeStr[qtype], pWrite->pHead.version);
 
       pWrite->code = vnodeProcessWrite(pVnode, &pWrite->pHead, qtype, pWrite);
-      if (pWrite->code <= 0) pWrite->processedCount = 1;
+      if (pWrite->code <= 0) atomic_add_fetch_32(&pWrite->processedCount, 1);
       if (pWrite->code > 0) pWrite->code = 0;
       if (pWrite->code == 0 && pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT) forceFsync = true;
 
diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c
index e318978a11..92e1ba804b 100644
--- a/src/vnode/src/vnodeWrite.c
+++ b/src/vnode/src/vnodeWrite.c
@@ -96,6 +96,7 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rpara
   // write into WAL
   code = walWrite(pVnode->wal, pHead);
   if (code < 0) {
+    if (syncCode > 0) atomic_sub_fetch_32(&pWrite->processedCount, 1);
    vError("vgId:%d, hver:%" PRIu64 " vver:%" PRIu64 " code:0x%x", pVnode->vgId, pHead->version, pVnode->version, code);
     return code;
   }
@@ -104,7 +105,10 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rpara
 
   // write data locally
   code = (*vnodeProcessWriteMsgFp[pHead->msgType])(pVnode, pHead->cont, pRspRet);
-  if (code < 0) return code;
+  if (code < 0) {
+    if (syncCode > 0) atomic_sub_fetch_32(&pWrite->processedCount, 1);
+    return code;
+  }
 
   return syncCode;
 }
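
The TD-3612 fix above works by treating `processedCount` as an atomically balanced counter: the queue worker adds a count, and each early-error path in `vnodeProcessWrite()` gives one back, so the message is released exactly once regardless of which path fails. A reduced model of that pattern — a sketch, not the vnode code itself:

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    atomic_int refs;      /* plays the role of a processed/reference count */
    char payload[64];
} write_msg_t;

/* Drop one reference; whoever drops the last one frees the message,
 * so an error on one path cannot lead to a second free on another. */
static void msg_release(write_msg_t *msg) {
    if (atomic_fetch_sub(&msg->refs, 1) == 1) {
        free(msg);
    }
}

int main(void) {
    write_msg_t *msg = malloc(sizeof(*msg));
    atomic_init(&msg->refs, 2);   /* one ref for the worker, one for the sync path */
    msg_release(msg);             /* worker done */
    msg_release(msg);             /* sync path done: frees here, exactly once */
    puts("released exactly once");
    return 0;
}
```
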
From af996100a27410d42d047e8cbeb386ab0b68cdc8 Mon Sep 17 00:00:00 2001
From: zyyang <69311263+zyyang-taosdata@users.noreply.github.com>
Date: Thu, 1 Apr 2021 15:15:50 +0800
Subject: [PATCH 18/22] [TD-3417]: remove.sh stop and delete the
 nginxd.service for enterprise edition (#5617)

---
 packaging/tools/remove.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh
index e63889aff1..9241f01efa 100755
--- a/packaging/tools/remove.sh
+++ b/packaging/tools/remove.sh
@@ -120,7 +120,7 @@ function clean_service_on_systemd() {
 
   if [ "$verMode" == "cluster" ]; then
     nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
-    if [ -d ${bin_dir}/web ]; then
+    if [ -d ${install_nginxd_dir} ]; then
       if systemctl is-active --quiet ${nginx_service_name}; then
         echo "Nginx for TDengine is running, stopping it..."
         ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null
From 598b1ce6bffaa97a5abdb15d03b753f52bcaf2d6 Mon Sep 17 00:00:00 2001
From: zyyang <69311263+zyyang-taosdata@users.noreply.github.com>
Date: Thu, 1 Apr 2021 15:16:36 +0800
Subject: [PATCH 19/22] Feature/td 3411 (#5648)

* [TD-3410]feature: adapted to nutz
* change
* change
* change
* change
* [TD-3447]: JDBC-RESTful and JDBC-JNI support setObject
* change
* change
* change
* change
* change
* change
* change
* change
* change
* change
* change
* change
* [TD-3548]: support ParameterMetaData in JDBC-RESTful and JDBC-JNI
* change
* change
* [TD-3446]: JDBC-JNI and JDBC-RESTful do not need invoke Class.forName any more
* change jdbc version
* change
* change
* change
* change
* change
* change
* change
* change
* change version
---
 cmake/install.inc                             |  2 +-
 src/connector/jdbc/CMakeLists.txt             |  2 +-
 src/connector/jdbc/deploy-pom.xml             |  2 +-
 src/connector/jdbc/pom.xml                    | 10 ++-
 .../jdbc/AbstractParameterMetaData.java       |  2 +-
 .../com/taosdata/jdbc/TSDBJNIConnector.java   | 46 ++-----------
 .../taosdata/jdbc/rs/RestfulResultSet.java    |  4 +-
 .../java/com/taosdata/jdbc/utils/OSUtils.java | 17 +++++
 .../jdbc/cases/DriverAutoloadTest.java        |  1 -
 ...ullValueInResultSetForJdbcRestfulTest.java | 64 +++++++++++++++++++
 .../com/taosdata/jdbc/utils/OSUtilsTest.java  | 30 +++++++++
 11 files changed, 133 insertions(+), 47 deletions(-)
 create mode 100644 src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/OSUtils.java
 create mode 100644 src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcRestfulTest.java
 create mode 100644 src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/OSUtilsTest.java

diff --git a/cmake/install.inc b/cmake/install.inc
index 01d3c8a4df..5823ef743e 100755
--- a/cmake/install.inc
+++ b/cmake/install.inc
@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
     #INSTALL(TARGETS taos RUNTIME DESTINATION driver)
     #INSTALL(TARGETS shell RUNTIME DESTINATION .)
     IF (TD_MVN_INSTALLED)
-        INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.24-dist.jar DESTINATION connector/jdbc)
+        INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.25-dist.jar DESTINATION connector/jdbc)
     ENDIF ()
 ELSEIF (TD_DARWIN)
     SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt
index 540dc8eb58..eb158b1f76 100644
--- a/src/connector/jdbc/CMakeLists.txt
+++ b/src/connector/jdbc/CMakeLists.txt
@@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
   ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
       POST_BUILD
       COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
-      COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.24-dist.jar ${LIBRARY_OUTPUT_PATH}
+      COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.25-dist.jar ${LIBRARY_OUTPUT_PATH}
       COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
       COMMENT "build jdbc driver")
   ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml
index 935b3f7e4a..eb8c92575c 100755
--- a/src/connector/jdbc/deploy-pom.xml
+++ b/src/connector/jdbc/deploy-pom.xml
@@ -5,7 +5,7 @@
 
     <groupId>com.taosdata.jdbc</groupId>
     <artifactId>taos-jdbcdriver</artifactId>
-    <version>2.0.24</version>
+    <version>2.0.25</version>
     <packaging>jar</packaging>
 
     <name>JDBCDriver</name>
diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml
index 5d7c89e2d2..1f75754b0c 100755
--- a/src/connector/jdbc/pom.xml
+++ b/src/connector/jdbc/pom.xml
@@ -3,7 +3,7 @@
     <modelVersion>4.0.0</modelVersion>
     <groupId>com.taosdata.jdbc</groupId>
     <artifactId>taos-jdbcdriver</artifactId>
-    <version>2.0.24</version>
+    <version>2.0.25</version>
     <packaging>jar</packaging>
     <name>JDBCDriver</name>
     <url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
@@ -62,6 +62,14 @@
         </dependency>
     </dependencies>
 
     <build>
+        <resources>
+            <resource>
+                <directory>src/main/resources</directory>
+                <includes>
+                    <include>**/*.md</include>
+                </includes>
+            </resource>
+        </resources>
         <plugins>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java
index 999df06fc7..7df7252ae2 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java
@@ -15,7 +15,7 @@ public abstract class AbstractParameterMetaData extends WrapperImpl implements P
 
     @Override
     public int getParameterCount() throws SQLException {
-        return parameters.length;
+        return parameters == null ? 0 : parameters.length;
     }
 
     @Override
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
index 7d3741917c..5e3ffffa4f 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
@@ -29,45 +29,25 @@ public class TSDBJNIConnector {
     private static volatile Boolean isInitialized = false;
 
     private TaosInfo taosInfo = TaosInfo.getInstance();
+    // Connection pointer used in C
+    private long taos = TSDBConstants.JNI_NULL_POINTER;
+    // result set status in current connection
+    private boolean isResultsetClosed = true;
+    private int affectedRows = -1;
 
     static {
         System.loadLibrary("taos");
         System.out.println("java.library.path:" + System.getProperty("java.library.path"));
     }
 
-    /**
-     * Connection pointer used in C
-     */
-    private long taos = TSDBConstants.JNI_NULL_POINTER;
-
-    /**
-     * Result set pointer for the current connection
-     */
-//    private long taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER;
-
-    /**
-     * result set status in current connection
-     */
-    private boolean isResultsetClosed = true;
-    private int affectedRows = -1;
-
-    /**
-     * Whether the connection is closed
-     */
     public boolean isClosed() {
         return this.taos == TSDBConstants.JNI_NULL_POINTER;
     }
 
-    /**
-     * Returns the status of last result set in current connection
-     */
     public boolean isResultsetClosed() {
         return this.isResultsetClosed;
     }
 
-    /**
-     * Initialize static variables in JNI to optimize performance
-     */
     public static void init(String configDir, String locale, String charset, String timezone) throws SQLWarning {
         synchronized (isInitialized) {
             if (!isInitialized) {
@@ -93,11 +73,6 @@ public class TSDBJNIConnector {
 
     public static native String getTsCharset();
 
-    /**
-     * Get connection pointer
-     *
-     * @throws SQLException
-     */
     public boolean connect(String host, int port, String dbName, String user, String password) throws SQLException {
         if (this.taos != TSDBConstants.JNI_NULL_POINTER) {
 //            this.closeConnectionImp(this.taos);
@@ -185,13 +160,6 @@ public class TSDBJNIConnector {
 
     private native String getErrMsgImp(long pSql);
 
-    /**
-     * Get resultset pointer
-     * Each connection should have a single open result set at a time
-     */
-//    public long getResultSet() {
-//        return taosResultSetPointer;
-//    }
     private native long getResultSetImp(long connection, long pSql);
 
     public boolean isUpdateQuery(long pSql) {
@@ -231,6 +199,7 @@ public class TSDBJNIConnector {
 //        }
 //        return resCode;
 //    }
+
     private native int freeResultSetImp(long connection, long result);
 
     /**
@@ -323,8 +292,7 @@ public class TSDBJNIConnector {
      * Validate if a create table sql statement is correct without actually creating that table
      */
     public boolean validateCreateTableSql(String sql) {
-        long connection = taos;
-        int res = validateCreateTableSqlImp(connection, sql.getBytes());
+        int res = validateCreateTableSqlImp(taos, sql.getBytes());
         return res != 0 ? false : true;
     }
 
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java
index 856f5257bf..5c2d4c45b0 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java
@@ -84,9 +84,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
             case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
                 return new Timestamp(row.getDate(colIndex).getTime());
             case TSDBConstants.TSDB_DATA_TYPE_BINARY:
-                return row.getString(colIndex).getBytes();
+                return row.getString(colIndex) == null ? null : row.getString(colIndex).getBytes();
             case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
-                return row.getString(colIndex);
+                return row.getString(colIndex) == null ? null : row.getString(colIndex);
             default:
                 return row.get(colIndex);
         }
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/OSUtils.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/OSUtils.java
new file mode 100644
index 0000000000..a67b4763f9
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/OSUtils.java
@@ -0,0 +1,17 @@
+package com.taosdata.jdbc.utils;
+
+public class OSUtils {
+    private static final String OS = System.getProperty("os.name").toLowerCase();
+
+    public static boolean isWindows() {
+        return OS.indexOf("win") >= 0;
+    }
+
+    public static boolean isMac() {
+        return OS.indexOf("mac") >= 0;
+    }
+
+    public static boolean isLinux() {
+        return OS.indexOf("nux") >= 0;
+    }
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java
index 580b2ac1b5..9826e6ed76 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java
@@ -17,7 +17,6 @@ public class DriverAutoloadTest {
 
     @Test
     public void testRestful() throws SQLException {
-//        Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
         final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
         Connection conn = DriverManager.getConnection(url, properties);
         Assert.assertNotNull(conn);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcRestfulTest.java
new file mode 100644
index 0000000000..f2ac94adc1
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcRestfulTest.java
@@ -0,0 +1,64 @@
+package com.taosdata.jdbc.cases;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.sql.*;
+
+public class NullValueInResultSetForJdbcRestfulTest {
+
+    private static final String host = "127.0.0.1";
+    Connection conn;
+
+    @Test
+    public void test() {
+        try (Statement stmt = conn.createStatement()) {
+            ResultSet rs = stmt.executeQuery("select * from weather");
+            ResultSetMetaData meta = rs.getMetaData();
+            while (rs.next()) {
+                for (int i = 1; i <= meta.getColumnCount(); i++) {
+                    Object value = rs.getObject(i);
+                    System.out.print(meta.getColumnLabel(i) + ": " + value + "\t");
+                }
+                System.out.println();
+            }
+
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Before
+    public void before() throws SQLException {
+        final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
+        conn = DriverManager.getConnection(url);
+        try (Statement stmt = conn.createStatement()) {
+            stmt.execute("drop database if exists test_null");
+            stmt.execute("create database if not exists test_null");
+            stmt.execute("use test_null");
+            stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64))");
+            stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, 1)");
+            stmt.executeUpdate("insert into weather(ts, f2) values(now+2s, 2)");
+            stmt.executeUpdate("insert into weather(ts, f3) values(now+3s, 3.0)");
+            stmt.executeUpdate("insert into weather(ts, f4) values(now+4s, 4.0)");
+            stmt.executeUpdate("insert into weather(ts, f5) values(now+5s, 5)");
+            stmt.executeUpdate("insert into weather(ts, f6) values(now+6s, 6)");
+            stmt.executeUpdate("insert into weather(ts, f7) values(now+7s, true)");
+            stmt.executeUpdate("insert into weather(ts, f8) values(now+8s, 'hello')");
+            stmt.executeUpdate("insert into weather(ts, f9) values(now+9s, '涛思数据')");
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @After
+    public void after() {
+        try {
+            if (conn != null)
+                conn.close();
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/OSUtilsTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/OSUtilsTest.java
new file mode 100644
index 0000000000..fd6c83ad1c
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/OSUtilsTest.java
@@ -0,0 +1,30 @@
+package com.taosdata.jdbc.utils;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class OSUtilsTest {
+
+    private String OS;
+
+    @Test
+    public void inWindows() {
+        Assert.assertEquals(OS.indexOf("win") >= 0, OSUtils.isWindows());
+    }
+
+    @Test
+    public void isMac() {
+        Assert.assertEquals(OS.indexOf("mac") >= 0, OSUtils.isMac());
+    }
+
+    @Test
+    public void isLinux() {
+        Assert.assertEquals(OS.indexOf("nux") >= 0, OSUtils.isLinux());
+    }
+
+    @Before
+    public void before() {
+        OS = System.getProperty("os.name").toLowerCase();
+    }
+}
\ No newline at end of file
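
PATCH 19's `RestfulResultSet` fix guards `row.getString(colIndex)` against NULL columns. The same concern exists in the native C client, where (as far as the 2.x client API goes) a NULL column arrives as a NULL pointer in the fetched row. A hedged sketch against `taos.h` — the helper and connection details here are illustrative, not library code:

```c
#include <stdio.h>
#include <stdint.h>
#include <taos.h>

/* Print a result set, mapping NULL row entries to SQL NULL --
 * the C-client analogue of the JDBC null checks above. */
static void dump_result(TAOS_RES *res) {
    int         ncols  = taos_num_fields(res);
    TAOS_FIELD *fields = taos_fetch_fields(res);
    TAOS_ROW    row;

    while ((row = taos_fetch_row(res)) != NULL) {
        for (int i = 0; i < ncols; i++) {
            if (row[i] == NULL) {                         /* SQL NULL column */
                printf("%s: NULL\t", fields[i].name);
            } else if (fields[i].type == TSDB_DATA_TYPE_INT) {
                printf("%s: %d\t", fields[i].name, *(int32_t *)row[i]);
            } else {
                printf("%s: <value>\t", fields[i].name);
            }
        }
        printf("\n");
    }
}

int main(void) {
    TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "test_null", 0);
    if (taos == NULL) return 1;

    TAOS_RES *res = taos_query(taos, "select * from weather");
    dump_result(res);
    taos_free_result(res);
    taos_close(taos);
    return 0;
}
```
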
From e39762deb1571554e603d26b6a404e8d90fa7ccb Mon Sep 17 00:00:00 2001
From: Shuduo Sang
Date: Thu, 1 Apr 2021 16:06:25 +0800
Subject: [PATCH 20/22] Hotfix/sangshuduo/td 3631 max stb count (#5645)

* [TD-3631] : increase max super table count to 10000

* [TD-3631] : use smaller max stb count due to data structure limit.

Co-authored-by: Shuduo Sang
---
 src/kit/taosdemo/taosdemo.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 9f367b41f8..d76185ca43 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -85,7 +85,7 @@ enum TEST_MODE {
 
 #define MAX_NUM_DATATYPE 10
 #define MAX_DB_COUNT 8
-#define MAX_SUPER_TABLE_COUNT 8
+#define MAX_SUPER_TABLE_COUNT 200
 
 #define MAX_COLUMN_COUNT 1024
 #define MAX_TAG_COUNT 128
@@ -4090,7 +4090,7 @@ static bool getInfoFromJsonFile(char* file) {
   }
 
   bool ret = false;
-  int maxLen = 64000;
+  int maxLen = 6400000;
   char *content = calloc(1, maxLen + 1);
   int len = fread(content, 1, maxLen, fp);
   if (len <= 0) {
From ad1c6534a93602953c7baf53c548b5f1747e7938 Mon Sep 17 00:00:00 2001
From: Elias Soong
Date: Thu, 1 Apr 2021 16:40:04 +0800
Subject: [PATCH 21/22] [TD-3645] : add description about dimension
 microsecond in timestamp.

---
 documentation20/cn/12.taos-sql/docs.md | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index dfda8997c2..58191e0bd8 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -29,17 +29,17 @@ taos> DESCRIBE meters;
 
 ## 支持的数据类型
 
-使用TDengine,最重要的是时间戳。创建并插入记录、查询历史记录的时候,均需要指定时间戳。时间戳有如下规则:
+使用 TDengine,最重要的是时间戳。创建并插入记录、查询历史记录的时候,均需要指定时间戳。时间戳有如下规则:
 
-- 时间格式为```YYYY-MM-DD HH:mm:ss.MS```, 默认时间分辨率为毫秒。比如:```2017-08-12 18:25:58.128```
-- 内部函数now是服务器的当前时间
-- 插入记录时,如果时间戳为now,插入数据时使用服务器当前时间
-- Epoch Time: 时间戳也可以是一个长整数,表示从1970-01-01 08:00:00.000开始的毫秒数
-- 时间可以加减,比如 now-2h,表明查询时刻向前推2个小时(最近2小时)。 数字后面的时间单位可以是 a(毫秒)、s(秒)、 m(分)、h(小时)、d(天)、w(周)。 比如select * from t1 where ts > now-2w and ts <= now-1w, 表示查询两周前整整一周的数据。 在指定降频操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n(自然月) 和 y(自然年)。
+- 时间格式为 ```YYYY-MM-DD HH:mm:ss.MS```,默认时间分辨率为毫秒。比如:```2017-08-12 18:25:58.128```
+- 内部函数 now 是客户端的当前时间
+- 插入记录时,如果时间戳为 now,插入数据时使用提交这条记录的客户端的当前时间
+- Epoch Time:时间戳也可以是一个长整数,表示从 1970-01-01 08:00:00.000 开始的毫秒数
+- 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降频操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n(自然月) 和 y(自然年)。
 
-TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMicrosecond就可支持微秒。
+TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableMicrosecond 就可以支持微秒。
 
-在TDengine中,普通表的数据模型中可使用以下10种数据类型。
+在TDengine中,普通表的数据模型中可使用以下 10 种数据类型。
 
 | | 类型 | Bytes | 说明 |
 | ---- | :-------: | ------ | ------------------------------------------------------------ |
From c91fccf29cded78a19e862aa811356631fead602 Mon Sep 17 00:00:00 2001
From: Shuduo Sang
Date: Thu, 1 Apr 2021 17:49:19 +0800
Subject: [PATCH 22/22] Hotfix/sangshuduo/td 3633 taosdemo segfault (#5649)

* [TD-3633] : fix taosdemo segfault.

* [TD-3633] : fix taosdemo segfault. use snprintf instead of sprintf.

Co-authored-by: Shuduo Sang
---
 src/kit/taosdemo/taosdemo.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index d76185ca43..df1c7bee26 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -4455,7 +4455,7 @@ static int generateDataTail(char *tableName, int32_t tableSeq,
         break;
       }
 
-      buffer += sprintf(buffer, " %s", data);
+      buffer += snprintf(buffer, retLen + 1, "%s", data);
       k++;
       len += retLen;
       remainderBufLen -= retLen;
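
The closing fix swaps an unbounded `sprintf()` for `snprintf()`. One subtlety worth noting: `snprintf()`'s size argument includes the terminating NUL, which is why the patch passes `retLen + 1` to copy exactly `retLen` bytes of generated row data. A tiny demonstration:

```c
#include <stdio.h>
#include <string.h>

int main(void) {
    char dst[8];
    const char *data = "abcdef";          /* retLen == 6 */
    int retLen = (int)strlen(data);

    /* size = retLen + 1 writes all retLen characters plus the NUL;
     * an undersized destination is truncated instead of overflowed
     * the way sprintf() allows. */
    int n = snprintf(dst, (size_t)retLen + 1, "%s", data);

    printf("wrote %d chars: %s\n", n, dst); /* wrote 6 chars: abcdef */
    return 0;
}
```
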