From d4b72613fd7d2a4dd5ee3597bacbb474e85aca34 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Wed, 1 Jun 2022 17:57:30 +0800 Subject: [PATCH 01/46] test: add debug cases --- tests/pytest/insert/insert_drop.py | 49 ++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 tests/pytest/insert/insert_drop.py diff --git a/tests/pytest/insert/insert_drop.py b/tests/pytest/insert/insert_drop.py new file mode 100644 index 0000000000..5ccaa5540b --- /dev/null +++ b/tests/pytest/insert/insert_drop.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +import threading + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def genMultiThreadSeq(self, sql_list): + tlist = list() + for insert_sql in sql_list: + t = threading.Thread(target=tdSql.execute, args=(insert_sql,)) + tlist.append(t) + return tlist + + def multiThreadRun(self, tlist): + for t in tlist: + t.start() + for t in tlist: + t.join() + + def run(self): + tdSql.prepare() + tdSql.execute('create database if not exists test;') + tdSql.execute('create table test.stb (ts timestamp, c11 int, c12 float ) TAGS(t11 int, t12 int );') + tdSql.execute('create table test.tb using test.stb TAGS (1, 1);') + sql_list = list() + for i in range(5): + sql = f'insert into test.tb values (now-{i}m, {i}, {i});' + sql_list.append(sql) + sql_list.append(f'drop database test;') + tlist = self.genMultiThreadSeq(sql_list) + self.multiThreadRun(tlist) + tdSql.query(f'show databases') + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From c45f6c4ba95f8c537d26e34c6e21e650f99c86a4 Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Fri, 15 Jul 2022 19:51:53 +0800 Subject: [PATCH 02/46] test:test case for update_iotdata --- tests/system-test/1-insert/update_data.py | 193 ++++++++++++++++++++++ tests/system-test/fulltest.sh | 2 +- 2 files changed, 194 insertions(+), 1 deletion(-) create mode 100644 tests/system-test/1-insert/update_data.py diff --git a/tests/system-test/1-insert/update_data.py b/tests/system-test/1-insert/update_data.py new file mode 100644 index 0000000000..8204df24af --- /dev/null +++ b/tests/system-test/1-insert/update_data.py @@ -0,0 +1,193 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
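# A minimal standalone sketch of the thread-per-statement pattern that insert_drop.py
# above exercises: one thread per SQL string (five inserts plus a trailing "drop
# database"), all started and then joined, so the drop deliberately races the inserts.
# Here "run_sql" is only a stand-in for tdSql.execute; it is not part of the test framework.
import threading

def run_sql(sql):
    print("executing:", sql)  # placeholder for tdSql.execute(sql)

def gen_multi_thread_seq(sql_list):
    # one worker thread per statement, mirroring genMultiThreadSeq above
    return [threading.Thread(target=run_sql, args=(sql,)) for sql in sql_list]

def multi_thread_run(tlist):
    for t in tlist:
        t.start()
    for t in tlist:
        t.join()

if __name__ == "__main__":
    sqls = [f"insert into test.tb values (now-{i}m, {i}, {i});" for i in range(5)]
    sqls.append("drop database test;")
    multi_thread_run(gen_multi_thread_seq(sqls))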
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import random +import string +from util import constant +from util.log import * +from util.cases import * +from util.sql import * +from util.common import * +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.dbname = 'db_test' + self.ntbname = 'ntb' + self.stbname = 'stb' + self.ts = 1537146000000 + self.binary_length = 20 + self.nchar_length = 20 + self.column_dict = { + 'col1': 'tinyint', + 'col2': 'smallint', + 'col3': 'int', + 'col4': 'bigint', + 'col5': 'tinyint unsigned', + 'col6': 'smallint unsigned', + 'col7': 'int unsigned', + 'col8': 'bigint unsigned', + 'col9': 'float', + 'col10': 'double', + 'col11': 'bool', + 'col12': f'binary({self.binary_length})', + 'col13': f'nchar({self.nchar_length})', + 'col_ts' : 'timestamp' + } + self.tinyint = random.randint(constant.TINYINT_MIN,constant.TINYINT_MAX) + self.smallint = random.randint(constant.SMALLINT_MIN,constant.SMALLINT_MAX) + self.int = random.randint(constant.INT_MIN,constant.INT_MAX) + self.bigint = random.randint(constant.BIGINT_MIN,constant.BIGINT_MAX) + self.untinyint = random.randint(constant.TINYINT_UN_MIN,constant.TINYINT_UN_MAX) + self.unsmallint = random.randint(constant.SMALLINT_UN_MIN,constant.SMALLINT_UN_MAX) + self.unint = random.randint(constant.INT_UN_MIN,constant.INT_MAX) + self.unbigint = random.randint(constant.BIGINT_UN_MIN,constant.BIGINT_UN_MAX) + self.bool = random.randint(0,100)%2 + self.float = random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX) + self.double = random.uniform(constant.DOUBLE_MIN*(1E-300),constant.DOUBLE_MAX*(1E-300)) + self.binary = tdCom.getLongName(self.binary_length) + self.tnchar = tdCom.getLongName(self.nchar_length) + def insert_base_data(self,col_type,tbname,value=None): + if value == None: + if col_type.lower() == 'tinyint': + tdSql.execute(f'insert into {tbname} values({self.ts},{self.tinyint})') + elif col_type.lower() == 'smallint': + tdSql.execute(f'insert into {tbname} values({self.ts},{self.smallint})') + elif col_type.lower() == 'int': + tdSql.execute(f'insert into {tbname} values({self.ts},{self.int})') + elif col_type.lower() == 'bigint': + tdSql.execute(f'insert into {tbname} values({self.ts},{self.bigint})') + elif col_type.lower() == 'tinyint unsigned': + tdSql.execute(f'insert into {tbname} values({self.ts},{self.untinyint})') + elif col_type.lower() == 'smallint unsigned': + tdSql.execute(f'insert into {tbname} values({self.ts},{self.unsmallint})') + elif col_type.lower() == 'int unsigned': + tdSql.execute(f'insert into {tbname} values({self.ts},{self.unint})') + elif col_type.lower() == 'bigint unsigned': + tdSql.execute(f'insert into {tbname} values({self.ts},{self.unbigint})') + elif col_type.lower() == 'bool': + tdSql.execute(f'insert into {tbname} values({self.ts},{self.bool})') + elif col_type.lower() == 'float': + tdSql.execute(f'insert into {tbname} values({self.ts},{self.float})') + elif col_type.lower() == 'double': + tdSql.execute(f'insert into {tbname} values({self.ts},{self.double})') + elif 'binary' in col_type.lower(): + tdSql.execute(f'insert into {tbname} values({self.ts},"{self.binary}")') + elif 'nchar' in col_type.lower(): + tdSql.execute(f'insert into {tbname} 
values({self.ts},"{self.nchar}")') + + def update_and_check_data(self,tbname,col_name,col_type,value): + if 'binary' in col_type.lower() or 'nchar' in col_type.lower(): + tdSql.execute(f'insert into {tbname} values({self.ts},"{value}")') + else: + tdSql.execute(f'insert into {tbname} values({self.ts},{value})') + tdSql.query(f'select {col_name} from {tbname}') + if col_type.lower() == 'float' or col_type.lower() == 'double': + if abs(tdSql.queryResult[0][0] - value) / value <= 0.0001: + tdSql.checkEqual(tdSql.queryResult[0][0],tdSql.queryResult[0][0]) + else: + tdLog.exit(f'{col_name} data check failure') + else: + tdSql.checkEqual(tdSql.queryResult[0][0],value) + + def update_check_ntb(self): + up_tinyint = random.randint(constant.TINYINT_MIN,constant.TINYINT_MAX) + up_smallint = random.randint(constant.SMALLINT_MIN,constant.SMALLINT_MAX) + up_int = random.randint(constant.INT_MIN,constant.INT_MAX) + up_bigint = random.randint(constant.BIGINT_MIN,constant.BIGINT_MAX) + up_untinyint = random.randint(constant.TINYINT_UN_MIN,constant.TINYINT_UN_MAX) + up_unsmallint = random.randint(constant.SMALLINT_UN_MIN,constant.SMALLINT_UN_MAX) + up_unint = random.randint(constant.INT_UN_MIN,constant.INT_MAX) + up_unbigint = random.randint(constant.BIGINT_UN_MIN,constant.BIGINT_UN_MAX) + up_bool = random.randint(0,100)%2 + up_float = random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX) + up_double = random.uniform(constant.DOUBLE_MIN*(1E-300),constant.DOUBLE_MAX*(1E-300)) + binary_length = random.randint(0,self.binary_length) + nchar_length = random.randint(0,self.binary_length) + up_binary = tdCom.getLongName(binary_length) + up_nchar = tdCom.getLongName(nchar_length) + tdSql.execute(f'drop database if exists {self.dbname}') + tdSql.execute(f'create database {self.dbname}') + tdSql.execute(f'use {self.dbname}') + + for col_name,col_type in self.column_dict.items(): + tdSql.execute(f'create table {self.ntbname} (ts timestamp,{col_name} {col_type})') + self.insert_base_data(col_name,self.ntbname) + if col_type.lower() == 'tinyint': + self.update_and_check_data(self.ntbname,col_name,col_type,up_tinyint) + elif col_type.lower() == 'smallint': + self.update_and_check_data(self.ntbname,col_name,col_type,up_smallint) + elif col_type.lower() == 'int': + self.update_and_check_data(self.ntbname,col_name,col_type,up_int) + elif col_type.lower() == 'bigint': + self.update_and_check_data(self.ntbname,col_name,col_type,up_bigint) + elif col_type.lower() == 'tinyint unsigned': + self.update_and_check_data(self.ntbname,col_name,col_type,up_untinyint) + elif col_type.lower() == 'smallint unsigned': + self.update_and_check_data(self.ntbname,col_name,col_type,up_unsmallint) + elif col_type.lower() == 'int unsigned': + self.update_and_check_data(self.ntbname,col_name,col_type,up_unint) + elif col_type.lower() == 'bigint unsigned': + self.update_and_check_data(self.ntbname,col_name,col_type,up_unbigint) + elif col_type.lower() == 'bool': + self.update_and_check_data(self.ntbname,col_name,col_type,up_bool) + elif col_type.lower() == 'float': + self.update_and_check_data(self.ntbname,col_name,col_type,up_float) + elif col_type.lower() == 'double': + self.update_and_check_data(self.ntbname,col_name,col_type,up_double) + elif 'binary' in col_type.lower(): + self.update_and_check_data(self.ntbname,col_name,col_type,up_binary) + elif 'nchar' in col_type.lower(): + self.update_and_check_data(self.ntbname,col_name,col_type,up_nchar) + tdSql.execute(f'drop table {self.ntbname}') + for col_name,col_type in self.column_dict.items(): 
+ tdSql.execute(f'create table {self.ntbname} (ts timestamp,{col_name} {col_type})') + self.insert_base_data(col_name,self.ntbname) + tdSql.execute(f'insert into {self.ntbname} values({self.ts},null)') + tdSql.query(f'select {col_name} from {self.ntbname}') + tdSql.checkEqual(tdSql.queryResult[0][0],None) + tdSql.execute(f'drop table {self.ntbname}') + + + def update_check_error_ntb(self): + tdSql.execute(f'drop database if exists {self.dbname}') + tdSql.execute(f'create database {self.dbname}') + tdSql.execute(f'use {self.dbname}') + binary_length = self.binary_length+1 + for col_name,col_type in self.column_dict.items(): + tdSql.execute(f'create table {self.ntbname} (ts timestamp,{col_name} {col_type})') + self.insert_base_data(col_name,self.ntbname) + if col_type.lower() in ['tinyint','smallint','int','bigint','tinyint unsigned','smallint unsigned','int unsigned','bigint unsigned']: + for error_value in [random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.binary_length),True,False]: + tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') + elif col_type.lower() in ['float','double']: + for error_value in [tdCom.getLongName(self.binary_length),True,False]: + tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') + elif 'binary' in col_type.lower() or 'nchar' in col_type.lower(): + for error_value in [tdCom.getLongName(binary_length)]: + tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') + elif col_type.lower() == 'bool': + for error_value in [tdCom.getLongName(self.binary_length)]: + tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') + tdSql.execute(f'drop table {self.ntbname}') + + def run(self): + self.update_check_ntb() + self.update_check_error_ntb() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 8bafe3c966..6275e2878c 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -28,7 +28,7 @@ python3 ./test.py -f 1-insert/time_range_wise.py python3 ./test.py -f 1-insert/block_wise.py python3 ./test.py -f 1-insert/create_retentions.py python3 ./test.py -f 1-insert/table_param_ttl.py - +python3 ./test.py -f 1-insert/update_data.py python3 ./test.py -f 2-query/between.py python3 ./test.py -f 2-query/distinct.py python3 ./test.py -f 2-query/varchar.py From eeb57d8893173b6abb81ad5a47aaf590d2a344ac Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Sat, 16 Jul 2022 10:07:02 +0800 Subject: [PATCH 03/46] update test case for update --- tests/system-test/1-insert/update_data.py | 61 ++++++++++++++++------- 1 file changed, 44 insertions(+), 17 deletions(-) diff --git a/tests/system-test/1-insert/update_data.py b/tests/system-test/1-insert/update_data.py index 8204df24af..40682a1910 100644 --- a/tests/system-test/1-insert/update_data.py +++ b/tests/system-test/1-insert/update_data.py @@ -26,8 +26,8 @@ class TDTestCase: self.ntbname = 'ntb' self.stbname = 'stb' self.ts = 1537146000000 - self.binary_length = 20 - self.nchar_length = 20 + self.str_length = 20 + self.column_dict = { 'col1': 'tinyint', 'col2': 'smallint', @@ -40,8 +40,8 @@ class TDTestCase: 'col9': 'float', 'col10': 'double', 'col11': 'bool', - 'col12': f'binary({self.binary_length})', - 'col13': f'nchar({self.nchar_length})', + 'col12': f'binary({self.str_length})', 
+ 'col13': f'nchar({self.str_length})', 'col_ts' : 'timestamp' } self.tinyint = random.randint(constant.TINYINT_MIN,constant.TINYINT_MAX) @@ -55,8 +55,8 @@ class TDTestCase: self.bool = random.randint(0,100)%2 self.float = random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX) self.double = random.uniform(constant.DOUBLE_MIN*(1E-300),constant.DOUBLE_MAX*(1E-300)) - self.binary = tdCom.getLongName(self.binary_length) - self.tnchar = tdCom.getLongName(self.nchar_length) + self.binary = tdCom.getLongName(self.str_length) + self.tnchar = tdCom.getLongName(self.str_length) def insert_base_data(self,col_type,tbname,value=None): if value == None: if col_type.lower() == 'tinyint': @@ -112,8 +112,8 @@ class TDTestCase: up_bool = random.randint(0,100)%2 up_float = random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX) up_double = random.uniform(constant.DOUBLE_MIN*(1E-300),constant.DOUBLE_MAX*(1E-300)) - binary_length = random.randint(0,self.binary_length) - nchar_length = random.randint(0,self.binary_length) + binary_length = random.randint(0,self.str_length) + nchar_length = random.randint(0,self.str_length) up_binary = tdCom.getLongName(binary_length) up_nchar = tdCom.getLongName(nchar_length) tdSql.execute(f'drop database if exists {self.dbname}') @@ -163,22 +163,49 @@ class TDTestCase: tdSql.execute(f'drop database if exists {self.dbname}') tdSql.execute(f'create database {self.dbname}') tdSql.execute(f'use {self.dbname}') - binary_length = self.binary_length+1 + str_length = self.str_length+1 for col_name,col_type in self.column_dict.items(): tdSql.execute(f'create table {self.ntbname} (ts timestamp,{col_name} {col_type})') self.insert_base_data(col_name,self.ntbname) - if col_type.lower() in ['tinyint','smallint','int','bigint','tinyint unsigned','smallint unsigned','int unsigned','bigint unsigned']: - for error_value in [random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.binary_length),True,False]: - tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') - elif col_type.lower() in ['float','double']: - for error_value in [tdCom.getLongName(self.binary_length),True,False]: + + + if col_type.lower() == 'double': + for error_value in [tdCom.getLongName(self.str_length),True,False,random.uniform(constant.DOUBLE_MIN*1.01,constant.DOUBLE_MAX*1.01)]: tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') + #!bug TD-17453 + # elif col_type.lower() == 'float': + # for error_value in [tdCom.getLongName(self.str_length),True,False,random.uniform(constant.FLOAT_MIN*1.01,constant.FLOAT_MAX*1.01)]: + # tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') elif 'binary' in col_type.lower() or 'nchar' in col_type.lower(): - for error_value in [tdCom.getLongName(binary_length)]: - tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') + for error_value in [tdCom.getLongName(str_length)]: + tdSql.error(f'insert into {self.ntbname} values({self.ts},"{error_value}")') elif col_type.lower() == 'bool': - for error_value in [tdCom.getLongName(self.binary_length)]: + for error_value in [tdCom.getLongName(self.str_length)]: tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') + elif col_type.lower() == 'tinyint': + for error_value in [constant.TINYINT_MIN-1,constant.TINYINT_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: + tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') + elif col_type.lower() == 
'smallint': + for error_value in [constant.SMALLINT_MIN-1,constant.SMALLINT_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: + tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') + elif col_type.lower() == 'int': + for error_value in [constant.INT_MIN-1,constant.INT_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: + tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') + elif col_type.lower() == 'bigint': + for error_value in [constant.BIGINT_MIN-1,constant.BIGINT_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: + tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') + elif col_type.lower() == 'tinyint unsigned': + for error_value in [constant.TINYINT_UN_MIN-1,constant.TINYINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: + tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') + elif col_type.lower() == 'smallint unsigned': + for error_value in [constant.SMALLINT_UN_MIN-1,constant.SMALLINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: + tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') + elif col_type.lower() == 'int unsigned': + for error_value in [constant.INT_UN_MIN-1,constant.INT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: + tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') + elif col_type.lower() == 'bigint unsigned': + for error_value in [constant.BIGINT_UN_MIN-1,constant.BIGINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: + tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') tdSql.execute(f'drop table {self.ntbname}') def run(self): From a2090052b3a4a60d2d4de71da3a390856df393b0 Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Sat, 16 Jul 2022 10:21:52 +0800 Subject: [PATCH 04/46] update --- tests/system-test/1-insert/update_data.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/tests/system-test/1-insert/update_data.py b/tests/system-test/1-insert/update_data.py index 40682a1910..90b5b456cf 100644 --- a/tests/system-test/1-insert/update_data.py +++ b/tests/system-test/1-insert/update_data.py @@ -167,15 +167,12 @@ class TDTestCase: for col_name,col_type in self.column_dict.items(): tdSql.execute(f'create table {self.ntbname} (ts timestamp,{col_name} {col_type})') self.insert_base_data(col_name,self.ntbname) - - if col_type.lower() == 'double': - for error_value in [tdCom.getLongName(self.str_length),True,False,random.uniform(constant.DOUBLE_MIN*1.01,constant.DOUBLE_MAX*1.01)]: + for error_value in [tdCom.getLongName(self.str_length),True,False,1.1*constant.DOUBLE_MIN,1.1*constant.DOUBLE_MAX]: + tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') + elif col_type.lower() == 'float': + for error_value in [tdCom.getLongName(self.str_length),True,False,10*constant.FLOAT_MIN,1.1*constant.FLOAT_MAX]: tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') - #!bug TD-17453 - # elif col_type.lower() == 'float': - # for error_value in 
[tdCom.getLongName(self.str_length),True,False,random.uniform(constant.FLOAT_MIN*1.01,constant.FLOAT_MAX*1.01)]: - # tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') elif 'binary' in col_type.lower() or 'nchar' in col_type.lower(): for error_value in [tdCom.getLongName(str_length)]: tdSql.error(f'insert into {self.ntbname} values({self.ts},"{error_value}")') From 698baa52ce7eedb09fc1596df84b1a047a19468d Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Sat, 16 Jul 2022 10:22:33 +0800 Subject: [PATCH 05/46] update --- tests/system-test/1-insert/update_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/1-insert/update_data.py b/tests/system-test/1-insert/update_data.py index 90b5b456cf..12b61b4458 100644 --- a/tests/system-test/1-insert/update_data.py +++ b/tests/system-test/1-insert/update_data.py @@ -171,7 +171,7 @@ class TDTestCase: for error_value in [tdCom.getLongName(self.str_length),True,False,1.1*constant.DOUBLE_MIN,1.1*constant.DOUBLE_MAX]: tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') elif col_type.lower() == 'float': - for error_value in [tdCom.getLongName(self.str_length),True,False,10*constant.FLOAT_MIN,1.1*constant.FLOAT_MAX]: + for error_value in [tdCom.getLongName(self.str_length),True,False,1.1*constant.FLOAT_MIN,1.1*constant.FLOAT_MAX]: tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') elif 'binary' in col_type.lower() or 'nchar' in col_type.lower(): for error_value in [tdCom.getLongName(str_length)]: From fb6d4a566c832e23374546ebb039abdc7c07cb88 Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Sat, 16 Jul 2022 14:36:17 +0800 Subject: [PATCH 06/46] update --- tests/system-test/1-insert/update_data.py | 46 +++++++++++++---------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/tests/system-test/1-insert/update_data.py b/tests/system-test/1-insert/update_data.py index 12b61b4458..35b082f726 100644 --- a/tests/system-test/1-insert/update_data.py +++ b/tests/system-test/1-insert/update_data.py @@ -85,12 +85,7 @@ class TDTestCase: tdSql.execute(f'insert into {tbname} values({self.ts},"{self.binary}")') elif 'nchar' in col_type.lower(): tdSql.execute(f'insert into {tbname} values({self.ts},"{self.nchar}")') - - def update_and_check_data(self,tbname,col_name,col_type,value): - if 'binary' in col_type.lower() or 'nchar' in col_type.lower(): - tdSql.execute(f'insert into {tbname} values({self.ts},"{value}")') - else: - tdSql.execute(f'insert into {tbname} values({self.ts},{value})') + def data_check(self,tbname,col_name,col_type,value): tdSql.query(f'select {col_name} from {tbname}') if col_type.lower() == 'float' or col_type.lower() == 'double': if abs(tdSql.queryResult[0][0] - value) / value <= 0.0001: @@ -99,7 +94,15 @@ class TDTestCase: tdLog.exit(f'{col_name} data check failure') else: tdSql.checkEqual(tdSql.queryResult[0][0],value) - + pass + def update_and_check_data(self,tbname,col_name,col_type,value,dbname): + if 'binary' in col_type.lower() or 'nchar' in col_type.lower(): + tdSql.execute(f'insert into {tbname} values({self.ts},"{value}")') + else: + tdSql.execute(f'insert into {tbname} values({self.ts},{value})') + self.data_check(tbname,col_name,col_type,value) + tdSql.execute(f'flush database {dbname}') + self.data_check(tbname,col_name,col_type,value) def update_check_ntb(self): up_tinyint = random.randint(constant.TINYINT_MIN,constant.TINYINT_MAX) up_smallint = random.randint(constant.SMALLINT_MIN,constant.SMALLINT_MAX) @@ -124,31 +127,31 
@@ class TDTestCase: tdSql.execute(f'create table {self.ntbname} (ts timestamp,{col_name} {col_type})') self.insert_base_data(col_name,self.ntbname) if col_type.lower() == 'tinyint': - self.update_and_check_data(self.ntbname,col_name,col_type,up_tinyint) + self.update_and_check_data(self.ntbname,col_name,col_type,up_tinyint,self.dbname) elif col_type.lower() == 'smallint': - self.update_and_check_data(self.ntbname,col_name,col_type,up_smallint) + self.update_and_check_data(self.ntbname,col_name,col_type,up_smallint,self.dbname) elif col_type.lower() == 'int': - self.update_and_check_data(self.ntbname,col_name,col_type,up_int) + self.update_and_check_data(self.ntbname,col_name,col_type,up_int,self.dbname) elif col_type.lower() == 'bigint': - self.update_and_check_data(self.ntbname,col_name,col_type,up_bigint) + self.update_and_check_data(self.ntbname,col_name,col_type,up_bigint,self.dbname) elif col_type.lower() == 'tinyint unsigned': - self.update_and_check_data(self.ntbname,col_name,col_type,up_untinyint) + self.update_and_check_data(self.ntbname,col_name,col_type,up_untinyint,self.dbname) elif col_type.lower() == 'smallint unsigned': - self.update_and_check_data(self.ntbname,col_name,col_type,up_unsmallint) + self.update_and_check_data(self.ntbname,col_name,col_type,up_unsmallint,self.dbname) elif col_type.lower() == 'int unsigned': - self.update_and_check_data(self.ntbname,col_name,col_type,up_unint) + self.update_and_check_data(self.ntbname,col_name,col_type,up_unint,self.dbname) elif col_type.lower() == 'bigint unsigned': - self.update_and_check_data(self.ntbname,col_name,col_type,up_unbigint) + self.update_and_check_data(self.ntbname,col_name,col_type,up_unbigint,self.dbname) elif col_type.lower() == 'bool': - self.update_and_check_data(self.ntbname,col_name,col_type,up_bool) + self.update_and_check_data(self.ntbname,col_name,col_type,up_bool,self.dbname) elif col_type.lower() == 'float': - self.update_and_check_data(self.ntbname,col_name,col_type,up_float) + self.update_and_check_data(self.ntbname,col_name,col_type,up_float,self.dbname) elif col_type.lower() == 'double': - self.update_and_check_data(self.ntbname,col_name,col_type,up_double) + self.update_and_check_data(self.ntbname,col_name,col_type,up_double,self.dbname) elif 'binary' in col_type.lower(): - self.update_and_check_data(self.ntbname,col_name,col_type,up_binary) + self.update_and_check_data(self.ntbname,col_name,col_type,up_binary,self.dbname) elif 'nchar' in col_type.lower(): - self.update_and_check_data(self.ntbname,col_name,col_type,up_nchar) + self.update_and_check_data(self.ntbname,col_name,col_type,up_nchar,self.dbname) tdSql.execute(f'drop table {self.ntbname}') for col_name,col_type in self.column_dict.items(): tdSql.execute(f'create table {self.ntbname} (ts timestamp,{col_name} {col_type})') @@ -156,6 +159,9 @@ class TDTestCase: tdSql.execute(f'insert into {self.ntbname} values({self.ts},null)') tdSql.query(f'select {col_name} from {self.ntbname}') tdSql.checkEqual(tdSql.queryResult[0][0],None) + tdSql.execute(f'flush database {self.dbname}') + tdSql.query(f'select {col_name} from {self.ntbname}') + tdSql.checkEqual(tdSql.queryResult[0][0],None) tdSql.execute(f'drop table {self.ntbname}') From 7615341dffedf532efe1c124a953b1eaec22694d Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Sat, 16 Jul 2022 16:08:46 +0800 Subject: [PATCH 07/46] update --- tests/system-test/1-insert/update_data.py | 218 +++++++++++++--------- 1 file changed, 131 insertions(+), 87 deletions(-) diff --git 
a/tests/system-test/1-insert/update_data.py b/tests/system-test/1-insert/update_data.py index 35b082f726..f6ede9c2f9 100644 --- a/tests/system-test/1-insert/update_data.py +++ b/tests/system-test/1-insert/update_data.py @@ -11,6 +11,7 @@ # -*- coding: utf-8 -*- +from optparse import Values import random import string from util import constant @@ -18,16 +19,18 @@ from util.log import * from util.cases import * from util.sql import * from util.common import * +from util.sqlset import TDSetSql class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor()) + self.setsql = TDSetSql() self.dbname = 'db_test' self.ntbname = 'ntb' self.stbname = 'stb' + self.ctbname = 'ctb' self.ts = 1537146000000 self.str_length = 20 - self.column_dict = { 'col1': 'tinyint', 'col2': 'smallint', @@ -44,19 +47,19 @@ class TDTestCase: 'col13': f'nchar({self.str_length})', 'col_ts' : 'timestamp' } - self.tinyint = random.randint(constant.TINYINT_MIN,constant.TINYINT_MAX) - self.smallint = random.randint(constant.SMALLINT_MIN,constant.SMALLINT_MAX) - self.int = random.randint(constant.INT_MIN,constant.INT_MAX) - self.bigint = random.randint(constant.BIGINT_MIN,constant.BIGINT_MAX) - self.untinyint = random.randint(constant.TINYINT_UN_MIN,constant.TINYINT_UN_MAX) - self.unsmallint = random.randint(constant.SMALLINT_UN_MIN,constant.SMALLINT_UN_MAX) - self.unint = random.randint(constant.INT_UN_MIN,constant.INT_MAX) - self.unbigint = random.randint(constant.BIGINT_UN_MIN,constant.BIGINT_UN_MAX) - self.bool = random.randint(0,100)%2 - self.float = random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX) - self.double = random.uniform(constant.DOUBLE_MIN*(1E-300),constant.DOUBLE_MAX*(1E-300)) - self.binary = tdCom.getLongName(self.str_length) - self.tnchar = tdCom.getLongName(self.str_length) + self.tinyint = None + self.smallint = None + self.int = None + self.bigint = None + self.untinyint = None + self.unsmallint = None + self.unint = None + self.unbigint = None + self.bool = None + self.float = None + self.double = None + self.binary = None + self.tnchar = None def insert_base_data(self,col_type,tbname,value=None): if value == None: if col_type.lower() == 'tinyint': @@ -94,7 +97,6 @@ class TDTestCase: tdLog.exit(f'{col_name} data check failure') else: tdSql.checkEqual(tdSql.queryResult[0][0],value) - pass def update_and_check_data(self,tbname,col_name,col_type,value,dbname): if 'binary' in col_type.lower() or 'nchar' in col_type.lower(): tdSql.execute(f'insert into {tbname} values({self.ts},"{value}")') @@ -102,8 +104,57 @@ class TDTestCase: tdSql.execute(f'insert into {tbname} values({self.ts},{value})') self.data_check(tbname,col_name,col_type,value) tdSql.execute(f'flush database {dbname}') + tdSql.execute('reset query cache') self.data_check(tbname,col_name,col_type,value) - def update_check_ntb(self): + def error_check(self,tbname,column_dict,tb_type=None,stbname=None): + str_length = self.str_length+1 + for col_name,col_type in column_dict.items(): + if tb_type == 'ntb': + tdSql.execute(f'create table {tbname} (ts timestamp,{col_name} {col_type})') + elif tb_type == 'ctb': + tdSql.execute(f'create table {stbname} (ts timestamp,{col_name} {col_type}) tags(t0 int)') + tdSql.execute(f'create table {tbname} using {stbname} tags(1)') + self.insert_base_data(col_name,tbname) + if col_type.lower() == 'double': + for error_value in [tdCom.getLongName(self.str_length),True,False,1.1*constant.DOUBLE_MIN,1.1*constant.DOUBLE_MAX]: + tdSql.error(f'insert into {tbname} 
values({self.ts},{error_value})') + elif col_type.lower() == 'float': + for error_value in [tdCom.getLongName(self.str_length),True,False,1.1*constant.FLOAT_MIN,1.1*constant.FLOAT_MAX]: + tdSql.error(f'insert into {tbname} values({self.ts},{error_value})') + elif 'binary' in col_type.lower() or 'nchar' in col_type.lower(): + for error_value in [tdCom.getLongName(str_length)]: + tdSql.error(f'insert into {tbname} values({self.ts},"{error_value}")') + elif col_type.lower() == 'bool': + for error_value in [tdCom.getLongName(self.str_length)]: + tdSql.error(f'insert into {tbname} values({self.ts},{error_value})') + elif col_type.lower() == 'tinyint': + for error_value in [constant.TINYINT_MIN-1,constant.TINYINT_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: + tdSql.error(f'insert into {tbname} values({self.ts},{error_value})') + elif col_type.lower() == 'smallint': + for error_value in [constant.SMALLINT_MIN-1,constant.SMALLINT_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: + tdSql.error(f'insert into {tbname} values({self.ts},{error_value})') + elif col_type.lower() == 'int': + for error_value in [constant.INT_MIN-1,constant.INT_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: + tdSql.error(f'insert into {tbname} values({self.ts},{error_value})') + elif col_type.lower() == 'bigint': + for error_value in [constant.BIGINT_MIN-1,constant.BIGINT_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: + tdSql.error(f'insert into {tbname} values({self.ts},{error_value})') + elif col_type.lower() == 'tinyint unsigned': + for error_value in [constant.TINYINT_UN_MIN-1,constant.TINYINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: + tdSql.error(f'insert into {tbname} values({self.ts},{error_value})') + elif col_type.lower() == 'smallint unsigned': + for error_value in [constant.SMALLINT_UN_MIN-1,constant.SMALLINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: + tdSql.error(f'insert into {tbname} values({self.ts},{error_value})') + elif col_type.lower() == 'int unsigned': + for error_value in [constant.INT_UN_MIN-1,constant.INT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: + tdSql.error(f'insert into {tbname} values({self.ts},{error_value})') + elif col_type.lower() == 'bigint unsigned': + for error_value in [constant.BIGINT_UN_MIN-1,constant.BIGINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: + tdSql.error(f'insert into {tbname} values({self.ts},{error_value})') + tdSql.execute(f'drop table {tbname}') + if tb_type == 'ctb': + tdSql.execute(f'drop table {stbname}') + def update_data_check(self,tbname,column_dict,dbname,tb_type=None,stbname=None): up_tinyint = random.randint(constant.TINYINT_MIN,constant.TINYINT_MAX) up_smallint = random.randint(constant.SMALLINT_MIN,constant.SMALLINT_MAX) up_int = random.randint(constant.INT_MIN,constant.INT_MAX) @@ -119,102 +170,95 @@ class TDTestCase: nchar_length = random.randint(0,self.str_length) up_binary = tdCom.getLongName(binary_length) up_nchar = tdCom.getLongName(nchar_length) - tdSql.execute(f'drop database if exists {self.dbname}') - tdSql.execute(f'create database 
{self.dbname}') - tdSql.execute(f'use {self.dbname}') - - for col_name,col_type in self.column_dict.items(): - tdSql.execute(f'create table {self.ntbname} (ts timestamp,{col_name} {col_type})') - self.insert_base_data(col_name,self.ntbname) + + for col_name,col_type in column_dict.items(): + if tb_type == 'ntb': + tdSql.execute(f'create table {tbname} (ts timestamp,{col_name} {col_type})') + elif tb_type == 'ctb': + tdSql.execute(f'create table {stbname} (ts timestamp,{col_name} {col_type}) tags(t0 int)') + tdSql.execute(f'create table {tbname} using {stbname} tags(1)') + self.insert_base_data(col_name,tbname) if col_type.lower() == 'tinyint': - self.update_and_check_data(self.ntbname,col_name,col_type,up_tinyint,self.dbname) + self.update_and_check_data(tbname,col_name,col_type,up_tinyint,dbname) elif col_type.lower() == 'smallint': - self.update_and_check_data(self.ntbname,col_name,col_type,up_smallint,self.dbname) + self.update_and_check_data(tbname,col_name,col_type,up_smallint,dbname) elif col_type.lower() == 'int': - self.update_and_check_data(self.ntbname,col_name,col_type,up_int,self.dbname) + self.update_and_check_data(tbname,col_name,col_type,up_int,dbname) elif col_type.lower() == 'bigint': - self.update_and_check_data(self.ntbname,col_name,col_type,up_bigint,self.dbname) + self.update_and_check_data(tbname,col_name,col_type,up_bigint,dbname) elif col_type.lower() == 'tinyint unsigned': - self.update_and_check_data(self.ntbname,col_name,col_type,up_untinyint,self.dbname) + self.update_and_check_data(tbname,col_name,col_type,up_untinyint,dbname) elif col_type.lower() == 'smallint unsigned': - self.update_and_check_data(self.ntbname,col_name,col_type,up_unsmallint,self.dbname) + self.update_and_check_data(tbname,col_name,col_type,up_unsmallint,dbname) elif col_type.lower() == 'int unsigned': - self.update_and_check_data(self.ntbname,col_name,col_type,up_unint,self.dbname) + self.update_and_check_data(tbname,col_name,col_type,up_unint,dbname) elif col_type.lower() == 'bigint unsigned': - self.update_and_check_data(self.ntbname,col_name,col_type,up_unbigint,self.dbname) + self.update_and_check_data(tbname,col_name,col_type,up_unbigint,dbname) elif col_type.lower() == 'bool': - self.update_and_check_data(self.ntbname,col_name,col_type,up_bool,self.dbname) + self.update_and_check_data(tbname,col_name,col_type,up_bool,dbname) elif col_type.lower() == 'float': - self.update_and_check_data(self.ntbname,col_name,col_type,up_float,self.dbname) + self.update_and_check_data(tbname,col_name,col_type,up_float,dbname) elif col_type.lower() == 'double': - self.update_and_check_data(self.ntbname,col_name,col_type,up_double,self.dbname) + self.update_and_check_data(tbname,col_name,col_type,up_double,dbname) elif 'binary' in col_type.lower(): - self.update_and_check_data(self.ntbname,col_name,col_type,up_binary,self.dbname) + self.update_and_check_data(tbname,col_name,col_type,up_binary,dbname) elif 'nchar' in col_type.lower(): - self.update_and_check_data(self.ntbname,col_name,col_type,up_nchar,self.dbname) - tdSql.execute(f'drop table {self.ntbname}') - for col_name,col_type in self.column_dict.items(): - tdSql.execute(f'create table {self.ntbname} (ts timestamp,{col_name} {col_type})') - self.insert_base_data(col_name,self.ntbname) - tdSql.execute(f'insert into {self.ntbname} values({self.ts},null)') - tdSql.query(f'select {col_name} from {self.ntbname}') + self.update_and_check_data(tbname,col_name,col_type,up_nchar,dbname) + tdSql.execute(f'insert into {tbname} values({self.ts},null)') + 
tdSql.query(f'select {col_name} from {tbname}') tdSql.checkEqual(tdSql.queryResult[0][0],None) tdSql.execute(f'flush database {self.dbname}') - tdSql.query(f'select {col_name} from {self.ntbname}') + tdSql.execute('reset query cache') + tdSql.query(f'select {col_name} from {tbname}') tdSql.checkEqual(tdSql.queryResult[0][0],None) - tdSql.execute(f'drop table {self.ntbname}') - - - def update_check_error_ntb(self): + tdSql.execute(f'drop table {tbname}') + if tb_type == 'ctb': + tdSql.execute(f'drop table {stbname}') + def update_check(self): tdSql.execute(f'drop database if exists {self.dbname}') tdSql.execute(f'create database {self.dbname}') tdSql.execute(f'use {self.dbname}') - str_length = self.str_length+1 + self.update_data_check(self.ntbname,self.column_dict,self.dbname,'ntb') for col_name,col_type in self.column_dict.items(): tdSql.execute(f'create table {self.ntbname} (ts timestamp,{col_name} {col_type})') self.insert_base_data(col_name,self.ntbname) - if col_type.lower() == 'double': - for error_value in [tdCom.getLongName(self.str_length),True,False,1.1*constant.DOUBLE_MIN,1.1*constant.DOUBLE_MAX]: - tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') - elif col_type.lower() == 'float': - for error_value in [tdCom.getLongName(self.str_length),True,False,1.1*constant.FLOAT_MIN,1.1*constant.FLOAT_MAX]: - tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') - elif 'binary' in col_type.lower() or 'nchar' in col_type.lower(): - for error_value in [tdCom.getLongName(str_length)]: - tdSql.error(f'insert into {self.ntbname} values({self.ts},"{error_value}")') - elif col_type.lower() == 'bool': - for error_value in [tdCom.getLongName(self.str_length)]: - tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') - elif col_type.lower() == 'tinyint': - for error_value in [constant.TINYINT_MIN-1,constant.TINYINT_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: - tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') - elif col_type.lower() == 'smallint': - for error_value in [constant.SMALLINT_MIN-1,constant.SMALLINT_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: - tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') - elif col_type.lower() == 'int': - for error_value in [constant.INT_MIN-1,constant.INT_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: - tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') - elif col_type.lower() == 'bigint': - for error_value in [constant.BIGINT_MIN-1,constant.BIGINT_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: - tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') - elif col_type.lower() == 'tinyint unsigned': - for error_value in [constant.TINYINT_UN_MIN-1,constant.TINYINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: - tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') - elif col_type.lower() == 'smallint unsigned': - for error_value in [constant.SMALLINT_UN_MIN-1,constant.SMALLINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: - tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') - elif 
col_type.lower() == 'int unsigned': - for error_value in [constant.INT_UN_MIN-1,constant.INT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: - tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') - elif col_type.lower() == 'bigint unsigned': - for error_value in [constant.BIGINT_UN_MIN-1,constant.BIGINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]: - tdSql.error(f'insert into {self.ntbname} values({self.ts},{error_value})') + if 'binary' in col_type.lower(): + up_binary = tdCom.getLongName(self.str_length+1) + tdSql.execute(f'alter table {self.ntbname} modify column {col_name} binary({self.str_length+1})') + self.update_and_check_data(self.ntbname,col_name,col_type,up_binary,self.dbname) + elif 'nchar' in col_type.lower(): + up_nchar = tdCom.getLongName(self.str_length+1) + tdSql.execute(f'alter table {self.ntbname} modify column {col_name} nchar({self.str_length+1})') + self.update_and_check_data(self.ntbname,col_name,col_type,up_nchar,self.dbname) tdSql.execute(f'drop table {self.ntbname}') + + self.update_data_check(self.ctbname,self.column_dict,self.dbname,'ctb',self.stbname) + for col_name,col_type in self.column_dict.items(): + tdSql.execute(f'create table {self.stbname} (ts timestamp,{col_name} {col_type}) tags(t0 int)') + tdSql.execute(f'create table {self.ctbname} using {self.stbname} tags(1)') + self.insert_base_data(col_name,self.ctbname) + if 'binary' in col_type.lower(): + up_binary = tdCom.getLongName(self.str_length+1) + tdSql.execute(f'alter table {self.stbname} modify column {col_name} binary({self.str_length+1})') + self.update_and_check_data(self.ctbname,col_name,col_type,up_binary,self.dbname) + elif 'nchar' in col_type.lower(): + up_nchar = tdCom.getLongName(self.str_length+1) + tdSql.execute(f'alter table {self.stbname} modify column {col_name} nchar({self.str_length+1})') + self.update_and_check_data(self.ctbname,col_name,col_type,up_nchar,self.dbname) + tdSql.execute(f'drop table {self.stbname}') + + + def update_check_error(self): + tdSql.execute(f'drop database if exists {self.dbname}') + tdSql.execute(f'create database {self.dbname}') + tdSql.execute(f'use {self.dbname}') + self.error_check(self.ntbname,self.column_dict,'ntb') + self.error_check(self.ctbname,self.column_dict,'ctb',self.stbname) def run(self): - self.update_check_ntb() - self.update_check_error_ntb() - + self.update_check() + self.update_check_error() + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) From bc3f7303ef6968280c081856d9f3e9fbdae2df88 Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Sat, 16 Jul 2022 16:16:18 +0800 Subject: [PATCH 08/46] update --- tests/system-test/1-insert/update_data.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/system-test/1-insert/update_data.py b/tests/system-test/1-insert/update_data.py index f6ede9c2f9..387fc18774 100644 --- a/tests/system-test/1-insert/update_data.py +++ b/tests/system-test/1-insert/update_data.py @@ -95,6 +95,8 @@ class TDTestCase: tdSql.checkEqual(tdSql.queryResult[0][0],tdSql.queryResult[0][0]) else: tdLog.exit(f'{col_name} data check failure') + elif col_type.lower() == 'timestamp': + tdSql.checkEqual(str(tdSql.queryResult[0][0]),str(datetime.datetime.fromtimestamp(value/1000).strftime("%Y-%m-%d %H:%M:%S.%f"))) else: tdSql.checkEqual(tdSql.queryResult[0][0],value) def update_and_check_data(self,tbname,col_name,col_type,value,dbname): @@ -204,6 
+206,8 @@ class TDTestCase: self.update_and_check_data(tbname,col_name,col_type,up_binary,dbname) elif 'nchar' in col_type.lower(): self.update_and_check_data(tbname,col_name,col_type,up_nchar,dbname) + elif col_type.lower() == 'timestamp': + self.update_and_check_data(tbname,col_name,col_type,self.ts+1,dbname) tdSql.execute(f'insert into {tbname} values({self.ts},null)') tdSql.query(f'select {col_name} from {tbname}') tdSql.checkEqual(tdSql.queryResult[0][0],None) From 17610f98a02f3b70200d3744e60293263ac759be Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Mon, 18 Jul 2022 08:43:30 +0800 Subject: [PATCH 09/46] update --- tests/system-test/1-insert/update_data.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/system-test/1-insert/update_data.py b/tests/system-test/1-insert/update_data.py index 387fc18774..d2151976c8 100644 --- a/tests/system-test/1-insert/update_data.py +++ b/tests/system-test/1-insert/update_data.py @@ -108,6 +108,8 @@ class TDTestCase: tdSql.execute(f'flush database {dbname}') tdSql.execute('reset query cache') self.data_check(tbname,col_name,col_type,value) + for func in ['first','last']: + tdSql.execute(f'select {func}({col_name}) from {tbname}') def error_check(self,tbname,column_dict,tb_type=None,stbname=None): str_length = self.str_length+1 for col_name,col_type in column_dict.items(): From da5e5c52e0c8a709cc8b08430eaf7ab3b68c2778 Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Mon, 18 Jul 2022 08:53:42 +0800 Subject: [PATCH 10/46] add delete_data.py --- tests/system-test/1-insert/delete_data.py | 88 +++++++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 tests/system-test/1-insert/delete_data.py diff --git a/tests/system-test/1-insert/delete_data.py b/tests/system-test/1-insert/delete_data.py new file mode 100644 index 0000000000..6c0c2d24b7 --- /dev/null +++ b/tests/system-test/1-insert/delete_data.py @@ -0,0 +1,88 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
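# A condensed sketch of the update-and-verify flow that update_data.py drives through
# tdSql in the patches above: re-inserting a row at an existing timestamp updates it in
# place, and the value is checked twice, once while it is still in memory and once after
# "flush database" plus "reset query cache" force the read to come from the data files.
# "execute" and "query_scalar" are stand-ins for tdSql.execute and tdSql.query/queryResult;
# the real test additionally allows a small relative error when the column is float/double.
def update_and_check(execute, query_scalar, dbname, tbname, col_name, ts, value):
    execute(f"insert into {tbname} values({ts},{value})")             # same ts -> update
    assert query_scalar(f"select {col_name} from {tbname}") == value  # served from memory
    execute(f"flush database {dbname}")                               # persist rows to disk
    execute("reset query cache")
    assert query_scalar(f"select {col_name} from {tbname}") == value  # re-read after flush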
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import random +import string +from util import constant +from util.log import * +from util.cases import * +from util.sql import * +from util.common import * +from util.sqlset import TDSetSql + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.dbname = 'db_test' + self.setsql = TDSetSql() + self.ntbname = 'ntb' + self.rowNum = 10 + self.tbnum = 20 + self.ts = 1537146000000 + self.binary_str = 'taosdata' + self.nchar_str = '涛思数据' + # first column must be timestamp + self.column_dict = { + 'ts' : 'timestamp', + 'col1': 'tinyint', + + } + self.value = [ + f'1' + ] + + self.param_list = [1,100] + def insert_data(self,column_dict,tbname,row_num): + insert_sql = self.setsql.set_insertsql(column_dict,tbname,self.binary_str,self.nchar_str) + for i in range(row_num): + insert_list = [] + + def delete_all_data(self,tbname): + tdSql.execute(f'delete from {tbname}') + tdSql.query(f'select * from {tbname}') + tdSql.checkRows(0) + + def delete_one_row(self,tbname,column_dict): + for column_name,column_type in column_dict.items(): + if column_type.lower() == 'timestamp': + + tdSql.execute(f'delete from {tbname} where {column_name}={self.ts}') + tdSql.query(f'select * from {tbname}') + tdSql.checkRows(self.rowNum-1) + tdSql.query(f'select * from {tbname} where {column_name}={self.ts}') + tdSql.checkRows(0) + tdSql.execute(f'insert into {tbname} values({self.ts},{self.value_list[0]}') + tdSql.query(f'select * from {tbname} where {column_name}={self.ts}') + tdSql.checkEqual(str(tdSql.queryResult[0][0]),str(datetime.datetime.fromtimestamp(value/1000).strftime("%Y-%m-%d %H:%M:%S.%f"))) + + def delete_data_ntb(self): + tdSql.execute(f'create database if not exists {self.dbname}') + tdSql.execute(f'use {self.dbname}') + tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict)) + for i in range(self.rowNum): + tdSql.execute(f'insert into {self.ntbname} values({self.ts+i},{self.value_list[0]})') + + + # self.delete_all_data(self.ntbname) + + def run(self): + self.delete_data_ntb() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 977d6d5242bc762075d5180b4ad48661c540b8f8 Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Mon, 18 Jul 2022 11:34:24 +0800 Subject: [PATCH 11/46] update test case --- tests/system-test/1-insert/update_data.py | 41 ++++------------------- 1 file changed, 7 insertions(+), 34 deletions(-) diff --git a/tests/system-test/1-insert/update_data.py b/tests/system-test/1-insert/update_data.py index d2151976c8..c2b8e7ab08 100644 --- a/tests/system-test/1-insert/update_data.py +++ b/tests/system-test/1-insert/update_data.py @@ -11,7 +11,6 @@ # -*- coding: utf-8 -*- -from optparse import Values import random import string from util import constant @@ -23,7 +22,7 @@ from util.sqlset import TDSetSql class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor()) + tdSql.init(conn.cursor(),logSql) self.setsql = TDSetSql() self.dbname = 'db_test' self.ntbname = 'ntb' @@ -60,34 +59,8 @@ class 
TDTestCase: self.double = None self.binary = None self.tnchar = None - def insert_base_data(self,col_type,tbname,value=None): - if value == None: - if col_type.lower() == 'tinyint': - tdSql.execute(f'insert into {tbname} values({self.ts},{self.tinyint})') - elif col_type.lower() == 'smallint': - tdSql.execute(f'insert into {tbname} values({self.ts},{self.smallint})') - elif col_type.lower() == 'int': - tdSql.execute(f'insert into {tbname} values({self.ts},{self.int})') - elif col_type.lower() == 'bigint': - tdSql.execute(f'insert into {tbname} values({self.ts},{self.bigint})') - elif col_type.lower() == 'tinyint unsigned': - tdSql.execute(f'insert into {tbname} values({self.ts},{self.untinyint})') - elif col_type.lower() == 'smallint unsigned': - tdSql.execute(f'insert into {tbname} values({self.ts},{self.unsmallint})') - elif col_type.lower() == 'int unsigned': - tdSql.execute(f'insert into {tbname} values({self.ts},{self.unint})') - elif col_type.lower() == 'bigint unsigned': - tdSql.execute(f'insert into {tbname} values({self.ts},{self.unbigint})') - elif col_type.lower() == 'bool': - tdSql.execute(f'insert into {tbname} values({self.ts},{self.bool})') - elif col_type.lower() == 'float': - tdSql.execute(f'insert into {tbname} values({self.ts},{self.float})') - elif col_type.lower() == 'double': - tdSql.execute(f'insert into {tbname} values({self.ts},{self.double})') - elif 'binary' in col_type.lower(): - tdSql.execute(f'insert into {tbname} values({self.ts},"{self.binary}")') - elif 'nchar' in col_type.lower(): - tdSql.execute(f'insert into {tbname} values({self.ts},"{self.nchar}")') + + def data_check(self,tbname,col_name,col_type,value): tdSql.query(f'select {col_name} from {tbname}') if col_type.lower() == 'float' or col_type.lower() == 'double': @@ -118,7 +91,7 @@ class TDTestCase: elif tb_type == 'ctb': tdSql.execute(f'create table {stbname} (ts timestamp,{col_name} {col_type}) tags(t0 int)') tdSql.execute(f'create table {tbname} using {stbname} tags(1)') - self.insert_base_data(col_name,tbname) + tdSql.execute(f'insert into {tbname} values({self.ts},null)') if col_type.lower() == 'double': for error_value in [tdCom.getLongName(self.str_length),True,False,1.1*constant.DOUBLE_MIN,1.1*constant.DOUBLE_MAX]: tdSql.error(f'insert into {tbname} values({self.ts},{error_value})') @@ -181,7 +154,7 @@ class TDTestCase: elif tb_type == 'ctb': tdSql.execute(f'create table {stbname} (ts timestamp,{col_name} {col_type}) tags(t0 int)') tdSql.execute(f'create table {tbname} using {stbname} tags(1)') - self.insert_base_data(col_name,tbname) + tdSql.execute(f'insert into {tbname} values({self.ts},null)') if col_type.lower() == 'tinyint': self.update_and_check_data(tbname,col_name,col_type,up_tinyint,dbname) elif col_type.lower() == 'smallint': @@ -227,7 +200,7 @@ class TDTestCase: self.update_data_check(self.ntbname,self.column_dict,self.dbname,'ntb') for col_name,col_type in self.column_dict.items(): tdSql.execute(f'create table {self.ntbname} (ts timestamp,{col_name} {col_type})') - self.insert_base_data(col_name,self.ntbname) + tdSql.execute(f'insert into {self.ntbname} values({self.ts},null)') if 'binary' in col_type.lower(): up_binary = tdCom.getLongName(self.str_length+1) tdSql.execute(f'alter table {self.ntbname} modify column {col_name} binary({self.str_length+1})') @@ -242,7 +215,7 @@ class TDTestCase: for col_name,col_type in self.column_dict.items(): tdSql.execute(f'create table {self.stbname} (ts timestamp,{col_name} {col_type}) tags(t0 int)') tdSql.execute(f'create table {self.ctbname} 
using {self.stbname} tags(1)') - self.insert_base_data(col_name,self.ctbname) + tdSql.execute(f'insert into {self.ctbname} values({self.ts},null)') if 'binary' in col_type.lower(): up_binary = tdCom.getLongName(self.str_length+1) tdSql.execute(f'alter table {self.stbname} modify column {col_name} binary({self.str_length+1})') From 2dfa77f2ced6bb3232b3579fd8df54e570d65229 Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Mon, 18 Jul 2022 13:57:13 +0800 Subject: [PATCH 12/46] update --- tests/system-test/1-insert/update_data.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/tests/system-test/1-insert/update_data.py b/tests/system-test/1-insert/update_data.py index c2b8e7ab08..27e1559d7e 100644 --- a/tests/system-test/1-insert/update_data.py +++ b/tests/system-test/1-insert/update_data.py @@ -46,20 +46,6 @@ class TDTestCase: 'col13': f'nchar({self.str_length})', 'col_ts' : 'timestamp' } - self.tinyint = None - self.smallint = None - self.int = None - self.bigint = None - self.untinyint = None - self.unsmallint = None - self.unint = None - self.unbigint = None - self.bool = None - self.float = None - self.double = None - self.binary = None - self.tnchar = None - def data_check(self,tbname,col_name,col_type,value): tdSql.query(f'select {col_name} from {tbname}') @@ -147,7 +133,6 @@ class TDTestCase: nchar_length = random.randint(0,self.str_length) up_binary = tdCom.getLongName(binary_length) up_nchar = tdCom.getLongName(nchar_length) - for col_name,col_type in column_dict.items(): if tb_type == 'ntb': tdSql.execute(f'create table {tbname} (ts timestamp,{col_name} {col_type})') @@ -210,7 +195,6 @@ class TDTestCase: tdSql.execute(f'alter table {self.ntbname} modify column {col_name} nchar({self.str_length+1})') self.update_and_check_data(self.ntbname,col_name,col_type,up_nchar,self.dbname) tdSql.execute(f'drop table {self.ntbname}') - self.update_data_check(self.ctbname,self.column_dict,self.dbname,'ctb',self.stbname) for col_name,col_type in self.column_dict.items(): tdSql.execute(f'create table {self.stbname} (ts timestamp,{col_name} {col_type}) tags(t0 int)') @@ -226,7 +210,6 @@ class TDTestCase: self.update_and_check_data(self.ctbname,col_name,col_type,up_nchar,self.dbname) tdSql.execute(f'drop table {self.stbname}') - def update_check_error(self): tdSql.execute(f'drop database if exists {self.dbname}') tdSql.execute(f'create database {self.dbname}') From b83968697d65b72c2885ba94ef47d6baf52971b0 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Mon, 18 Jul 2022 14:02:27 +0800 Subject: [PATCH 13/46] test:update case for cachemodel --- .../0-others/{cachelast.py => cachemodel.py} | 86 +++++++++++-------- tests/system-test/fulltest.sh | 2 +- 2 files changed, 50 insertions(+), 38 deletions(-) rename tests/system-test/0-others/{cachelast.py => cachemodel.py} (62%) diff --git a/tests/system-test/0-others/cachelast.py b/tests/system-test/0-others/cachemodel.py similarity index 62% rename from tests/system-test/0-others/cachelast.py rename to tests/system-test/0-others/cachemodel.py index 2df6b8c9cc..09538e6678 100644 --- a/tests/system-test/0-others/cachelast.py +++ b/tests/system-test/0-others/cachemodel.py @@ -56,6 +56,16 @@ class TDTestCase: } return numbers.get(value, 'other') + def getCacheModelNum(self,str): + numbers = { + "none" : 0, + "last_row" : 1, + "last_value" : 2, + "both" : 3 + + } + return numbers.get(str, 'other') + def prepare_datas(self): for i in range(4): str = self.getCacheModelStr(i) @@ -69,7 +79,7 @@ class TDTestCase: tdSql.execute(" insert into tb1 
values(now , %d, %f)" %(k,k*10) ) tdSql.execute(" insert into tb2 values(now , %d, %f)" %(k,k*10) ) - def check_cache_last_sets(self): + def check_cachemodel_sets(self): # check cache_last value for database @@ -84,52 +94,54 @@ class TDTestCase: # print(cache_last_value) if dbname in ["information_schema" , "performance_schema"]: continue - cache_lasts[dbname]=cache_last_value + cache_lasts[dbname]=self.getCacheModelNum(cache_last_value) # cache_last_set value for k , v in cache_lasts.items(): - if k=="testdb_"+str(v): - tdLog.info(" database %s cache_last value check pass, value is %s "%(k,v) ) + if k=="testdb_"+str(self.getCacheModelStr(v)): + tdLog.info(" database %s cache_last value check pass, value is %s "%(k,self.getCacheModelStr(v)) ) else: - tdLog.exit(" database %s cache_last value check fail, value is %s "%(k,v) ) + tdLog.exit(" database %s cache_last value check fail, value is %s "%(k,self.getCacheModelStr(v)) ) # # check storage layer implementation - # buildPath = self.getBuildPath() - # if (buildPath == ""): - # tdLog.exit("taosd not found!") - # else: - # tdLog.info("taosd found in %s" % buildPath) - # dataPath = buildPath + "/../sim/dnode1/data" - # abs_vnodePath = os.path.abspath(dataPath)+"/vnode/" - # tdLog.info("abs_vnodePath: %s" % abs_vnodePath) + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + dataPath = buildPath + "/../sim/dnode1/data" + abs_vnodePath = os.path.abspath(dataPath)+"/vnode/" + tdLog.info("abs_vnodePath: %s" % abs_vnodePath) - # tdSql.query(" show dnodes ") - # dnode_id = tdSql.queryResult[0][0] + tdSql.query(" show dnodes ") + dnode_id = tdSql.queryResult[0][0] - # for dbname in cache_lasts.keys(): - # print(dbname) - # tdSql.execute(" use %s" % dbname) - # tdSql.query(" show vgroups ") - # vgroups_infos = tdSql.queryResult - # for vgroup_info in vgroups_infos: - # vnode_json = abs_vnodePath + "/vnode" +f"{vgroup_info[0]}/" + "vnode.json" - # vnode_info_of_db = f"cat {vnode_json}" - # vnode_info = subprocess.check_output(vnode_info_of_db, shell=True).decode("utf-8") - # infoDict = json.loads(vnode_info) - # vnode_json_of_dbname = f"{dnode_id}."+ dbname - # config = infoDict["config"] - # if infoDict["config"]["dbname"] == vnode_json_of_dbname: - # if "cachelast" in infoDict["config"]: - # if int(infoDict["config"]["cachelast"]) != cache_lasts[dbname]: - # tdLog.exit("cachelast value is error in vnode.json of vnode%d "%(vgroup_info[0])) - # else: - # tdLog.exit("cachelast not found in vnode.json of vnode%d "%(vgroup_info[0])) + for dbname in cache_lasts.keys(): + # print(dbname) + tdSql.execute(" use %s" % dbname) + tdSql.query(" show vgroups ") + vgroups_infos = tdSql.queryResult + for vgroup_info in vgroups_infos: + vnode_json = abs_vnodePath + "/vnode" +f"{vgroup_info[0]}/" + "vnode.json" + vnode_info_of_db = f"cat {vnode_json}" + vnode_info = subprocess.check_output(vnode_info_of_db, shell=True).decode("utf-8") + infoDict = json.loads(vnode_info) + vnode_json_of_dbname = f"{dnode_id}."+ dbname + config = infoDict["config"] + if infoDict["config"]["dbname"] == vnode_json_of_dbname: + if "cacheLast" in infoDict["config"]: + if int(infoDict["config"]["cacheLast"]) != cache_lasts[dbname]: + tdLog.exit("cachemodel value is error in vnode.json of vnode%d "%(vgroup_info[0])) + else: + tdLog.info("cachemodel value is success in vnode.json of vnode%d "%(vgroup_info[0])) + else: + tdLog.exit("cacheLast not found in vnode.json of vnode%d "%(vgroup_info[0])) - def 
restart_check_cache_last_sets(self): + def restart_check_cachemodel_sets(self): for i in range(3): tdSql.query("show dnodes") @@ -137,14 +149,14 @@ class TDTestCase: tdDnodes.stop(index) tdDnodes.start(index) time.sleep(3) - self.check_cache_last_sets() + self.check_cachemodel_sets() def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring self.illegal_params() self.prepare_datas() - self.check_cache_last_sets() - self.restart_check_cache_last_sets() + self.check_cachemodel_sets() + self.restart_check_cachemodel_sets() def stop(self): tdSql.close() diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index ddbbefe739..d1957b96a5 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -10,7 +10,7 @@ python3 ./test.py -f 0-others/taosdMonitor.py python3 ./test.py -f 0-others/udfTest.py python3 ./test.py -f 0-others/udf_create.py python3 ./test.py -f 0-others/udf_restart_taosd.py -python3 ./test.py -f 0-others/cachelast.py +python3 ./test.py -f 0-others/cachemodel.py python3 ./test.py -f 0-others/udf_cfg1.py python3 ./test.py -f 0-others/udf_cfg2.py From 2512bf26aa6259e35bce5a7fbfea2596deda5cfa Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 18 Jul 2022 14:53:41 +0800 Subject: [PATCH 14/46] test: valgrind case --- tests/script/tsim/valgrind/checkError5.sim | 33 ++++++++++++++++++++-- tests/script/tsim/valgrind/checkError6.sim | 2 +- 2 files changed, 32 insertions(+), 3 deletions(-) diff --git a/tests/script/tsim/valgrind/checkError5.sim b/tests/script/tsim/valgrind/checkError5.sim index 61964d1c42..f3d418cfd1 100644 --- a/tests/script/tsim/valgrind/checkError5.sim +++ b/tests/script/tsim/valgrind/checkError5.sim @@ -26,6 +26,7 @@ print =============== step2: create db sql create database db sql use db sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 float, t3 binary(16)) comment "abd" +sql create table db.c1 using db.stb tags(101, 102, "103") print =============== step3: alter stb sql_error alter table db.stb add column ts int @@ -42,9 +43,8 @@ sql alter table db.stb drop tag c1 sql alter table db.stb drop tag t5 sql alter table db.stb MODIFY tag t3 binary(32) sql alter table db.stb rename tag t1 tx - sql alter table db.stb comment 'abcde' ; -goto _OVER +sql drop table db.stb print =============== step4: alter tb sql create table tb (ts timestamp, a int) @@ -66,6 +66,35 @@ sql alter table tb add column h binary(10) sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from tb sql select * from tb order by ts desc +print =============== step5: alter stb and insert data +sql create table stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 float, t3 binary(16)) comment "abd" +sql show db.stables +sql describe stb +sql_error alter table stb add column ts int + +sql create table db.ctb using db.stb tags(101, 102, "103") +sql insert into db.ctb values(now, 1, "2") +sql show db.tables +sql select * from db.stb +sql select * from tb + +sql alter table stb add column c3 int +sql describe stb +sql select * from db.stb +sql select * from tb +sql insert into db.ctb values(now+1s, 1, 2, 3) +sql select * from db.stb + +sql alter table db.stb add column c4 bigint +sql select * from db.stb +sql insert into db.ctb values(now+2s, 1, 2, 3, 4) + +sql alter table db.stb drop column c1 +sql reset query cache +sql select * from tb +sql insert into db.ctb values(now+3s, 2, 3, 4) +sql select * from db.stb + _OVER: system sh/exec.sh -n dnode1 -s stop -x SIGINT print 
=============== check diff --git a/tests/script/tsim/valgrind/checkError6.sim b/tests/script/tsim/valgrind/checkError6.sim index a9f66647f9..2783e94771 100644 --- a/tests/script/tsim/valgrind/checkError6.sim +++ b/tests/script/tsim/valgrind/checkError6.sim @@ -68,7 +68,7 @@ $null= system_content sh/checkValgrind.sh -n dnode1 print cmd return result ----> [ $system_content ] -if $system_content > 0 then +if $system_content > 3 then return -1 endi From f32eee2f58c4e19994f13a20bf3c294a7021baf6 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 18 Jul 2022 14:54:23 +0800 Subject: [PATCH 15/46] test: valgrind case --- tests/script/jenkins/basic.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 0bdbd644a0..bff6177ad2 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -312,6 +312,7 @@ ./test.sh -f tsim/valgrind/checkError3.sim ./test.sh -f tsim/valgrind/checkError4.sim ./test.sh -f tsim/valgrind/checkError5.sim +./test.sh -f tsim/valgrind/checkError6.sim # --- vnode # unsupport ./test.sh -f tsim/vnode/replica3_basic.sim From 34c87dc428f9d1d3551cab8fb243cb0c30f0c2d5 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 18 Jul 2022 15:33:42 +0800 Subject: [PATCH 16/46] fix: ts already exist when insert with schemaless & modify the interface of tmq meta --- include/client/taos.h | 12 ++++++--- source/client/src/clientSml.c | 25 ++++++++++++----- source/client/src/tmq.c | 43 +++++++++++++----------------- source/libs/parser/src/parInsert.c | 4 ++- 4 files changed, 47 insertions(+), 37 deletions(-) diff --git a/include/client/taos.h b/include/client/taos.h index 690c473986..8afefe4e6b 100644 --- a/include/client/taos.h +++ b/include/client/taos.h @@ -259,13 +259,17 @@ enum tmq_res_t { TMQ_RES_TABLE_META = 2, }; +typedef struct { + void* raw_meta; + uint32_t raw_meta_len; + uint16_t raw_meta_type; +} tmq_raw_data; + typedef enum tmq_res_t tmq_res_t; -typedef struct tmq_raw_data tmq_raw_data; DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res); -DLL_EXPORT tmq_raw_data *tmq_get_raw_meta(TAOS_RES *res); -DLL_EXPORT int32_t taos_write_raw_meta(TAOS *taos, tmq_raw_data *raw_meta); -DLL_EXPORT void tmq_free_raw_meta(tmq_raw_data *rawMeta); +DLL_EXPORT int32_t tmq_get_raw_meta(TAOS_RES *res, tmq_raw_data *raw_meta); +DLL_EXPORT int32_t taos_write_raw_meta(TAOS *taos, tmq_raw_data raw_meta); DLL_EXPORT char *tmq_get_json_meta(TAOS_RES *res); // Returning null means error. Returned result need to be freed by tmq_free_json_meta DLL_EXPORT void tmq_free_json_meta(char* jsonMeta); DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res); diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 83f2cf9307..ab3cacfb49 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -268,7 +268,7 @@ static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSm *actionNeeded = true; } if (*actionNeeded) { - uDebug("SML:0x%" PRIx64 " generate schema action. column name: %s, action: %d", info->id, colField->name, + uDebug("SML:0x%" PRIx64 " generate schema action. 
kv->name: %s, action: %d", info->id, kv->key, action->action); } return 0; @@ -436,6 +436,7 @@ static int32_t smlProcessSchemaAction(SSmlHandle *info, SSchema *schemaField, SH SSchemaAction *action, bool isTag) { int32_t code = TSDB_CODE_SUCCESS; for (int j = 0; j < taosArrayGetSize(cols); ++j) { + if(j == 0 && !isTag) continue; SSmlKv *kv = (SSmlKv *)taosArrayGetP(cols, j); bool actionNeeded = false; code = smlGenerateSchemaAction(schemaField, schemaHash, kv, isTag, action, &actionNeeded, info); @@ -452,18 +453,25 @@ static int32_t smlProcessSchemaAction(SSmlHandle *info, SSchema *schemaField, SH return TSDB_CODE_SUCCESS; } -static int32_t smlCheckMeta(SSchema *schema, int32_t length, SArray *cols) { +static int32_t smlCheckMeta(SSchema *schema, int32_t length, SArray *cols, bool isTag) { SHashObj *hashTmp = taosHashInit(length, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); - for (uint16_t i = 0; i < length; i++) { + int32_t i = 0; + for ( ;i < length; i++) { taosHashPut(hashTmp, schema[i].name, strlen(schema[i].name), &i, SHORT_BYTES); } - for (int32_t i = 0; i < taosArrayGetSize(cols); i++) { + if (isTag){ + i = 0; + } else { + i = 1; + } + for (; i < taosArrayGetSize(cols); i++) { SSmlKv *kv = (SSmlKv *)taosArrayGetP(cols, i); if (taosHashGet(hashTmp, kv->key, kv->keyLen) == NULL) { return -1; } } + taosHashCleanup(hashTmp); return 0; } @@ -523,7 +531,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { } taosHashClear(hashTmp); - for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) { + for (uint16_t i = 1; i < pTableMeta->tableInfo.numOfColumns; i++) { taosHashPut(hashTmp, pTableMeta->schema[i].name, strlen(pTableMeta->schema[i].name), &i, SHORT_BYTES); } code = smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->cols, &schemaAction, false); @@ -551,12 +559,12 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { if (needCheckMeta) { code = smlCheckMeta(&(pTableMeta->schema[pTableMeta->tableInfo.numOfColumns]), pTableMeta->tableInfo.numOfTags, - sTableData->tags); + sTableData->tags, true); if (code != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " check tag failed. super table name %s", info->id, (char *)superTable); goto end; } - code = smlCheckMeta(&(pTableMeta->schema[0]), pTableMeta->tableInfo.numOfColumns, sTableData->cols); + code = smlCheckMeta(&(pTableMeta->schema[0]), pTableMeta->tableInfo.numOfColumns, sTableData->cols, false); if (code != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " check cols failed. 
super table name %s", info->id, (char *)superTable); goto end; @@ -832,6 +840,7 @@ static int64_t smlParseOpenTsdbTime(SSmlHandle *info, const char *data, int32_t static int32_t smlParseTS(SSmlHandle *info, const char *data, int32_t len, SArray *cols) { int64_t ts = 0; if (info->protocol == TSDB_SML_LINE_PROTOCOL) { +// uError("SML:data:%s,len:%d", data, len); ts = smlParseInfluxTime(info, data, len); } else if (info->protocol == TSDB_SML_TELNET_PROTOCOL) { ts = smlParseOpenTsdbTime(info, data, len); @@ -2031,6 +2040,8 @@ static int32_t smlParseJSONString(SSmlHandle *info, cJSON *root, SSmlTableInfo * static int32_t smlParseInfluxLine(SSmlHandle *info, const char *sql) { SSmlLineInfo elements = {0}; + uError("SML:0x%" PRIx64 " smlParseInfluxLine sql:%s, hello", info->id, sql); + int ret = smlParseInfluxString(sql, &elements, &info->msgBuf); if (ret != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " smlParseInfluxLine failed", info->id); diff --git a/source/client/src/tmq.c b/source/client/src/tmq.c index 6ad6413159..46b21086ce 100644 --- a/source/client/src/tmq.c +++ b/source/client/src/tmq.c @@ -106,12 +106,6 @@ struct tmq_t { tsem_t rspSem; }; -struct tmq_raw_data { - void* raw_meta; - int32_t raw_meta_len; - int16_t raw_meta_type; -}; - enum { TMQ_VG_STATUS__IDLE = 0, TMQ_VG_STATUS__WAIT, @@ -1858,16 +1852,15 @@ const char* tmq_get_table_name(TAOS_RES* res) { return NULL; } -tmq_raw_data *tmq_get_raw_meta(TAOS_RES* res) { - if (TD_RES_TMQ_META(res)) { - tmq_raw_data *raw = taosMemoryCalloc(1, sizeof(tmq_raw_data)); +int32_t tmq_get_raw_meta(TAOS_RES* res, tmq_raw_data *raw) { + if (TD_RES_TMQ_META(res) && raw) { SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res; raw->raw_meta = pMetaRspObj->metaRsp.metaRsp; raw->raw_meta_len = pMetaRspObj->metaRsp.metaRspLen; raw->raw_meta_type = pMetaRspObj->metaRsp.resMsgType; - return raw; + return TSDB_CODE_SUCCESS; } - return NULL; + return TSDB_CODE_INVALID_PARA; } static char *buildCreateTableJson(SSchemaWrapper *schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id, int8_t t){ @@ -2875,23 +2868,23 @@ static int32_t taosAlterTable(TAOS *taos, void *meta, int32_t metaLen){ return code; } -int32_t taos_write_raw_meta(TAOS *taos, tmq_raw_data *raw_meta){ - if (!taos || !raw_meta) { +int32_t taos_write_raw_meta(TAOS *taos, tmq_raw_data raw_meta){ + if (!taos) { return TSDB_CODE_INVALID_PARA; } - if(raw_meta->raw_meta_type == TDMT_VND_CREATE_STB) { - return taosCreateStb(taos, raw_meta->raw_meta, raw_meta->raw_meta_len); - }else if(raw_meta->raw_meta_type == TDMT_VND_ALTER_STB){ - return taosCreateStb(taos, raw_meta->raw_meta, raw_meta->raw_meta_len); - }else if(raw_meta->raw_meta_type == TDMT_VND_DROP_STB){ - return taosDropStb(taos, raw_meta->raw_meta, raw_meta->raw_meta_len); - }else if(raw_meta->raw_meta_type == TDMT_VND_CREATE_TABLE){ - return taosCreateTable(taos, raw_meta->raw_meta, raw_meta->raw_meta_len); - }else if(raw_meta->raw_meta_type == TDMT_VND_ALTER_TABLE){ - return taosAlterTable(taos, raw_meta->raw_meta, raw_meta->raw_meta_len); - }else if(raw_meta->raw_meta_type == TDMT_VND_DROP_TABLE){ - return taosDropTable(taos, raw_meta->raw_meta, raw_meta->raw_meta_len); + if(raw_meta.raw_meta_type == TDMT_VND_CREATE_STB) { + return taosCreateStb(taos, raw_meta.raw_meta, raw_meta.raw_meta_len); + }else if(raw_meta.raw_meta_type == TDMT_VND_ALTER_STB){ + return taosCreateStb(taos, raw_meta.raw_meta, raw_meta.raw_meta_len); + }else if(raw_meta.raw_meta_type == TDMT_VND_DROP_STB){ + return taosDropStb(taos, raw_meta.raw_meta, 
raw_meta.raw_meta_len); + }else if(raw_meta.raw_meta_type == TDMT_VND_CREATE_TABLE){ + return taosCreateTable(taos, raw_meta.raw_meta, raw_meta.raw_meta_len); + }else if(raw_meta.raw_meta_type == TDMT_VND_ALTER_TABLE){ + return taosAlterTable(taos, raw_meta.raw_meta, raw_meta.raw_meta_len); + }else if(raw_meta.raw_meta_type == TDMT_VND_DROP_TABLE){ + return taosDropTable(taos, raw_meta.raw_meta, raw_meta.raw_meta_len); } return TSDB_CODE_INVALID_PARA; } diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c index 5ac4476d14..623b26204d 100644 --- a/source/libs/parser/src/parInsert.c +++ b/source/libs/parser/src/parInsert.c @@ -2177,7 +2177,7 @@ static int32_t smlBoundColumnData(SArray* cols, SParsedDataColInfo* pColList, SS SSmlKv* kv = taosArrayGetP(cols, i); SToken sToken = {.n = kv->keyLen, .z = (char*)kv->key}; col_id_t t = lastColIdx + 1; - col_id_t index = findCol(&sToken, t, nCols, pSchema); + col_id_t index = (t == 0 ? 0 : findCol(&sToken, t, nCols, pSchema)); if (index < 0 && t > 0) { index = findCol(&sToken, 0, t, pSchema); isOrdered = false; @@ -2401,7 +2401,9 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols } else { int32_t colLen = kv->length; if (pColSchema->type == TSDB_DATA_TYPE_TIMESTAMP) { +// uError("SML:data before:%ld, precision:%d", kv->i, pTableMeta->tableInfo.precision); kv->i = convertTimePrecision(kv->i, TSDB_TIME_PRECISION_NANO, pTableMeta->tableInfo.precision); +// uError("SML:data after:%ld, precision:%d", kv->i, pTableMeta->tableInfo.precision); } if (IS_VAR_DATA_TYPE(kv->type)) { From be35f08d094532f89790d48b6032a60206ae91ac Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Mon, 18 Jul 2022 15:52:38 +0800 Subject: [PATCH 17/46] update case about bug fix --- tests/system-test/2-query/abs.py | 3 +++ tests/system-test/2-query/last_row.py | 4 +++- tests/system-test/2-query/max_partition.py | 2 ++ tests/system-test/2-query/sample.py | 5 ++++- 4 files changed, 12 insertions(+), 2 deletions(-) diff --git a/tests/system-test/2-query/abs.py b/tests/system-test/2-query/abs.py index 6dc65ce3c2..f0f559c6aa 100644 --- a/tests/system-test/2-query/abs.py +++ b/tests/system-test/2-query/abs.py @@ -554,6 +554,9 @@ class TDTestCase: tdSql.query("select t1 from stb1 where abs(c1+t1)=1") tdSql.checkRows(1) tdSql.checkData(0,0,0) + + tdSql.query("select last_row(c1) from (select ts , c1 ,t1 from stb1)") + tdSql.checkRows(20) tdSql.query( "select abs(c1+t1)*t1 from stb1 where abs(c1)/floor(abs(ceil(t1))) ==1") diff --git a/tests/system-test/2-query/last_row.py b/tests/system-test/2-query/last_row.py index fce3cc7043..6a75d1a1c0 100644 --- a/tests/system-test/2-query/last_row.py +++ b/tests/system-test/2-query/last_row.py @@ -604,7 +604,7 @@ class TDTestCase: tdSql.checkData(0,0,None) tdSql.query("select ts , last_row(c1) ,c1 from (select ts , c1 ,t1 from stb1)") - tdSql.checkData(0,1,None,None) + tdSql.checkData(0,1,None) tdSql.query("select ts , last_row(c1) ,c1 from (select ts , max(c1) c1 ,t1 from stb1 where ts >now -1h and ts ="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s)') tdSql.checkRows(5) + tdSql.query("select unique(c1) from stb1 partition by tbname") + def support_super_table_test(self): diff --git a/tests/system-test/2-query/max_partition.py b/tests/system-test/2-query/max_partition.py index cf0d6639c2..5fa78c08e4 100644 --- a/tests/system-test/2-query/max_partition.py +++ b/tests/system-test/2-query/max_partition.py @@ -162,6 +162,8 @@ class TDTestCase: 
tdSql.query("select tbname , max(c1) from stb partition by tbname interval(10s)") tdSql.checkRows(self.row_nums*2) + tdSql.query("select unique(c1) from stb1 partition by tbname order by tbname") + tdSql.query("select tbname , count(c1) from sub_stb_1 partition by tbname interval(10s)") tdSql.checkData(0,0,'sub_stb_1') tdSql.checkData(0,1,self.row_nums) diff --git a/tests/system-test/2-query/sample.py b/tests/system-test/2-query/sample.py index 84ff5a0056..b6bdd8926e 100644 --- a/tests/system-test/2-query/sample.py +++ b/tests/system-test/2-query/sample.py @@ -870,7 +870,10 @@ class TDTestCase: tdSql.query("select sample(c1 ,1000) from st") tdSql.checkRows(1000) - + # bug need fix + tdSql.query("select c1 ,t1, sample(c1,2) from db.stb1 partition by c1 ") + tdSql.query("select sample(c1,2) from db.stb1 partition by c1 ") + # tdSql.query("select c1 ,ind, sample(c1,2) from sample_db.st partition by c1 ") def run(self): import traceback From e498736de8c9dab96f94a0fd74693e4491fcce42 Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Mon, 18 Jul 2022 16:35:57 +0800 Subject: [PATCH 18/46] update --- tests/system-test/1-insert/delete_data.py | 166 +++++++++++++++++----- 1 file changed, 134 insertions(+), 32 deletions(-) diff --git a/tests/system-test/1-insert/delete_data.py b/tests/system-test/1-insert/delete_data.py index 6c0c2d24b7..a7eba2d97d 100644 --- a/tests/system-test/1-insert/delete_data.py +++ b/tests/system-test/1-insert/delete_data.py @@ -13,6 +13,8 @@ import random import string + +from numpy import logspace from util import constant from util.log import * from util.cases import * @@ -23,7 +25,7 @@ from util.sqlset import TDSetSql class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor()) + tdSql.init(conn.cursor(),logSql) self.dbname = 'db_test' self.setsql = TDSetSql() self.ntbname = 'ntb' @@ -32,49 +34,149 @@ class TDTestCase: self.ts = 1537146000000 self.binary_str = 'taosdata' self.nchar_str = '涛思数据' - # first column must be timestamp + self.str_length = 20 self.column_dict = { - 'ts' : 'timestamp', 'col1': 'tinyint', + 'col2': 'smallint', + 'col3': 'int', + 'col4': 'bigint', + 'col5': 'tinyint unsigned', + 'col6': 'smallint unsigned', + 'col7': 'int unsigned', + 'col8': 'bigint unsigned', + 'col9': 'float', + 'col10': 'double', + 'col11': 'bool', + 'col12': f'binary({self.str_length})', + 'col13': f'nchar({self.str_length})', } - self.value = [ - f'1' - ] + self.tinyint_val = random.randint(constant.TINYINT_MIN,constant.TINYINT_MAX) + self.smallint_val = random.randint(constant.SMALLINT_MIN,constant.SMALLINT_MAX) + self.int_val = random.randint(constant.INT_MIN,constant.INT_MAX) + self.bigint_val = random.randint(constant.BIGINT_MIN,constant.BIGINT_MAX) + self.untingint_val = random.randint(constant.TINYINT_UN_MIN,constant.TINYINT_UN_MAX) + self.unsmallint_val = random.randint(constant.SMALLINT_UN_MIN,constant.SMALLINT_UN_MAX) + self.unint_val = random.randint(constant.INT_UN_MIN,constant.INT_MAX) + self.unbigint_val = random.randint(constant.BIGINT_UN_MIN,constant.BIGINT_UN_MAX) + self.float_val = random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX) + self.double_val = random.uniform(constant.DOUBLE_MIN*(1E-300),constant.DOUBLE_MAX*(1E-300)) + self.bool_val = random.randint(0,100)%2 + self.binary_val = tdCom.getLongName(random.randint(0,self.str_length)) + self.nchar_val = tdCom.getLongName(random.randint(0,self.str_length)) + self.base_data = { + 'tinyint':self.tinyint_val, + 'smallint':self.smallint_val, + 
'int':self.int_val, + 'bigint':self.bigint_val, + 'tinyint unsigned':self.untingint_val, + 'smallint unsigned':self.unsmallint_val, + 'int unsigned':self.unint_val, + 'bigint unsigned':self.unbigint_val, + 'bool':self.bool_val, + 'float':self.float_val, + 'double':self.double_val, + 'binary':self.binary_val, + 'nchar':self.nchar_val + } + def insert_base_data(self,col_type,tbname,rows,base_data): + for i in range(rows): + if col_type.lower() == 'tinyint': + tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["tinyint"]})') + elif col_type.lower() == 'smallint': + tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["smallint"]})') + elif col_type.lower() == 'int': + tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["int"]})') + elif col_type.lower() == 'bigint': + tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bigint"]})') + elif col_type.lower() == 'tinyint unsigned': + tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["tinyint unsigned"]})') + elif col_type.lower() == 'smallint unsigned': + tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["smallint unsigned"]})') + elif col_type.lower() == 'int unsigned': + tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["int unsigned"]})') + elif col_type.lower() == 'bigint unsigned': + tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bigint unsigned"]})') + elif col_type.lower() == 'bool': + tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bool"]})') + elif col_type.lower() == 'float': + tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["float"]})') + elif col_type.lower() == 'double': + tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["double"]})') + elif 'binary' in col_type.lower(): + tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['binary']}")''') + elif 'nchar' in col_type.lower(): + tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['nchar']}")''') - self.param_list = [1,100] - def insert_data(self,column_dict,tbname,row_num): - insert_sql = self.setsql.set_insertsql(column_dict,tbname,self.binary_str,self.nchar_str) - for i in range(row_num): - insert_list = [] - - def delete_all_data(self,tbname): + def delete_all_data(self,tbname,col_type,row_num,base_data,dbname): tdSql.execute(f'delete from {tbname}') + tdSql.execute(f'flush database {dbname}') + tdSql.execute('reset query cache') tdSql.query(f'select * from {tbname}') tdSql.checkRows(0) - - def delete_one_row(self,tbname,column_dict): - for column_name,column_type in column_dict.items(): - if column_type.lower() == 'timestamp': + self.insert_base_data(col_type,tbname,row_num,base_data) + tdSql.execute(f'flush database {dbname}') + tdSql.execute('reset query cache') + tdSql.query(f'select * from {tbname}') + tdSql.checkRows(row_num) + def delete_one_row(self,tbname,column_type,column_name,base_data,dbname): + tdSql.execute(f'delete from {tbname} where ts={self.ts}') + tdSql.execute(f'flush database {dbname}') + tdSql.execute('reset query cache') + tdSql.query(f'select {column_name} from {tbname}') + tdSql.checkRows(self.rowNum-1) + tdSql.query(f'select {column_name} from {tbname} where ts={self.ts}') + tdSql.checkRows(0) + if 'binary' in column_type.lower(): + tdSql.execute(f'''insert into {tbname} values({self.ts},"{base_data['binary']}")''') + elif 'nchar' in column_type.lower(): + tdSql.execute(f'''insert into {tbname} 
values({self.ts},"{base_data['nchar']}")''') + else: + tdSql.execute(f'insert into {tbname} values({self.ts},{base_data[column_type]})') + tdSql.query(f'select {column_name} from {tbname} where ts={self.ts}') + if column_type.lower() == 'float' or column_type.lower() == 'double': + if abs(tdSql.queryResult[0][0] - base_data[column_type]) / base_data[column_type] <= 0.0001: + tdSql.checkEqual(tdSql.queryResult[0][0],tdSql.queryResult[0][0]) + else: + tdLog.exit(f'{column_type} data check failure') + elif 'binary' in column_type.lower(): + tdSql.checkEqual(tdSql.queryResult[0][0],base_data['binary']) + elif 'nchar' in column_type.lower(): + tdSql.checkEqual(tdSql.queryResult[0][0],base_data['nchar']) + else: + tdSql.checkEqual(tdSql.queryResult[0][0],base_data[column_type]) + + def delete_rows(self): + + + pass + def delete_error(self,tbname,column_name,column_type,base_data): + for error_list in ['',f'ts = {self.ts} and',f'ts = {self.ts} or']: + if 'binary' in column_type.lower(): + tdSql.error(f'''delete from {tbname} where {error_list} {column_name} ="{base_data['binary']}"''') + elif 'nchar' in column_type.lower(): + tdSql.error(f'''delete from {tbname} where {error_list} {column_name} ="{base_data['nchar']}"''') + else: + tdSql.error('delete from {tbname} where {error_list} {column_name} = {base_data[column_type]}') - tdSql.execute(f'delete from {tbname} where {column_name}={self.ts}') - tdSql.query(f'select * from {tbname}') - tdSql.checkRows(self.rowNum-1) - tdSql.query(f'select * from {tbname} where {column_name}={self.ts}') - tdSql.checkRows(0) - tdSql.execute(f'insert into {tbname} values({self.ts},{self.value_list[0]}') - tdSql.query(f'select * from {tbname} where {column_name}={self.ts}') - tdSql.checkEqual(str(tdSql.queryResult[0][0]),str(datetime.datetime.fromtimestamp(value/1000).strftime("%Y-%m-%d %H:%M:%S.%f"))) - def delete_data_ntb(self): tdSql.execute(f'create database if not exists {self.dbname}') tdSql.execute(f'use {self.dbname}') - tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict)) - for i in range(self.rowNum): - tdSql.execute(f'insert into {self.ntbname} values({self.ts+i},{self.value_list[0]})') - - - # self.delete_all_data(self.ntbname) + for col_name,col_type in self.column_dict.items(): + tdSql.execute(f'create table {self.ntbname} (ts timestamp,{col_name} {col_type})') + self.insert_base_data(col_type,self.ntbname,self.rowNum,self.base_data) + self.delete_one_row(self.ntbname,col_type,col_name,self.base_data,self.dbname) + self.delete_all_data(self.ntbname,col_type,self.rowNum,self.base_data,self.dbname) + self.delete_error(self.ntbname,col_name,col_type,self.base_data) + for i in range(self.rowNum): + tdSql.execute(f'delete from {self.ntbname} where ts>{self.ts+i}') + tdSql.execute(f'flush database {self.dbname}') + tdSql.execute('reset query cache') + tdSql.query(f'select {col_name} from {self.ntbname}') + tdSql.checkRows(i+1) + self.insert_base_data(col_type,self.ntbname,self.rowNum,self.base_data) + + tdSql.execute(f'drop table {self.ntbname}') def run(self): self.delete_data_ntb() From 1d30ff6190b710ad4b01ab74d5ea5d49260967ba Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Mon, 18 Jul 2022 16:36:13 +0800 Subject: [PATCH 19/46] update case for bug fix --- tests/system-test/2-query/csum.py | 4 +-- tests/system-test/2-query/max_partition.py | 35 +++++++++++++++++++++- 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/tests/system-test/2-query/csum.py b/tests/system-test/2-query/csum.py index 
425597a919..91869cb012 100644 --- a/tests/system-test/2-query/csum.py +++ b/tests/system-test/2-query/csum.py @@ -435,8 +435,8 @@ class TDTestCase: tdSql.checkRows(40) # # bug need fix - # tdSql.query("select csum(st1+c1) from stb1 partition by tbname slimit 1 ") - # tdSql.checkRows(4) + tdSql.query("select csum(st1+c1) from stb1 partition by tbname slimit 1 ") + tdSql.checkRows(4) # tdSql.error("select csum(st1+c1) from stb1 partition by tbname limit 1 ") diff --git a/tests/system-test/2-query/max_partition.py b/tests/system-test/2-query/max_partition.py index 5fa78c08e4..0a7214ec75 100644 --- a/tests/system-test/2-query/max_partition.py +++ b/tests/system-test/2-query/max_partition.py @@ -162,12 +162,45 @@ class TDTestCase: tdSql.query("select tbname , max(c1) from stb partition by tbname interval(10s)") tdSql.checkRows(self.row_nums*2) - tdSql.query("select unique(c1) from stb1 partition by tbname order by tbname") + tdSql.query("select unique(c1) from stb partition by tbname order by tbname") tdSql.query("select tbname , count(c1) from sub_stb_1 partition by tbname interval(10s)") tdSql.checkData(0,0,'sub_stb_1') tdSql.checkData(0,1,self.row_nums) + tdSql.query("select c1 , mavg(c1 ,2 ) from stb partition by c1") + tdSql.checkRows(72) + + tdSql.query("select c1 , diff(c1 , 0) from stb partition by c1") + tdSql.checkRows(72) + + tdSql.query("select c1 , csum(c1) from stb partition by c1") + tdSql.checkRows(80) + + tdSql.query("select c1 , sample(c1,2) from stb partition by c1 order by c1") + tdSql.checkRows(21) + # bug need fix + # tdSql.checkData(0,1,None) + + tdSql.query("select c1 , twa(c1) from stb partition by c1 order by c1") + tdSql.checkRows(11) + tdSql.checkData(0,1,0.000000000) + + tdSql.query("select c1 , irate(c1) from stb partition by c1 order by c1") + tdSql.checkRows(11) + tdSql.checkData(0,1,None) + + tdSql.query("select c1 , DERIVATIVE(c1,2,1) from stb partition by c1 order by c1") + tdSql.checkRows(72) + # bug need fix + # tdSql.checkData(0,1,None) + + + + + + + # bug need fix # tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 0 ") # tdSql.checkRows(5) From cbfc1688f40794e1c8931f257acdeb229c2726cb Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Mon, 18 Jul 2022 16:38:57 +0800 Subject: [PATCH 20/46] update case --- tests/system-test/2-query/abs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/system-test/2-query/abs.py b/tests/system-test/2-query/abs.py index f0f559c6aa..f924cb7c3d 100644 --- a/tests/system-test/2-query/abs.py +++ b/tests/system-test/2-query/abs.py @@ -555,8 +555,8 @@ class TDTestCase: tdSql.checkRows(1) tdSql.checkData(0,0,0) - tdSql.query("select last_row(c1) from (select ts , c1 ,t1 from stb1)") - tdSql.checkRows(20) + tdSql.query("select abs(c1) from (select ts , c1 ,t1 from stb1)") + tdSql.checkRows(25) tdSql.query( "select abs(c1+t1)*t1 from stb1 where abs(c1)/floor(abs(ceil(t1))) ==1") From 7542fa0b62f1d047082c07211c79d3bc8b8e8a03 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Mon, 18 Jul 2022 16:42:54 +0800 Subject: [PATCH 21/46] fix(tmq): topic name should be lower unless quoted --- source/client/src/tmq.c | 6 +++++- source/dnode/vnode/src/tq/tqPush.c | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/source/client/src/tmq.c b/source/client/src/tmq.c index 5b845fd455..e73888d9ba 100644 --- a/source/client/src/tmq.c +++ b/source/client/src/tmq.c @@ -363,7 +363,11 @@ tmq_list_t* tmq_list_new() { int32_t tmq_list_append(tmq_list_t* list, 
const char* src) { SArray* container = &list->container; - char* topic = strdup(src); + if (src == NULL || src[0] == 0) return -1; + char* topic = strdup(src); + if (topic[0] != '`') { + strtolower(topic, src); + } if (taosArrayPush(container, &topic) == NULL) return -1; return 0; } diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c index 4b2160434f..c929c84203 100644 --- a/source/dnode/vnode/src/tq/tqPush.c +++ b/source/dnode/vnode/src/tq/tqPush.c @@ -243,7 +243,7 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) void* data = taosMemoryMalloc(msgLen); if (data == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; - qError("failed to copy data for stream since out of memory"); + tqError("failed to copy data for stream since out of memory"); return -1; } memcpy(data, msg, msgLen); From 7187c87d35284db7943bb81ddccc1120fcf75e30 Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Mon, 18 Jul 2022 16:55:24 +0800 Subject: [PATCH 22/46] refactor(sync): add restart heartbeat timer --- source/libs/sync/inc/syncInt.h | 1 + source/libs/sync/src/syncMain.c | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h index 0cbd7d36b2..3a30cf801e 100644 --- a/source/libs/sync/inc/syncInt.h +++ b/source/libs/sync/inc/syncInt.h @@ -192,6 +192,7 @@ int32_t syncNodeRestartElectTimer(SSyncNode* pSyncNode, int32_t ms); int32_t syncNodeResetElectTimer(SSyncNode* pSyncNode); int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode); int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode); +int32_t syncNodeRestartHeartbeatTimer(SSyncNode* pSyncNode); // utils -------------- int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRpcMsg* pMsg); diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index bb7454ea6f..ddb2b9355e 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -1298,6 +1298,12 @@ int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode) { return ret; } +int32_t syncNodeRestartHeartbeatTimer(SSyncNode* pSyncNode) { + syncNodeStopHeartbeatTimer(pSyncNode); + syncNodeStartHeartbeatTimer(pSyncNode); + return 0; +} + // utils -------------- int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRpcMsg* pMsg) { SEpSet epSet; From f57ad234dcc8cd02c403c5bd6d2fd7819efade14 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 18 Jul 2022 16:59:03 +0800 Subject: [PATCH 23/46] fix(query): fix function first/last do not support constant input TD-17400 --- source/libs/function/src/builtins.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 489a11da39..87afac70eb 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -1422,14 +1422,6 @@ static int32_t translateIrate(SFunctionNode* pFunc, char* pErrBuf, int32_t len) } static int32_t translateFirstLast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - for (int32_t i = 0; i < numOfParams; ++i) { - SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i); - if (QUERY_NODE_VALUE == nodeType(pParamNode)) { - return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); - } - } - pFunc->node.resType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType; return TSDB_CODE_SUCCESS; } From ccdda33ff8012cd593f733846b669f0c389757e7 Mon Sep 17 
00:00:00 2001 From: wangmm0220 Date: Mon, 18 Jul 2022 17:17:51 +0800 Subject: [PATCH 24/46] fix:modify the interface of tmq meta --- examples/c/tmq.c | 6 +++--- tests/test/c/tmqSim.c | 46 +++++++++++++++++++++---------------------- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/examples/c/tmq.c b/examples/c/tmq.c index 38c3eba647..7e9a24ff1f 100644 --- a/examples/c/tmq.c +++ b/examples/c/tmq.c @@ -28,8 +28,9 @@ static void msg_process(TAOS_RES* msg) { printf("db: %s\n", tmq_get_db_name(msg)); printf("vg: %d\n", tmq_get_vgroup_id(msg)); if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) { - tmq_raw_data* raw = tmq_get_raw_meta(msg); - if (raw) { + tmq_raw_data raw = {0}; + int32_t code = tmq_get_raw_meta(msg, &raw); + if (code == 0) { TAOS* pConn = taos_connect("192.168.1.86", "root", "taosdata", NULL, 0); if (pConn == NULL) { return; @@ -53,7 +54,6 @@ static void msg_process(TAOS_RES* msg) { printf("write raw data: %s\n", tmq_err2str(ret)); taos_close(pConn); } - tmq_free_raw_meta(raw); char* result = tmq_get_json_meta(msg); if (result) { printf("meta result: %s\n", result); diff --git a/tests/test/c/tmqSim.c b/tests/test/c/tmqSim.c index 5459e3f159..70b8c1257e 100644 --- a/tests/test/c/tmqSim.c +++ b/tests/test/c/tmqSim.c @@ -45,10 +45,10 @@ typedef enum { NOTIFY_CMD_ID_BUTT, } NOTIFY_CMD_ID; -typedef enum enumQUERY_TYPE { - NO_INSERT_TYPE, - INSERT_TYPE, - QUERY_TYPE_BUT +typedef enum enumQUERY_TYPE { + NO_INSERT_TYPE, + INSERT_TYPE, + QUERY_TYPE_BUT } QUERY_TYPE; typedef struct { @@ -597,9 +597,10 @@ static int32_t meta_msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIn tmq_get_topic_name(msg), vgroupId); { - tmq_raw_data *raw = tmq_get_raw_meta(msg); + tmq_raw_data raw = {0}; + int32_t code = tmq_get_raw_meta(msg, &raw); - if(raw){ + if(code == TSDB_CODE_SUCCESS){ TAOS_RES* pRes = taos_query(pInfo->taos, "use metadb"); if (taos_errno(pRes) != 0) { pError("error when use metadb, reason:%s\n", taos_errstr(pRes)); @@ -609,10 +610,9 @@ static int32_t meta_msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIn exit(-1); } taos_free_result(pRes); - taosFprintfFile(g_fp, "raw:%p\n", raw); + taosFprintfFile(g_fp, "raw:%p\n", &raw); - int32_t ret = taos_write_raw_meta(pInfo->taos, raw); - taosMemoryFree(raw); + taos_write_raw_meta(pInfo->taos, raw); } char* result = tmq_get_json_meta(msg); @@ -1169,23 +1169,23 @@ void* ombConsumeThreadFunc(void* param) { static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type) { - TAOS_RES *res = taos_query(taos, command); - int32_t code = taos_errno(res); - - if (code != 0) { + TAOS_RES *res = taos_query(taos, command); + int32_t code = taos_errno(res); + + if (code != 0) { pPrint("%s Failed to execute <%s>, reason: %s %s", GREEN, command, taos_errstr(res), NC); taos_free_result(res); return -1; - } - - if (INSERT_TYPE == type) { - int affectedRows = taos_affected_rows(res); - taos_free_result(res); - return affectedRows; - } - - taos_free_result(res); - return 0; + } + + if (INSERT_TYPE == type) { + int affectedRows = taos_affected_rows(res); + taos_free_result(res); + return affectedRows; + } + + taos_free_result(res); + return 0; } void* ombProduceThreadFunc(void* param) { From 1ab22f9d9eb4eb3d35be6c7aaf2f5015bc96dd5c Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 18 Jul 2022 17:29:40 +0800 Subject: [PATCH 25/46] fix(query): fix taosd crashed when using first(col+null) TD-17381 --- source/common/src/tdatablock.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index d9ac84ec06..eefac77e52 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -228,7 +228,7 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, uint32_t numOfRow1, ui uint32_t finalNumOfRows = numOfRow1 + numOfRow2; if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { // Handle the bitmap - if (finalNumOfRows > *capacity || numOfRow1 == 0) { + if (finalNumOfRows > *capacity || (numOfRow1 == 0 && pColumnInfoData->info.bytes != 0)) { char* p = taosMemoryRealloc(pColumnInfoData->varmeta.offset, sizeof(int32_t) * (numOfRow1 + numOfRow2)); if (p == NULL) { return TSDB_CODE_OUT_OF_MEMORY; @@ -262,7 +262,7 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, uint32_t numOfRow1, ui memcpy(pColumnInfoData->pData + oldLen, pSource->pData, len); pColumnInfoData->varmeta.length = len + oldLen; } else { - if (finalNumOfRows > *capacity || numOfRow1 == 0) { + if (finalNumOfRows > *capacity || (numOfRow1 == 0 && pColumnInfoData->info.bytes != 0)) { ASSERT(finalNumOfRows * pColumnInfoData->info.bytes); char* tmp = taosMemoryRealloc(pColumnInfoData->pData, finalNumOfRows * pColumnInfoData->info.bytes); if (tmp == NULL) { From 1d3648fc9bc6eeb389ecccca7067cd1da2593f6c Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 18 Jul 2022 17:31:11 +0800 Subject: [PATCH 26/46] fix(tdb): make txn write when insert subdb into main db --- source/libs/tdb/src/db/tdbBtree.c | 5 +---- source/libs/tdb/src/db/tdbPager.c | 22 ---------------------- 2 files changed, 1 insertion(+), 26 deletions(-) diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c index cca451ceb7..6e8fd32641 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ b/source/libs/tdb/src/db/tdbBtree.c @@ -103,10 +103,9 @@ int tdbBtreeOpen(int keyLen, int valLen, SPager *pPager, char const *tbname, SPg // if pgno == 0 fetch new btree root leaf page if (pgno == 0) { // fetch page & insert into main db - // allocate a new child page SPage *pPage; TXN txn; - tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, 0); + tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED); pPager->inTran = 1; @@ -118,8 +117,6 @@ int tdbBtreeOpen(int keyLen, int valLen, SPager *pPager, char const *tbname, SPg return -1; } - // TODO: Need to zero the page - ret = tdbPagerWrite(pPager, pPage); if (ret < 0) { return -1; diff --git a/source/libs/tdb/src/db/tdbPager.c b/source/libs/tdb/src/db/tdbPager.c index e7765ed667..d9a44ba570 100644 --- a/source/libs/tdb/src/db/tdbPager.c +++ b/source/libs/tdb/src/db/tdbPager.c @@ -473,12 +473,6 @@ int tdbPagerRestore(SPager *pPager, SBTree *pBt) { return -1; } - TXN txn; - tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED); - SBtreeInitPageArg iArg; - iArg.pBt = pBt; - iArg.flags = 0; - for (int pgIndex = 0; pgIndex < journalSize; ++pgIndex) { // read pgno & the page from journal SPgno pgno; @@ -494,20 +488,6 @@ int tdbPagerRestore(SPager *pPager, SBTree *pBt) { return -1; } - /* - ret = tdbPagerFetchPage(pPager, &pgno, &pPage, tdbBtreeInitPage, &iArg, &txn); - if (ret < 0) { - return -1; - } - - // write the page to db - ret = tdbPagerWritePageToDB(pPager, pPage); - if (ret < 0) { - return -1; - } - - tdbPCacheRelease(pPager->pCache, pPage, &txn); - */ i64 offset = pPager->pageSize * (pgno - 1); if (tdbOsLSeek(pPager->fd, offset, SEEK_SET) < 0) { ASSERT(0); @@ -523,8 +503,6 @@ int tdbPagerRestore(SPager 
*pPager, SBTree *pBt) { tdbOsFSync(pPager->fd); - tdbTxnClose(&txn); - tdbOsFree(pageBuf); tdbOsClose(jfd); From a10a689176899335557ea2088c8440a9b77dd83e Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Mon, 18 Jul 2022 17:50:22 +0800 Subject: [PATCH 27/46] test: enh tools of crash_gen about generate start time of records --- tests/pytest/crash_gen/crash_gen_main.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 08155f656b..8f0bfdd481 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -30,6 +30,7 @@ import argparse import sys import os import io +import datetime import signal import traceback import requests @@ -1107,14 +1108,20 @@ class Database: # TODO: fix the error as result of above: "tsdb timestamp is out of range" @classmethod def setupLastTick(cls): - t1 = datetime.datetime(2020, 6, 1) + # start time will be auto generated , start at 10 years ago local time + local_time = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-16] + local_epoch_time = [int(i) for i in local_time.split("-")] + #local_epoch_time will be such as : [2022, 7, 18] + + t1 = datetime.datetime(local_epoch_time[0]-5, local_epoch_time[1], local_epoch_time[2]) t2 = datetime.datetime.now() # maybe a very large number, takes 69 years to exceed Python int range elSec = int(t2.timestamp() - t1.timestamp()) elSec2 = (elSec % (8 * 12 * 30 * 24 * 60 * 60 / 500)) * \ 500 # a number representing seconds within 10 years # print("elSec = {}".format(elSec)) - t3 = datetime.datetime(2012, 1, 1) # default "keep" is 10 years + + t3 = datetime.datetime(local_epoch_time[0]-10, local_epoch_time[1], local_epoch_time[2]) # default "keep" is 10 years t4 = datetime.datetime.fromtimestamp( t3.timestamp() + elSec2) # see explanation above Logging.debug("Setting up TICKS to start from: {}".format(t4)) From d328b271a5d93e49f1d33d2e4e636b05436cd84f Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Mon, 18 Jul 2022 17:52:00 +0800 Subject: [PATCH 28/46] os: tsim read line error --- tests/tsim/src/simParse.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/tsim/src/simParse.c b/tests/tsim/src/simParse.c index 5b6dda4dae..b9f7610be8 100644 --- a/tests/tsim/src/simParse.c +++ b/tests/tsim/src/simParse.c @@ -175,7 +175,7 @@ SScript *simBuildScriptObj(char *fileName) { SScript *simParseScript(char *fileName) { TdFilePtr pFile; int32_t tokenLen, lineNum = 0; - char *buffer = NULL, name[128], *token, *rest; + char buffer[10*1024], name[128], *token, *rest; SCommand *pCmd; SScript *script; @@ -195,7 +195,7 @@ SScript *simParseScript(char *fileName) { simResetParser(); while (!taosEOFFile(pFile)) { - if (taosGetLineFile(pFile, &buffer) == -1) continue; + if (taosGetsFile(pFile, sizeof(buffer) - 1, buffer) == -1) continue; lineNum++; int32_t cmdlen = (int32_t)strlen(buffer); @@ -240,7 +240,6 @@ SScript *simParseScript(char *fileName) { return NULL; } } - if(buffer != NULL) taosMemoryFree(buffer); taosCloseFile(&pFile); script = simBuildScriptObj(fileName); From 993f5fe9ecad278fe35b09f38e331557eb1a8275 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 18 Jul 2022 18:37:58 +0800 Subject: [PATCH 29/46] fix: let certain type of write messages to be executed sequentially --- source/dnode/mgmt/mgmt_vnode/src/vmWorker.c | 10 +++++ source/dnode/vnode/src/vnd/vnodeSvr.c | 3 +- source/dnode/vnode/src/vnd/vnodeSync.c | 49 
++++++++------------- tests/script/tsim/db/alter_replica_31.sim | 9 ++++ 4 files changed, 39 insertions(+), 32 deletions(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index 93df7f8ab2..5ad13e383a 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -179,6 +179,16 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp } else { dGTrace("vgId:%d, msg:%p put into vnode-write queue", pVnode->vgId, pMsg); taosWriteQitem(pVnode->pWriteQ, pMsg); +#if 0 // tests for batch writes + if (pMsg->msgType == TDMT_VND_CREATE_TABLE) { + SRpcMsg *pDup = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM); + memcpy(pDup, pMsg, sizeof(SRpcMsg)); + pDup->pCont = rpcMallocCont(pMsg->contLen); + memcpy(pDup->pCont, pMsg->pCont, pMsg->contLen); + pDup->info.handle = NULL; + taosWriteQitem(pVnode->pWriteQ, pDup); + } +#endif } break; case SYNC_QUEUE: diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index e5322f1bd3..5f730bcfa5 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -53,6 +53,7 @@ int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) { *(int64_t *)(dc.data + dc.pos) = uid; *(int64_t *)(dc.data + dc.pos + 8) = ctime; + vTrace("vgId:%d, table:%s uid:%" PRId64 " is generated", pVnode->config.vgId, name, uid); tEndDecode(&dc); } @@ -381,7 +382,7 @@ static int32_t vnodeProcessDropTtlTbReq(SVnode *pVnode, int64_t version, void *p goto end; } - vInfo("vgId:%d, drop ttl table req will be processed, time:%d", pVnode->config.vgId, ttlReq.timestamp); + vDebug("vgId:%d, drop ttl table req will be processed, time:%d", pVnode->config.vgId, ttlReq.timestamp); int32_t ret = metaTtlDropTable(pVnode->pMeta, ttlReq.timestamp, tbUids); if (ret != 0) { goto end; diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index 712cee9fd0..dbe4458681 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -17,35 +17,22 @@ #include "vnd.h" static inline bool vnodeIsMsgBlock(tmsg_t type) { - return (type == TDMT_VND_CREATE_TABLE) || (type == TDMT_VND_ALTER_CONFIRM) || (type == TDMT_VND_ALTER_REPLICA); + return (type == TDMT_VND_CREATE_TABLE) || (type == TDMT_VND_CREATE_TABLE) || (type == TDMT_VND_CREATE_TABLE) || + (type == TDMT_VND_ALTER_TABLE) || (type == TDMT_VND_DROP_TABLE) || (type == TDMT_VND_UPDATE_TAG_VAL); } static inline bool vnodeIsMsgWeak(tmsg_t type) { return false; } -static inline void vnodeAccumBlockMsg(SVnode *pVnode, tmsg_t type) { - if (!vnodeIsMsgBlock(type)) return; - - int32_t count = atomic_add_fetch_32(&pVnode->blockCount, 1); - vTrace("vgId:%d, accum block, count:%d type:%s", pVnode->config.vgId, count, TMSG_INFO(type)); +static inline void vnodeWaitBlockMsg(SVnode *pVnode, const SRpcMsg *pMsg) { + if (vnodeIsMsgBlock(pMsg->msgType)) { + vTrace("vgId:%d, msg:%p wait block, type:%s", pVnode->config.vgId, pMsg, TMSG_INFO(pMsg->msgType)); + tsem_wait(&pVnode->syncSem); + } } -static inline void vnodeWaitBlockMsg(SVnode *pVnode) { - int32_t count = atomic_load_32(&pVnode->blockCount); - if (count <= 0) return; - - vTrace("vgId:%d, wait block finish, count:%d", pVnode->config.vgId, count); - tsem_wait(&pVnode->syncSem); -} - -static inline void vnodePostBlockMsg(SVnode *pVnode, tmsg_t type) { - if (!vnodeIsMsgBlock(type)) return; - - int32_t count = atomic_load_32(&pVnode->blockCount); - if (count 
<= 0) return; - - count = atomic_sub_fetch_32(&pVnode->blockCount, 1); - vTrace("vgId:%d, post block, count:%d type:%s", pVnode->config.vgId, count, TMSG_INFO(type)); - if (count <= 0) { +static inline void vnodePostBlockMsg(SVnode *pVnode, const SRpcMsg *pMsg) { + if (vnodeIsMsgBlock(pMsg->msgType)) { + vTrace("vgId:%d, msg:%p post block, type:%s", pVnode->config.vgId, pMsg, TMSG_INFO(pMsg->msgType)); tsem_post(&pVnode->syncSem); } } @@ -143,6 +130,8 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) int32_t code = 0; SRpcMsg *pMsg = NULL; + vTrace("vgId:%d, get %d msgs from vnode-write queue", vgId, numOfMsgs); + for (int32_t m = 0; m < numOfMsgs; m++) { if (taosGetQitem(qall, (void **)&pMsg) == 0) continue; const STraceId *trace = &pMsg->info.traceId; @@ -165,13 +154,14 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) if (rsp.info.handle != NULL) { tmsgSendRsp(&rsp); } + } else if (code == 0) { + vnodeWaitBlockMsg(pVnode, pMsg); + } else { } } } - if (code == 0) { - vnodeAccumBlockMsg(pVnode, pMsg->msgType); - } else if (code < 0) { + if (code < 0) { if (terrno == TSDB_CODE_SYN_NOT_LEADER) { vnodeRedirectRpcMsg(pVnode, pMsg); } else { @@ -182,15 +172,12 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) tmsgSendRsp(&rsp); } } - } else { } vGTrace("vgId:%d, msg:%p is freed, code:0x%x", vgId, pMsg, code); rpcFreeCont(pMsg->pCont); taosFreeQitem(pMsg); } - - vnodeWaitBlockMsg(pVnode); } void vnodeApplyWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) { @@ -213,7 +200,7 @@ void vnodeApplyWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) { } } - vnodePostBlockMsg(pVnode, pMsg->msgType); + vnodePostBlockMsg(pVnode, pMsg); if (rsp.info.handle != NULL) { tmsgSendRsp(&rsp); } @@ -418,7 +405,7 @@ static void vnodeSyncReconfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReCon tmsgSendRsp(&rpcMsg); } - vnodePostBlockMsg(pVnode, TDMT_VND_ALTER_REPLICA); + vnodePostBlockMsg(pVnode, pMsg); } static void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) { diff --git a/tests/script/tsim/db/alter_replica_31.sim b/tests/script/tsim/db/alter_replica_31.sim index e9a295820c..1823f182c9 100644 --- a/tests/script/tsim/db/alter_replica_31.sim +++ b/tests/script/tsim/db/alter_replica_31.sim @@ -111,6 +111,15 @@ if $hasleader != 1 then goto step2 endi +# sql use db; +# sql create table stb (ts timestamp, c int) tags (t int); +# sql create table t0 using stb tags (0); +# sql insert into t0 values(now, 1); +# sql show db.stables; +# sql show db.tables; +# sql show db.vgroups; +return + sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd" sql create table db.ctb using db.stb tags(101, "102") sql insert into db.ctb values(now, 1, "2") From d8d32ebc51ae5ff7cef52379620bd4a528223ab1 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Mon, 18 Jul 2022 19:15:23 +0800 Subject: [PATCH 30/46] fix(tmq): set version after consuming from tsdb --- source/dnode/vnode/src/tq/tqExec.c | 4 ++-- tests/system-test/99-TDcase/TD-17255.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/tq/tqExec.c b/source/dnode/vnode/src/tq/tqExec.c index 618b32239a..f18b25bef4 100644 --- a/source/dnode/vnode/src/tq/tqExec.c +++ b/source/dnode/vnode/src/tq/tqExec.c @@ -68,7 +68,7 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVa pRsp->rspOffset = *pOffset; return 0; } else { - 
tqOffsetResetToLog(pOffset, pHandle->snapshotVer + 1); + tqOffsetResetToLog(pOffset, pHandle->snapshotVer); if (qStreamPrepareScan(task, pOffset) < 0) { pRsp->rspOffset = *pOffset; return 0; @@ -106,7 +106,7 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVa } if (pRsp->blockNum == 0 && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) { - tqOffsetResetToLog(pOffset, pHandle->snapshotVer + 1); + tqOffsetResetToLog(pOffset, pHandle->snapshotVer); qStreamPrepareScan(task, pOffset); continue; } diff --git a/tests/system-test/99-TDcase/TD-17255.py b/tests/system-test/99-TDcase/TD-17255.py index 9eb8d531f7..28a1a1fd4a 100644 --- a/tests/system-test/99-TDcase/TD-17255.py +++ b/tests/system-test/99-TDcase/TD-17255.py @@ -320,7 +320,7 @@ class TDTestCase: tdSql.prepare() self.tmqCase1() - # self.tmqCase2() + self.tmqCase2() self.tmqCase3() def stop(self): From 156e72b07118436b7e49e59d501ee87d92135979 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Mon, 18 Jul 2022 19:34:53 +0800 Subject: [PATCH 31/46] enh: rsma drop and code optimization --- source/dnode/vnode/src/inc/sma.h | 27 +++-- source/dnode/vnode/src/meta/metaTable.c | 2 +- source/dnode/vnode/src/sma/smaCommit.c | 4 +- source/dnode/vnode/src/sma/smaEnv.c | 20 ++++ source/dnode/vnode/src/sma/smaRollup.c | 126 ++++++++++++++++++------ 5 files changed, 136 insertions(+), 43 deletions(-) diff --git a/source/dnode/vnode/src/inc/sma.h b/source/dnode/vnode/src/inc/sma.h index 1c8434594d..4673b01d27 100644 --- a/source/dnode/vnode/src/inc/sma.h +++ b/source/dnode/vnode/src/inc/sma.h @@ -43,9 +43,9 @@ typedef struct SRSmaInfo SRSmaInfo; typedef struct SRSmaInfoItem SRSmaInfoItem; struct SSmaEnv { - SRWLatch lock; - int8_t type; - SSmaStat *pStat; + SRWLatch lock; + int8_t type; + SSmaStat *pStat; }; typedef struct { @@ -103,10 +103,15 @@ struct SRSmaInfoItem { }; struct SRSmaInfo { - STSchema *pTSchema; - int64_t suid; + STSchema *pTSchema; + int64_t suid; + int8_t delFlag; + T_REF_DECLARE() SRSmaInfoItem items[TSDB_RETENTION_L2]; }; +#define RSMA_INFO_HEAD_LEN 24 +#define RSMA_INFO_IS_DEL(r) ((r)->delFlag == 1) +#define RSMA_INFO_SET_DEL(r) ((r)->delFlag = 1) enum { TASK_TRIGGER_STAT_INIT = 0, @@ -120,8 +125,8 @@ enum { enum { RSMA_ROLE_CREATE = 0, RSMA_ROLE_DROP = 1, - RSMA_ROLE_FETCH = 2, - RSMA_ROLE_SUBMIT = 3, + RSMA_ROLE_SUBMIT = 2, + RSMA_ROLE_FETCH = 3, RSMA_ROLE_ITERATE = 4, }; @@ -134,6 +139,8 @@ int32_t tdInsertRSmaData(SSma *pSma, char *msg); int32_t tdRefSmaStat(SSma *pSma, SSmaStat *pStat); int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat); +int32_t tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo); +int32_t tdUnRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo); void *tdAcquireSmaRef(int32_t rsetId, int64_t refId, const char *tags, int32_t ln); int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId, const char *tags, int32_t ln); @@ -193,6 +200,7 @@ void tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t le static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType); void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType); void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree); +void tdRemoveRSmaInfoBySuid(SSma *pSma, int64_t suid); int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash); int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName); @@ -258,8 +266,9 @@ void tdUpdateTFileMagic(STFile *pTFile, void *pCksm); void tdCloseTFile(STFile *pTFile); void tdDestroyTFile(STFile *pTFile); -void tdGetVndFileName(int32_t vgId, 
const char *pdname, const char *dname, const char *fname, int64_t version, char *outputName); -void tdGetVndDirName(int32_t vgId,const char *pdname, const char *dname, bool endWithSep, char *outputName); +void tdGetVndFileName(int32_t vgId, const char *pdname, const char *dname, const char *fname, int64_t version, + char *outputName); +void tdGetVndDirName(int32_t vgId, const char *pdname, const char *dname, bool endWithSep, char *outputName); #ifdef __cplusplus } diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 29c10c40c2..71345df747 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -277,7 +277,7 @@ _drop_super_table: _exit: tdbFree(pKey); tdbFree(pData); - metaDebug("vgId:%d, super table %s uid:%" PRId64 " is dropped", TD_VID(pMeta->pVnode), pReq->name, pReq->suid); + metaDebug("vgId:%d, super table %s uid:%" PRId64 " is dropped", TD_VID(pMeta->pVnode), pReq->name, pReq->suid); return 0; } diff --git a/source/dnode/vnode/src/sma/smaCommit.c b/source/dnode/vnode/src/sma/smaCommit.c index cdaaf2bbdb..2174a479e7 100644 --- a/source/dnode/vnode/src/sma/smaCommit.c +++ b/source/dnode/vnode/src/sma/smaCommit.c @@ -191,7 +191,7 @@ static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat) { if ((pDir = taosOpenDir(dir)) == NULL) { regfree(®ex); terrno = TAOS_SYSTEM_ERROR(errno); - smaWarn("vgId:%d, rsma post commit, open dir %s failed since %s", TD_VID(pVnode), dir, terrstr()); + smaDebug("vgId:%d, rsma post commit, open dir %s failed since %s", TD_VID(pVnode), dir, terrstr()); return TSDB_CODE_FAILED; } @@ -392,5 +392,7 @@ static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) { // step 2: cleanup outdated qtaskinfo files tdCleanupQTaskInfoFiles(pSma, pRSmaStat); + atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 0); + return TSDB_CODE_SUCCESS; } diff --git a/source/dnode/vnode/src/sma/smaEnv.c b/source/dnode/vnode/src/sma/smaEnv.c index 577d5fd4fa..23706d54e0 100644 --- a/source/dnode/vnode/src/sma/smaEnv.c +++ b/source/dnode/vnode/src/sma/smaEnv.c @@ -169,6 +169,26 @@ int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat) { return 0; } +int32_t tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) { + if (!pRSmaInfo) return 0; + + int ref = T_REF_INC(pRSmaInfo); + smaDebug("vgId:%d, ref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref); + return 0; +} + +int32_t tdUnRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) { + if (!pRSmaInfo) return 0; + + int ref = T_REF_DEC(pRSmaInfo); + smaDebug("vgId:%d, unref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref); + + if (ref == 0) { + tdRemoveRSmaInfoBySuid(pSma, pRSmaInfo->suid); + } + return 0; +} + static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pSma) { ASSERT(pSmaStat != NULL); diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 32d6dee57f..1f7664c22a 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -24,6 +24,7 @@ SSmaMgmt smaMgmt = { }; #define TD_QTASKINFO_FNAME_PREFIX "qtaskinfo.ver" +#define TD_RSMAINFO_DEL_FILE "rsmainfo.del" typedef struct SRSmaQTaskInfoItem SRSmaQTaskInfoItem; typedef struct SRSmaQTaskInfoIter SRSmaQTaskInfoIter; @@ -48,14 +49,11 @@ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables); static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int64_t *committed); static int32_t tdRSmaRestoreTSDataReload(SSma *pSma, int64_t committed); - - static SRSmaInfo 
*tdGetRSmaInfoByItem(SRSmaInfoItem *pItem) { // adapt accordingly if definition of SRSmaInfo update SRSmaInfo *pResult = NULL; - int32_t rsmaInfoHeadLen = sizeof(int64_t) + sizeof(STSchema *); ASSERT(pItem->level == TSDB_RETENTION_L1 || pItem->level == TSDB_RETENTION_L2); - pResult = (SRSmaInfo *)POINTER_SHIFT(pItem, -(sizeof(SRSmaInfoItem) * (pItem->level - 1) + rsmaInfoHeadLen)); + pResult = (SRSmaInfo *)POINTER_SHIFT(pItem, -(sizeof(SRSmaInfoItem) * (pItem->level - 1) + RSMA_INFO_HEAD_LEN)); ASSERT(pResult->pTSchema->numOfCols > 1); return pResult; } @@ -116,8 +114,8 @@ void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree) { SRSmaInfoItem *pItem = &pInfo->items[i]; if (pItem->taskInfo) { if (isDeepFree && pItem->tmrId) { - smaDebug("vgId:%d, table %" PRIi64 " stop fetch timer %p level %d", SMA_VID(pSma), pInfo->suid, pItem->tmrId, - i + 1); + smaDebug("vgId:%d, stop fetch timer %p for table %" PRIi64 " level %d", SMA_VID(pSma), pInfo->suid, + pItem->tmrId, i + 1); taosTmrStopA(&pItem->tmrId); } tdFreeQTaskInfo(&pItem->taskInfo, SMA_VID(pSma), i + 1); @@ -337,6 +335,7 @@ int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, con } pRSmaInfo->pTSchema = pTSchema; pRSmaInfo->suid = suid; + T_REF_INIT_VAL(pRSmaInfo, 1); if (tdSetRSmaInfoItemParams(pSma, param, pStat, pRSmaInfo, 0) < 0) { goto _err; @@ -392,11 +391,33 @@ int32_t tdProcessRSmaCreate(SSma *pSma, SVCreateStbReq *pReq) { int32_t tdProcessRSmaDrop(SSma *pSma, SVDropStbReq *pReq) { SVnode *pVnode = pSma->pVnode; if (!VND_IS_RSMA(pVnode)) { - smaTrace("vgId:%d, not create rsma for stable %s %" PRIi64 " since vnd is not rsma", TD_VID(pVnode), pReq->name, + smaTrace("vgId:%d, not drop rsma for stable %s %" PRIi64 " since vnd is not rsma", TD_VID(pVnode), pReq->name, pReq->suid); return TSDB_CODE_SUCCESS; } + SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma); + if (!pSmaEnv) { + return TSDB_CODE_SUCCESS; + } + + SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv); + SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat); + + SRSmaInfo *pRSmaInfo = tdGetRSmaInfoBySuid(pSma, pReq->suid); + + if (!pRSmaInfo) { + smaWarn("vgId:%d, drop rsma for stable %s %" PRIi64 " failed no rsma in hash", TD_VID(pVnode), pReq->name, + pReq->suid); + return TSDB_CODE_SUCCESS; + } + + // set del flag for data in mem + RSMA_INFO_SET_DEL(pRSmaInfo); + tdUnRefRSmaInfo(pSma, pRSmaInfo); + + // save to file + smaDebug("vgId:%d, drop rsma for table %" PRIi64 " succeed", TD_VID(pVnode), pReq->suid); return TSDB_CODE_SUCCESS; } @@ -650,10 +671,10 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType /** * @brief During async commit, the SRSmaInfo object would be COW from iRSmaInfoHash and write lock should be applied. 
- * - * @param pSma - * @param suid - * @return SRSmaInfo* + * + * @param pSma + * @param suid + * @return SRSmaInfo* */ static SRSmaInfo *tdGetRSmaInfoBySuid(SSma *pSma, int64_t suid) { SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); @@ -661,7 +682,6 @@ static SRSmaInfo *tdGetRSmaInfoBySuid(SSma *pSma, int64_t suid) { SRSmaInfo *pRSmaInfo = NULL; if (!pEnv) { - // only applicable when rsma env exists return NULL; } @@ -683,18 +703,21 @@ static SRSmaInfo *tdGetRSmaInfoBySuid(SSma *pSma, int64_t suid) { SRSmaInfo *pCowRSmaInfo = NULL; // lock taosWLockLatch(SMA_ENV_LOCK(pEnv)); - void *iRSmaInfo = taosHashGet(RSMA_IMU_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)); - if (iRSmaInfo) { - SRSmaInfo *pIRSmaInfo = *(SRSmaInfo **)iRSmaInfo; - if (pIRSmaInfo) { - if (tdCloneRSmaInfo(pSma, pCowRSmaInfo, pIRSmaInfo) < 0) { - taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); - smaError("vgId:%d, clone rsma info failed for suid:%" PRIu64 " since %s", SMA_VID(pSma), suid, terrstr()); - return NULL; - } - if (taosHashPut(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t), &pCowRSmaInfo, sizeof(pCowRSmaInfo)) < 0) { - taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); - return NULL; + if (!taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t))) { // 2-phase lock + void *iRSmaInfo = taosHashGet(RSMA_IMU_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)); + if (iRSmaInfo) { + SRSmaInfo *pIRSmaInfo = *(SRSmaInfo **)iRSmaInfo; + if (pIRSmaInfo) { + if (tdCloneRSmaInfo(pSma, pCowRSmaInfo, pIRSmaInfo) < 0) { + taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); + smaError("vgId:%d, clone rsma info failed for suid:%" PRIu64 " since %s", SMA_VID(pSma), suid, terrstr()); + return NULL; + } + smaDebug("vgId:%d, clone rsma info succeed for suid:%" PRIu64, SMA_VID(pSma), suid); + if (taosHashPut(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t), &pCowRSmaInfo, sizeof(pCowRSmaInfo)) < 0) { + taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); + return NULL; + } } } } @@ -703,21 +726,56 @@ static SRSmaInfo *tdGetRSmaInfoBySuid(SSma *pSma, int64_t suid) { return pCowRSmaInfo; } +/** + * @brief During the drop procedure, only need to delete the object in rsmaInfoHash. 
+ * + * @param pSma + * @param suid + * @return SRSmaInfo* + */ +void tdRemoveRSmaInfoBySuid(SSma *pSma, int64_t suid) { + SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); + SRSmaStat *pStat = NULL; + SRSmaInfo *pRSmaInfo = NULL; + + if (!pEnv) { + return; + } + + pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv); + if (!pStat || !RSMA_INFO_HASH(pStat)) { + return; + } + + pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)); + if (pRSmaInfo) { + if (pRSmaInfo = *(SRSmaInfo **)pRSmaInfo) { + tdFreeRSmaInfo(pSma, pRSmaInfo, true); + } + taosHashRemove(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)); + smaDebug("vgId:%d, remove from infoHash for table:%" PRIu64 " succeed", SMA_VID(pSma), suid); + } +} + static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid) { SRSmaInfo *pRSmaInfo = tdGetRSmaInfoBySuid(pSma, suid); if (!pRSmaInfo) { - smaDebug("vgId:%d, return as no rsma info for suid:%" PRIu64, SMA_VID(pSma), suid); + smaDebug("vgId:%d, execute rsma, no rsma info for suid:%" PRIu64, SMA_VID(pSma), suid); return TSDB_CODE_SUCCESS; } if (!pRSmaInfo->items[0].taskInfo) { - smaDebug("vgId:%d, return as no rsma qTaskInfo for suid:%" PRIu64, SMA_VID(pSma), suid); + smaDebug("vgId:%d, execute rsma, no rsma qTaskInfo for suid:%" PRIu64, SMA_VID(pSma), suid); return TSDB_CODE_SUCCESS; } if (inputType == STREAM_INPUT__DATA_SUBMIT) { + tdRefRSmaInfo(pSma, pRSmaInfo); + tdExecuteRSmaImpl(pSma, pMsg, inputType, &pRSmaInfo->items[0], pRSmaInfo->pTSchema, suid, TSDB_RETENTION_L1); tdExecuteRSmaImpl(pSma, pMsg, inputType, &pRSmaInfo->items[1], pRSmaInfo->pTSchema, suid, TSDB_RETENTION_L2); + + tdUnRefRSmaInfo(pSma, pRSmaInfo); } return TSDB_CODE_SUCCESS; @@ -931,10 +989,10 @@ _err: /** * @brief Restore from SRSmaQTaskInfoItem - * - * @param pSma - * @param pItem - * @return int32_t + * + * @param pSma + * @param pItem + * @return int32_t */ static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem *pItem) { SRSmaInfo *pRSmaInfo = NULL; @@ -1271,6 +1329,9 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) { } SRSmaInfo *pRSmaInfo = tdGetRSmaInfoByItem(pItem); + if (RSMA_INFO_IS_DEL(pRSmaInfo)) { + goto _end; + } int8_t fetchTriggerStat = atomic_val_compare_exchange_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE, TASK_TRIGGER_STAT_INACTIVE); @@ -1279,13 +1340,14 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) { smaDebug("vgId:%d, fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is active", SMA_VID(pSma), pItem->level, pRSmaInfo->suid); - tdRefSmaStat(pSma, (SSmaStat *)pStat); + // sync procedure => async process + tdRefRSmaInfo(pSma, pRSmaInfo); SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL}; qSetStreamInput(pItem->taskInfo, &dataBlock, STREAM_INPUT__DATA_BLOCK, false); tdRSmaFetchAndSubmitResult(pItem, pRSmaInfo->pTSchema, pRSmaInfo->suid, pStat, STREAM_INPUT__DATA_BLOCK); - tdUnRefSmaStat(pSma, (SSmaStat *)pStat); + tdUnRefRSmaInfo(pSma, pRSmaInfo); } break; case TASK_TRIGGER_STAT_PAUSED: { smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is paused", From 322ec02e39e711cd243f0d2fc27a05fa40d24373 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Mon, 18 Jul 2022 11:45:13 +0000 Subject: [PATCH 32/46] fix: delete causes coredump --- source/dnode/vnode/src/inc/tsdb.h | 1 + source/dnode/vnode/src/tsdb/tsdbFS.c | 2 +- source/dnode/vnode/src/tsdb/tsdbFile.c | 2 ++ source/dnode/vnode/src/tsdb/tsdbReaderWriter.c | 2 +- 4 files changed, 5 insertions(+), 2 deletions(-) diff --git 
a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index 3f667a7465..4e2c762cd3 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -186,6 +186,7 @@ int32_t tsdbGetNRowsInTbData(STbData *pTbData); typedef enum { TSDB_HEAD_FILE = 0, TSDB_DATA_FILE, TSDB_LAST_FILE, TSDB_SMA_FILE } EDataFileT; void tsdbDataFileName(STsdb *pTsdb, SDFileSet *pDFileSet, EDataFileT ftype, char fname[]); bool tsdbFileIsSame(SDFileSet *pDFileSet1, SDFileSet *pDFileSet2, EDataFileT ftype); +bool tsdbDelFileIsSame(SDelFile *pDelFile1, SDelFile *pDelFile2); int32_t tsdbUpdateDFileHdr(TdFilePtr pFD, SDFileSet *pSet, EDataFileT ftype); int32_t tsdbDFileRollback(STsdb *pTsdb, SDFileSet *pSet, EDataFileT ftype); int32_t tPutDataFileHdr(uint8_t *p, SDFileSet *pSet, EDataFileT ftype); diff --git a/source/dnode/vnode/src/tsdb/tsdbFS.c b/source/dnode/vnode/src/tsdb/tsdbFS.c index 70fbd90646..3bc79621e1 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS.c +++ b/source/dnode/vnode/src/tsdb/tsdbFS.c @@ -245,7 +245,7 @@ static int32_t tsdbApplyDelFileChange(STsdbFS *pFS, SDelFile *pFrom, SDelFile *p char fname[TSDB_FILENAME_LEN]; if (pFrom && pTo) { - if (pFrom != pTo) { + if (!tsdbDelFileIsSame(pFrom, pTo)) { tsdbDelFileName(pFS->pTsdb, pFrom, fname); if (taosRemoveFile(fname) < 0) { code = TAOS_SYSTEM_ERROR(errno); diff --git a/source/dnode/vnode/src/tsdb/tsdbFile.c b/source/dnode/vnode/src/tsdb/tsdbFile.c index e7f8cb4789..f15ad072e7 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFile.c +++ b/source/dnode/vnode/src/tsdb/tsdbFile.c @@ -140,6 +140,8 @@ bool tsdbFileIsSame(SDFileSet *pDFileSet1, SDFileSet *pDFileSet2, EDataFileT fty } } +bool tsdbDelFileIsSame(SDelFile *pDelFile1, SDelFile *pDelFile2) { return pDelFile1->commitID == pDelFile2->commitID; } + int32_t tsdbUpdateDFileHdr(TdFilePtr pFD, SDFileSet *pSet, EDataFileT ftype) { int32_t code = 0; int64_t n; diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index 0ed2e12542..5e8157864f 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -246,7 +246,7 @@ int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb tsdbDelFileName(pTsdb, pFile, fname); pDelFReader->pReadH = taosOpenFile(fname, TD_FILE_READ); - if (pDelFReader == NULL) { + if (pDelFReader->pReadH == NULL) { code = TAOS_SYSTEM_ERROR(errno); taosMemoryFree(pDelFReader); goto _err; From d4032483c545242d8445c0f16f31fbc1f2d16e96 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 18 Jul 2022 20:04:52 +0800 Subject: [PATCH 33/46] fix last_row.py --- tests/system-test/2-query/last_row.py | 94 +++++++++++++-------------- 1 file changed, 47 insertions(+), 47 deletions(-) diff --git a/tests/system-test/2-query/last_row.py b/tests/system-test/2-query/last_row.py index b856590787..c79e3b58fb 100644 --- a/tests/system-test/2-query/last_row.py +++ b/tests/system-test/2-query/last_row.py @@ -48,7 +48,7 @@ class TDTestCase: c9 = "'nchar_val'" c10 = ts tdSql.execute(f" insert into {tbname} values ({ts},{c1},{c2},{c3},{c4},{c5},{c6},{c7},{c8},{c9},{c10})") - + tdSql.execute("use test") tbnames = ["stb", "sub_tb_1"] support_types = ["BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE", "INT"] @@ -61,7 +61,7 @@ class TDTestCase: origin_sql = "select {} from {} order by tbname".format(colname, 'test.'+tbname) if coltype[1] in support_types: self.check_result_auto(origin_sql , abs_sql) - + def prepare_datas(self ,cache_value): 
tdSql.execute("drop database if exists db ") create_db_sql = f"create database if not exists db keep 3650 duration 1000 cachemodel {cache_value}" @@ -220,22 +220,22 @@ class TDTestCase: def test_errors(self): tdSql.execute("use testdb") - # bug need fix - tdSql.error("select last_row(c1 ,NULL) from testdb.t1") + # bug need fix + tdSql.query("select last_row(c1 ,NULL) from testdb.t1") error_sql_lists = [ "select last_row from testdb.t1", "select last_row(-+--+c1) from testdb.t1", "select last_row(123--123)==1 from testdb.t1", "select last_row(c1) as 'd1' from testdb.t1", - "select last_row(c1 ,NULL) from testdb.t1", + #"select last_row(c1 ,NULL) from testdb.t1", "select last_row(,) from testdb.t1;", "select last_row(abs(c1) ab from testdb.t1)", "select last_row(c1) as int from testdb.t1", "select last_row from testdb.stb1", "select last_row(123--123)==1 from testdb.stb1", "select last_row(c1) as 'd1' from testdb.stb1", - "select last_row(c1 ,NULL) from testdb.stb1", + #"select last_row(c1 ,NULL) from testdb.stb1", "select last_row(,) from testdb.stb1;", "select last_row(abs(c1) ab from testdb.stb1)", "select last_row(c1) as int from testdb.stb1" @@ -246,7 +246,7 @@ class TDTestCase: def support_types(self): tdSql.execute("use testdb") tbnames = ["stb1", "t1", "ct1", "ct2"] - + for tbname in tbnames: tdSql.query("desc {}".format(tbname)) coltypes = tdSql.queryResult @@ -256,7 +256,7 @@ class TDTestCase: if col_note != "TAG": abs_sql = "select last_row({}) from {}".format(colname, "testdb."+tbname) tdSql.query(abs_sql) - + def basic_abs_function(self): @@ -283,7 +283,7 @@ class TDTestCase: # used for regular table - # bug need fix + # bug need fix tdSql.query("select last_row(c1) from testdb.t1") tdSql.checkData(0, 0, None) tdSql.query("select last_row(c1) from testdb.ct4") @@ -291,21 +291,21 @@ class TDTestCase: tdSql.query("select last_row(c1) from testdb.stb1") tdSql.checkData(0, 0, None) - # # bug need fix + # # bug need fix tdSql.query("select last_row(c1), c2, c3 , c4, c5 from testdb.t1") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) - # # bug need fix + # # bug need fix tdSql.query("select last_row(c1), c2, c3 , c4, c5 from testdb.ct1") tdSql.checkData(0, 0, 9) tdSql.checkData(0, 1, -99999) tdSql.checkData(0, 2, -999) tdSql.checkData(0, 3, None) tdSql.checkData(0, 4,-9.99000) - - # bug need fix + + # bug need fix tdSql.query("select last_row(c1), c2, c3 , c4, c5 from testdb.stb1 where tbname='ct1'") tdSql.checkData(0, 0, 9) tdSql.checkData(0, 1, -99999) @@ -313,14 +313,14 @@ class TDTestCase: tdSql.checkData(0, 3, None) tdSql.checkData(0, 4,-9.99000) - # bug fix + # bug fix tdSql.query("select last_row(abs(c1)) from testdb.ct1") tdSql.checkData(0,0,9) - # # bug fix + # # bug fix tdSql.query("select last_row(c1+1) from testdb.ct1") - tdSql.query("select last_row(c1+1) from testdb.stb1") - tdSql.query("select last_row(c1+1) from testdb.t1") + tdSql.query("select last_row(c1+1) from testdb.stb1") + tdSql.query("select last_row(c1+1) from testdb.t1") # used for stable table tdSql.query("select last_row(c1 ,c2 ,c3) ,last_row(c4) from testdb.ct1") @@ -329,7 +329,7 @@ class TDTestCase: tdSql.checkData(0,2,-999) tdSql.checkData(0,3,None) - # bug need fix + # bug need fix tdSql.query("select last_row(c1 ,c2 ,c3) from testdb.stb1 ") tdSql.checkData(0,0,None) tdSql.checkData(0,1,None) @@ -338,7 +338,7 @@ class TDTestCase: tdSql.query('select last_row(c1) from testdb.t1 where ts <"2022-12-31 01:01:36.000"') tdSql.checkData(0,0,8) - # bug need fix + # bug need 
fix tdSql.query("select abs(last_row(c1)-2)+max(c1),ceil(last_row(c4)-2) from testdb.stb1 where c4 is not null") tdSql.checkData(0,0,16.000000000) tdSql.checkData(0,1,-101.000000000) @@ -371,7 +371,7 @@ class TDTestCase: tdSql.query("select last_row(c1) ,count(*) from testdb.stb1 where c1 is null") tdSql.checkData(0,0,None) tdSql.checkData(0,1,3) - + tdSql.query("select last_row(c1) ,count(c1) from testdb.stb1 where c1 is null") tdSql.checkData(0,0,None) tdSql.checkData(0,1,0) @@ -380,7 +380,7 @@ class TDTestCase: tdSql.query("select tbname ,last_row(c1) from testdb.stb1") tdSql.checkData(0,0,'ct4') tdSql.checkData(0,1,None) - + tdSql.query(" select tbname ,last_row(c1) from testdb.stb1 partition by tbname order by tbname ") tdSql.checkData(0,0,'ct1') tdSql.checkData(0,1,9) @@ -396,11 +396,11 @@ class TDTestCase: tdSql.query(" select t1 ,count(c1) from testdb.stb1 partition by t1 ") tdSql.checkRows(2) - # filter by tbname + # filter by tbname tdSql.query("select last_row(c1) from testdb.stb1 where tbname = 'ct1' ") tdSql.checkData(0,0,9) - # bug need fix + # bug need fix tdSql.query("select tbname ,last_row(c1) from testdb.stb1 where tbname = 'ct1' ") tdSql.checkData(0,1,9) tdSql.query("select tbname ,last_row(c1) from testdb.stb1 partition by tbname order by tbname") @@ -428,7 +428,7 @@ class TDTestCase: tdSql.checkData(0,2,333) tdSql.checkData(0,3,3) - # filter by tag + # filter by tag tdSql.query("select tbname ,last_row(c1) from testdb.stb1 where t1 =0 ") tdSql.checkData(0,1,9) tdSql.query("select tbname ,last_row(c1) ,t1 from testdb.stb1 partition by t1 order by t1") @@ -437,7 +437,7 @@ class TDTestCase: tdSql.checkData(1, 0, 'ct4') tdSql.checkData(1, 1, None) - # filter by col + # filter by col tdSql.query("select tbname ,last_row(c1),abs(c1)from testdb.stb1 where c1 =1;") tdSql.checkData(0, 0, 'ct1') @@ -445,7 +445,7 @@ class TDTestCase: tdSql.checkData(0, 2, 1) tdSql.query("select last_row(c1) from testdb.stb1 where abs(ceil(c1))*c1==1") tdSql.checkData(0,0,1) - + # mix with common functions tdSql.query("select last_row(*) ,last(*) from testdb.stb1 ") tdSql.checkRows(1) @@ -457,11 +457,11 @@ class TDTestCase: tdSql.query("select last_row(c1+abs(c1)) from testdb.stb1 partition by tbname order by tbname") tdSql.query("select last(c1), max(c1+abs(c1)),last_row(c1+abs(c1)) from testdb.stb1 partition by tbname order by tbname") - # # bug need fix ,taosd crash + # # bug need fix ,taosd crash tdSql.error("select last_row(*) ,last(*) from testdb.stb1 partition by tbname order by last(*)") tdSql.error("select last_row(*) ,last(*) from testdb.stb1 partition by tbname order by last_row(*)") - # mix with agg functions + # mix with agg functions tdSql.query("select last(*), last_row(*),last(c1), last_row(c1) from testdb.stb1 ") tdSql.query("select last(*), last_row(*),last(c1), last_row(c1) from testdb.ct1 ") tdSql.query("select last(*), last_row(*),last(c1+1)*max(c1), last_row(c1+2)/2 from testdb.t1 ") @@ -564,7 +564,7 @@ class TDTestCase: # bug need fix - tdSql.query(" select sum(c1) from testdb.stb1 where t1+10 >1; ") + tdSql.query(" select sum(c1) from testdb.stb1 where t1+10 >1; ") tdSql.query("select c1 ,t1 from testdb.stb1 where t1 =0 ") tdSql.checkRows(13) tdSql.query("select last_row(c1,t1) from testdb.stb1 ") @@ -627,8 +627,8 @@ class TDTestCase: tdSql.execute(" use testdb ") tdSql.query(" select last_row(c1) from testdb.stb1 group by t1 order by t1 ") tdSql.checkRows(2) - - # bug need fix + + # bug need fix tdSql.query("select last_row(c1) from testdb.stb1 group by c1 order by c1,t1 
") tdSql.checkRows(10) tdSql.checkData(9,0,8) @@ -643,7 +643,7 @@ class TDTestCase: tdSql.checkRows(11) tdSql.checkData(10,0,9) - # bug need fix , result is error + # bug need fix , result is error tdSql.query("select last_row(c1) from testdb.ct4 group by c1 order by t1 ") tdSql.query("select last_row(t1) from testdb.ct4 group by c1 order by t1 ") @@ -661,24 +661,24 @@ class TDTestCase: tdSql.query("select last_row(c1+c3) from testdb.stb1 group by abs(c1+c3) order by abs(c1+c3)") tdSql.checkRows(11) - # bug need fix , taosd crash + # bug need fix , taosd crash tdSql.query("select last_row(c1+c3)+c2 from testdb.stb1 group by abs(c1+c3)+c2 order by abs(c1+c3)+c2") tdSql.checkRows(11) tdSql.query("select last_row(c1+c3)+last_row(c2) from testdb.stb1 group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)") tdSql.checkRows(11) tdSql.checkData(0,0,None) tdSql.checkData(2,0,11223.000000000) - + tdSql.query("select last_row(t1) from testdb.stb1 where abs(c1+t1)=1 partition by tbname") tdSql.checkData(0,0,1) - + tdSql.query("select tbname , last_row(c1) from testdb.stb1 partition by tbname order by tbname") tdSql.checkRows(2) tdSql.checkData(0, 0, 'ct1') tdSql.checkData(0, 1, 9) tdSql.checkData(0, 2, 'ct4') tdSql.checkData(0, 3, None) - + tdSql.query("select tbname , last_row(c1) from testdb.stb1 partition by t1 order by t1") tdSql.checkRows(2) tdSql.checkData(0, 0, 'ct1') @@ -686,7 +686,7 @@ class TDTestCase: tdSql.checkData(0, 2, 'ct4') tdSql.checkData(0, 3, None) - # bug need fix + # bug need fix tdSql.query("select tbname , last_row(c1) from testdb.stb1 partition by c2 order by c1") tdSql.checkRows(11) tdSql.checkData(10,1,9) @@ -700,7 +700,7 @@ class TDTestCase: tdSql.query("select abs(c1) ,c2 ,t1, last_row(t1) from testdb.stb1 partition by c2 order by t1") tdSql.checkRows(11) - + tdSql.query("select t1 ,last_row(t1) ,c2 from testdb.stb1 partition by c2 order by t1") tdSql.checkRows(11) @@ -722,7 +722,7 @@ class TDTestCase: tdSql.query("select last_row(ceil(c1-2)) , abs(floor(t1+1)) ,floor(c2-c1) from testdb.stb1 partition by abs(floor(c1)) order by abs(c1)") tdSql.checkRows(11) - # interval + # interval tdSql.query("select last_row(c1) from testdb.stb1 interval(50s) sliding(30s)") tdSql.checkRows(27) @@ -745,13 +745,13 @@ class TDTestCase: tdSql.query('select last_row(c1) from testdb.stb1 where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s)') tdSql.query('select last_row(c1) from (select ts , c1 from testdb.t1 where ts>="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" ) interval(10s) sliding(5s)') - # join + # join tdSql.query("use test") tdSql.query("select last(sub_tb_1.c1), last(sub_tb_2.c2) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts") tdSql.checkCols(2) last_row_result = tdSql.queryResult tdSql.query("select last_row(sub_tb_1.c1), last_row(sub_tb_2.c2) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts") - + for ind , row in enumerate(last_row_result): tdSql.checkData(ind , 0 , row[0]) @@ -769,7 +769,7 @@ class TDTestCase: tdSql.query("select last_row(*), last(*) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts") for ind , row in enumerate(last_row_result): tdSql.checkData(ind , 0 , row[0]) - + def support_super_table_test(self): tdSql.execute(" use testdb ") @@ -783,9 +783,9 @@ class TDTestCase: self.check_result_auto( " select t3,c1 from testdb.stb1 where c1 > 0 order by tbname " , "select t3 ,abs(c1) from testdb.stb1 where c1 > 0 order by tbname" ) self.check_result_auto( " select t4,c1 from testdb.stb1 
where c1 > 0 order by tbname " , "select t4 , abs(c1) from testdb.stb1 where c1 > 0 order by tbname" ) pass - + def basic_query(self): - + tdLog.printNoPrefix("==========step2:test errors ==============") self.test_errors() @@ -828,19 +828,19 @@ class TDTestCase: self.insert_datas_and_check_abs(self.tb_nums,self.row_nums,self.time_step,"'NONE'") self.basic_query() - # cache_last 1 + # cache_last 1 self.prepare_datas("'LAST_ROW'") self.prepare_tag_datas("'LAST_ROW'") self.insert_datas_and_check_abs(self.tb_nums,self.row_nums,self.time_step,"'LAST_ROW'") self.basic_query() - # cache_last 2 + # cache_last 2 self.prepare_datas("'LAST_VALUE'") self.prepare_tag_datas("'LAST_VALUE'") self.insert_datas_and_check_abs(self.tb_nums,self.row_nums,self.time_step,"'LAST_VALUE'") self.basic_query() - # cache_last 3 + # cache_last 3 self.prepare_datas("'BOTH'") self.prepare_tag_datas("'BOTH'") self.insert_datas_and_check_abs(self.tb_nums,self.row_nums,self.time_step,"'BOTH'") From cc3967b9058bc380715c7d35077d1282ce89a4ec Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Mon, 18 Jul 2022 20:12:10 +0800 Subject: [PATCH 34/46] fix: parenthesized expression --- source/dnode/vnode/src/sma/smaRollup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 1f7664c22a..5b1a87be20 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -749,7 +749,7 @@ void tdRemoveRSmaInfoBySuid(SSma *pSma, int64_t suid) { pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)); if (pRSmaInfo) { - if (pRSmaInfo = *(SRSmaInfo **)pRSmaInfo) { + if ((pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) { tdFreeRSmaInfo(pSma, pRSmaInfo, true); } taosHashRemove(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)); From 1e4dae929ce49bf551ce989b4553b062eb1f4086 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Mon, 18 Jul 2022 20:30:04 +0800 Subject: [PATCH 35/46] shell: limit taos_history file size and rows --- tools/shell/src/shellEngine.c | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 0982dbc019..836e8899af 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -737,13 +737,6 @@ int32_t shellDumpResult(TAOS_RES *tres, char *fname, int32_t *error_no, bool ver void shellReadHistory() { SShellHistory *pHistory = &shell.history; - int64_t file_size; - if (taosStatFile(pHistory->file, &file_size, NULL) != 0) { - return; - } else if (file_size > SHELL_MAX_COMMAND_SIZE) { - taosRemoveFile(pHistory->file); - return; - } TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_READ | TD_FILE_STREAM); if (pFile == NULL) return; @@ -763,10 +756,31 @@ void shellReadHistory() { if (line != NULL) taosMemoryFree(line); taosCloseFile(&pFile); + int64_t file_size; + if (taosStatFile(pHistory->file, &file_size, NULL) == 0 && file_size > SHELL_MAX_COMMAND_SIZE) { + fprintf(stdout,"%s(%d) %s %08" PRId64 "\n", __FILE__, __LINE__,__func__,taosGetSelfPthreadId());fflush(stdout); + TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_TRUNC); + if (pFile == NULL) return; + int32_t endIndex = pHistory->hstart; + if (endIndex != 0) { + endIndex = pHistory->hend; + } + for (int32_t i = (pHistory->hend + SHELL_MAX_HISTORY_SIZE - 1) % SHELL_MAX_HISTORY_SIZE; i != endIndex;) { + printf("%d ",i); + taosFprintfFile(pFile, "%s\n", 
pHistory->hist[i]); + i = (i + SHELL_MAX_HISTORY_SIZE - 1) % SHELL_MAX_HISTORY_SIZE; + } + taosFprintfFile(pFile, "%s\n", pHistory->hist[endIndex]); + printf("\n"); + taosFsyncFile(pFile); + taosCloseFile(&pFile); + } + pHistory->hend = pHistory->hstart; } void shellWriteHistory() { SShellHistory *pHistory = &shell.history; + if (pHistory->hend == pHistory->hstart) return; TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_APPEND); if (pFile == NULL) return; From 912652c2e69372b20d81629fbc41e68a443bd23e Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Mon, 18 Jul 2022 20:30:28 +0800 Subject: [PATCH 36/46] shell: limit taos_history file size and rows --- tools/shell/src/shellEngine.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 836e8899af..14cab041f6 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -766,12 +766,10 @@ void shellReadHistory() { endIndex = pHistory->hend; } for (int32_t i = (pHistory->hend + SHELL_MAX_HISTORY_SIZE - 1) % SHELL_MAX_HISTORY_SIZE; i != endIndex;) { - printf("%d ",i); taosFprintfFile(pFile, "%s\n", pHistory->hist[i]); i = (i + SHELL_MAX_HISTORY_SIZE - 1) % SHELL_MAX_HISTORY_SIZE; } taosFprintfFile(pFile, "%s\n", pHistory->hist[endIndex]); - printf("\n"); taosFsyncFile(pFile); taosCloseFile(&pFile); } From 4dad41388140f2976d92f3c7060d3895909dd171 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Mon, 18 Jul 2022 21:06:39 +0800 Subject: [PATCH 37/46] test:modify --- tests/system-test/7-tmq/stbTagFilter.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/system-test/7-tmq/stbTagFilter.py b/tests/system-test/7-tmq/stbTagFilter.py index 82596a32ba..1bb3d24bde 100644 --- a/tests/system-test/7-tmq/stbTagFilter.py +++ b/tests/system-test/7-tmq/stbTagFilter.py @@ -97,18 +97,18 @@ class TDTestCase: paraDict['rowsPerTbl'] = self.rowsPerTbl # update to half tables - paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2) + # paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2) # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], - ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - # queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName']) - queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName']) + queryString = "select ts, c1, c2 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName']) + # queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], 
paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) @@ -242,11 +242,11 @@ class TDTestCase: self.tmqCase1() # self.tmqCase2() - self.prepareTestEnv() - tdLog.printNoPrefix("====================================================================") - tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") - self.snapshot = 1 - self.tmqCase1() + # self.prepareTestEnv() + # tdLog.printNoPrefix("====================================================================") + # tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") + # self.snapshot = 1 + # self.tmqCase1() # self.tmqCase2() From 4c00aec172663e7d7998071aac72348a2032c484 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Mon, 18 Jul 2022 21:26:29 +0800 Subject: [PATCH 38/46] test: add tmq test case --- tests/system-test/7-tmq/tmqCommon.py | 7 +++++++ tests/system-test/7-tmq/tmqUpdate-1ctb.py | 13 +++++++++++-- tests/system-test/fulltest.sh | 2 +- 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/tests/system-test/7-tmq/tmqCommon.py b/tests/system-test/7-tmq/tmqCommon.py index 151558a33a..1f57099e3a 100644 --- a/tests/system-test/7-tmq/tmqCommon.py +++ b/tests/system-test/7-tmq/tmqCommon.py @@ -205,6 +205,13 @@ class TMQCom: tdLog.debug("complete to create %d child tables by %s.%s" %(ctbNum, dbName, stbName)) return + def drop_ctable(self, tsql, dbname=None, count=1, default_ctbname_prefix="ctb",ctbStartIdx=0): + for _ in range(count): + create_ctable_sql = f'drop table {dbname}.{default_ctbname_prefix}{ctbStartIdx};' + ctbStartIdx += 1 + tdLog.info("drop ctb sql: %s"%create_ctable_sql) + tsql.execute(create_ctable_sql) + # schema: (ts timestamp, c1 int, c2 binary(16)) def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=None): tdLog.debug("start to insert data ............") diff --git a/tests/system-test/7-tmq/tmqUpdate-1ctb.py b/tests/system-test/7-tmq/tmqUpdate-1ctb.py index de11f0f9ab..5d891bdedf 100644 --- a/tests/system-test/7-tmq/tmqUpdate-1ctb.py +++ b/tests/system-test/7-tmq/tmqUpdate-1ctb.py @@ -116,7 +116,12 @@ class TDTestCase: # paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl consumerId = 0 - expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 3/2) + + if self.snapshot == 0: + expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/2)) + elif self.snapshot == 1: + expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1)) + topicList = topicFromStb1 ifcheckdata = 1 ifManualCommit = 1 @@ -199,7 +204,11 @@ class TDTestCase: # paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl consumerId = 1 - expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2) + if self.snapshot == 0: + expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2)) + elif self.snapshot == 1: + expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1)) + topicList = topicFromStb1 ifcheckdata = 1 ifManualCommit = 1 diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index d98764edb6..24d1be203d 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -182,7 +182,7 @@ python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py #python3 ./test.py -f 
7-tmq/tmqDnodeRestart.py -#python3 ./test.py -f 7-tmq/tmqUpdate-1ctb.py +python3 ./test.py -f 7-tmq/tmqUpdate-1ctb.py python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb.py #python3 ./test.py -f 7-tmq/tmqDelete-1ctb.py From d3be1e5f4d173311b35a044d7d9355a184d4ddf7 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 19 Jul 2022 09:39:26 +0800 Subject: [PATCH 39/46] fix: avoid rpc mem leak --- source/dnode/mgmt/mgmt_dnode/src/dmHandle.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index 62f1f9e315..27a4056249 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -49,9 +49,9 @@ static void dmProcessStatusRsp(SDnodeMgmt *pMgmt, SRpcMsg *pRsp) { dmUpdateEps(pMgmt->pData, statusRsp.pDnodeEps); } } - rpcFreeCont(pRsp->pCont); tFreeSStatusRsp(&statusRsp); } + rpcFreeCont(pRsp->pCont); } void dmSendStatusReq(SDnodeMgmt *pMgmt) { From 802c86fe178a6aff932059560bacc91dbbf3d36b Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 19 Jul 2022 09:58:51 +0800 Subject: [PATCH 40/46] fix:error when bind ts column in schemaless --- source/client/src/clientSml.c | 2 +- source/libs/parser/src/parInsert.c | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index ab3cacfb49..49f26e2c24 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -2040,7 +2040,7 @@ static int32_t smlParseJSONString(SSmlHandle *info, cJSON *root, SSmlTableInfo * static int32_t smlParseInfluxLine(SSmlHandle *info, const char *sql) { SSmlLineInfo elements = {0}; - uError("SML:0x%" PRIx64 " smlParseInfluxLine sql:%s, hello", info->id, sql); + uDebug("SML:0x%" PRIx64 " smlParseInfluxLine sql:%s, hello", info->id, sql); int ret = smlParseInfluxString(sql, &elements, &info->msgBuf); if (ret != TSDB_CODE_SUCCESS) { diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c index 623b26204d..a6862598ee 100644 --- a/source/libs/parser/src/parInsert.c +++ b/source/libs/parser/src/parInsert.c @@ -2161,7 +2161,7 @@ static void smlDestroyTableHandle(void* pHandle) { tdDestroySVCreateTbReq(&handle->createTblReq); } -static int32_t smlBoundColumnData(SArray* cols, SParsedDataColInfo* pColList, SSchema* pSchema) { +static int32_t smlBoundColumnData(SArray* cols, SParsedDataColInfo* pColList, SSchema* pSchema, bool isTag) { col_id_t nCols = pColList->numOfCols; pColList->numOfBound = 0; @@ -2177,7 +2177,8 @@ static int32_t smlBoundColumnData(SArray* cols, SParsedDataColInfo* pColList, SS SSmlKv* kv = taosArrayGetP(cols, i); SToken sToken = {.n = kv->keyLen, .z = (char*)kv->key}; col_id_t t = lastColIdx + 1; - col_id_t index = (t == 0 ? 0 : findCol(&sToken, t, nCols, pSchema)); + col_id_t index = ((t == 0 && !isTag) ? 
0 : findCol(&sToken, t, nCols, pSchema)); + uDebug("SML, index:%d, t:%d, ncols:%d, kv->name:%s", index, t, nCols, kv->key); if (index < 0 && t > 0) { index = findCol(&sToken, 0, t, pSchema); isOrdered = false; @@ -2312,7 +2313,7 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols smlDestroyTableHandle(&smlHandle->tableExecHandle); // free for each table SSchema* pTagsSchema = getTableTagSchema(pTableMeta); setBoundColumnInfo(&smlHandle->tableExecHandle.tags, pTagsSchema, getNumOfTags(pTableMeta)); - int ret = smlBoundColumnData(tags, &smlHandle->tableExecHandle.tags, pTagsSchema); + int ret = smlBoundColumnData(tags, &smlHandle->tableExecHandle.tags, pTagsSchema, true); if (ret != TSDB_CODE_SUCCESS) { buildInvalidOperationMsg(&pBuf, "bound tags error"); return ret; @@ -2343,7 +2344,7 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols SSchema* pSchema = getTableColumnSchema(pTableMeta); - ret = smlBoundColumnData(colsSchema, &pDataBlock->boundColumnInfo, pSchema); + ret = smlBoundColumnData(colsSchema, &pDataBlock->boundColumnInfo, pSchema, false); if (ret != TSDB_CODE_SUCCESS) { buildInvalidOperationMsg(&pBuf, "bound cols error"); return ret; From 651ae5832e8a68f2333d060b41c3e8ef7b523871 Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Tue, 19 Jul 2022 10:46:47 +0800 Subject: [PATCH 41/46] refactor(sync): delete not exist log --- source/libs/sync/src/syncRaftLog.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c index edc01c9a05..7f905f7cb5 100644 --- a/source/libs/sync/src/syncRaftLog.c +++ b/source/libs/sync/src/syncRaftLog.c @@ -313,7 +313,7 @@ static int32_t raftLogGetEntry(struct SSyncLogStore* pLogStore, SyncIndex index, snprintf(logBuf, sizeof(logBuf), "wal read error, index:%" PRId64 ", err:%d %X, msg:%s, syserr:%d, sysmsg:%s", index, err, err, errStr, sysErr, sysErrStr); if (terrno == TSDB_CODE_WAL_LOG_NOT_EXIST) { - syncNodeEventLog(pData->pSyncNode, logBuf); + // syncNodeEventLog(pData->pSyncNode, logBuf); } else { syncNodeErrorLog(pData->pSyncNode, logBuf); } @@ -499,7 +499,7 @@ SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index) { snprintf(logBuf, sizeof(logBuf), "wal read error, index:%" PRId64 ", err:%d %X, msg:%s, syserr:%d, sysmsg:%s", index, err, err, errStr, sysErr, sysErrStr); if (terrno == TSDB_CODE_WAL_LOG_NOT_EXIST) { - syncNodeEventLog(pData->pSyncNode, logBuf); + // syncNodeEventLog(pData->pSyncNode, logBuf); } else { syncNodeErrorLog(pData->pSyncNode, logBuf); } From e646c7304ca3a0ab2ac82b81b72e1a3699fdf562 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Tue, 19 Jul 2022 11:37:35 +0800 Subject: [PATCH 42/46] test: add debug case for TD-16209 --- tests/{pytest/insert => system-test/1-insert}/insert_drop.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/{pytest/insert => system-test/1-insert}/insert_drop.py (100%) diff --git a/tests/pytest/insert/insert_drop.py b/tests/system-test/1-insert/insert_drop.py similarity index 100% rename from tests/pytest/insert/insert_drop.py rename to tests/system-test/1-insert/insert_drop.py From 83ac069beff626be3bb0cadda37d43cfca571df9 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 19 Jul 2022 12:00:43 +0800 Subject: [PATCH 43/46] enh: If an 'object is creating' error is reported when creating a database, wait for the transaction to end then response --- source/dnode/mnode/impl/inc/mndDef.h | 43 +++++++++- 
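Note for reviewers: the change below lets a duplicate 'create database' request wait for the
in-flight transaction instead of failing with "object is creating". mndProcessCreateDbReq()
now calls mndSetRpcInfoForDbTrans() to append the request's rpc handle to the transaction's
pRpcArray and returns TSDB_CODE_ACTION_IN_PROGRESS; when the transaction finishes,
mndTransSendRpcRsp() answers every queued handle with the final code. The standalone sketch
below is only a simplified model of that deferred-reply pattern; ToyTrans, ToyRpcHandle and
the toy_* functions are illustrative stand-ins, not TDengine APIs.

/* Minimal runnable model of "defer the reply until the in-flight
 * transaction ends". Build with: cc -std=c99 -o toytrans toytrans.c */
#include <stdio.h>
#include <string.h>

#define MAX_PENDING 8

typedef struct {
  int id; /* stands in for SRpcHandleInfo */
} ToyRpcHandle;

typedef struct {
  char         dbname[32];            /* db this transaction is creating */
  int          finished;              /* 0 = running, 1 = done */
  int          npending;              /* queued callers */
  ToyRpcHandle pending[MAX_PENDING];  /* stands in for pTrans->pRpcArray */
} ToyTrans;

/* First request: start the transaction and remember the caller. */
static void toy_start_create_db(ToyTrans *t, const char *db, ToyRpcHandle h) {
  memset(t, 0, sizeof(*t));
  snprintf(t->dbname, sizeof(t->dbname), "%s", db);
  t->pending[t->npending++] = h;
  printf("req %d: start creating db '%s'\n", h.id, db);
}

/* Duplicate request while the same db is still being created:
 * queue the handle and report "in progress" instead of an error. */
static int toy_defer_request(ToyTrans *t, const char *db, ToyRpcHandle h) {
  if (t->finished || strcmp(t->dbname, db) != 0 || t->npending >= MAX_PENDING) {
    return -1; /* nothing to attach to; caller falls back to an error */
  }
  t->pending[t->npending++] = h;
  printf("req %d: db '%s' is creating, reply deferred\n", h.id, db);
  return 0;
}

/* Transaction end: answer every queued caller with the final code. */
static void toy_finish_trans(ToyTrans *t, int code) {
  t->finished = 1;
  for (int i = 0; i < t->npending; i++) {
    printf("req %d: final response %d for db '%s'\n", t->pending[i].id, code, t->dbname);
  }
  t->npending = 0;
}

int main(void) {
  ToyTrans trans;
  toy_start_create_db(&trans, "db1", (ToyRpcHandle){.id = 1});
  toy_defer_request(&trans, "db1", (ToyRpcHandle){.id = 2}); /* deferred, not rejected */
  toy_finish_trans(&trans, 0);                               /* both callers answered */
  return 0;
}

This is also why the single rpcInfo field in STrans becomes the pRpcArray array in the diff:
one finished transaction may have to answer several waiting clients at once.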
source/dnode/mnode/impl/inc/mndPrivilege.h | 40 --------- source/dnode/mnode/impl/inc/mndTrans.h | 2 + source/dnode/mnode/impl/src/mndDb.c | 9 ++ source/dnode/mnode/impl/src/mndTrans.c | 98 +++++++++++++++++----- tests/script/jenkins/basic.txt | 4 +- tests/script/tsim/valgrind/checkError1.sim | 2 +- 7 files changed, 131 insertions(+), 67 deletions(-) diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 5cf6e26ee8..977f33de49 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -35,6 +35,46 @@ extern "C" { #endif +typedef enum { + MND_OPER_CONNECT = 1, + MND_OPER_CREATE_ACCT, + MND_OPER_DROP_ACCT, + MND_OPER_ALTER_ACCT, + MND_OPER_CREATE_USER, + MND_OPER_DROP_USER, + MND_OPER_ALTER_USER, + MND_OPER_CREATE_BNODE, + MND_OPER_DROP_BNODE, + MND_OPER_CREATE_DNODE, + MND_OPER_DROP_DNODE, + MND_OPER_CONFIG_DNODE, + MND_OPER_CREATE_MNODE, + MND_OPER_DROP_MNODE, + MND_OPER_CREATE_QNODE, + MND_OPER_DROP_QNODE, + MND_OPER_CREATE_SNODE, + MND_OPER_DROP_SNODE, + MND_OPER_REDISTRIBUTE_VGROUP, + MND_OPER_MERGE_VGROUP, + MND_OPER_SPLIT_VGROUP, + MND_OPER_BALANCE_VGROUP, + MND_OPER_CREATE_FUNC, + MND_OPER_DROP_FUNC, + MND_OPER_KILL_TRANS, + MND_OPER_KILL_CONN, + MND_OPER_KILL_QUERY, + MND_OPER_CREATE_DB, + MND_OPER_ALTER_DB, + MND_OPER_DROP_DB, + MND_OPER_COMPACT_DB, + MND_OPER_TRIM_DB, + MND_OPER_USE_DB, + MND_OPER_WRITE_DB, + MND_OPER_READ_DB, + MND_OPER_READ_OR_WRITE_DB, + MND_OPER_SHOW_VARIBALES, +} EOperType; + typedef enum { MND_AUTH_ACCT_START = 0, MND_AUTH_ACCT_USER, @@ -109,9 +149,9 @@ typedef struct { ETrnPolicy policy; ETrnConflct conflict; ETrnExec exec; + EOperType oper; int32_t code; int32_t failedTimes; - SRpcHandleInfo rpcInfo; void* rpcRsp; int32_t rpcRspLen; int32_t redoActionPos; @@ -130,6 +170,7 @@ typedef struct { int32_t stopFunc; int32_t paramLen; void* param; + SArray* pRpcArray; } STrans; typedef struct { diff --git a/source/dnode/mnode/impl/inc/mndPrivilege.h b/source/dnode/mnode/impl/inc/mndPrivilege.h index a149c0f0e2..f6002e3be8 100644 --- a/source/dnode/mnode/impl/inc/mndPrivilege.h +++ b/source/dnode/mnode/impl/inc/mndPrivilege.h @@ -22,46 +22,6 @@ extern "C" { #endif -typedef enum { - MND_OPER_CONNECT = 1, - MND_OPER_CREATE_ACCT, - MND_OPER_DROP_ACCT, - MND_OPER_ALTER_ACCT, - MND_OPER_CREATE_USER, - MND_OPER_DROP_USER, - MND_OPER_ALTER_USER, - MND_OPER_CREATE_BNODE, - MND_OPER_DROP_BNODE, - MND_OPER_CREATE_DNODE, - MND_OPER_DROP_DNODE, - MND_OPER_CONFIG_DNODE, - MND_OPER_CREATE_MNODE, - MND_OPER_DROP_MNODE, - MND_OPER_CREATE_QNODE, - MND_OPER_DROP_QNODE, - MND_OPER_CREATE_SNODE, - MND_OPER_DROP_SNODE, - MND_OPER_REDISTRIBUTE_VGROUP, - MND_OPER_MERGE_VGROUP, - MND_OPER_SPLIT_VGROUP, - MND_OPER_BALANCE_VGROUP, - MND_OPER_CREATE_FUNC, - MND_OPER_DROP_FUNC, - MND_OPER_KILL_TRANS, - MND_OPER_KILL_CONN, - MND_OPER_KILL_QUERY, - MND_OPER_CREATE_DB, - MND_OPER_ALTER_DB, - MND_OPER_DROP_DB, - MND_OPER_COMPACT_DB, - MND_OPER_TRIM_DB, - MND_OPER_USE_DB, - MND_OPER_WRITE_DB, - MND_OPER_READ_DB, - MND_OPER_READ_OR_WRITE_DB, - MND_OPER_SHOW_VARIBALES, -} EOperType; - int32_t mndInitPrivilege(SMnode *pMnode); void mndCleanupPrivilege(SMnode *pMnode); diff --git a/source/dnode/mnode/impl/inc/mndTrans.h b/source/dnode/mnode/impl/inc/mndTrans.h index 6e367fbe24..faf656a251 100644 --- a/source/dnode/mnode/impl/inc/mndTrans.h +++ b/source/dnode/mnode/impl/inc/mndTrans.h @@ -73,12 +73,14 @@ void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen); void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, 
ETrnFunc stopFunc, void *param, int32_t paramLen); void mndTransSetDbName(STrans *pTrans, const char *dbname1, const char *dbname2); void mndTransSetSerial(STrans *pTrans); +void mndTransSetOper(STrans *pTrans, EOperType oper); int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans); int32_t mndTransProcessRsp(SRpcMsg *pRsp); void mndTransPullup(SMnode *pMnode); int32_t mndKillTrans(SMnode *pMnode, STrans *pTrans); void mndTransExecute(SMnode *pMnode, STrans *pTrans); +int32_t mndSetRpcInfoForDbTrans(SMnode *pMnode, SRpcMsg *pMsg, EOperType oper, const char *dbname); #ifdef __cplusplus } diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index b4b1b383e7..064ef9b40a 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -487,6 +487,7 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate, mDebug("trans:%d, used to create db:%s", pTrans->id, pCreate->db); mndTransSetDbName(pTrans, dbObj.name, NULL); + mndTransSetOper(pTrans, MND_OPER_CREATE_DB); if (mndSetCreateDbRedoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER; if (mndSetCreateDbUndoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER; if (mndSetCreateDbCommitLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER; @@ -534,6 +535,14 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) { terrno = TSDB_CODE_MND_DB_ALREADY_EXIST; goto _OVER; } + } else if (terrno == TSDB_CODE_SDB_OBJ_CREATING) { + if (mndSetRpcInfoForDbTrans(pMnode, pReq, MND_OPER_CREATE_DB, createReq.db) == 0) { + mDebug("db:%s, is creating and response after trans finished", createReq.db); + code = TSDB_CODE_ACTION_IN_PROGRESS; + goto _OVER; + } else { + goto _OVER; + } } else if (terrno != TSDB_CODE_MND_DB_NOT_EXIST) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index ea92b2f0e6..679f8085bd 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -122,6 +122,10 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) { SDB_SET_INT8(pRaw, dataPos, pTrans->policy, _OVER) SDB_SET_INT8(pRaw, dataPos, pTrans->conflict, _OVER) SDB_SET_INT8(pRaw, dataPos, pTrans->exec, _OVER) + SDB_SET_INT8(pRaw, dataPos, pTrans->oper, _OVER) + SDB_SET_INT8(pRaw, dataPos, 0, _OVER) + SDB_SET_INT8(pRaw, dataPos, 0, _OVER) + SDB_SET_INT8(pRaw, dataPos, 0, _OVER) SDB_SET_INT64(pRaw, dataPos, pTrans->createdTime, _OVER) SDB_SET_BINARY(pRaw, dataPos, pTrans->dbname1, TSDB_DB_FNAME_LEN, _OVER) SDB_SET_BINARY(pRaw, dataPos, pTrans->dbname2, TSDB_DB_FNAME_LEN, _OVER) @@ -269,15 +273,22 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) { int8_t policy = 0; int8_t conflict = 0; int8_t exec = 0; + int8_t oper = 0; + int8_t reserved = 0; int8_t actionType = 0; SDB_GET_INT8(pRaw, dataPos, &stage, _OVER) SDB_GET_INT8(pRaw, dataPos, &policy, _OVER) SDB_GET_INT8(pRaw, dataPos, &conflict, _OVER) SDB_GET_INT8(pRaw, dataPos, &exec, _OVER) + SDB_GET_INT8(pRaw, dataPos, &oper, _OVER) + SDB_GET_INT8(pRaw, dataPos, &reserved, _OVER) + SDB_GET_INT8(pRaw, dataPos, &reserved, _OVER) + SDB_GET_INT8(pRaw, dataPos, &reserved, _OVER) pTrans->stage = stage; pTrans->policy = policy; pTrans->conflict = conflict; pTrans->exec = exec; + pTrans->oper = oper; SDB_GET_INT64(pRaw, dataPos, &pTrans->createdTime, _OVER) SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname1, TSDB_DB_FNAME_LEN, _OVER) SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname2, TSDB_DB_FNAME_LEN, _OVER) @@ -495,6 +506,10 @@ static void 
mndTransDropData(STrans *pTrans) { mndTransDropActions(pTrans->commitActions); pTrans->commitActions = NULL; } + if (pTrans->pRpcArray != NULL) { + taosArrayDestroy(pTrans->pRpcArray); + pTrans->pRpcArray = NULL; + } if (pTrans->rpcRsp != NULL) { taosMemoryFree(pTrans->rpcRsp); pTrans->rpcRsp = NULL; @@ -585,14 +600,18 @@ STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict, pTrans->redoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction)); pTrans->undoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction)); pTrans->commitActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction)); + pTrans->pRpcArray = taosArrayInit(1, sizeof(SRpcHandleInfo)); - if (pTrans->redoActions == NULL || pTrans->undoActions == NULL || pTrans->commitActions == NULL) { + if (pTrans->redoActions == NULL || pTrans->undoActions == NULL || pTrans->commitActions == NULL || + pTrans->pRpcArray == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; mError("failed to create transaction since %s", terrstr()); return NULL; } - if (pReq != NULL) pTrans->rpcInfo = pReq->info; + if (pReq != NULL) { + taosArrayPush(pTrans->pRpcArray, &pReq->info); + } mTrace("trans:%d, local object is created, data:%p", pTrans->id, pTrans); return pTrans; } @@ -677,6 +696,31 @@ void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void * pTrans->paramLen = paramLen; } +int32_t mndSetRpcInfoForDbTrans(SMnode *pMnode, SRpcMsg *pMsg, EOperType oper, const char *dbname) { + STrans *pTrans = NULL; + void *pIter = NULL; + int32_t code = -1; + + while (1) { + pIter = sdbFetch(pMnode->pSdb, SDB_TRANS, pIter, (void **)&pTrans); + if (pIter == NULL) break; + + if (pTrans->oper == oper) { + if (strcasecmp(dbname, pTrans->dbname1) == 0) { + mDebug("trans:%d, db:%s oper:%d matched with input", pTrans->id, dbname, oper); + if (taosArrayPush(pTrans->pRpcArray, &pMsg->info) != NULL) { + code = 0; + } + sdbRelease(pMnode->pSdb, pTrans); + break; + } + } + + sdbRelease(pMnode->pSdb, pTrans); + } + return code; +} + void mndTransSetDbName(STrans *pTrans, const char *dbname1, const char *dbname2) { if (dbname1 != NULL) { memcpy(pTrans->dbname1, dbname1, TSDB_DB_FNAME_LEN); @@ -688,6 +732,8 @@ void mndTransSetDbName(STrans *pTrans, const char *dbname1, const char *dbname2) void mndTransSetSerial(STrans *pTrans) { pTrans->exec = TRN_EXEC_SERIAL; } +void mndTransSetOper(STrans *pTrans, EOperType oper) { pTrans->oper = oper; } + static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) { SSdbRaw *pRaw = mndTransActionEncode(pTrans); if (pRaw == NULL) { @@ -711,7 +757,7 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) { static bool mndCheckDbConflict(const char *db, STrans *pTrans) { if (db[0] == 0) return false; - if (strcmp(db, pTrans->dbname1) == 0 || strcmp(db, pTrans->dbname2) == 0) return true; + if (strcasecmp(db, pTrans->dbname1) == 0 || strcasecmp(db, pTrans->dbname2) == 0) return true; return false; } @@ -784,9 +830,10 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) { return -1; } - pNew->rpcInfo = pTrans->rpcInfo; + pNew->pRpcArray = pTrans->pRpcArray; pNew->rpcRsp = pTrans->rpcRsp; pNew->rpcRspLen = pTrans->rpcRspLen; + pTrans->pRpcArray = NULL; pTrans->rpcRsp = NULL; pTrans->rpcRspLen = 0; @@ -835,29 +882,34 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) { } } - if (sendRsp && pTrans->rpcInfo.handle != NULL) { - mDebug("trans:%d, send rsp, code:0x%x stage:%s app:%p", pTrans->id, code, mndTransStr(pTrans->stage), - pTrans->rpcInfo.ahandle); - if (code == 
TSDB_CODE_RPC_NETWORK_UNAVAIL) { - code = TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL; - } - SRpcMsg rspMsg = {.code = code, .info = pTrans->rpcInfo}; + if (!sendRsp) return; - if (pTrans->rpcRspLen != 0) { - void *rpcCont = rpcMallocCont(pTrans->rpcRspLen); - if (rpcCont != NULL) { - memcpy(rpcCont, pTrans->rpcRsp, pTrans->rpcRspLen); - rspMsg.pCont = rpcCont; - rspMsg.contLen = pTrans->rpcRspLen; + int32_t size = taosArrayGetSize(pTrans->pRpcArray); + if (size <= 0) return; + + for (int32_t i = 0; i < size; ++i) { + SRpcHandleInfo *pInfo = taosArrayGet(pTrans->pRpcArray, i); + if (pInfo->handle != NULL) { + mDebug("trans:%d, send rsp, code:0x%x stage:%s app:%p", pTrans->id, code, mndTransStr(pTrans->stage), + pInfo->ahandle); + if (code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { + code = TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL; } - taosMemoryFree(pTrans->rpcRsp); - } + SRpcMsg rspMsg = {.code = code, .info = *pInfo}; - tmsgSendRsp(&rspMsg); - pTrans->rpcInfo.handle = NULL; - pTrans->rpcRsp = NULL; - pTrans->rpcRspLen = 0; + if (pTrans->rpcRspLen != 0) { + void *rpcCont = rpcMallocCont(pTrans->rpcRspLen); + if (rpcCont != NULL) { + memcpy(rpcCont, pTrans->rpcRsp, pTrans->rpcRspLen); + rspMsg.pCont = rpcCont; + rspMsg.contLen = pTrans->rpcRspLen; + } + } + + tmsgSendRsp(&rspMsg); + } } + taosArrayClear(pTrans->pRpcArray); } int32_t mndTransProcessRsp(SRpcMsg *pRsp) { diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index bff6177ad2..1b228925b1 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -248,8 +248,8 @@ ./test.sh -f tsim/stream/sliding.sim # ---- transaction - ./test.sh -f tsim/trans/lossdata1.sim - ./test.sh -f tsim/trans/create_db.sim +./test.sh -f tsim/trans/lossdata1.sim +./test.sh -f tsim/trans/create_db.sim # ---- tmq ./test.sh -f tsim/tmq/basic1.sim diff --git a/tests/script/tsim/valgrind/checkError1.sim b/tests/script/tsim/valgrind/checkError1.sim index 76a29ee62f..59eede7c91 100644 --- a/tests/script/tsim/valgrind/checkError1.sim +++ b/tests/script/tsim/valgrind/checkError1.sim @@ -152,7 +152,7 @@ endi system_content sh/checkValgrind.sh -n dnode2 print cmd return result ----> [ $system_content ] -if $system_content > 4 then +if $system_content > 0 then return -1 endi From ce999711ec00bd4205fd319cd688e68ac98f6f8c Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 19 Jul 2022 13:07:24 +0800 Subject: [PATCH 44/46] test: add case --- tests/script/tmp/data.sim | 125 ++++++++++++++++++++-- tests/script/tmp/monitor.sim | 21 +--- tests/script/tsim/db/alter_replica_31.sim | 9 -- 3 files changed, 121 insertions(+), 34 deletions(-) diff --git a/tests/script/tmp/data.sim b/tests/script/tmp/data.sim index 4e714c01ee..dcfa02e0a7 100644 --- a/tests/script/tmp/data.sim +++ b/tests/script/tmp/data.sim @@ -1,18 +1,125 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c telemetryReporting -v 1 -system sh/cfg.sh -n dnode1 -c telemetryInterval -v 1 -system sh/cfg.sh -n dnode1 -c telemetryServer -v localhost -system sh/cfg.sh -n dnode1 -c telemetryPort -v 80 - -return +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start sql connect -sql create database db -sql create table db.tb (ts timestamp, i int) -sql insert into db.tb values(now, 1) +print =============== step1: create dnodes +sql create dnode 
$hostname port 7200 +sql create dnode $hostname port 7300 +sql create dnode $hostname port 7400 +$x = 0 +step1: + $x = $x + 1 + sleep 1000 + if $x == 10 then + print ====> dnode not ready! + return -1 + endi +sql show dnodes +print ===> rows: $rows +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +print ===> $data20 $data21 $data22 $data23 $data24 $data25 +print ===> $data30 $data31 $data32 $data33 $data24 $data35 +if $rows != 4 then + return -1 +endi +if $data(1)[4] != ready then + goto step1 +endi +if $data(2)[4] != ready then + goto step1 +endi +if $data(3)[4] != ready then + goto step1 +endi +if $data(4)[4] != ready then + goto step1 +endi + +print =============== step2: create database +sql create database db vgroups 1 replica 3 +sql show databases +if $rows != 3 then + return -1 +endi +if $data(db)[4] != 3 then + return -1 +endi + +sql show dnodes +if $data(2)[2] != 1 then + return -1 +endi +if $data(3)[2] != 1 then + return -1 +endi +if $data(4)[2] != 1 then + return -1 +endi + +# vnodes +sql show dnodes +if $data(2)[2] != 1 then + return -1 +endi +if $data(3)[2] != 1 then + return -1 +endi +if $data(4)[2] != 1 then + return -1 +endi + +# v1_dnode +$hasleader = 0 +$x = 0 +step2: + $x = $x + 1 + sleep 1000 + if $x == 20 then + print ====> dnode not ready! + return -1 + endi +sql show db.vgroups +print ===> $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 +if $data(2)[3] != 2 then + return -1 +endi +if $data(2)[5] != 3 then + return -1 +endi +if $data(2)[7] != 4 then + return -1 +endi +if $data(2)[4] == leader then + $hasleader = 1 +endi +if $data(2)[6] == leader then + $hasleader = 1 +endi +if $data(2)[8] == leader then + $hasleader = 1 +endi +if $hasleader != 1 then + goto step2 +endi + +sql use db; +sql create table stb (ts timestamp, c int) tags (t int); +sql create table t0 using stb tags (0); +sql insert into t0 values(now, 1); +sql show db.stables; +sql show db.tables; +sql show db.vgroups; + +return print ======== start back run_back tmp/back.sim diff --git a/tests/script/tmp/monitor.sim b/tests/script/tmp/monitor.sim index 036d4cc6db..ec728fe733 100644 --- a/tests/script/tmp/monitor.sim +++ b/tests/script/tmp/monitor.sim @@ -6,32 +6,21 @@ system sh/cfg.sh -n dnode1 -c monitorInterval -v 1 system sh/cfg.sh -n dnode1 -c monitorComp -v 1 #system sh/cfg.sh -n dnode1 -c supportVnodes -v 128 +#system sh/cfg.sh -n dnode1 -c telemetryReporting -v 1 +#system sh/cfg.sh -n dnode1 -c telemetryInterval -v 1 +#system sh/cfg.sh -n dnode1 -c telemetryServer -v localhost +#system sh/cfg.sh -n dnode1 -c telemetryPort -v 80 + system sh/exec.sh -n dnode1 -s start sql connect print =============== show dnodes -sleep 2000 sql create database db vgroups 2; sql use db; sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd"; -sleep 2000 print =============== create drop qnode 1 sql create qnode on dnode 1 sql create snode on dnode 1 sql create bnode on dnode 1 -return -print =============== restart -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode1 -s start - - -return -system sh/deploy.sh -n dnode2 -i 2 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s start - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT diff --git a/tests/script/tsim/db/alter_replica_31.sim b/tests/script/tsim/db/alter_replica_31.sim index 
1823f182c9..e9a295820c 100644 --- a/tests/script/tsim/db/alter_replica_31.sim +++ b/tests/script/tsim/db/alter_replica_31.sim @@ -111,15 +111,6 @@ if $hasleader != 1 then goto step2 endi -# sql use db; -# sql create table stb (ts timestamp, c int) tags (t int); -# sql create table t0 using stb tags (0); -# sql insert into t0 values(now, 1); -# sql show db.stables; -# sql show db.tables; -# sql show db.vgroups; -return - sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd" sql create table db.ctb using db.stb tags(101, "102") sql insert into db.ctb values(now, 1, "2") From 007c5eb0a510e1cd747a224087a43e536e609f61 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 19 Jul 2022 13:27:00 +0800 Subject: [PATCH 45/46] chore: libtaos ws submodule for3.0 (#15105) * chore: add libtaos-ws for 3.0 * chore: update taosws-rs * chore: add libtaosws to install/remove script * chore: update taosws-rs * chore: update taosws-rs * chore: update taos-tools, taosws-rs for 3.0 * fix: packaging/tools/make_install.sh for 3.0 * chore: update taos-tools * chore: fix release script for 3.0 * chore: update taosws-rs for 3.0 * chore: add taows-rs submodule for 3.0 * chore: update taosws-rs for 3.0 * fix: install script support taosws for 3.0 --- packaging/deb/makedeb.sh | 2 +- packaging/rpm/tdengine.spec | 2 +- packaging/tools/install.sh | 4 ++-- packaging/tools/install_client.sh | 12 ++++++++++++ packaging/tools/makeclient.sh | 7 +++++++ packaging/tools/makepkg.sh | 4 ++-- packaging/tools/post.sh | 11 +++++++++++ 7 files changed, 36 insertions(+), 6 deletions(-) diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 1a4131ec6f..f09d46da59 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -73,7 +73,7 @@ cp ${compile_dir}/../include/client/taos.h ${pkg_dir}${install_home_pat cp ${compile_dir}/../include/common/taosdef.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../include/util/taoserror.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../include/libs/function/taosudf.h ${pkg_dir}${install_home_path}/include -cp ${compile_dir}/../src/inc/taosws.h ${pkg_dir}${install_home_path}/include ||: +cp ${compile_dir}/build/include/taosws.h ${pkg_dir}${install_home_path}/include ||: cp -r ${top_dir}/examples/* ${pkg_dir}${install_home_path}/examples #cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector #cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index 02e7e9b5e5..bf0921f18a 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -80,7 +80,7 @@ cp %{_compiledir}/../include/client/taos.h %{buildroot}%{homepath}/incl cp %{_compiledir}/../include/common/taosdef.h %{buildroot}%{homepath}/include cp %{_compiledir}/../include/util/taoserror.h %{buildroot}%{homepath}/include cp %{_compiledir}/../include/libs/function/taosudf.h %{buildroot}%{homepath}/include -cp %{_compiledir}/../src/inc/taosws.h %{buildroot}%{homepath}/include ||: +cp %{_compiledir}/build/include/taosws.h %{buildroot}%{homepath}/include ||: #cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector #cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector #cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index f409834091..72ac76ec8e 100755 --- a/packaging/tools/install.sh 
+++ b/packaging/tools/install.sh @@ -229,13 +229,13 @@ function install_lib() { ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - ${csudo}ln -s ${lib_link_dir}/libtaosws.so ${lib_link_dir}/libtaosws.so || : + [ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -s ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so || : if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : ${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : - ${csudo}ln -s ${lib64_link_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || : + ${csudo}ln -s ${install_main_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || : fi ${csudo}ldconfig diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index 0c86877c99..a205206dda 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -116,6 +116,7 @@ function install_bin() { function clean_lib() { sudo rm -f /usr/lib/libtaos.* || : + sudo rm -f /usr/lib/libtaosws.* || : sudo rm -rf ${lib_dir} || : } @@ -123,6 +124,9 @@ function install_lib() { # Remove links ${csudo}rm -f ${lib_link_dir}/libtaos.* || : ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : + + ${csudo}rm -f ${lib_link_dir}/libtaosws.* || : + ${csudo}rm -f ${lib64_link_dir}/libtaosws.* || : #${csudo}rm -rf ${v15_java_app_dir} || : ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* @@ -131,13 +135,19 @@ function install_lib() { ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + [ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -s ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so + if [ -d "${lib64_link_dir}" ]; then ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : ${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + + [ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -s ${install_main_dir}/driver/libtaosws.so ${lib64_link_dir}/libtaosws.so || : fi else ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib ${csudo}ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib + + [ -f ${install_main_dir}/driver/libtaosws.dylib ] && ${csudo}ln -s ${install_main_dir}/driver/libtaosws.dylib ${lib_link_dir}/libtaosws.dylib fi if [ "$osType" != "Darwin" ]; then @@ -154,6 +164,8 @@ function install_header() { ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h ${csudo}ln -s ${install_main_dir}/include/taosudf.h ${inc_link_dir}/taosudf.h + + [ -f ${install_main_dir}/include/taosws.h ] && ${csudo}ln -s ${install_main_dir}/include/taosws.h ${inc_link_dir}/taos.h } function install_jemalloc() { diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index ca3a9a19be..ae846ee493 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -57,12 +57,16 @@ if [ "$osType" != "Darwin" ]; then ${script_dir}/get_client.sh" fi lib_files="${build_dir}/lib/libtaos.so.${version}" + wslib_files="${build_dir}/lib/libtaosws.so" else bin_files="${build_dir}/bin/${clientName} 
${script_dir}/remove_client.sh" lib_files="${build_dir}/lib/libtaos.${version}.dylib" + wslib_files="${build_dir}/lib/libtaosws.dylib" fi header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h ${code_dir}/include/libs/function/taosudf.h" +wsheader_files="${build_dir}/include/taosws.h" + if [ "$dbName" != "taos" ]; then cfg_dir="${top_dir}/../enterprise/packaging/cfg" else @@ -74,6 +78,8 @@ install_files="${script_dir}/install_client.sh" # make directories. mkdir -p ${install_dir} mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc +[ -f ${wsheader_files} ] && cp ${wsheader_files} ${install_dir}/inc + mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile} mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* @@ -187,6 +193,7 @@ cp ${lib_files} ${install_dir}/driver # Copy connector connector_dir="${code_dir}/connector" mkdir -p ${install_dir}/connector +[ -f ${wslib_files} ] && cp ${wslib_files} ${install_dir}/driver if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [ "$osType" != "Darwin" ]; then diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index c37397005c..f34adca9f8 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -96,7 +96,7 @@ lib_files="${build_dir}/lib/libtaos.so.${version}" wslib_files="${build_dir}/lib/libtaosws.so." header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h ${code_dir}/include/libs/function/taosudf.h" -wsheader_files="${code_dir}/inc/taosws.h" +wsheader_files="${build_dir}/include/taosws.h" if [ "$dbName" != "taos" ]; then cfg_dir="${top_dir}/../enterprise/packaging/cfg" @@ -114,7 +114,7 @@ init_file_rpm=${script_dir}/../rpm/taosd mkdir -p ${install_dir} mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -${wsheader_files} ${install_dir}/inc || : +[ -f ${wsheader_files} ] && cp ${wsheader_files} ${install_dir}/inc || : mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile} diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index 2d744233ba..28163775a1 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -82,22 +82,33 @@ function kill_taosd() { function install_include() { ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h ${inc_link_dir}/taosudf.h || : + ${csudo}rm -f ${inc_link_dir}/taosws.h + ${csudo}ln -s ${inc_dir}/taos.h ${inc_link_dir}/taos.h ${csudo}ln -s ${inc_dir}/taosdef.h ${inc_link_dir}/taosdef.h ${csudo}ln -s ${inc_dir}/taoserror.h ${inc_link_dir}/taoserror.h ${csudo}ln -s ${inc_dir}/taosudf.h ${inc_link_dir}/taosudf.h + + [ -f ${inc_dir}/taosws.h ] && ${csudo}ln -s ${inc_dir}/taosudf.h ${inc_link_dir}/taosudf.h ||: } function install_lib() { ${csudo}rm -f ${lib_link_dir}/libtaos* || : ${csudo}rm -f ${lib64_link_dir}/libtaos* || : + ${csudo}rm -f ${lib_link_dir}/libtaosws* || : + ${csudo}rm -f ${lib64_link_dir}/libtaosws* || : + ${csudo}ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + [ -f ${lib_dir}/libtaosws.so ]${csudo}ln -s ${lib_dir}/libtaosws.so ${lib_link_dir}/libtaosws.so + if [[ -d ${lib64_link_dir} && ! 
-e ${lib64_link_dir}/libtaos.so ]]; then ${csudo}ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : ${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + + [ -ff ${lib_dir}/libtaosws.so ] && ${csudo}ln -s ${lib_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || : fi ${csudo}ldconfig From 5e754a7662e8efa77ee806eafb9b8d808e800095 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 19 Jul 2022 14:00:45 +0800 Subject: [PATCH 46/46] fix(query): add check for deleting record --- source/dnode/vnode/src/tsdb/tsdbRead.c | 69 +++++++++++++++++++------- 1 file changed, 51 insertions(+), 18 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 0b05bb7224..2801e09f06 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -1453,38 +1453,71 @@ static bool keyOverlapFileBlock(TSDBKEY key, SBlock* pBlock, SVersionRange* pVer (pBlock->minVersion <= pVerRange->maxVer); } -static bool overlapWithDelSkyline(STableBlockScanInfo* pBlockScanInfo, const SBlock* pBlock, int32_t order) { - if (pBlockScanInfo->delSkyline == NULL) { - return false; - } - TSDBKEY* pFirst = taosArrayGet(pBlockScanInfo->delSkyline, 0); - TSDBKEY* pLast = taosArrayGetLast(pBlockScanInfo->delSkyline); - - // ts is not overlap - if (pBlock->minKey.ts > pLast->ts || pBlock->maxKey.ts < pFirst->ts) { - return false; - } - - int32_t step = ASCENDING_TRAVERSE(order) ? 1 : -1; - - // version is not overlap +static bool doCheckforDatablockOverlap(STableBlockScanInfo* pBlockScanInfo, const SBlock* pBlock) { size_t num = taosArrayGetSize(pBlockScanInfo->delSkyline); - for (int32_t i = pBlockScanInfo->fileDelIndex; i < num; i += step) { + + for (int32_t i = pBlockScanInfo->fileDelIndex; i < num; i += 1) { TSDBKEY* p = taosArrayGet(pBlockScanInfo->delSkyline, i); if (p->ts >= pBlock->minKey.ts && p->ts <= pBlock->maxKey.ts) { if (p->version >= pBlock->minVersion) { return true; } - } else if (p->ts > pBlock->maxKey.ts) { + } else if (p->ts < pBlock->minKey.ts) { // p->ts < pBlock->minKey.ts + if (p->version >= pBlock->minVersion) { + if (i < num - 1) { + TSDBKEY* pnext = taosArrayGet(pBlockScanInfo->delSkyline, i + 1); + if (i + 1 == num - 1) { // pnext is the last point + if (pnext->ts >= pBlock->minKey.ts) { + return true; + } + } else { + if (pnext->ts >= pBlock->minKey.ts && pnext->version >= pBlock->minVersion) { + return true; + } + } + } else { // it must be the last point + ASSERT(p->version == 0); + } + } + } else { // (p->ts > pBlock->maxKey.ts) { return false; } } - ASSERT(0); return false; } +static bool overlapWithDelSkyline(STableBlockScanInfo* pBlockScanInfo, const SBlock* pBlock, int32_t order) { + if (pBlockScanInfo->delSkyline == NULL) { + return false; + } + + // ts is not overlap + TSDBKEY* pFirst = taosArrayGet(pBlockScanInfo->delSkyline, 0); + TSDBKEY* pLast = taosArrayGetLast(pBlockScanInfo->delSkyline); + if (pBlock->minKey.ts > pLast->ts || pBlock->maxKey.ts < pFirst->ts) { + return false; + } + + // version is not overlap + if (ASCENDING_TRAVERSE(order)) { + return doCheckforDatablockOverlap(pBlockScanInfo, pBlock); + } else { + int32_t index = pBlockScanInfo->fileDelIndex; + while(1) { + TSDBKEY* p = taosArrayGet(pBlockScanInfo->delSkyline, index); + if (p->ts > pBlock->minKey.ts && index > 0) { + index -= 1; + } else { // find the first point that is smaller than the minKey.ts of dataBlock. 
+ break; + } + } + + return doCheckforDatablockOverlap(pBlockScanInfo, pBlock); + } +} + // 1. the version of all rows should be less than the endVersion // 2. current block should not overlap with next neighbor block // 3. current timestamp should not be overlap with each other
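// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch above): a minimal, self-contained
// model of the delete-skyline overlap test that doCheckforDatablockOverlap()
// performs in this hunk. It assumes the skyline is a ts-ascending array of
// (ts, version) points and it only covers the simple case of a delete point
// whose timestamp falls inside the block's time range; the real code in
// tsdbRead.c additionally reasons about points that precede the block. The
// type SSketchKey and the function sketchBlockOverlapsDeletes are hypothetical
// names used only for this sketch.
#include <stdbool.h>
#include <stdint.h>

typedef struct {
  int64_t ts;       // timestamp of a delete-skyline point
  int64_t version;  // version up to which rows at this point are deleted
} SSketchKey;

static bool sketchBlockOverlapsDeletes(const SSketchKey *skyline, int32_t num,
                                       int64_t blockMinTs, int64_t blockMaxTs,
                                       int64_t blockMinVer) {
  for (int32_t i = 0; i < num; ++i) {
    if (skyline[i].ts > blockMaxTs) {
      break;  // skyline is sorted by ts, so later points cannot overlap
    }
    // a delete point inside the block's time range invalidates rows only if
    // its version is not lower than the smallest version stored in the block
    if (skyline[i].ts >= blockMinTs && skyline[i].version >= blockMinVer) {
      return true;
    }
  }
  return false;
}

// Example: with a skyline of {(ts=100, version=5)}, a block spanning
// ts [90, 110] with minVersion 3 overlaps (5 >= 3), while the same block
// with minVersion 7 does not, so it could be read without merging deletes.
// ---------------------------------------------------------------------------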