From cdf8ea13893cd43deb096b6b13fbc8bc138836b6 Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Sun, 30 Oct 2022 18:55:22 +0800 Subject: [PATCH 01/69] test: enh crash_gen function and let it run --- tests/pytest/auto_crash_gen.py | 365 ++++++++++++++++ tests/pytest/auto_crash_gen_valgrind.py | 399 ++++++++++++++++++ .../pytest/auto_crash_gen_valgrind_cluster.py | 399 ++++++++++++++++++ tests/pytest/auto_run_regular.sh | 11 + tests/pytest/auto_run_valgrind.sh | 11 + tests/pytest/auto_run_valgrind_cluster.sh | 11 + 6 files changed, 1196 insertions(+) create mode 100755 tests/pytest/auto_crash_gen.py create mode 100755 tests/pytest/auto_crash_gen_valgrind.py create mode 100755 tests/pytest/auto_crash_gen_valgrind_cluster.py create mode 100755 tests/pytest/auto_run_regular.sh create mode 100755 tests/pytest/auto_run_valgrind.sh create mode 100755 tests/pytest/auto_run_valgrind_cluster.sh diff --git a/tests/pytest/auto_crash_gen.py b/tests/pytest/auto_crash_gen.py new file mode 100755 index 0000000000..02cca810a7 --- /dev/null +++ b/tests/pytest/auto_crash_gen.py @@ -0,0 +1,365 @@ +import os +import socket +import requests + +# -*- coding: utf-8 -*- +import os ,sys +import random +import argparse +import subprocess +import time +import platform + +# valgrind mode ? +valgrind_mode = False + +msg_dict = {0:"success" , 1:"failed" , 2:"other errors" , 3:"crash occured" , 4:"Invalid read/write" , 5:"memory leak" } + +# formal +hostname = socket.gethostname() + +group_url = 'https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9' + +def get_msg(text): + return { + "msg_type": "post", + "content": { + "post": { + "zh_cn": { + "title": "Crash_gen Monitor", + "content": [ + [{ + "tag": "text", + "text": text + } + ]] + } + } + } + } + + +def send_msg(json): + headers = { + 'Content-Type': 'application/json' + } + + req = requests.post(url=group_url, headers=headers, json=json) + inf = req.json() + if "StatusCode" in inf and inf["StatusCode"] == 0: + pass + else: + print(inf) + + +# set path about run instance + +core_path = subprocess.Popen("cat /proc/sys/kernel/core_pattern", shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") +core_path = "/".join(core_path.split("/")[:-1]) +print(" ======= core path is %s ======== " %core_path) +if not os.path.exists(core_path): + os.mkdir(core_path) + +base_dir = os.path.dirname(os.path.realpath(__file__)) +if base_dir.find("community")>0: + repo = "community" +elif base_dir.find("TDengine")>0: + repo = "TDengine" +else: + repo ="TDengine" +print("base_dir:",base_dir) +home_dir = base_dir[:base_dir.find(repo)] +print("home_dir:",home_dir) +run_dir = os.path.join(home_dir,'run_dir') +run_dir = os.path.abspath(run_dir) +print("run dir is *** :",run_dir) +if not os.path.exists(run_dir): + os.mkdir(run_dir) +run_log_file = run_dir+'/crash_gen_run.log' +crash_gen_cmds_file = os.path.join(run_dir, 'crash_gen_cmds.sh') +exit_status_logs = os.path.join(run_dir, 'crash_exit.log') + +def get_path(): + buildPath='' + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + +# generate crash_gen start script randomly + +def 
random_args(args_list): + nums_args_list = ["--max-dbs","--num-replicas","--num-dnodes","--max-steps","--num-threads",] # record int type arguments + bools_args_list = ["--auto-start-service" , "--debug","--run-tdengine","--ignore-errors","--track-memory-leaks","--larger-data","--mix-oos-data","--dynamic-db-table-names", + "--per-thread-db-connection","--record-ops","--verify-data","--use-shadow-db","--continue-on-exception" + ] # record bool type arguments + strs_args_list = ["--connector-type"] # record str type arguments + + args_list["--auto-start-service"]= False + args_list["--continue-on-exception"]=True + # connect_types=['native','rest','mixed'] # restful interface has change ,we should trans dbnames to connection or change sql such as "db.test" + connect_types=['native'] + # args_list["--connector-type"]=connect_types[random.randint(0,2)] + args_list["--connector-type"]= connect_types[0] + args_list["--max-dbs"]= random.randint(1,10) + + # dnodes = [1,3] # set single dnodes; + + # args_list["--num-dnodes"]= random.sample(dnodes,1)[0] + # args_list["--num-replicas"]= random.randint(1,args_list["--num-dnodes"]) + args_list["--debug"]=False + args_list["--per-thread-db-connection"]=True + args_list["--track-memory-leaks"]=False + + args_list["--max-steps"]=random.randint(500,2000) + + # args_list["--ignore-errors"]=[] ## can add error codes for detail + + + args_list["--run-tdengine"]= False + args_list["--use-shadow-db"]= False + args_list["--dynamic-db-table-names"]= True + args_list["--verify-data"]= False + args_list["--record-ops"] = False + + for key in bools_args_list: + set_bool_value = [True,False] + if key == "--auto-start-service" : + continue + elif key =="--run-tdengine": + continue + elif key == "--ignore-errors": + continue + elif key == "--debug": + continue + elif key == "--per-thread-db-connection": + continue + elif key == "--continue-on-exception": + continue + elif key == "--use-shadow-db": + continue + elif key =="--track-memory-leaks": + continue + elif key == "--dynamic-db-table-names": + continue + elif key == "--verify-data": + continue + elif key == "--record-ops": + continue + else: + args_list[key]=set_bool_value[random.randint(0,1)] + + if args_list["--larger-data"]: + threads = [16,32] + else: + threads = [32,64,128,256] + args_list["--num-threads"]=random.sample(threads,1)[0] #$ debug + + return args_list + +def limits(args_list): + if args_list["--use-shadow-db"]==True: + if args_list["--max-dbs"] > 1: + print("Cannot combine use-shadow-db with max-dbs of more than 1 ,set max-dbs=1") + args_list["--max-dbs"]=1 + else: + pass + + # env is start by test frame , not crash_gen instance + + # elif args_list["--num-replicas"]==0: + # print(" make sure num-replicas is at least 1 ") + # args_list["--num-replicas"]=1 + # elif args_list["--num-replicas"]==1: + # pass + + # elif args_list["--num-replicas"]>1: + # if not args_list["--auto-start-service"]: + # print("it should be deployed by crash_gen auto-start-service for multi replicas") + + # else: + # pass + + return args_list + +def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): + build_path = get_path() + if repo == "community": + crash_gen_path = build_path[:-5]+"community/tests/pytest/" + elif repo == "TDengine": + crash_gen_path = build_path[:-5]+"/tests/pytest/" + else: + pass + + bools_args_list = ["--auto-start-service" , "--debug","--run-tdengine","--ignore-errors","--track-memory-leaks","--larger-data","--mix-oos-data","--dynamic-db-table-names", + 
"--per-thread-db-connection","--record-ops","--verify-data","--use-shadow-db","--continue-on-exception"] + arguments = "" + for k ,v in args_list.items(): + if k == "--ignore-errors": + if v: + arguments+=(k+"="+str(v)+" ") + else: + arguments+="" + elif k in bools_args_list and v==True: + arguments+=(k+" ") + elif k in bools_args_list and v==False: + arguments+="" + else: + arguments+=(k+"="+str(v)+" ") + + if valgrind : + + crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550 '%(crash_gen_path ,arguments) + + else: + + crash_gen_cmd = 'cd %s && ./crash_gen.sh %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550'%(crash_gen_path ,arguments) + + return crash_gen_cmd + +def start_taosd(): + build_path = get_path() + if repo == "community": + start_path = build_path[:-5]+"community/tests/system-test/" + elif repo == "TDengine": + start_path = build_path[:-5]+"/tests/system-test/" + else: + pass + + start_cmd = 'cd %s && python3 test.py >>/dev/null '%(start_path) + os.system(start_cmd) + +def get_cmds(args_list): + # build_path = get_path() + # if repo == "community": + # crash_gen_path = build_path[:-5]+"community/tests/pytest/" + # elif repo == "TDengine": + # crash_gen_path = build_path[:-5]+"/tests/pytest/" + # else: + # pass + + # crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind -p -t 10 -s 1000 -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550 '%(crash_gen_path) + + crash_gen_cmd = get_auto_mix_cmds(args_list,valgrind=valgrind_mode) + return crash_gen_cmd + +def run_crash_gen(crash_cmds): + + # prepare env of taosd + start_taosd() + + build_path = get_path() + if repo == "community": + crash_gen_path = build_path[:-5]+"community/tests/pytest/" + elif repo == "TDengine": + crash_gen_path = build_path[:-5]+"/tests/pytest/" + else: + pass + result_file = os.path.join(crash_gen_path, 'valgrind.out') + + + # run crash_gen and back logs + os.system('echo "%s">>%s'%(crash_cmds,crash_gen_cmds_file)) + os.system("%s >>%s "%(crash_cmds,result_file)) + + +def check_status(): + build_path = get_path() + if repo == "community": + crash_gen_path = build_path[:-5]+"community/tests/pytest/" + elif repo == "TDengine": + crash_gen_path = build_path[:-5]+"/tests/pytest/" + else: + pass + result_file = os.path.join(crash_gen_path, 'valgrind.out') + run_code = subprocess.Popen("tail -n 50 %s"%result_file, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + os.system("tail -n 50 %s>>%s"%(result_file,exit_status_logs)) + + core_check = subprocess.Popen('ls -l %s | grep "^-" | wc -l'%core_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + + if int(core_check.strip().rstrip()) > 0: + # it means core files has occured + return 3 + + if "Crash_Gen is now exiting with status code: 1" in run_code: + return 1 + elif "Crash_Gen is now exiting with status code: 0" in run_code: + return 0 + else: + return 2 + + +def main(): + + args_list = {"--auto-start-service":False ,"--max-dbs":0,"--connector-type":"native","--debug":False,"--run-tdengine":False,"--ignore-errors":[], + "--track-memory-leaks":False , "--larger-data":False, "--mix-oos-data":False, "--dynamic-db-table-names":False, + "--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False , + "--continue-on-exception":False } + + args = random_args(args_list) + args = limits(args) + + + build_path = get_path() + 
os.system("pip install git+https://github.com/taosdata/taos-connector-python.git") + if repo =="community": + crash_gen_path = build_path[:-5]+"community/tests/pytest/" + elif repo =="TDengine": + crash_gen_path = build_path[:-5]+"/tests/pytest/" + else: + pass + + if os.path.exists(crash_gen_path+"crash_gen.sh"): + print(" make sure crash_gen.sh is ready") + else: + print( " crash_gen.sh is not exists ") + sys.exit(1) + + git_commit = subprocess.Popen("cd %s && git log | head -n1"%crash_gen_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")[8:16] + + # crash_cmds = get_cmds() + + crash_cmds = get_cmds(args) + # clean run_dir + os.system('rm -rf %s'%run_dir ) + if not os.path.exists(run_dir): + os.mkdir(run_dir) + print(crash_cmds) + run_crash_gen(crash_cmds) + status = check_status() + + print("exit status : ", status) + + if status ==4: + print('======== crash_gen found memory bugs ========') + if status ==5: + print('======== crash_gen found memory errors ========') + if status >0: + print('======== crash_gen run failed and not exit as expected ========') + else: + print('======== crash_gen run sucess and exit as expected ========') + + + if status!=0 : + + try: + text = f"crash_gen instance exit status of docker [ {hostname} ] is : {msg_dict[status]}\n " + f" and git commit : {git_commit}" + send_msg(get_msg(text)) + except Exception as e: + print("exception:", e) + exit(status) + + +if __name__ == '__main__': + main() + + diff --git a/tests/pytest/auto_crash_gen_valgrind.py b/tests/pytest/auto_crash_gen_valgrind.py new file mode 100755 index 0000000000..1443dcd543 --- /dev/null +++ b/tests/pytest/auto_crash_gen_valgrind.py @@ -0,0 +1,399 @@ +#!/usr/bin/python3 + + +import os +import socket +import requests + +# -*- coding: utf-8 -*- +import os ,sys +import random +import argparse +import subprocess +import time +import platform + +# valgrind mode ? 
+valgrind_mode = True + +msg_dict = {0:"success" , 1:"failed" , 2:"other errors" , 3:"crash occured" , 4:"Invalid read/write" , 5:"memory leak" } + +# formal +hostname = socket.gethostname() + +group_url = 'https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9' + +def get_msg(text): + return { + "msg_type": "post", + "content": { + "post": { + "zh_cn": { + "title": "Crash_gen Monitor", + "content": [ + [{ + "tag": "text", + "text": text + } + ]] + } + } + } + } + + +def send_msg(json): + headers = { + 'Content-Type': 'application/json' + } + + req = requests.post(url=group_url, headers=headers, json=json) + inf = req.json() + if "StatusCode" in inf and inf["StatusCode"] == 0: + pass + else: + print(inf) + + +# set path about run instance + +core_path = subprocess.Popen("cat /proc/sys/kernel/core_pattern", shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") +core_path = "/".join(core_path.split("/")[:-1]) +print(" ======= core path is %s ======== " %core_path) +if not os.path.exists(core_path): + os.mkdir(core_path) + +base_dir = os.path.dirname(os.path.realpath(__file__)) +if base_dir.find("community")>0: + repo = "community" +elif base_dir.find("TDengine")>0: + repo = "TDengine" +else: + repo ="TDengine" +print("base_dir:",base_dir) +home_dir = base_dir[:base_dir.find(repo)] +print("home_dir:",home_dir) +run_dir = os.path.join(home_dir,'run_dir') +run_dir = os.path.abspath(run_dir) +print("run dir is *** :",run_dir) +if not os.path.exists(run_dir): + os.mkdir(run_dir) +run_log_file = run_dir+'/crash_gen_run.log' +crash_gen_cmds_file = os.path.join(run_dir, 'crash_gen_cmds.sh') +exit_status_logs = os.path.join(run_dir, 'crash_exit.log') + +def get_path(): + buildPath='' + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + +# generate crash_gen start script randomly + +def random_args(args_list): + nums_args_list = ["--max-dbs","--num-replicas","--num-dnodes","--max-steps","--num-threads",] # record int type arguments + bools_args_list = ["--auto-start-service" , "--debug","--run-tdengine","--ignore-errors","--track-memory-leaks","--larger-data","--mix-oos-data","--dynamic-db-table-names", + "--per-thread-db-connection","--record-ops","--verify-data","--use-shadow-db","--continue-on-exception" + ] # record bool type arguments + strs_args_list = ["--connector-type"] # record str type arguments + + args_list["--auto-start-service"]= False + args_list["--continue-on-exception"]=True + # connect_types=['native','rest','mixed'] # restful interface has change ,we should trans dbnames to connection or change sql such as "db.test" + connect_types=['native'] + # args_list["--connector-type"]=connect_types[random.randint(0,2)] + args_list["--connector-type"]= connect_types[0] + args_list["--max-dbs"]= random.randint(1,10) + + # dnodes = [1,3] # set single dnodes; + + # args_list["--num-dnodes"]= random.sample(dnodes,1)[0] + # args_list["--num-replicas"]= random.randint(1,args_list["--num-dnodes"]) + args_list["--debug"]=False + args_list["--per-thread-db-connection"]=True + args_list["--track-memory-leaks"]=False + + 
args_list["--max-steps"]=random.randint(200,500) + + threads = [16,32] + + args_list["--num-threads"]=random.sample(threads,1)[0] #$ debug + # args_list["--ignore-errors"]=[] ## can add error codes for detail + + + args_list["--run-tdengine"]= False + args_list["--use-shadow-db"]= False + args_list["--dynamic-db-table-names"]= True + args_list["--verify-data"]= False + args_list["--record-ops"] = False + + for key in bools_args_list: + set_bool_value = [True,False] + if key == "--auto-start-service" : + continue + elif key =="--run-tdengine": + continue + elif key == "--ignore-errors": + continue + elif key == "--debug": + continue + elif key == "--per-thread-db-connection": + continue + elif key == "--continue-on-exception": + continue + elif key == "--use-shadow-db": + continue + elif key =="--track-memory-leaks": + continue + elif key == "--dynamic-db-table-names": + continue + elif key == "--verify-data": + continue + elif key == "--record-ops": + continue + elif key == "--larger-data": + continue + else: + args_list[key]=set_bool_value[random.randint(0,1)] + return args_list + +def limits(args_list): + if args_list["--use-shadow-db"]==True: + if args_list["--max-dbs"] > 1: + print("Cannot combine use-shadow-db with max-dbs of more than 1 ,set max-dbs=1") + args_list["--max-dbs"]=1 + else: + pass + + # env is start by test frame , not crash_gen instance + + # elif args_list["--num-replicas"]==0: + # print(" make sure num-replicas is at least 1 ") + # args_list["--num-replicas"]=1 + # elif args_list["--num-replicas"]==1: + # pass + + # elif args_list["--num-replicas"]>1: + # if not args_list["--auto-start-service"]: + # print("it should be deployed by crash_gen auto-start-service for multi replicas") + + # else: + # pass + + return args_list + +def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): + build_path = get_path() + if repo == "community": + crash_gen_path = build_path[:-5]+"community/tests/pytest/" + elif repo == "TDengine": + crash_gen_path = build_path[:-5]+"/tests/pytest/" + else: + pass + + bools_args_list = ["--auto-start-service" , "--debug","--run-tdengine","--ignore-errors","--track-memory-leaks","--larger-data","--mix-oos-data","--dynamic-db-table-names", + "--per-thread-db-connection","--record-ops","--verify-data","--use-shadow-db","--continue-on-exception"] + arguments = "" + for k ,v in args_list.items(): + if k == "--ignore-errors": + if v: + arguments+=(k+"="+str(v)+" ") + else: + arguments+="" + elif k in bools_args_list and v==True: + arguments+=(k+" ") + elif k in bools_args_list and v==False: + arguments+="" + else: + arguments+=(k+"="+str(v)+" ") + + if valgrind : + + crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550 '%(crash_gen_path ,arguments) + + else: + + crash_gen_cmd = 'cd %s && ./crash_gen.sh %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550'%(crash_gen_path ,arguments) + + return crash_gen_cmd + + +def start_taosd(): + build_path = get_path() + if repo == "community": + start_path = build_path[:-5]+"community/tests/system-test/" + elif repo == "TDengine": + start_path = build_path[:-5]+"/tests/system-test/" + else: + pass + + start_cmd = 'cd %s && python3 test.py '%(start_path) + os.system(start_cmd +">>/dev/null") + +def get_cmds(args_list): + # build_path = get_path() + # if repo == "community": + # crash_gen_path = build_path[:-5]+"community/tests/pytest/" + # elif repo == "TDengine": + # crash_gen_path = build_path[:-5]+"/tests/pytest/" + # else: + # pass + + # 
crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind -p -t 10 -s 1000 -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550 '%(crash_gen_path) + + crash_gen_cmd = get_auto_mix_cmds(args_list,valgrind=valgrind_mode) + return crash_gen_cmd + +def run_crash_gen(crash_cmds): + + # prepare env of taosd + start_taosd() + # run crash_gen and back logs + os.system('echo "%s">>%s'%(crash_cmds,crash_gen_cmds_file)) + # os.system("cp %s %s"%(crash_gen_cmds_file, core_path)) + os.system("%s"%(crash_cmds)) + +def check_status(): + build_path = get_path() + if repo == "community": + crash_gen_path = build_path[:-5]+"community/tests/pytest/" + elif repo == "TDengine": + crash_gen_path = build_path[:-5]+"/tests/pytest/" + else: + pass + result_file = os.path.join(crash_gen_path, 'valgrind.out') + run_code = subprocess.Popen("tail -n 50 %s"%result_file, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + os.system("tail -n 50 %s>>%s"%(result_file,exit_status_logs)) + + core_check = subprocess.Popen('ls -l %s | grep "^-" | wc -l'%core_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + + if int(core_check.strip().rstrip()) > 0: + # it means core files has occured + return 3 + + mem_status = check_memory() + if mem_status >0: + return mem_status + if "Crash_Gen is now exiting with status code: 1" in run_code: + return 1 + elif "Crash_Gen is now exiting with status code: 0" in run_code: + return 0 + else: + return 2 + + +def check_memory(): + + build_path = get_path() + if repo == "community": + crash_gen_path = build_path[:-5]+"community/tests/pytest/" + elif repo == "TDengine": + crash_gen_path = build_path[:-5]+"/tests/pytest/" + else: + pass + ''' + invalid read, invalid write + ''' + back_path = os.path.join(core_path,"valgrind_report") + if not os.path.exists(back_path): + os.mkdir(back_path) + + stderr_file = os.path.join(crash_gen_path , "valgrind.err") + + status = 0 + + grep_res = subprocess.Popen("grep -i 'Invalid read' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + + if grep_res: + # os.system("cp %s %s"%(stderr_file , back_path)) + status = 4 + + grep_res = subprocess.Popen("grep -i 'Invalid write' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + + if grep_res: + # os.system("cp %s %s"%(stderr_file , back_path)) + status = 4 + + grep_res = subprocess.Popen("grep -i 'taosMemoryMalloc' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + + if grep_res: + # os.system("cp %s %s"%(stderr_file , back_path)) + status = 5 + + return status + +def main(): + + args_list = {"--auto-start-service":False ,"--max-dbs":0,"--connector-type":"native","--debug":False,"--run-tdengine":False,"--ignore-errors":[], + "--track-memory-leaks":False , "--larger-data":False, "--mix-oos-data":False, "--dynamic-db-table-names":False, + "--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False , + "--continue-on-exception":False } + + args = random_args(args_list) + args = limits(args) + + build_path = get_path() + os.system("pip install git+https://github.com/taosdata/taos-connector-python.git >>/dev/null") + if repo =="community": + crash_gen_path = build_path[:-5]+"community/tests/pytest/" + elif repo =="TDengine": + crash_gen_path = 
build_path[:-5]+"/tests/pytest/" + else: + pass + + if os.path.exists(crash_gen_path+"crash_gen.sh"): + print(" make sure crash_gen.sh is ready") + else: + print( " crash_gen.sh is not exists ") + sys.exit(1) + + git_commit = subprocess.Popen("cd %s && git log | head -n1"%crash_gen_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")[8:16] + + # crash_cmds = get_cmds() + + crash_cmds = get_cmds(args) + + # clean run_dir + os.system('rm -rf %s'%run_dir ) + if not os.path.exists(run_dir): + os.mkdir(run_dir) + print(crash_cmds) + run_crash_gen(crash_cmds) + status = check_status() + # back_path = os.path.join(core_path,"valgrind_report") + + print("exit status : ", status) + + if status ==4: + print('======== crash_gen found memory bugs ========') + if status ==5: + print('======== crash_gen found memory errors ========') + if status >0: + print('======== crash_gen run failed and not exit as expected ========') + else: + print('======== crash_gen run sucess and exit as expected ========') + + if status!=0 : + + try: + text = f"crash_gen instance exit status of docker [ {hostname} ] is : {msg_dict[status]}\n " + f" and git commit : {git_commit}" + send_msg(get_msg(text)) + except Exception as e: + print("exception:", e) + exit(status) + + +if __name__ == '__main__': + main() + + diff --git a/tests/pytest/auto_crash_gen_valgrind_cluster.py b/tests/pytest/auto_crash_gen_valgrind_cluster.py new file mode 100755 index 0000000000..05cdaa6cc5 --- /dev/null +++ b/tests/pytest/auto_crash_gen_valgrind_cluster.py @@ -0,0 +1,399 @@ +#!/usr/bin/python3 + + +import os +import socket +import requests + +# -*- coding: utf-8 -*- +import os ,sys +import random +import argparse +import subprocess +import time +import platform + +# valgrind mode ? 
+valgrind_mode = True + +msg_dict = {0:"success" , 1:"failed" , 2:"other errors" , 3:"crash occured" , 4:"Invalid read/write" , 5:"memory leak" } + +# formal +hostname = socket.gethostname() + +group_url = 'https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9' + +def get_msg(text): + return { + "msg_type": "post", + "content": { + "post": { + "zh_cn": { + "title": "Crash_gen Monitor", + "content": [ + [{ + "tag": "text", + "text": text + } + ]] + } + } + } + } + + +def send_msg(json): + headers = { + 'Content-Type': 'application/json' + } + + req = requests.post(url=group_url, headers=headers, json=json) + inf = req.json() + if "StatusCode" in inf and inf["StatusCode"] == 0: + pass + else: + print(inf) + + +# set path about run instance + +core_path = subprocess.Popen("cat /proc/sys/kernel/core_pattern", shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") +core_path = "/".join(core_path.split("/")[:-1]) +print(" ======= core path is %s ======== " %core_path) +if not os.path.exists(core_path): + os.mkdir(core_path) + +base_dir = os.path.dirname(os.path.realpath(__file__)) +if base_dir.find("community")>0: + repo = "community" +elif base_dir.find("TDengine")>0: + repo = "TDengine" +else: + repo ="TDengine" +print("base_dir:",base_dir) +home_dir = base_dir[:base_dir.find(repo)] +print("home_dir:",home_dir) +run_dir = os.path.join(home_dir,'run_dir') +run_dir = os.path.abspath(run_dir) +print("run dir is *** :",run_dir) +if not os.path.exists(run_dir): + os.mkdir(run_dir) +run_log_file = run_dir+'/crash_gen_run.log' +crash_gen_cmds_file = os.path.join(run_dir, 'crash_gen_cmds.sh') +exit_status_logs = os.path.join(run_dir, 'crash_exit.log') + +def get_path(): + buildPath='' + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + +# generate crash_gen start script randomly + +def random_args(args_list): + nums_args_list = ["--max-dbs","--num-replicas","--num-dnodes","--max-steps","--num-threads",] # record int type arguments + bools_args_list = ["--auto-start-service" , "--debug","--run-tdengine","--ignore-errors","--track-memory-leaks","--larger-data","--mix-oos-data","--dynamic-db-table-names", + "--per-thread-db-connection","--record-ops","--verify-data","--use-shadow-db","--continue-on-exception" + ] # record bool type arguments + strs_args_list = ["--connector-type"] # record str type arguments + + args_list["--auto-start-service"]= False + args_list["--continue-on-exception"]=True + # connect_types=['native','rest','mixed'] # restful interface has change ,we should trans dbnames to connection or change sql such as "db.test" + connect_types=['native'] + # args_list["--connector-type"]=connect_types[random.randint(0,2)] + args_list["--connector-type"]= connect_types[0] + args_list["--max-dbs"]= random.randint(1,10) + + # dnodes = [1,3] # set single dnodes; + + # args_list["--num-dnodes"]= random.sample(dnodes,1)[0] + # args_list["--num-replicas"]= random.randint(1,args_list["--num-dnodes"]) + args_list["--debug"]=False + args_list["--per-thread-db-connection"]=True + args_list["--track-memory-leaks"]=False + + 
args_list["--max-steps"]=random.randint(200,500) + + threads = [16,32] + + args_list["--num-threads"]=random.sample(threads,1)[0] #$ debug + # args_list["--ignore-errors"]=[] ## can add error codes for detail + + + args_list["--run-tdengine"]= False + args_list["--use-shadow-db"]= False + args_list["--dynamic-db-table-names"]= True + args_list["--verify-data"]= False + args_list["--record-ops"] = False + + for key in bools_args_list: + set_bool_value = [True,False] + if key == "--auto-start-service" : + continue + elif key =="--run-tdengine": + continue + elif key == "--ignore-errors": + continue + elif key == "--debug": + continue + elif key == "--per-thread-db-connection": + continue + elif key == "--continue-on-exception": + continue + elif key == "--use-shadow-db": + continue + elif key =="--track-memory-leaks": + continue + elif key == "--dynamic-db-table-names": + continue + elif key == "--verify-data": + continue + elif key == "--record-ops": + continue + elif key == "--larger-data": + continue + else: + args_list[key]=set_bool_value[random.randint(0,1)] + return args_list + +def limits(args_list): + if args_list["--use-shadow-db"]==True: + if args_list["--max-dbs"] > 1: + print("Cannot combine use-shadow-db with max-dbs of more than 1 ,set max-dbs=1") + args_list["--max-dbs"]=1 + else: + pass + + # env is start by test frame , not crash_gen instance + + # elif args_list["--num-replicas"]==0: + # print(" make sure num-replicas is at least 1 ") + # args_list["--num-replicas"]=1 + # elif args_list["--num-replicas"]==1: + # pass + + # elif args_list["--num-replicas"]>1: + # if not args_list["--auto-start-service"]: + # print("it should be deployed by crash_gen auto-start-service for multi replicas") + + # else: + # pass + + return args_list + +def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): + build_path = get_path() + if repo == "community": + crash_gen_path = build_path[:-5]+"community/tests/pytest/" + elif repo == "TDengine": + crash_gen_path = build_path[:-5]+"/tests/pytest/" + else: + pass + + bools_args_list = ["--auto-start-service" , "--debug","--run-tdengine","--ignore-errors","--track-memory-leaks","--larger-data","--mix-oos-data","--dynamic-db-table-names", + "--per-thread-db-connection","--record-ops","--verify-data","--use-shadow-db","--continue-on-exception"] + arguments = "" + for k ,v in args_list.items(): + if k == "--ignore-errors": + if v: + arguments+=(k+"="+str(v)+" ") + else: + arguments+="" + elif k in bools_args_list and v==True: + arguments+=(k+" ") + elif k in bools_args_list and v==False: + arguments+="" + else: + arguments+=(k+"="+str(v)+" ") + + if valgrind : + + crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0707 '%(crash_gen_path ,arguments) + + else: + + crash_gen_cmd = 'cd %s && ./crash_gen.sh -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0014,0x0707'%(crash_gen_path ,arguments) + + return crash_gen_cmd + + +def start_taosd(): + build_path = get_path() + if repo == "community": + start_path = build_path[:-5]+"community/tests/system-test/" + elif repo == "TDengine": + start_path = build_path[:-5]+"/tests/system-test/" + else: + pass + + start_cmd = 'cd %s && python3 test.py -N 4 -M 1 '%(start_path) + os.system(start_cmd +">>/dev/null") + +def get_cmds(args_list): + # build_path = get_path() + # if repo == "community": + # crash_gen_path = build_path[:-5]+"community/tests/pytest/" + # elif repo == "TDengine": + # crash_gen_path = 
build_path[:-5]+"/tests/pytest/" + # else: + # pass + + # crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind -p -t 10 -s 1000 -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550 '%(crash_gen_path) + + crash_gen_cmd = get_auto_mix_cmds(args_list,valgrind=valgrind_mode) + return crash_gen_cmd + +def run_crash_gen(crash_cmds): + + # prepare env of taosd + start_taosd() + # run crash_gen and back logs + os.system('echo "%s">>%s'%(crash_cmds,crash_gen_cmds_file)) + # os.system("cp %s %s"%(crash_gen_cmds_file, core_path)) + os.system("%s"%(crash_cmds)) + +def check_status(): + build_path = get_path() + if repo == "community": + crash_gen_path = build_path[:-5]+"community/tests/pytest/" + elif repo == "TDengine": + crash_gen_path = build_path[:-5]+"/tests/pytest/" + else: + pass + result_file = os.path.join(crash_gen_path, 'valgrind.out') + run_code = subprocess.Popen("tail -n 50 %s"%result_file, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + os.system("tail -n 50 %s>>%s"%(result_file,exit_status_logs)) + + core_check = subprocess.Popen('ls -l %s | grep "^-" | wc -l'%core_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + + if int(core_check.strip().rstrip()) > 0: + # it means core files has occured + return 3 + + mem_status = check_memory() + if mem_status >0: + return mem_status + if "Crash_Gen is now exiting with status code: 1" in run_code: + return 1 + elif "Crash_Gen is now exiting with status code: 0" in run_code: + return 0 + else: + return 2 + + +def check_memory(): + + build_path = get_path() + if repo == "community": + crash_gen_path = build_path[:-5]+"community/tests/pytest/" + elif repo == "TDengine": + crash_gen_path = build_path[:-5]+"/tests/pytest/" + else: + pass + ''' + invalid read, invalid write + ''' + back_path = os.path.join(core_path,"valgrind_report") + if not os.path.exists(back_path): + os.mkdir(back_path) + + stderr_file = os.path.join(crash_gen_path , "valgrind.err") + + status = 0 + + grep_res = subprocess.Popen("grep -i 'Invalid read' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + + if grep_res: + # os.system("cp %s %s"%(stderr_file , back_path)) + status = 4 + + grep_res = subprocess.Popen("grep -i 'Invalid write' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + + if grep_res: + # os.system("cp %s %s"%(stderr_file , back_path)) + status = 4 + + grep_res = subprocess.Popen("grep -i 'taosMemoryMalloc' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + + if grep_res: + # os.system("cp %s %s"%(stderr_file , back_path)) + status = 5 + + return status + +def main(): + + args_list = {"--auto-start-service":False ,"--max-dbs":0,"--connector-type":"native","--debug":False,"--run-tdengine":False,"--ignore-errors":[], + "--track-memory-leaks":False , "--larger-data":False, "--mix-oos-data":False, "--dynamic-db-table-names":False, + "--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False , + "--continue-on-exception":False } + + args = random_args(args_list) + args = limits(args) + + build_path = get_path() + os.system("pip install git+https://github.com/taosdata/taos-connector-python.git >>/dev/null") + if repo =="community": + crash_gen_path = build_path[:-5]+"community/tests/pytest/" + elif repo 
=="TDengine": + crash_gen_path = build_path[:-5]+"/tests/pytest/" + else: + pass + + if os.path.exists(crash_gen_path+"crash_gen.sh"): + print(" make sure crash_gen.sh is ready") + else: + print( " crash_gen.sh is not exists ") + sys.exit(1) + + git_commit = subprocess.Popen("cd %s && git log | head -n1"%crash_gen_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")[8:16] + + # crash_cmds = get_cmds() + + crash_cmds = get_cmds(args) + + # clean run_dir + os.system('rm -rf %s'%run_dir ) + if not os.path.exists(run_dir): + os.mkdir(run_dir) + print(crash_cmds) + run_crash_gen(crash_cmds) + status = check_status() + # back_path = os.path.join(core_path,"valgrind_report") + + print("exit status : ", status) + + if status ==4: + print('======== crash_gen found memory bugs ========') + if status ==5: + print('======== crash_gen found memory errors ========') + if status >0: + print('======== crash_gen run failed and not exit as expected ========') + else: + print('======== crash_gen run sucess and exit as expected ========') + + if status!=0 : + + try: + text = f"crash_gen instance exit status of docker [ {hostname} ] is : {msg_dict[status]}\n " + f" and git commit : {git_commit}" + send_msg(get_msg(text)) + except Exception as e: + print("exception:", e) + exit(status) + + +if __name__ == '__main__': + main() + + diff --git a/tests/pytest/auto_run_regular.sh b/tests/pytest/auto_run_regular.sh new file mode 100755 index 0000000000..27e8013269 --- /dev/null +++ b/tests/pytest/auto_run_regular.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# set LD_LIBRARY_PATH +export PATH=$PATH:/home/TDengine/debug/build/bin +export LD_LIBRARY_PATH=/home/TDengine/debug/build/lib +ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null +ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null +ln -s /home/TDengine/include/client/taos.h /usr/include/taos.h 2>/dev/null + +# run crash_gen auto script +python3 /home/TDengine/tests/pytest/auto_crash_gen.py \ No newline at end of file diff --git a/tests/pytest/auto_run_valgrind.sh b/tests/pytest/auto_run_valgrind.sh new file mode 100755 index 0000000000..c7154e867c --- /dev/null +++ b/tests/pytest/auto_run_valgrind.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# set LD_LIBRARY_PATH +export PATH=$PATH:/home/TDengine/debug/build/bin +export LD_LIBRARY_PATH=/home/TDengine/debug/build/lib +ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null +ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null +ln -s /home/TDengine/include/client/taos.h /usr/include/taos.h 2>/dev/null + +# run crash_gen auto script +python3 /home/TDengine/tests/pytest/auto_crash_gen_valgrind.py \ No newline at end of file diff --git a/tests/pytest/auto_run_valgrind_cluster.sh b/tests/pytest/auto_run_valgrind_cluster.sh new file mode 100755 index 0000000000..62bc22e923 --- /dev/null +++ b/tests/pytest/auto_run_valgrind_cluster.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# set LD_LIBRARY_PATH +export PATH=$PATH:/home/TDengine/debug/build/bin +export LD_LIBRARY_PATH=/home/TDengine/debug/build/lib +ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null +ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null +ln -s /home/TDengine/include/client/taos.h /usr/include/taos.h 2>/dev/null + +# run crash_gen auto script +python3 /home/TDengine/tests/pytest/auto_crash_gen_valgrind_cluster.py \ No newline at end of file From 
17a502f569353e8a7c2f66632f59502f50beb1bd Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Mon, 7 Nov 2022 13:44:12 +0800 Subject: [PATCH 02/69] update --- tests/pytest/crash_gen.sh | 2 +- tests/pytest/crash_gen/crash_gen_main.py | 478 +++++++++++++++++++++-- 2 files changed, 456 insertions(+), 24 deletions(-) diff --git a/tests/pytest/crash_gen.sh b/tests/pytest/crash_gen.sh index 539314dea4..cc2941a52a 100755 --- a/tests/pytest/crash_gen.sh +++ b/tests/pytest/crash_gen.sh @@ -45,7 +45,7 @@ fi # Now getting ready to execute Python # The following is the default of our standard dev env (Ubuntu 20.04), modify/adjust at your own risk -PYTHON_EXEC=python3.8 +PYTHON_EXEC=python3 # First we need to set up a path for Python to find our own TAOS modules, so that "import" can work. # export PYTHONPATH=$(pwd)/../../src/connector/python:$(pwd) diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 600c64b8e6..d8946780a2 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -254,7 +254,7 @@ class WorkerThread: class ThreadCoordinator: - WORKER_THREAD_TIMEOUT = 120 # Normal: 120 + WORKER_THREAD_TIMEOUT = 1200 # Normal: 120 def __init__(self, pool: ThreadPool, dbManager: DbManager): self._curStep = -1 # first step is 0 @@ -674,9 +674,12 @@ class AnyState: # only "under normal circumstances", as we may override it with the -b option CAN_DROP_DB = 2 CAN_CREATE_FIXED_SUPER_TABLE = 3 + CAN_CREATE_STREAM = 3 # super table must exists CAN_DROP_FIXED_SUPER_TABLE = 4 CAN_ADD_DATA = 5 + CAN_DROP_STREAM = 5 CAN_READ_DATA = 6 + CAN_DELETE_DATA = 6 def __init__(self): self._info = self.getInfo() @@ -727,12 +730,18 @@ class AnyState: return False return self._info[self.CAN_DROP_FIXED_SUPER_TABLE] + def canCreateStream(self): + return self._info[self.CAN_CREATE_STREAM] + def canAddData(self): return self._info[self.CAN_ADD_DATA] def canReadData(self): return self._info[self.CAN_READ_DATA] + def canDeleteData(self): + return self._info[self.CAN_DELETE_DATA] + def assertAtMostOneSuccess(self, tasks, cls): sCnt = 0 for task in tasks: @@ -921,7 +930,7 @@ class StateMechine: except taos.error.ProgrammingError as err: Logging.error("Failed to initialized state machine, cannot find current state: {}".format(err)) traceback.print_stack() - raise # re-throw + pass # re-throw # TODO: seems no lnoger used, remove? def getCurrentState(self): @@ -974,14 +983,21 @@ class StateMechine: # did not do this when openning connection, and this is NOT the worker # thread, which does this on their own dbc.use(dbName) + if not dbc.hasTables(): # no tables + Logging.debug("[STT] DB_ONLY found, between {} and {}".format(ts, time.time())) return StateDbOnly() # For sure we have tables, which means we must have the super table. # TODO: are we sure? 
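+        # A DB with tables implies the fixed super table exists; what follows
+        # distinguishes SUPER_TABLE_ONLY from HAS_DATA by checking for regular (child) tables.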
+ sTable = self._db.getFixedSuperTable() - if sTable.hasRegTables(dbc): # no regular tables + + + if sTable.hasRegTables(dbc): # no regular tables + # print("debug=====*\n"*100) Logging.debug("[STT] SUPER_TABLE_ONLY found, between {} and {}".format(ts, time.time())) + return StateSuperTableOnly() else: # has actual tables Logging.debug("[STT] HAS_DATA found, between {} and {}".format(ts, time.time())) @@ -1109,6 +1125,7 @@ class Database: return "fs_table" def getFixedSuperTable(self) -> TdSuperTable: + return TdSuperTable(self.getFixedSuperTableName(), self.getName()) # We aim to create a starting time tick, such that, whenever we run our test here once @@ -1342,6 +1359,11 @@ class Task(): 0x2603, # Table does not exist, replaced by 2662 below 0x260d, # Tags number not matched 0x2662, # Table does not exist #TODO: what about 2603 above? + 0x032C, # Object is creating + 0x032D, # Object is dropping + 0x03D3, # Conflict transaction not completed + 0x0707, # Query not ready , it always occur at replica 3 + 0x707, # Query not ready @@ -1638,9 +1660,12 @@ class TaskCreateDb(StateTransitionTask): # numReplica = Dice.throw(Settings.getConfig().max_replicas) + 1 # 1,2 ... N numReplica = Config.getConfig().num_replicas # fixed, always repStr = "replica {}".format(numReplica) - updatePostfix = "update 1" if Config.getConfig().verify_data else "" # allow update only when "verify data" is active + updatePostfix = "" if Config.getConfig().verify_data else "" # allow update only when "verify data" is active , 3.0 version default is update 1 + vg_nums = random.randint(1,8) + cache_model = Dice.choice(['none' , 'last_row' , 'last_value' , 'both']) + buffer = random.randint(3,128) dbName = self._db.getName() - self.execWtSql(wt, "create database {} {} {} ".format(dbName, repStr, updatePostfix ) ) + self.execWtSql(wt, "create database {} {} {} vgroups {} cachemodel '{}' buffer {} ".format(dbName, repStr, updatePostfix, vg_nums, cache_model,buffer ) ) if dbName == "db_0" and Config.getConfig().use_shadow_db: self.execWtSql(wt, "create database {} {} {} ".format("db_s", repStr, updatePostfix ) ) @@ -1657,6 +1682,69 @@ class TaskDropDb(StateTransitionTask): self.execWtSql(wt, "drop database {}".format(self._db.getName())) Logging.debug("[OPS] database dropped at {}".format(time.time())) +class TaskCreateStream(StateTransitionTask): + + @classmethod + def getEndState(cls): + return StateHasData() + + @classmethod + def canBeginFrom(cls, state: AnyState): + return state.canCreateStream() + + def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): + dbname = self._db.getName() + + sub_stream_name = dbname+ '_sub_stream' + sub_stream_tb_name = 'avg_sub' + super_stream_name = dbname+ '_super_stream' + super_stream_tb_name = 'avg_super' + if not self._db.exists(wt.getDbConn()): + Logging.debug("Skipping task, no DB yet") + return + + sTable = self._db.getFixedSuperTable() # type: TdSuperTable + # wt.execSql("use db") # should always be in place + + # create stream + ''' + CREATE STREAM avg_vol_s INTO avg_vol AS SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s); + ''' + stbname =sTable.getName() + sub_tables = sTable.getRegTables(wt.getDbConn()) + if sub_tables: + + if sub_tables: # if not empty + sub_tbname = sub_tables[0] + + # create stream with query above sub_table + stream_sql = 'create stream {} into {}.{} as select count(*), avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.format(sub_stream_name,dbname,sub_stream_tb_name 
,dbname,sub_tbname) + try: + self.execWtSql(wt, stream_sql) + except taos.error.ProgrammingError as err: + errno = Helper.convertErrno(err.errno) + if errno in [0x03f0]: # stream already exists + # stream need drop before drop table + pass + + sTable.setStreamName(sub_stream_name) + else: + pass + + else: + stream_sql = 'create stream {} into {}.{} as select count(*), avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.format(super_stream_name,dbname,super_stream_tb_name,dbname,stbname) + + try: + self.execWtSql(wt, stream_sql) + except taos.error.ProgrammingError as err: + errno = Helper.convertErrno(err.errno) + if errno in [0x03f0]: # stream already exists + # stream need drop before drop table + pass + + + + class TaskCreateSuperTable(StateTransitionTask): @classmethod def getEndState(cls): @@ -1688,15 +1776,40 @@ class TdSuperTable: def __init__(self, stName, dbName): self._stName = stName self._dbName = dbName + self._streamName = [] def getName(self): return self._stName + def setStreamName(self,name): + self._streamName.append(name) + + def getStreamName(self): + return self._streamName + def drop(self, dbc, skipCheck = False): dbName = self._dbName if self.exists(dbc) : # if myself exists - fullTableName = dbName + '.' + self._stName - dbc.execute("DROP TABLE {}".format(fullTableName)) + fullTableName = dbName + '.' + self._stName + try: + dbc.execute("DROP TABLE {}".format(fullTableName)) + except taos.error.ProgrammingError as err: + errno = Helper.convertErrno(err.errno) + if errno in [1011,0x3F3,0x03f3,0x2662]: # table doesn't exist + pass + # # stream need drop before drop table + # for stream in self.getStreamName(): + # drop_stream_sql = 'drop stream {}'.format(stream) + # try: + # dbc.execute(drop_stream_sql) + # except taos.error.ProgrammingError as err: + # # correcting for strange error number scheme + # errno3 = Helper.convertErrno(err.errno) + # if errno3 in [1011,0x3F3,0x03f3,0x2662,0x03f1]: # stream not exists + # pass + # dbc.execute("DROP TABLE {}".format(fullTableName)) + # pass + else: if not skipCheck: raise CrashGenError("Cannot drop non-existant super table: {}".format(self._stName)) @@ -1711,10 +1824,17 @@ class TdSuperTable: dbName = self._dbName dbc.execute("USE " + dbName) - fullTableName = dbName + '.' + self._stName + fullTableName = dbName + '.' 
+ self._stName + if dbc.existsSuperTable(self._stName): if dropIfExists: + dbc.execute("DROP TABLE {}".format(fullTableName)) + + pass + + + # dbc.execute("DROP TABLE {}".format(fullTableName)) else: # error raise CrashGenError("Cannot create super table, already exists: {}".format(self._stName)) @@ -1733,7 +1853,7 @@ class TdSuperTable: def getRegTables(self, dbc: DbConn): dbName = self._dbName try: - dbc.query("select TBNAME from {}.{}".format(dbName, self._stName)) # TODO: analyze result set later + dbc.query("select distinct TBNAME from {}.{}".format(dbName, self._stName)) # TODO: analyze result set later except taos.error.ProgrammingError as err: errno2 = Helper.convertErrno(err.errno) Logging.debug("[=] Failed to get tables from super table: errno=0x{:X}, msg: {}".format(errno2, err)) @@ -1743,7 +1863,44 @@ class TdSuperTable: return [v[0] for v in qr] # list transformation, ref: https://stackoverflow.com/questions/643823/python-list-transformation def hasRegTables(self, dbc: DbConn): - return dbc.query("SELECT * FROM {}.{}".format(self._dbName, self._stName)) > 0 + # print(self._stName) + # dbc.query("SELECT * FROM {}.{}".format(self._dbName, self._stName)) + # print(dbc.getQueryResult()) + if self.hasStreamTables(dbc) or self.hasStreams(dbc): + if self.dropStreams(dbc): + self.dropStreamTables(dbc) + if dbc.existsSuperTable(self._stName): + + return dbc.query("SELECT * FROM {}.{}".format(self._dbName, self._stName)) > 0 + else: + return False + + def hasStreamTables(self,dbc: DbConn): + + return dbc.query("show {}.stables like 'avg%'".format(self._dbName)) > 0 + + def hasStreams(self,dbc: DbConn): + return dbc.query("show streams") > 0 + + def dropStreams(self,dbc:DbConn): + dbc.query("show streams ") + Streams = dbc.getQueryResult() + for Stream in Streams: + if Stream[0].startswith(self._dbName): + dbc.execute('drop stream {}'.format(Stream[0])) + + return not dbc.query("show streams ") > 0 + + def dropStreamTables(self, dbc: DbConn): + dbc.query("show {}.stables like 'avg%'".format(self._dbName)) + + StreamTables = dbc.getQueryResult() + + for StreamTable in StreamTables: + if self.dropStreams: + dbc.execute('drop table {}.{}'.format(self._dbName,StreamTable[0])) + + return not dbc.query("show {}.stables like 'avg%'".format(self._dbName)) def ensureRegTable(self, task: Optional[Task], dbc: DbConn, regTableName: str): ''' @@ -1838,10 +1995,46 @@ class TdSuperTable: # Run the query against the regular table first doAggr = (Dice.throw(2) == 0) # 1 in 2 chance if not doAggr: # don't do aggregate query, just simple one + commonExpr = Dice.choice([ + '*', + # 'abs(speed)', + # 'acos(speed)', + # 'asin(speed)', + # 'atan(speed)', + # 'ceil(speed)', + # 'cos(speed)', + # 'cos(speed)', + # 'floor(speed)', + # 'log(speed,2)', + # 'pow(speed,2)', + # 'round(speed)', + # 'sin(speed)', + # 'sqrt(speed)', + # 'char_length(color)', + # 'concat(color,color)', + # 'concat_ws(" ", color,color," ")', + # 'length(color)', + # 'lower(color)', + # 'ltrim(color)', + # 'substr(color , 2)', + # 'upper(color)', + # 'cast(speed as double)', + # 'cast(ts as bigint)', + # # 'TO_ISO8601(color)', + # # 'TO_UNIXTIMESTAMP(ts)', + # 'now()', + # 'timediff(ts,now)', + # 'timezone()', + # 'TIMETRUNCATE(ts,1s)', + # 'TIMEZONE()', + # 'TODAY()', + # 'distinct(color)' + ] + ) ret.append(SqlQuery( # reg table - "select {} from {}.{}".format('*', self._dbName, rTbName))) + "select {} from {}.{}".format(commonExpr, self._dbName, rTbName))) ret.append(SqlQuery( # super table - "select {} from {}.{}".format('*', 
self._dbName, self.getName()))) + "select {} from {}.{}".format(commonExpr, self._dbName, self.getName()))) else: # Aggregate query aggExpr = Dice.choice([ 'count(*)', @@ -1857,17 +2050,38 @@ class TdSuperTable: 'top(speed, 50)', # TODO: not supported? 'bottom(speed, 50)', # TODO: not supported? 'apercentile(speed, 10)', # TODO: TD-1316 - # 'last_row(speed)', # TODO: commented out per TD-3231, we should re-create + 'last_row(*)', # TODO: commented out per TD-3231, we should re-create # Transformation Functions # 'diff(speed)', # TODO: no supported?! - 'spread(speed)' + 'spread(speed)', + # 'elapsed(ts)', + # 'mode(speed)', + # 'bottom(speed,1)', + # 'top(speed,1)', + # 'tail(speed,1)', + # 'unique(color)', + # 'csum(speed)', + # 'DERIVATIVE(speed,1s,1)', + # 'diff(speed,1)', + # 'irate(speed)', + # 'mavg(speed,3)', + # 'sample(speed,5)', + # 'STATECOUNT(speed,"LT",1)', + # 'STATEDURATION(speed,"LT",1)', + # 'twa(speed)' + + + + + ]) # TODO: add more from 'top' # if aggExpr not in ['stddev(speed)']: # STDDEV not valid for super tables?! (Done in TD-1049) sql = "select {} from {}.{}".format(aggExpr, self._dbName, self.getName()) if Dice.throw(3) == 0: # 1 in X chance - sql = sql + ' GROUP BY color' + partion_expr = Dice.choice(['color','tbname']) + sql = sql + ' partition BY ' + partion_expr + ' order by ' + partion_expr Progress.emit(Progress.QUERY_GROUP_BY) # Logging.info("Executing GROUP-BY query: " + sql) ret.append(SqlQuery(sql)) @@ -1964,16 +2178,17 @@ class TaskDropSuperTable(StateTransitionTask): isSuccess = True for i in tblSeq: regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i) + streams_prix = self._db.getName() try: self.execWtSql(wt, "drop table {}.{}". format(self._db.getName(), regTableName)) # nRows always 0, like MySQL except taos.error.ProgrammingError as err: - # correcting for strange error number scheme - errno2 = Helper.convertErrno(err.errno) - if (errno2 in [0x362]): # mnode invalid table name - isSuccess = False - Logging.debug("[DB] Acceptable error when dropping a table") - continue # try to delete next regular table + pass + + + + + if (not tickOutput): tickOutput = True # Print only one time @@ -1984,7 +2199,17 @@ class TaskDropSuperTable(StateTransitionTask): # Drop the super table itself tblName = self._db.getFixedSuperTableName() - self.execWtSql(wt, "drop table {}.{}".format(self._db.getName(), tblName)) + try: + self.execWtSql(wt, "drop table {}.{}".format(self._db.getName(), tblName)) + except taos.error.ProgrammingError as err: + # correcting for strange error number scheme + errno2 = Helper.convertErrno(err.errno) + if (errno2 in [0x362]): # mnode invalid table name + isSuccess = False + Logging.debug("[DB] Acceptable error when dropping a table") + elif errno2 in [1011,0x3F3,0x03f3]: # table doesn't exist + + pass class TaskAlterTags(StateTransitionTask): @@ -2234,6 +2459,212 @@ class TaskAddData(StateTransitionTask): self.activeTable.discard(i) # not raising an error, unlike remove +class TaskDeleteData(StateTransitionTask): + # Track which table is being actively worked on + activeTable: Set[int] = set() + + # We use these two files to record operations to DB, useful for power-off tests + fAddLogReady = None # type: Optional[io.TextIOWrapper] + fAddLogDone = None # type: Optional[io.TextIOWrapper] + + @classmethod + def prepToRecordOps(cls): + if Config.getConfig().record_ops: + if (cls.fAddLogReady is None): + Logging.info( + "Recording in a file operations to be performed...") + cls.fAddLogReady = open("add_log_ready.txt", "w") + if 
(cls.fAddLogDone is None):
+                Logging.info("Recording in a file operations completed...")
+                cls.fAddLogDone = open("add_log_done.txt", "w")
+
+    @classmethod
+    def getEndState(cls):
+        return StateHasData()
+
+    @classmethod
+    def canBeginFrom(cls, state: AnyState):
+        return state.canDeleteData()
+
+    def _lockTableIfNeeded(self, fullTableName, extraMsg = ''):
+        if Config.getConfig().verify_data:
+            # Logging.info("Locking table: {}".format(fullTableName))
+            self.lockTable(fullTableName)
+            # Logging.info("Table locked {}: {}".format(extraMsg, fullTableName))
+            # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written
+        else:
+            # Logging.info("Skipping locking table")
+            pass
+
+    def _unlockTableIfNeeded(self, fullTableName):
+        if Config.getConfig().verify_data:
+            # Logging.info("Unlocking table: {}".format(fullTableName))
+            self.unlockTable(fullTableName)
+            # Logging.info("Table unlocked: {}".format(fullTableName))
+        else:
+            pass
+            # Logging.info("Skipping unlocking table")
+
+    def _deleteData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches
+        numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS
+        del_Records = int(numRecords/5)
+        if Dice.throw(2) == 0:
+            for j in range(del_Records): # number of records to delete per table
+                intToWrite = db.getNextInt()
+                nextTick = db.getNextTick()
+                # nextColor = db.getNextColor()
+                if Config.getConfig().record_ops:
+                    self.prepToRecordOps()
+                    if self.fAddLogReady is None:
+                        raise CrashGenError("Unexpected empty fAddLogReady")
+                    self.fAddLogReady.write("Ready to delete {} from {}\n".format(intToWrite, regTableName))
+                    self.fAddLogReady.flush()
+                    os.fsync(self.fAddLogReady.fileno())
+
+                # TODO: too ugly trying to lock the table reliably, refactor...
+                fullTableName = db.getName() + '.' + regTableName
+                self._lockTableIfNeeded(fullTableName) # so that we can verify read-back. TODO: deal with exceptions before unlock
+
+                try:
+                    sql = "delete from {} where ts = '{}' ;".format(
+                        fullTableName,
+                        nextTick)
+
+                    # print(sql)
+                    # Logging.info("Deleting data: {}".format(sql))
+                    dbc.execute(sql)
+                    # Logging.info("Data deleted: {}".format(sql))
+                    intWrote = intToWrite
+
+                    # Quick hack, attach a second delete here. TODO: create a dedicated task
+                    if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shadow DB
+                        intToUpdate = db.getNextInt() # value mark for the second delete
+                        # nextColor = db.getNextColor()
+                        sql = "delete from {} where ts = '{}' ;".format( # repeat the delete at the same timestamp; should be a no-op
+                            fullTableName,
+                            nextTick)
+                        dbc.execute(sql)
+                        intWrote = intToUpdate # record the mark from the second delete
+
+                except: # Any exception at all
+                    self._unlockTableIfNeeded(fullTableName)
+                    raise
+
+                # Now read it back and verify, we might encounter an error if table is dropped
+                if Config.getConfig().verify_data: # only if command line asks for it
+                    try:
+                        readBack = dbc.queryScalar("SELECT * from {}.{} WHERE ts='{}'".
+ format(db.getName(), regTableName, nextTick)) + if readBack == None : + pass + except taos.error.ProgrammingError as err: + errno = Helper.convertErrno(err.errno) + if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result + print("D1",end="") # D1 means delete data success and only 1 record + + elif errno in [0x218, 0x362]: # table doesn't exist + # do nothing + pass + else: + # Re-throw otherwise + raise + finally: + self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock + # Done with read-back verification, unlock the table now + # Successfully wrote the data into the DB, let's record it somehow + te.recordDataMark(intWrote) + else: + + # delete all datas and verify datas ,expected table is empty + if Config.getConfig().record_ops: + self.prepToRecordOps() + if self.fAddLogReady is None: + raise CrashGenError("Unexpected empty fAddLogReady") + self.fAddLogReady.write("Ready to delete {} to {}\n".format(intToWrite, regTableName)) + self.fAddLogReady.flush() + os.fsync(self.fAddLogReady.fileno()) + + # TODO: too ugly trying to lock the table reliably, refactor... + fullTableName = db.getName() + '.' + regTableName + self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock + + try: + sql = "delete from {} ;".format( # removed: tags ('{}', {}) + fullTableName) + # Logging.info("Adding data: {}".format(sql)) + dbc.execute(sql) + # Logging.info("Data added: {}".format(sql)) + + # Quick hack, attach an update statement here. TODO: create an "update" task + if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shaddow DB + sql = "delete from {} ;".format( # "INSERt" means "update" here + fullTableName) + dbc.execute(sql) + + except: # Any exception at all + self._unlockTableIfNeeded(fullTableName) + raise + + # Now read it back and verify, we might encounter an error if table is dropped + if Config.getConfig().verify_data: # only if command line asks for it + try: + readBack = dbc.queryScalar("SELECT * from {}.{} ". 
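
One caution on the delete-all branch above: its record-ops log line reuses intToWrite, which is only bound in the per-row branch, so running this path with --record-ops enabled would likely raise a NameError. The read-back triage in both branches follows the same errno pattern; a condensed sketch with the codes from this hunk:

    import taos

    ACCEPTABLE_ERRNOS = {0x218, 0x362}   # table doesn't exist (dropped concurrently)

    def check_verify_error(err: taos.error.ProgrammingError):
        errno = err.errno & 0x0000FFFF   # rough stand-in for Helper.convertErrno
        if errno not in ACCEPTABLE_ERRNOS:
            raise err                    # re-throw anything unexpected
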
+ format(db.getName(), regTableName)) + if readBack == None : + pass + except taos.error.ProgrammingError as err: + errno = Helper.convertErrno(err.errno) + if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result + print("Da",end="") # Da means delete data success and for all datas + + elif errno in [0x218, 0x362]: # table doesn't exist + # do nothing + pass + else: + # Re-throw otherwise + raise + finally: + self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock + # Done with read-back verification, unlock the table now + + if Config.getConfig().record_ops: + if self.fAddLogDone is None: + raise CrashGenError("Unexpected empty fAddLogDone") + self.fAddLogDone.write("Wrote {} to {}\n".format(intWrote, regTableName)) + self.fAddLogDone.flush() + os.fsync(self.fAddLogDone.fileno()) + + def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): + # ds = self._dbManager # Quite DANGEROUS here, may result in multi-thread client access + db = self._db + dbc = wt.getDbConn() + numTables = self.LARGE_NUMBER_OF_TABLES if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_TABLES + numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS + tblSeq = list(range(numTables )) + random.shuffle(tblSeq) # now we have random sequence + for i in tblSeq: + if (i in self.activeTable): # wow already active + # print("x", end="", flush=True) # concurrent insertion + Progress.emit(Progress.CONCURRENT_INSERTION) + else: + self.activeTable.add(i) # marking it active + + dbName = db.getName() + sTable = db.getFixedSuperTable() + regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i) + fullTableName = dbName + '.' + regTableName + # self._lockTable(fullTableName) # "create table" below. 
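
The activeTable set above is an advisory marker shared by the add/delete tasks: claim a table index, work on it, then discard it. A condensed sketch of the guard:

    import random

    active_table: set = set()
    table_seq = list(range(8))
    random.shuffle(table_seq)            # visit tables in random order
    for i in table_seq:
        if i in active_table:            # another worker is already on it
            continue                     # (the real task just emits a progress tick)
        active_table.add(i)
        try:
            pass                         # ensure the table exists, then delete rows
        finally:
            active_table.discard(i)      # discard(), unlike remove(), never raises
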
Stop it if the table is "locked" + sTable.ensureRegTable(self, wt.getDbConn(), regTableName) # Ensure the table exists + # self._unlockTable(fullTableName) + + self._deleteData(db, dbc, regTableName, te) + + self.activeTable.discard(i) # not raising an error, unlike remove + class ThreadStacks: # stack info for all threads def __init__(self): @@ -2259,7 +2690,8 @@ class ThreadStacks: # stack info for all threads # Now print print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(shortTid)) lastSqlForThread = DbConn.fetchSqlForThread(shortTid) - print("Last SQL statement attempted from thread {} is: {}".format(shortTid, lastSqlForThread)) + time_cost = DbConn.get_time_cost() + print("Last SQL statement attempted from thread {} ({:.4f} sec ago) is: {}".format(shortTid, time_cost ,lastSqlForThread)) stackFrame = 0 for frame in stack: # was using: reversed(stack) # print(frame) From 8a5427c95bd27eb55397d8a17ff76892a1b47a5e Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Tue, 8 Nov 2022 09:41:42 +0800 Subject: [PATCH 03/69] update --- tests/pytest/crash_gen/crash_gen_main.py | 39 ++++++++++------ tests/pytest/crash_gen/shared/db.py | 57 +++++++++++++++++++++--- 2 files changed, 77 insertions(+), 19 deletions(-) diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index d8946780a2..1ef89dec4e 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -1761,7 +1761,10 @@ class TaskCreateSuperTable(StateTransitionTask): sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place - + + if sTable.hasStreams(wt.getDbConn()) or sTable.hasStreamTables(wt.getDbConn()): + sTable.dropStreams(wt.getDbConn()) + sTable.dropStreamTables(wt.getDbConn()) sTable.create(wt.getDbConn(), {'ts': TdDataType.TIMESTAMP, 'speed': TdDataType.INT, 'color': TdDataType.BINARY16}, { 'b': TdDataType.BINARY200, 'f': TdDataType.FLOAT}, @@ -2557,16 +2560,20 @@ class TaskDeleteData(StateTransitionTask): # Now read it back and verify, we might encounter an error if table is dropped if Config.getConfig().verify_data: # only if command line asks for it try: - readBack = dbc.queryScalar("SELECT * from {}.{} WHERE ts='{}'". + dbc.query("SELECT * from {}.{} WHERE ts='{}'". format(db.getName(), regTableName, nextTick)) - if readBack == None : - pass + result = dbc.getQueryResult() + if len(result)==0: + # means data has been delete + print("D1",end="") # DF means delete failed + else: + print("DF",end="") # DF means delete failed except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) - if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result - print("D1",end="") # D1 means delete data success and only 1 record + # if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result + # print("D1",end="") # D1 means delete data success and only 1 record - elif errno in [0x218, 0x362]: # table doesn't exist + if errno in [0x218, 0x362,0x2662]: # table doesn't exist # do nothing pass else: @@ -2612,16 +2619,20 @@ class TaskDeleteData(StateTransitionTask): # Now read it back and verify, we might encounter an error if table is dropped if Config.getConfig().verify_data: # only if command line asks for it try: - readBack = dbc.queryScalar("SELECT * from {}.{} ". - format(db.getName(), regTableName)) - if readBack == None : - pass + dbc.query("SELECT * from {}.{} WHERE ts='{}'". 
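
The reworked verification above reads the row back and branches on the result length rather than relying on an empty-result errno. The same check, condensed into a helper; dbc stands for an open DbConn from shared/db.py:

    def row_deleted(dbc, full_table_name: str, ts: str) -> bool:
        """True when the targeted row is gone, i.e. the delete landed ("D1")."""
        dbc.query("SELECT * from {} WHERE ts='{}'".format(full_table_name, ts))
        return len(dbc.getQueryResult()) == 0    # non-empty result => "DF"
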
+ format(db.getName(), regTableName, nextTick)) + result = dbc.getQueryResult() + if len(result)==0: + # means data has been delete + print("DA",end="") + else: + print("DF",end="") # DF means delete failed except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) - if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result - print("Da",end="") # Da means delete data success and for all datas + # if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result + # print("Da",end="") # Da means delete data success and for all datas - elif errno in [0x218, 0x362]: # table doesn't exist + if errno in [0x218, 0x362,0x2662]: # table doesn't exist # do nothing pass else: diff --git a/tests/pytest/crash_gen/shared/db.py b/tests/pytest/crash_gen/shared/db.py index 60c830f4f7..f97e4025e5 100644 --- a/tests/pytest/crash_gen/shared/db.py +++ b/tests/pytest/crash_gen/shared/db.py @@ -26,9 +26,12 @@ class DbConn: TYPE_NATIVE = "native-c" TYPE_REST = "rest-api" TYPE_INVALID = "invalid" + + # class variables lastSqlFromThreads : dict[int, str] = {} # stored by thread id, obtained from threading.current_thread().ident%10000 + spendThreads : dict[int, float] = {} # stored by thread id, obtained from threading.current_thread().ident%10000 @classmethod def saveSqlForCurrentThread(cls, sql: str): @@ -37,15 +40,36 @@ class DbConn: run into a dead-lock situation, we can pick out the deadlocked thread, and use that information to find what what SQL statement is stuck. ''' + th = threading.current_thread() shortTid = th.native_id % 10000 #type: ignore cls.lastSqlFromThreads[shortTid] = sql # Save this for later @classmethod - def fetchSqlForThread(cls, shortTid : int) -> str : + def fetchSqlForThread(cls, shortTid : int) -> str : + + print("=======================") if shortTid not in cls.lastSqlFromThreads: raise CrashGenError("No last-attempted-SQL found for thread id: {}".format(shortTid)) - return cls.lastSqlFromThreads[shortTid] + return cls.lastSqlFromThreads[shortTid] + + + @classmethod + def sql_exec_spend(cls, cost: float): + ''' + Let us save the last SQL statement on a per-thread basis, so that when later we + run into a dead-lock situation, we can pick out the deadlocked thread, and use + that information to find what what SQL statement is stuck. + ''' + th = threading.current_thread() + shortTid = th.native_id % 10000 #type: ignore + cls.spendThreads[shortTid] = cost # Save this for later + + @classmethod + def get_time_cost(cls) ->float: + th = threading.current_thread() + shortTid = th.native_id % 10000 #type: ignore + return cls.spendThreads.get(shortTid) @classmethod def create(cls, connType, dbTarget): @@ -61,6 +85,7 @@ class DbConn: def createNative(cls, dbTarget) -> DbConn: return cls.create(cls.TYPE_NATIVE, dbTarget) + @classmethod def createRest(cls, dbTarget) -> DbConn: return cls.create(cls.TYPE_REST, dbTarget) @@ -75,6 +100,7 @@ class DbConn: return "[DbConn: type={}, target={}]".format(self._type, self._dbTarget) def getLastSql(self): + return self._lastSql def open(self): @@ -184,13 +210,19 @@ class DbConnRest(DbConn): def _doSql(self, sql): self._lastSql = sql # remember this, last SQL attempted self.saveSqlForCurrentThread(sql) # Save in global structure too. 
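
Everything in these helpers is keyed by a shortened thread id. A self-contained sketch of the per-thread bookkeeping that saveSqlForCurrentThread and sql_exec_spend implement above:

    import threading, time

    last_sql: dict[int, str] = {}
    spend: dict[int, float] = {}

    def run_sql(sql: str):
        tid = threading.current_thread().native_id % 10000   # short, log-friendly id
        last_sql[tid] = sql
        start = time.time()
        try:
            pass                                 # execute the statement here
        finally:
            spend[tid] = time.time() - start     # recorded even if the call raises
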
#TODO: combine with above - try: + time_cost = -1 + time_start = time.time() + try: r = requests.post(self._url, data = sql, - auth = HTTPBasicAuth('root', 'taosdata')) + auth = HTTPBasicAuth('root', 'taosdata')) except: print("REST API Failure (TODO: more info here)") + self.sql_exec_spend(-2) raise + finally: + time_cost = time.time()- time_start + self.sql_exec_spend(time_cost) rj = r.json() # Sanity check for the "Json Result" if ('status' not in rj): @@ -223,6 +255,8 @@ class DbConnRest(DbConn): "[SQL-REST] Execution Result, nRows = {}, SQL = {}".format(nRows, sql)) return nRows + + def query(self, sql): # return rows affected return self.execute(sql) @@ -336,6 +370,7 @@ class MyTDSql: raise return self.affectedRows + class DbTarget: def __init__(self, cfgPath, hostAddr, port): self.cfgPath = cfgPath @@ -355,6 +390,7 @@ class DbConnNative(DbConn): # _connInfoDisplayed = False # TODO: find another way to display this totalConnections = 0 # Not private totalRequests = 0 + time_cost = -1 def __init__(self, dbTarget): super().__init__(dbTarget) @@ -413,8 +449,19 @@ class DbConnNative(DbConn): "Cannot exec SQL unless db connection is open", CrashGenError.DB_CONNECTION_NOT_OPEN) Logging.debug("[SQL] Executing SQL: {}".format(sql)) self._lastSql = sql + time_cost = -1 + nRows = 0 + time_start = time.time() self.saveSqlForCurrentThread(sql) # Save in global structure too. #TODO: combine with above - nRows = self._tdSql.execute(sql) + try: + nRows= self._tdSql.execute(sql) + except Exception as e: + self.sql_exec_spend(-2) + finally: + time_cost = time.time() - time_start + self.sql_exec_spend(time_cost) + + cls = self.__class__ cls.totalRequests += 1 Logging.debug( From 7aac1066507d69f55ed3317cfcd6228d2673ea3f Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Tue, 8 Nov 2022 10:54:57 +0800 Subject: [PATCH 04/69] update --- tests/pytest/crash_gen/crash_gen_main.py | 149 +++++++++++++---------- 1 file changed, 87 insertions(+), 62 deletions(-) diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 1ef89dec4e..8d2fb65ca8 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -1712,30 +1712,66 @@ class TaskCreateStream(StateTransitionTask): ''' stbname =sTable.getName() sub_tables = sTable.getRegTables(wt.getDbConn()) + aggExpr = Dice.choice([ + 'count(*)', + 'avg(speed)', + # 'twa(speed)', # TODO: this one REQUIRES a where statement, not reasonable + 'sum(speed)', + 'stddev(speed)', + # SELECTOR functions + 'min(speed)', + 'max(speed)', + 'first(speed)', + 'last(speed)', + 'top(speed, 50)', # TODO: not supported? + 'bottom(speed, 50)', # TODO: not supported? + 'apercentile(speed, 10)', # TODO: TD-1316 + 'last_row(*)', # TODO: commented out per TD-3231, we should re-create + # Transformation Functions + # 'diff(speed)', # TODO: no supported?! 
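
For reference while reading this list: whichever aggregate Dice.choice returns is spliced into a stream definition of the following shape; the stream, database, and table names below are illustrative:

    agg_expr = 'count(*)'
    stream_sql = ('create stream {} into {}.{} as select {}, avg(speed) '
                  'FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '
                  ).format('db_0_sub_stream', 'db_0', 'avg_sub',
                           agg_expr, 'db_0', 'reg_table_0')
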
+ 'spread(speed)', + 'elapsed(ts)', + 'mode(speed)', + 'bottom(speed,1)', + 'top(speed,1)', + 'tail(speed,1)', + 'unique(color)', + 'csum(speed)', + 'DERIVATIVE(speed,1s,1)', + 'diff(speed,1)', + 'irate(speed)', + 'mavg(speed,3)', + 'sample(speed,5)', + 'STATECOUNT(speed,"LT",1)', + 'STATEDURATION(speed,"LT",1)', + 'twa(speed)' + + ]) # TODO: add more from 'top' + if sub_tables: if sub_tables: # if not empty sub_tbname = sub_tables[0] - # create stream with query above sub_table - stream_sql = 'create stream {} into {}.{} as select count(*), avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.format(sub_stream_name,dbname,sub_stream_tb_name ,dbname,sub_tbname) + stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.format(sub_stream_name,dbname,sub_stream_tb_name ,aggExpr,dbname,sub_tbname) try: self.execWtSql(wt, stream_sql) + Logging.debug("[OPS] stream is creating at {}".format(time.time())) except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) if errno in [0x03f0]: # stream already exists # stream need drop before drop table pass - sTable.setStreamName(sub_stream_name) else: pass else: - stream_sql = 'create stream {} into {}.{} as select count(*), avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.format(super_stream_name,dbname,super_stream_tb_name,dbname,stbname) + stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.format(super_stream_name,dbname,super_stream_tb_name,aggExpr, dbname,stbname) try: self.execWtSql(wt, stream_sql) + Logging.debug("[OPS] stream is creating at {}".format(time.time())) except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) if errno in [0x03f0]: # stream already exists @@ -1779,16 +1815,10 @@ class TdSuperTable: def __init__(self, stName, dbName): self._stName = stName self._dbName = dbName - self._streamName = [] def getName(self): return self._stName - def setStreamName(self,name): - self._streamName.append(name) - - def getStreamName(self): - return self._streamName def drop(self, dbc, skipCheck = False): dbName = self._dbName @@ -2000,38 +2030,38 @@ class TdSuperTable: if not doAggr: # don't do aggregate query, just simple one commonExpr = Dice.choice([ '*', - # 'abs(speed)', - # 'acos(speed)', - # 'asin(speed)', - # 'atan(speed)', - # 'ceil(speed)', - # 'cos(speed)', - # 'cos(speed)', - # 'floor(speed)', - # 'log(speed,2)', - # 'pow(speed,2)', - # 'round(speed)', - # 'sin(speed)', - # 'sqrt(speed)', - # 'char_length(color)', - # 'concat(color,color)', - # 'concat_ws(" ", color,color," ")', - # 'length(color)', - # 'lower(color)', - # 'ltrim(color)', - # 'substr(color , 2)', - # 'upper(color)', - # 'cast(speed as double)', - # 'cast(ts as bigint)', - # # 'TO_ISO8601(color)', - # # 'TO_UNIXTIMESTAMP(ts)', - # 'now()', - # 'timediff(ts,now)', - # 'timezone()', - # 'TIMETRUNCATE(ts,1s)', - # 'TIMEZONE()', - # 'TODAY()', - # 'distinct(color)' + 'abs(speed)', + 'acos(speed)', + 'asin(speed)', + 'atan(speed)', + 'ceil(speed)', + 'cos(speed)', + 'cos(speed)', + 'floor(speed)', + 'log(speed,2)', + 'pow(speed,2)', + 'round(speed)', + 'sin(speed)', + 'sqrt(speed)', + 'char_length(color)', + 'concat(color,color)', + 'concat_ws(" ", color,color," ")', + 'length(color)', + 'lower(color)', + 'ltrim(color)', + 'substr(color , 2)', + 'upper(color)', + 'cast(speed as double)', + 'cast(ts as bigint)', + # 'TO_ISO8601(color)', + # 
'TO_UNIXTIMESTAMP(ts)', + 'now()', + 'timediff(ts,now)', + 'timezone()', + 'TIMETRUNCATE(ts,1s)', + 'TIMEZONE()', + 'TODAY()', + 'distinct(color)' ] ) ret.append(SqlQuery( # reg table @@ -2057,21 +2087,21 @@ class TdSuperTable: # Transformation Functions # 'diff(speed)', # TODO: no supported?! 'spread(speed)', - # 'elapsed(ts)', - # 'mode(speed)', - # 'bottom(speed,1)', - # 'top(speed,1)', - # 'tail(speed,1)', - # 'unique(color)', - # 'csum(speed)', - # 'DERIVATIVE(speed,1s,1)', - # 'diff(speed,1)', - # 'irate(speed)', - # 'mavg(speed,3)', - # 'sample(speed,5)', - # 'STATECOUNT(speed,"LT",1)', - # 'STATEDURATION(speed,"LT",1)', - # 'twa(speed)' + 'elapsed(ts)', + 'mode(speed)', + 'bottom(speed,1)', + 'top(speed,1)', + 'tail(speed,1)', + 'unique(color)', + 'csum(speed)', + 'DERIVATIVE(speed,1s,1)', + 'diff(speed,1)', + 'irate(speed)', + 'mavg(speed,3)', + 'sample(speed,5)', + 'STATECOUNT(speed,"LT",1)', + 'STATEDURATION(speed,"LT",1)', + 'twa(speed)' @@ -2181,7 +2211,6 @@ class TaskDropSuperTable(StateTransitionTask): isSuccess = True for i in tblSeq: regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i) - streams_prix = self._db.getName() try: self.execWtSql(wt, "drop table {}.{}". format(self._db.getName(), regTableName)) # nRows always 0, like MySQL @@ -2189,10 +2218,6 @@ class TaskDropSuperTable(StateTransitionTask): pass - - - - if (not tickOutput): tickOutput = True # Print only one time if isSuccess: From 937631085f2956529e501dcf09eb83d47f984378 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Tue, 8 Nov 2022 11:44:42 +0800 Subject: [PATCH 05/69] update --- tests/pytest/crash_gen/crash_gen_main.py | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 8d2fb65ca8..1701c293a6 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -1712,7 +1712,7 @@ class TaskCreateStream(StateTransitionTask): ''' stbname =sTable.getName() sub_tables = sTable.getRegTables(wt.getDbConn()) - aggExpr = Dice.choice([ + aggExpr = Dice.choice([ CREATE STREAM avg_vol_s1 INTO avg_vol1 AS SELECT csum(current), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s); 'count(*)', 'avg(speed)', # 'twa(speed)', # TODO: this one REQUIRES a where statement, not reasonable @@ -1723,27 +1723,9 @@ class TaskCreateStream(StateTransitionTask): 'max(speed)', 'first(speed)', 'last(speed)', - 'top(speed, 50)', # TODO: not supported? - 'bottom(speed, 50)', # TODO: not supported? 'apercentile(speed, 10)', # TODO: TD-1316 'last_row(*)', # TODO: commented out per TD-3231, we should re-create # Transformation Functions - # 'diff(speed)', # TODO: no supported?! 
- 'spread(speed)', - 'elapsed(ts)', - 'mode(speed)', - 'bottom(speed,1)', - 'top(speed,1)', - 'tail(speed,1)', - 'unique(color)', - 'csum(speed)', - 'DERIVATIVE(speed,1s,1)', - 'diff(speed,1)', - 'irate(speed)', - 'mavg(speed,3)', - 'sample(speed,5)', - 'STATECOUNT(speed,"LT",1)', - 'STATEDURATION(speed,"LT",1)', 'twa(speed)' ]) # TODO: add more from 'top' From 908f0d719f191feecf55a303e09c584689072a55 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Tue, 8 Nov 2022 11:47:00 +0800 Subject: [PATCH 06/69] update --- tests/pytest/crash_gen/crash_gen_main.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 1701c293a6..6c23158535 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -1696,9 +1696,9 @@ class TaskCreateStream(StateTransitionTask): dbname = self._db.getName() sub_stream_name = dbname+ '_sub_stream' - sub_stream_tb_name = 'avg_sub' + sub_stream_tb_name = 'stream_tb_sub' super_stream_name = dbname+ '_super_stream' - super_stream_tb_name = 'avg_super' + super_stream_tb_name = 'stream_tb_super' if not self._db.exists(wt.getDbConn()): Logging.debug("Skipping task, no DB yet") return @@ -1892,7 +1892,7 @@ class TdSuperTable: def hasStreamTables(self,dbc: DbConn): - return dbc.query("show {}.stables like 'avg%'".format(self._dbName)) > 0 + return dbc.query("show {}.stables like 'stream_tb%'".format(self._dbName)) > 0 def hasStreams(self,dbc: DbConn): return dbc.query("show streams") > 0 @@ -1907,7 +1907,7 @@ class TdSuperTable: return not dbc.query("show streams ") > 0 def dropStreamTables(self, dbc: DbConn): - dbc.query("show {}.stables like 'avg%'".format(self._dbName)) + dbc.query("show {}.stables like 'stream_tb%'".format(self._dbName)) StreamTables = dbc.getQueryResult() @@ -1915,7 +1915,7 @@ class TdSuperTable: if self.dropStreams: dbc.execute('drop table {}.{}'.format(self._dbName,StreamTable[0])) - return not dbc.query("show {}.stables like 'avg%'".format(self._dbName)) + return not dbc.query("show {}.stables like 'stream_tb%'".format(self._dbName)) def ensureRegTable(self, task: Optional[Task], dbc: DbConn, regTableName: str): ''' From ca00459abb72a56a2b833ed225daf06532145510 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Tue, 8 Nov 2022 11:56:18 +0800 Subject: [PATCH 07/69] update --- tests/pytest/crash_gen/crash_gen_main.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 6c23158535..933d620824 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -1712,7 +1712,7 @@ class TaskCreateStream(StateTransitionTask): ''' stbname =sTable.getName() sub_tables = sTable.getRegTables(wt.getDbConn()) - aggExpr = Dice.choice([ CREATE STREAM avg_vol_s1 INTO avg_vol1 AS SELECT csum(current), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s); + aggExpr = Dice.choice([ 'count(*)', 'avg(speed)', # 'twa(speed)', # TODO: this one REQUIRES a where statement, not reasonable @@ -1735,7 +1735,7 @@ class TaskCreateStream(StateTransitionTask): if sub_tables: # if not empty sub_tbname = sub_tables[0] # create stream with query above sub_table - stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.format(sub_stream_name,dbname,sub_stream_tb_name 
,aggExpr,dbname,sub_tbname) + stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} where ts now -1h PARTITION BY tbname INTERVAL(5s) SLIDING(3s) FILL (prev) '.format(sub_stream_name,dbname,sub_stream_tb_name ,aggExpr,dbname,sub_tbname) try: self.execWtSql(wt, stream_sql) Logging.debug("[OPS] stream is creating at {}".format(time.time())) @@ -1749,7 +1749,7 @@ class TaskCreateStream(StateTransitionTask): pass else: - stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.format(super_stream_name,dbname,super_stream_tb_name,aggExpr, dbname,stbname) + stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} where ts now -1h PARTITION BY tbname INTERVAL(5s) SLIDING(3s) FILL (prev) '.format(super_stream_name,dbname,super_stream_tb_name,aggExpr, dbname,stbname) try: self.execWtSql(wt, stream_sql) From 59f50ae8c5ad38fc8e36eba5128ebdcda4f13f59 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Wed, 9 Nov 2022 18:08:02 +0800 Subject: [PATCH 08/69] update --- tests/pytest/crash_gen/crash_gen_main.py | 298 +++++++++++++++++++++-- 1 file changed, 271 insertions(+), 27 deletions(-) diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 933d620824..6c52b459ca 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -37,6 +37,7 @@ import requests # from guppy import hpy import gc import taos +from taos.tmq import * from .shared.types import TdColumns, TdTags @@ -675,9 +676,10 @@ class AnyState: CAN_DROP_DB = 2 CAN_CREATE_FIXED_SUPER_TABLE = 3 CAN_CREATE_STREAM = 3 # super table must exists + CAN_CREATE_TOPIC = 3 # super table must exists + CAN_CREATE_CONSUMERS = 3 CAN_DROP_FIXED_SUPER_TABLE = 4 CAN_ADD_DATA = 5 - CAN_DROP_STREAM = 5 CAN_READ_DATA = 6 CAN_DELETE_DATA = 6 @@ -730,6 +732,12 @@ class AnyState: return False return self._info[self.CAN_DROP_FIXED_SUPER_TABLE] + def canCreateTopic(self): + return self._info[self.CAN_CREATE_TOPIC] + + def canCreateConsumers(self): + return self._info[self.CAN_CREATE_CONSUMERS] + def canCreateStream(self): return self._info[self.CAN_CREATE_STREAM] @@ -1679,9 +1687,22 @@ class TaskDropDb(StateTransitionTask): return state.canDropDb() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): + + # drop topics before drop db + + if self._db.getFixedSuperTable().hasTopics(wt.getDbConn()): + + self._db.getFixedSuperTable().dropTopics(wt.getDbConn(),self._db.getName(),None) + self._db.getFixedSuperTable().dropTopics(wt.getDbConn(),self._db.getName(),self._db.getFixedSuperTableName) + self.execWtSql(wt, "drop database {}".format(self._db.getName())) + Logging.debug("[OPS] database dropped at {}".format(time.time())) + +''' +# Streams will generator TD-20237 (it will crash taosd , start this task when this issue fixed ) + class TaskCreateStream(StateTransitionTask): @classmethod @@ -1707,9 +1728,9 @@ class TaskCreateStream(StateTransitionTask): # wt.execSql("use db") # should always be in place # create stream - ''' - CREATE STREAM avg_vol_s INTO avg_vol AS SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s); - ''' + + # CREATE STREAM avg_vol_s INTO avg_vol AS SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s); + stbname =sTable.getName() sub_tables = sTable.getRegTables(wt.getDbConn()) aggExpr = Dice.choice([ @@ -1735,7 +1756,7 @@ class 
TaskCreateStream(StateTransitionTask): if sub_tables: # if not empty sub_tbname = sub_tables[0] # create stream with query above sub_table - stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} where ts now -1h PARTITION BY tbname INTERVAL(5s) SLIDING(3s) FILL (prev) '.format(sub_stream_name,dbname,sub_stream_tb_name ,aggExpr,dbname,sub_tbname) + stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.format(sub_stream_name,dbname,sub_stream_tb_name ,aggExpr,dbname,sub_tbname) try: self.execWtSql(wt, stream_sql) Logging.debug("[OPS] stream is creating at {}".format(time.time())) @@ -1749,7 +1770,7 @@ class TaskCreateStream(StateTransitionTask): pass else: - stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} where ts now -1h PARTITION BY tbname INTERVAL(5s) SLIDING(3s) FILL (prev) '.format(super_stream_name,dbname,super_stream_tb_name,aggExpr, dbname,stbname) + stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.format(super_stream_name,dbname,super_stream_tb_name,aggExpr, dbname,stbname) try: self.execWtSql(wt, stream_sql) @@ -1759,8 +1780,142 @@ class TaskCreateStream(StateTransitionTask): if errno in [0x03f0]: # stream already exists # stream need drop before drop table pass +''' - +class TaskCreateTopic(StateTransitionTask): + + @classmethod + def getEndState(cls): + return StateHasData() + + @classmethod + def canBeginFrom(cls, state: AnyState): + return state.canCreateTopic() + + def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): + dbname = self._db.getName() + + sub_topic_name = dbname+ '_sub_topic' + super_topic_name = dbname+ '_super_topic' + stable_topic = dbname+ '_stable_topic' + db_topic = 'database_' + dbname+ '_topics' + if not self._db.exists(wt.getDbConn()): + Logging.debug("Skipping task, no DB yet") + return + + sTable = self._db.getFixedSuperTable() # type: TdSuperTable + # wt.execSql("use db") # should always be in place + + # create topic + + # create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1; + + stbname =sTable.getName() + sub_tables = sTable.getRegTables(wt.getDbConn()) + scalarExpr = Dice.choice([ '*','speed','color', + 'abs(speed)', + 'acos(speed)', + 'asin(speed)', + 'atan(speed)', + 'ceil(speed)', + 'cos(speed)', + 'cos(speed)', + 'floor(speed)', + 'log(speed,2)', + 'pow(speed,2)', + 'round(speed)', + 'sin(speed)', + 'sqrt(speed)', + 'char_length(color)', + 'concat(color,color)', + 'concat_ws(" ", color,color," ")', + 'length(color)', + 'lower(color)', + 'ltrim(color)', + 'substr(color , 2)', + 'upper(color)', + 'cast(speed as double)', + 'cast(ts as bigint)', + + ]) # TODO: add more from 'top' + if Dice.throw(3)==0: + + if sub_tables: + + if sub_tables: # if not empty + sub_tbname = sub_tables[0] + # create stream with query above sub_table + topic_sql = 'create topic {} as select {} FROM {}.{} ; '.format(sub_topic_name,scalarExpr,dbname,sub_tbname) + try: + self.execWtSql(wt, "use {}".format(dbname)) + self.execWtSql(wt, topic_sql) + Logging.debug("[OPS] topic is creating at {}".format(time.time())) + except taos.error.ProgrammingError as err: + errno = Helper.convertErrno(err.errno) + if errno in [0x03f0]: # topic already exists + # topic need drop before drop table + pass + + else: + pass + + else: + topic_sql = 'create topic {} as select {} FROM {}.{} '.format(super_topic_name,scalarExpr, dbname,stbname) + try: + 
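
TaskCreateTopic builds one of three topic flavors chosen by Dice.throw(3): a query (column) topic as here, plus STABLE and DATABASE topics in the arms that follow below. A sketch of the three statement shapes, with illustrative names:

    dbname, stbname = 'db_0', 'fs_table'

    column_topic = "create topic {} as select speed, color FROM {}.{} ; ".format(
        dbname + '_super_topic', dbname, stbname)      # plain query topic
    stable_topic = "create topic {} AS STABLE {}.{} ".format(
        dbname + '_stable_topic', dbname, stbname)     # super-table topic
    db_topic = "create topic {} AS DATABASE {} ".format(
        'database_' + dbname + '_topics', dbname)      # whole-database topic
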
self.execWtSql(wt, "use {}".format(dbname)) + self.execWtSql(wt, topic_sql) + Logging.debug("[OPS] subquery topic is creating at {}".format(time.time())) + except taos.error.ProgrammingError as err: + errno = Helper.convertErrno(err.errno) + if errno in [0x03f0]: # topic already exists + # topic need drop before drop table + pass + elif Dice.throw(3)==1: + topic_sql = 'create topic {} AS STABLE {}.{} '.format(stable_topic,dbname,stbname) + try: + self.execWtSql(wt, "use {}".format(dbname)) + self.execWtSql(wt, topic_sql) + Logging.debug("[OPS] stable topic is creating at {}".format(time.time())) + except taos.error.ProgrammingError as err: + errno = Helper.convertErrno(err.errno) + if errno in [0x03f0]: # topic already exists + # topic need drop before drop table + pass + elif Dice.throw(3)==2: + topic_sql = 'create topic {} AS DATABASE {} '.format(db_topic,dbname) + try: + self.execWtSql(wt, "use {}".format(dbname)) + self.execWtSql(wt, topic_sql) + Logging.debug("[OPS] db topic is creating at {}".format(time.time())) + except taos.error.ProgrammingError as err: + errno = Helper.convertErrno(err.errno) + if errno in [0x03f0]: # topic already exists + # topic need drop before drop table + pass + else: + pass + + +class TaskCreateConsumers(StateTransitionTask): + + @classmethod + def getEndState(cls): + return StateHasData() + + @classmethod + def canBeginFrom(cls, state: AnyState): + return state.canCreateConsumers() + + def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): + dbname = self._db.getName() + + sTable = self._db.getFixedSuperTable() # type: TdSuperTable + # wt.execSql("use db") # should always be in place + + # create Consumers + if Dice.throw(50)==0: # because subscribe is cost so much time , Reduce frequency of this task + if sTable.hasTopics(wt.getDbConn()): + sTable.createConsumer(wt.getDbConn(),random.randint(1,10)) class TaskCreateSuperTable(StateTransitionTask): @@ -1780,9 +1935,6 @@ class TaskCreateSuperTable(StateTransitionTask): sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place - if sTable.hasStreams(wt.getDbConn()) or sTable.hasStreamTables(wt.getDbConn()): - sTable.dropStreams(wt.getDbConn()) - sTable.dropStreamTables(wt.getDbConn()) sTable.create(wt.getDbConn(), {'ts': TdDataType.TIMESTAMP, 'speed': TdDataType.INT, 'color': TdDataType.BINARY16}, { 'b': TdDataType.BINARY200, 'f': TdDataType.FLOAT}, @@ -1797,6 +1949,8 @@ class TdSuperTable: def __init__(self, stName, dbName): self._stName = stName self._dbName = dbName + self._consumerLists = {} + self._ConsumerInsts = [] def getName(self): return self._stName @@ -1806,6 +1960,12 @@ class TdSuperTable: dbName = self._dbName if self.exists(dbc) : # if myself exists fullTableName = dbName + '.' 
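
TaskCreateConsumers above hands off to TdSuperTable.createConsumer, whose worker threads appear just below. A minimal sketch of one short-lived consumer using the same taos.tmq calls as this patch; the group id and the 5-second lifetime mirror the source:

    import time, threading
    from taos.tmq import TaosTmqConf, TaosTmqList

    def consume(topics):
        conf = TaosTmqConf()
        conf.set("group.id", "tg2")
        conf.set("td.connect.user", "root")
        conf.set("td.connect.pass", "taosdata")
        conf.set("enable.auto.commit", "true")
        consumer = conf.new_consumer()
        topic_list = TaosTmqList()
        for t in topics:
            topic_list.append(t)
        consumer.subscribe(topic_list)
        time.sleep(5)                  # consume for ~5 seconds, then exit
        consumer.unsubscribe()

    threading.Thread(target=consume, args=(["db_0_super_topic"],)).start()
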
+ self._stName + if self.hasStreams(dbc): + self.dropStreams(dbc) + self.dropStreamTables(dbc) + if self.hasTopics(dbc): + self.dropTopics(dbName,None) + self.dropTopics(dbName,self._stName) try: dbc.execute("DROP TABLE {}".format(fullTableName)) except taos.error.ProgrammingError as err: @@ -1843,12 +2003,19 @@ class TdSuperTable: if dbc.existsSuperTable(self._stName): if dropIfExists: + if self.hasStreams(dbc): + self.dropStreams(dbc) + self.dropStreamTables(dbc) + + # drop topics before drop stables + if self.hasTopics(dbc): + self.dropTopics(dbc,self._dbName,None) + self.dropTopics(dbc,self._dbName,self._stName ) + dbc.execute("DROP TABLE {}".format(fullTableName)) - pass - - + pass # dbc.execute("DROP TABLE {}".format(fullTableName)) else: # error raise CrashGenError("Cannot create super table, already exists: {}".format(self._stName)) @@ -1863,7 +2030,47 @@ class TdSuperTable: ) else: sql += " TAGS (dummy int) " - dbc.execute(sql) + dbc.execute(sql) + + def createConsumer(self, dbc: DbConn , Consumer_nums): + + def generateConsumer(current_topic_list): + conf = TaosTmqConf() + conf.set("group.id", "tg2") + conf.set("td.connect.user", "root") + conf.set("td.connect.pass", "taosdata") + conf.set("enable.auto.commit", "true") + def tmq_commit_cb_print(tmq, resp, offset, param=None): + print(f"commit: {resp}, tmq: {tmq}, offset: {offset}, param: {param}") + conf.set_auto_commit_cb(tmq_commit_cb_print, None) + consumer = conf.new_consumer() + topic_list = TaosTmqList() + for topic in current_topic_list: + topic_list.append(topic) + consumer.subscribe(topic_list) + time.sleep(5) # consumer work only 5 sec ,and then it will exit + try: + consumer.unsubscribe() + except Exception as e : + pass + return + + # mulit Consumer + current_topic_list = self.getTopicLists(dbc) + for i in range(Consumer_nums): + consumer_inst = threading.Thread(target=generateConsumer, args=(current_topic_list,)) + self._ConsumerInsts.append(consumer_inst) + + for ConsumerInst in self._ConsumerInsts: + ConsumerInst.start() + for ConsumerInst in self._ConsumerInsts: + ConsumerInst.join() + + def getTopicLists(self, dbc: DbConn): + dbc.query("show topics ") + topics = dbc.getQueryResult() + topicLists = [v[0] for v in topics] + return topicLists def getRegTables(self, dbc: DbConn): dbName = self._dbName @@ -1878,12 +2085,7 @@ class TdSuperTable: return [v[0] for v in qr] # list transformation, ref: https://stackoverflow.com/questions/643823/python-list-transformation def hasRegTables(self, dbc: DbConn): - # print(self._stName) - # dbc.query("SELECT * FROM {}.{}".format(self._dbName, self._stName)) - # print(dbc.getQueryResult()) - if self.hasStreamTables(dbc) or self.hasStreams(dbc): - if self.dropStreams(dbc): - self.dropStreamTables(dbc) + if dbc.existsSuperTable(self._stName): return dbc.query("SELECT * FROM {}.{}".format(self._dbName, self._stName)) > 0 @@ -1896,7 +2098,41 @@ class TdSuperTable: def hasStreams(self,dbc: DbConn): return dbc.query("show streams") > 0 - + + def hasTopics(self,dbc: DbConn): + + return dbc.query("show topics") > 0 + + def dropTopics(self,dbc: DbConn , dbname=None,stb_name=None): + dbc.query("show topics ") + topics = dbc.getQueryResult() + + if dbname !=None and stb_name == None : + + for topic in topics: + if dbname in topic[0] and topic[0].startswith("database"): + try: + dbc.execute('drop topic {}'.format(topic[0])) + except taos.error.ProgrammingError as err: + errno = Helper.convertErrno(err.errno) + if errno in [0x03EB]: # Topic subscribed cannot be dropped + pass + # for subsript 
in subscriptions: + + else: + pass + + pass + return True + elif dbname !=None and stb_name!= None: + for topic in topics: + if topic[0].startswith(self._dbName) and topic[0].endswith('topic'): + dbc.execute('drop topic {}'.format(topic[0])) + return True + else: + return True + pass + def dropStreams(self,dbc:DbConn): dbc.query("show streams ") Streams = dbc.getQueryResult() @@ -1912,7 +2148,7 @@ class TdSuperTable: StreamTables = dbc.getQueryResult() for StreamTable in StreamTables: - if self.dropStreams: + if self.dropStreams(dbc): dbc.execute('drop table {}.{}'.format(self._dbName,StreamTable[0])) return not dbc.query("show {}.stables like 'stream_tb%'".format(self._dbName)) @@ -2083,11 +2319,7 @@ class TdSuperTable: 'sample(speed,5)', 'STATECOUNT(speed,"LT",1)', 'STATEDURATION(speed,"LT",1)', - 'twa(speed)' - - - - + 'twa(speed)' ]) # TODO: add more from 'top' @@ -2194,6 +2426,7 @@ class TaskDropSuperTable(StateTransitionTask): for i in tblSeq: regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i) try: + self.execWtSql(wt, "drop table {}.{}". format(self._db.getName(), regTableName)) # nRows always 0, like MySQL except taos.error.ProgrammingError as err: @@ -2209,6 +2442,17 @@ class TaskDropSuperTable(StateTransitionTask): # Drop the super table itself tblName = self._db.getFixedSuperTableName() + + # drop streams before drop stables + if self._db.getFixedSuperTable().hasStreams(wt.getDbConn()): + self._db.getFixedSuperTable().dropStreams(wt.getDbConn()) + self._db.getFixedSuperTable().dropStreamTables(wt.getDbConn()) + + # drop topics before drop stables + if self._db.getFixedSuperTable().hasTopics(wt.getDbConn()): + self._db.getFixedSuperTable().dropTopics(wt.getDbConn(),self._db.getName(),None) + self._db.getFixedSuperTable().dropTopics(wt.getDbConn(),self._db.getName(),tblName) + try: self.execWtSql(wt, "drop table {}.{}".format(self._db.getName(), tblName)) except taos.error.ProgrammingError as err: From dde87a374601d6511c4ca808dd5696dc8c976ba5 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Wed, 9 Nov 2022 18:21:43 +0800 Subject: [PATCH 09/69] update --- tests/pytest/crash_gen/crash_gen_main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 6c52b459ca..dbaa8b8ef2 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -1372,6 +1372,7 @@ class Task(): 0x03D3, # Conflict transaction not completed 0x0707, # Query not ready , it always occur at replica 3 0x707, # Query not ready + 0x396, # Database in creating status From 05f0ac62e7c5d6a824c24a258841b1c9fadf7254 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Wed, 9 Nov 2022 18:32:31 +0800 Subject: [PATCH 10/69] update --- tests/pytest/crash_gen/crash_gen_main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index dbaa8b8ef2..eeffe15a88 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -1373,7 +1373,7 @@ class Task(): 0x0707, # Query not ready , it always occur at replica 3 0x707, # Query not ready 0x396, # Database in creating status - + 0x386, # Database in droping status 1000 # REST catch-all error @@ -2052,7 +2052,7 @@ class TdSuperTable: time.sleep(5) # consumer work only 5 sec ,and then it will exit try: consumer.unsubscribe() - except Exception as e : + except TmqError as e : pass return From 
3e133a8661ab4b15fc09482b054787f3079279ab Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Thu, 10 Nov 2022 09:35:22 +0800 Subject: [PATCH 11/69] updaate --- tests/pytest/crash_gen/crash_gen_main.py | 32 +++++++++++++++--------- 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index eeffe15a88..18c3957630 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -255,7 +255,7 @@ class WorkerThread: class ThreadCoordinator: - WORKER_THREAD_TIMEOUT = 1200 # Normal: 120 + WORKER_THREAD_TIMEOUT = 120 # Normal: 120 def __init__(self, pool: ThreadPool, dbManager: DbManager): self._curStep = -1 # first step is 0 @@ -1374,6 +1374,7 @@ class Task(): 0x707, # Query not ready 0x396, # Database in creating status 0x386, # Database in droping status + 0x03E1, # failed on tmq_subscribe ,topic not exist 1000 # REST catch-all error @@ -1905,18 +1906,23 @@ class TaskCreateConsumers(StateTransitionTask): @classmethod def canBeginFrom(cls, state: AnyState): - return state.canCreateConsumers() + return state.canCreateConsumers() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - dbname = self._db.getName() - - sTable = self._db.getFixedSuperTable() # type: TdSuperTable - # wt.execSql("use db") # should always be in place - # create Consumers - if Dice.throw(50)==0: # because subscribe is cost so much time , Reduce frequency of this task - if sTable.hasTopics(wt.getDbConn()): - sTable.createConsumer(wt.getDbConn(),random.randint(1,10)) + if Config.getConfig().connector_type == 'native': + dbname = self._db.getName() + + sTable = self._db.getFixedSuperTable() # type: TdSuperTable + # wt.execSql("use db") # should always be in place + + # create Consumers + if Dice.throw(50)==0: # because subscribe is cost so much time , Reduce frequency of this task + if sTable.hasTopics(wt.getDbConn()): + sTable.createConsumer(wt.getDbConn(),random.randint(1,10)) + else: + print(" restful not support tmq consumers") + return class TaskCreateSuperTable(StateTransitionTask): @@ -2048,7 +2054,10 @@ class TdSuperTable: topic_list = TaosTmqList() for topic in current_topic_list: topic_list.append(topic) - consumer.subscribe(topic_list) + try: + consumer.subscribe(topic_list) + except TmqError as e : + pass time.sleep(5) # consumer work only 5 sec ,and then it will exit try: consumer.unsubscribe() @@ -3326,4 +3335,3 @@ class Container(): return self._verifyValidProperty(name) self._cargo[name] = value - From 86b5b89531772a795f179c851f16b7ff5f282366 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Thu, 10 Nov 2022 10:37:16 +0800 Subject: [PATCH 12/69] update --- tests/pytest/crash_gen/crash_gen_main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 18c3957630..7d5d1c0c8a 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -1367,6 +1367,7 @@ class Task(): 0x2603, # Table does not exist, replaced by 2662 below 0x260d, # Tags number not matched 0x2662, # Table does not exist #TODO: what about 2603 above? 
+ 0x2600, # database not specified, SQL: show stables , database droped , and show tables 0x032C, # Object is creating 0x032D, # Object is dropping 0x03D3, # Conflict transaction not completed From bf30350195ee6d91ece72d9b29057358351a9229 Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Thu, 10 Nov 2022 18:47:21 +0800 Subject: [PATCH 13/69] test:run cast function in query mode 4 --- tests/parallel_test/cases.task | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 6775760ee4..dda053ccf8 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -968,6 +968,7 @@ ,,,system-test,python3 ./test.py -f 2-query/csum.py -Q 4 ,,,system-test,python3 ./test.py -f 2-query/mavg.py -Q 4 ,,,system-test,python3 ./test.py -f 2-query/sample.py -Q 4 +,,,system-test,python3 ./test.py -f 2-query/cast.py -Q 4 ,,,system-test,python3 ./test.py -f 2-query/function_diff.py -Q 4 ,,,system-test,python3 ./test.py -f 2-query/unique.py -Q 4 ,,,system-test,python3 ./test.py -f 2-query/stateduration.py -Q 4 From 20071f09a729da0280c0ca82f76b410463f5e30a Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Thu, 10 Nov 2022 20:23:00 +0800 Subject: [PATCH 14/69] update --- tests/pytest/crash_gen/crash_gen_main.py | 218 ++++++++++++++++++----- tests/pytest/crash_gen/shared/db.py | 24 ++- 2 files changed, 196 insertions(+), 46 deletions(-) diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 7d5d1c0c8a..ba501c3e78 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -420,10 +420,12 @@ class ThreadCoordinator: except threading.BrokenBarrierError as err: self._execStats.registerFailure("Aborted due to worker thread timeout") Logging.error("\n") + Logging.error("Main loop aborted, caused by worker thread(s) time-out of {} seconds".format( ThreadCoordinator.WORKER_THREAD_TIMEOUT)) Logging.error("TAOS related threads blocked at (stack frames top-to-bottom):") ts = ThreadStacks() + ts.record_current_time(time.time()) # record thread exit time at current moment ts.print(filterInternal=True) workerTimeout = True @@ -547,7 +549,12 @@ class ThreadCoordinator: # pick a task type for current state db = self.pickDatabase() - taskType = db.getStateMachine().pickTaskType() # dynamic name of class + if Dice.throw(2)==1: + taskType = db.getStateMachine().pickTaskType() # dynamic name of class + else: + taskType = db.getStateMachine().balance_pickTaskType() # and an method can get balance task types + pass + return taskType(self._execStats, db) # create a task from it def resetExecutedTasks(self): @@ -679,6 +686,8 @@ class AnyState: CAN_CREATE_TOPIC = 3 # super table must exists CAN_CREATE_CONSUMERS = 3 CAN_DROP_FIXED_SUPER_TABLE = 4 + CAN_DROP_TOPIC = 4 + CAN_DROP_STREAM = 4 CAN_ADD_DATA = 5 CAN_READ_DATA = 6 CAN_DELETE_DATA = 6 @@ -734,13 +743,19 @@ class AnyState: def canCreateTopic(self): return self._info[self.CAN_CREATE_TOPIC] + + def canDropTopic(self): + return self._info[self.CAN_DROP_TOPIC] def canCreateConsumers(self): return self._info[self.CAN_CREATE_CONSUMERS] - def canCreateStream(self): + def canCreateStreams(self): return self._info[self.CAN_CREATE_STREAM] + def canDropStream(self): + return self._info[self.CAN_DROP_STREAM] + def canAddData(self): return self._info[self.CAN_ADD_DATA] @@ -919,7 +934,7 @@ class StateHasData(AnyState): ): # only if we didn't create one # we shouldn't have dropped it self.assertNoTask(tasks, TaskDropDb) - 
if (not self.hasTask(tasks, TaskCreateSuperTable) + if not( self.hasTask(tasks, TaskCreateSuperTable) ): # if we didn't create the table # we should not have a task that drops it self.assertNoTask(tasks, TaskDropSuperTable) @@ -1075,6 +1090,28 @@ class StateMechine: # Logging.debug(" (weighted random:{}/{}) ".format(i, len(taskTypes))) return taskTypes[i] + def balance_pickTaskType(self): + # all the task types we can choose from at curent state + BasicTypes = self.getTaskTypes() + weightsTypes = BasicTypes.copy() + + # this matrixs can balance the Frequency of different types of tasks + weight_matrixs = {'TaskDropDb': 5 , 'TaskDropTopics': 20 , 'TaskDropStreams':10 , 'TaskDropStreamTables':10 , + 'TaskReadData':50 , 'TaskDropSuperTable':5 , 'TaskAlterTags':3 , 'TaskAddData':10, + 'TaskDeleteData':10 , 'TaskCreateDb':10 , 'TaskCreateStream': 3, 'TaskCreateTopic' :3, + 'TaskCreateConsumers':10, 'TaskCreateSuperTable': 10 } # task type : weghts matrixs + + for task , weights in weight_matrixs.items(): + + for basicType in BasicTypes: + if basicType.__name__ == task: + for _ in range(weights): + weightsTypes.append(basicType) + + task = random.sample(weightsTypes,1) + return task[0] + + # ref: # https://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python/ def _weighted_choice_sub(self, weights) -> int: @@ -1376,6 +1413,10 @@ class Task(): 0x396, # Database in creating status 0x386, # Database in droping status 0x03E1, # failed on tmq_subscribe ,topic not exist + 0x03ed , # Topic must be dropped first, SQL: drop database db_0 + 0x0203 , # Invalid value + + 1000 # REST catch-all error @@ -1693,19 +1734,24 @@ class TaskDropDb(StateTransitionTask): # drop topics before drop db - if self._db.getFixedSuperTable().hasTopics(wt.getDbConn()): + # if self._db.getFixedSuperTable().hasTopics(wt.getDbConn()): - self._db.getFixedSuperTable().dropTopics(wt.getDbConn(),self._db.getName(),None) - self._db.getFixedSuperTable().dropTopics(wt.getDbConn(),self._db.getName(),self._db.getFixedSuperTableName) - - self.execWtSql(wt, "drop database {}".format(self._db.getName())) + # self._db.getFixedSuperTable().dropTopics(wt.getDbConn(),self._db.getName(),None) + # self._db.getFixedSuperTable().dropTopics(wt.getDbConn(),self._db.getName(),self._db.getFixedSuperTableName) + try: + self.queryWtSql(wt, "drop database {}".format(self._db.getName())) # drop database maybe failed ,because topic exists + except taos.error.ProgrammingError as err: + errno = Helper.convertErrno(err.errno) + if errno in [0x0203]: # drop maybe failed + pass Logging.debug("[OPS] database dropped at {}".format(time.time())) -''' + # Streams will generator TD-20237 (it will crash taosd , start this task when this issue fixed ) + class TaskCreateStream(StateTransitionTask): @classmethod @@ -1714,7 +1760,7 @@ class TaskCreateStream(StateTransitionTask): @classmethod def canBeginFrom(cls, state: AnyState): - return state.canCreateStream() + return state.canCreateStreams() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): dbname = self._db.getName() @@ -1783,7 +1829,6 @@ class TaskCreateStream(StateTransitionTask): if errno in [0x03f0]: # stream already exists # stream need drop before drop table pass -''' class TaskCreateTopic(StateTransitionTask): @@ -1855,7 +1900,7 @@ class TaskCreateTopic(StateTransitionTask): Logging.debug("[OPS] topic is creating at {}".format(time.time())) except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) - if errno in [0x03f0]: # topic already exists + if errno 
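
balance_pickTaskType above biases selection by appending each task type to the candidate list weight-many extra times. A roughly equivalent, more direct sketch with random.choices; the weights are copied from the matrix in this hunk:

    import random

    weights = {'TaskReadData': 50, 'TaskDropTopics': 20, 'TaskAddData': 10,
               'TaskDeleteData': 10, 'TaskCreateDb': 10, 'TaskDropDb': 5}
    tasks = list(weights)
    picked = random.choices(tasks, weights=[weights[t] for t in tasks], k=1)[0]
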
in [0x03f0 ]: # topic already exists # topic need drop before drop table pass @@ -1877,7 +1922,7 @@ class TaskCreateTopic(StateTransitionTask): topic_sql = 'create topic {} AS STABLE {}.{} '.format(stable_topic,dbname,stbname) try: self.execWtSql(wt, "use {}".format(dbname)) - self.execWtSql(wt, topic_sql) + self.queryWtSql(wt, topic_sql) Logging.debug("[OPS] stable topic is creating at {}".format(time.time())) except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) @@ -1897,7 +1942,81 @@ class TaskCreateTopic(StateTransitionTask): pass else: pass - + +class TaskDropTopics(StateTransitionTask): + + @classmethod + def getEndState(cls): + return StateHasData() + + @classmethod + def canBeginFrom(cls, state: AnyState): + return state.canDropTopic() + + def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): + dbname = self._db.getName() + + + if not self._db.exists(wt.getDbConn()): + Logging.debug("Skipping task, no DB yet") + return + + sTable = self._db.getFixedSuperTable() # type: TdSuperTable + # wt.execSql("use db") # should always be in place + tblName = sTable.getName() + if sTable.hasTopics(wt.getDbConn()): + sTable.dropTopics(wt.getDbConn(),dbname,None) # drop topics of database + sTable.dropTopics(wt.getDbConn(),dbname,tblName) # drop topics of stable + +class TaskDropStreams(StateTransitionTask): + + @classmethod + def getEndState(cls): + return StateHasData() + + @classmethod + def canBeginFrom(cls, state: AnyState): + return state.canDropStream() + + def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): + # dbname = self._db.getName() + + + if not self._db.exists(wt.getDbConn()): + Logging.debug("Skipping task, no DB yet") + return + + sTable = self._db.getFixedSuperTable() # type: TdSuperTable + # wt.execSql("use db") # should always be in place + # tblName = sTable.getName() + if sTable.hasStreams(wt.getDbConn()): + sTable.dropStreams(wt.getDbConn()) # drop stream of database + # sTable.dropStreamTables(wt.getDbConn()) # drop streamtables of stable + +class TaskDropStreamTables(StateTransitionTask): + + @classmethod + def getEndState(cls): + return StateHasData() + + @classmethod + def canBeginFrom(cls, state: AnyState): + return state.canDropStream() + + def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): + # dbname = self._db.getName() + + + if not self._db.exists(wt.getDbConn()): + Logging.debug("Skipping task, no DB yet") + return + + sTable = self._db.getFixedSuperTable() # type: TdSuperTable + # wt.execSql("use db") # should always be in place + # tblName = sTable.getName() + if sTable.hasStreamTables(wt.getDbConn()): + # sTable.dropStreams(wt.getDbConn()) + sTable.dropStreamTables(wt.getDbConn()) # drop stream tables class TaskCreateConsumers(StateTransitionTask): @@ -1918,9 +2037,10 @@ class TaskCreateConsumers(StateTransitionTask): # wt.execSql("use db") # should always be in place # create Consumers - if Dice.throw(50)==0: # because subscribe is cost so much time , Reduce frequency of this task - if sTable.hasTopics(wt.getDbConn()): - sTable.createConsumer(wt.getDbConn(),random.randint(1,10)) + # if Dice.throw(50)==0: # because subscribe is cost so much time , Reduce frequency of this task + if sTable.hasTopics(wt.getDbConn()): + sTable.createConsumer(wt.getDbConn(),random.randint(1,10)) + pass else: print(" restful not support tmq consumers") return @@ -1968,17 +2088,17 @@ class TdSuperTable: dbName = self._dbName if self.exists(dbc) : # if myself exists fullTableName = dbName + '.' 
+ self._stName - if self.hasStreams(dbc): - self.dropStreams(dbc) - self.dropStreamTables(dbc) - if self.hasTopics(dbc): - self.dropTopics(dbName,None) - self.dropTopics(dbName,self._stName) + # if self.hasStreams(dbc): + # self.dropStreams(dbc) + # self.dropStreamTables(dbc) + # if self.hasTopics(dbc): + # self.dropTopics(dbName,None) + # self.dropTopics(dbName,self._stName) try: dbc.execute("DROP TABLE {}".format(fullTableName)) except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) - if errno in [1011,0x3F3,0x03f3,0x2662]: # table doesn't exist + if errno in [1011,0x3F3,0x03f3,0x2662]: # table doesn't exist # Stream must be dropped first, SQL: DROP TABLE db_0.fs_table pass # # stream need drop before drop table # for stream in self.getStreamName(): @@ -2011,17 +2131,21 @@ class TdSuperTable: if dbc.existsSuperTable(self._stName): if dropIfExists: - if self.hasStreams(dbc): - self.dropStreams(dbc) - self.dropStreamTables(dbc) + # if self.hasStreams(dbc): + # self.dropStreams(dbc) + # self.dropStreamTables(dbc) - # drop topics before drop stables - if self.hasTopics(dbc): - self.dropTopics(dbc,self._dbName,None) - self.dropTopics(dbc,self._dbName,self._stName ) + # # drop topics before drop stables + # if self.hasTopics(dbc): + # self.dropTopics(dbc,self._dbName,None) + # self.dropTopics(dbc,self._dbName,self._stName ) - - dbc.execute("DROP TABLE {}".format(fullTableName)) + try: + dbc.execute("DROP TABLE {}".format(fullTableName)) + except taos.error.ProgrammingError as err: + errno = Helper.convertErrno(err.errno) + if errno in [1011,0x3F3,0x03f3,0x2662]: # table doesn't exist # Stream must be dropped first, SQL: DROP TABLE db_0.fs_table + pass pass # dbc.execute("DROP TABLE {}".format(fullTableName)) @@ -2124,6 +2248,7 @@ class TdSuperTable: if dbname in topic[0] and topic[0].startswith("database"): try: dbc.execute('drop topic {}'.format(topic[0])) + Logging.debug("[OPS] topic {} is droping at {}".format(topic,time.time())) except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) if errno in [0x03EB]: # Topic subscribed cannot be dropped @@ -2139,6 +2264,7 @@ class TdSuperTable: for topic in topics: if topic[0].startswith(self._dbName) and topic[0].endswith('topic'): dbc.execute('drop topic {}'.format(topic[0])) + Logging.debug("[OPS] topic {} is droping at {}".format(topic,time.time())) return True else: return True @@ -2454,15 +2580,16 @@ class TaskDropSuperTable(StateTransitionTask): # Drop the super table itself tblName = self._db.getFixedSuperTableName() - # drop streams before drop stables - if self._db.getFixedSuperTable().hasStreams(wt.getDbConn()): - self._db.getFixedSuperTable().dropStreams(wt.getDbConn()) - self._db.getFixedSuperTable().dropStreamTables(wt.getDbConn()) + # # drop streams before drop stables + # if self._db.getFixedSuperTable().hasStreams(wt.getDbConn()): + # self._db.getFixedSuperTable().dropStreams(wt.getDbConn()) + # self._db.getFixedSuperTable().dropStreamTables(wt.getDbConn()) + + # # drop topics before drop stables + # if self._db.getFixedSuperTable().hasTopics(wt.getDbConn()): + # self._db.getFixedSuperTable().dropTopics(wt.getDbConn(),self._db.getName(),None) + # self._db.getFixedSuperTable().dropTopics(wt.getDbConn(),self._db.getName(),tblName) - # drop topics before drop stables - if self._db.getFixedSuperTable().hasTopics(wt.getDbConn()): - self._db.getFixedSuperTable().dropTopics(wt.getDbConn(),self._db.getName(),None) - 
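
dropTopics above distinguishes database-level topics from super-table topics purely by name shape. A condensed sketch of the filtering, with topic names following the patterns TaskCreateTopic generates:

    topics = ['database_db_0_topics', 'db_0_super_topic', 'db_0_stable_topic']

    db_topics  = [t for t in topics if 'db_0' in t and t.startswith('database')]
    stb_topics = [t for t in topics if t.startswith('db_0') and t.endswith('topic')]
    # db_topics  -> ['database_db_0_topics']
    # stb_topics -> ['db_0_super_topic', 'db_0_stable_topic']
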

try:
self.execWtSql(wt, "drop table {}.{}".format(self._db.getName(), tblName))
@@ -2948,6 +3075,9 @@ class ThreadStacks: # stack info for all threads
shortTid = th.native_id % 10000 #type: ignore
self._allStacks[shortTid] = stack # Was using th.native_id
+ def record_current_time(self,current_time):
+ self.current_time = current_time
+
def print(self, filteredEndName = None, filterInternal = False):
for shortTid, stack in self._allStacks.items(): # for each thread, stack frames top to bottom
lastFrame = stack[-1]
@@ -2962,9 +3092,11 @@
continue # ignore
# Now print
print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(shortTid))
+
lastSqlForThread = DbConn.fetchSqlForThread(shortTid)
- time_cost = DbConn.get_time_cost()
- print("Last SQL statement attempted from thread {} ({:.4f} sec ago) is: {}".format(shortTid, time_cost ,lastSqlForThread))
+ last_sql_commit_time = DbConn.get_save_sql_time(shortTid)
+ # time_cost = DbConn.get_time_cost()
+ print("Last SQL statement attempted from thread {} ({:.4f} sec ago) is: {}".format(shortTid, self.current_time-last_sql_commit_time ,lastSqlForThread))
stackFrame = 0
for frame in stack: # was using: reversed(stack)
# print(frame)
diff --git a/tests/pytest/crash_gen/shared/db.py b/tests/pytest/crash_gen/shared/db.py
index f97e4025e5..05711efbc6 100644
--- a/tests/pytest/crash_gen/shared/db.py
+++ b/tests/pytest/crash_gen/shared/db.py
@@ -32,7 +32,7 @@ class DbConn:
# class variables
lastSqlFromThreads : dict[int, str] = {} # stored by thread id, obtained from threading.current_thread().ident%10000
spendThreads : dict[int, float] = {} # stored by thread id, obtained from threading.current_thread().ident%10000
-
+ current_time : dict[int, float] = {} # per-thread time at which the last SQL statement was saved
@classmethod
def saveSqlForCurrentThread(cls, sql: str):
'''
@@ -44,6 +44,7 @@ class DbConn:
th = threading.current_thread()
shortTid = th.native_id % 10000 #type: ignore
cls.lastSqlFromThreads[shortTid] = sql # Save this for later
+ cls.record_save_sql_time()
@classmethod
def fetchSqlForThread(cls, shortTid : int) -> str :
@@ -53,6 +54,25 @@ class DbConn:
raise CrashGenError("No last-attempted-SQL found for thread id: {}".format(shortTid))
return cls.lastSqlFromThreads[shortTid]
+ @classmethod
+ def get_save_sql_time(cls, shortTid : int):
+ '''
+ Return the time at which the given thread last saved a SQL statement, so that
+ when we later run into a dead-lock situation we can pick out the deadlocked
+ thread and tell how long it has been stuck on that statement.
+ '''
+ return cls.current_time[shortTid]
+
+ @classmethod
+ def record_save_sql_time(cls):
+ '''
+ Record, for the current thread, the time at which its last SQL statement was
+ saved; get_save_sql_time() reads this back when diagnosing a dead-lock.
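+
+ A usage sketch (1234 stands for a hypothetical short thread id; both calls
+ below exist on this class):
+
+ DbConn.saveSqlForCurrentThread("select 1") # records the SQL and its timestamp
+ # ... later, from a watchdog thread:
+ stuck_for = time.time() - DbConn.get_save_sql_time(1234)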
+ ''' + th = threading.current_thread() + shortTid = th.native_id % 10000 #type: ignore + cls.current_time[shortTid] = float(time.time()) # Save this for later @classmethod def sql_exec_spend(cls, cost: float): @@ -460,7 +480,6 @@ class DbConnNative(DbConn): finally: time_cost = time.time() - time_start self.sql_exec_spend(time_cost) - cls = self.__class__ cls.totalRequests += 1 @@ -541,4 +560,3 @@ class DbManager(): self._dbConn.close() self._dbConn = None Logging.debug("DbManager closed DB connection...") - From 4774696aaeddc6e7eda26a30729f3916922c84c8 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Thu, 10 Nov 2022 20:32:08 +0800 Subject: [PATCH 15/69] update errorno --- tests/pytest/auto_crash_gen.py | 4 ++-- tests/pytest/auto_crash_gen_valgrind.py | 4 ++-- tests/pytest/auto_crash_gen_valgrind_cluster.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/pytest/auto_crash_gen.py b/tests/pytest/auto_crash_gen.py index 02cca810a7..9c134e6d64 100755 --- a/tests/pytest/auto_crash_gen.py +++ b/tests/pytest/auto_crash_gen.py @@ -218,11 +218,11 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): if valgrind : - crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550 '%(crash_gen_path ,arguments) + crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203 '%(crash_gen_path ,arguments) else: - crash_gen_cmd = 'cd %s && ./crash_gen.sh %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550'%(crash_gen_path ,arguments) + crash_gen_cmd = 'cd %s && ./crash_gen.sh %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203'%(crash_gen_path ,arguments) return crash_gen_cmd diff --git a/tests/pytest/auto_crash_gen_valgrind.py b/tests/pytest/auto_crash_gen_valgrind.py index 1443dcd543..ce87fec684 100755 --- a/tests/pytest/auto_crash_gen_valgrind.py +++ b/tests/pytest/auto_crash_gen_valgrind.py @@ -219,11 +219,11 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): if valgrind : - crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550 '%(crash_gen_path ,arguments) + crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203 '%(crash_gen_path ,arguments) else: - crash_gen_cmd = 'cd %s && ./crash_gen.sh %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550'%(crash_gen_path ,arguments) + crash_gen_cmd = 'cd %s && ./crash_gen.sh %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203'%(crash_gen_path ,arguments) return crash_gen_cmd diff --git a/tests/pytest/auto_crash_gen_valgrind_cluster.py b/tests/pytest/auto_crash_gen_valgrind_cluster.py index 05cdaa6cc5..f4afa80afe 100755 --- a/tests/pytest/auto_crash_gen_valgrind_cluster.py +++ b/tests/pytest/auto_crash_gen_valgrind_cluster.py @@ -219,11 +219,11 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): if valgrind : - crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0707 '%(crash_gen_path ,arguments) + crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0707,0x0203 '%(crash_gen_path ,arguments) else: - crash_gen_cmd = 'cd %s && ./crash_gen.sh -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0014,0x0707'%(crash_gen_path ,arguments) + crash_gen_cmd = 'cd %s && 
./crash_gen.sh -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0014,0x0707,0x0203'%(crash_gen_path ,arguments) return crash_gen_cmd From 4eb5c2cae7f146e190276472ec9b173e0b9996c8 Mon Sep 17 00:00:00 2001 From: "wenzhouwww@live.cn" Date: Fri, 11 Nov 2022 13:49:01 +0800 Subject: [PATCH 16/69] update --- tests/pytest/crash_gen/crash_gen_main.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index ba501c3e78..4fa19f543d 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -953,7 +953,7 @@ class StateMechine: except taos.error.ProgrammingError as err: Logging.error("Failed to initialized state machine, cannot find current state: {}".format(err)) traceback.print_stack() - pass # re-throw + raise # re-throw # TODO: seems no lnoger used, remove? def getCurrentState(self): @@ -1095,13 +1095,13 @@ class StateMechine: BasicTypes = self.getTaskTypes() weightsTypes = BasicTypes.copy() - # this matrixs can balance the Frequency of different types of tasks - weight_matrixs = {'TaskDropDb': 5 , 'TaskDropTopics': 20 , 'TaskDropStreams':10 , 'TaskDropStreamTables':10 , + # this matrixs can balance the Frequency of TaskTypes + balance_TaskType_matrixs = {'TaskDropDb': 5 , 'TaskDropTopics': 20 , 'TaskDropStreams':10 , 'TaskDropStreamTables':10 , 'TaskReadData':50 , 'TaskDropSuperTable':5 , 'TaskAlterTags':3 , 'TaskAddData':10, 'TaskDeleteData':10 , 'TaskCreateDb':10 , 'TaskCreateStream': 3, 'TaskCreateTopic' :3, - 'TaskCreateConsumers':10, 'TaskCreateSuperTable': 10 } # task type : weghts matrixs + 'TaskCreateConsumers':10, 'TaskCreateSuperTable': 10 } # TaskType : balance_matrixs of task - for task , weights in weight_matrixs.items(): + for task , weights in balance_TaskType_matrixs.items(): for basicType in BasicTypes: if basicType.__name__ == task: From 8c07f44f1374cf0cfaf87c43ae09fcade10276ba Mon Sep 17 00:00:00 2001 From: danielclow <106956386+danielclow@users.noreply.github.com> Date: Tue, 15 Nov 2022 13:02:07 +0800 Subject: [PATCH 17/69] doc: updated contribution guidelines --- CONTRIBUTING-CN.md | 27 ++++++++------------- CONTRIBUTING.md | 60 +++++++++++++++++++++------------------------- 2 files changed, 37 insertions(+), 50 deletions(-) diff --git a/CONTRIBUTING-CN.md b/CONTRIBUTING-CN.md index 19f3000d45..efaa2077fe 100644 --- a/CONTRIBUTING-CN.md +++ b/CONTRIBUTING-CN.md @@ -7,25 +7,18 @@ - 任何用户都可以通过 **[GitHub issue tracker](https://github.com/taosdata/TDengine/issues)** 向我们报告错误。请您对所遇到的问题进行**详细描述**,最好提供重现错误的详细步骤。 - 欢迎提供包含由 Bug 生成的日志文件的附录。 -## 需要强调的代码提交规则 +## 代码提交规则 -- 在提交代码之前,需要**同意贡献者许可协议(CLA)**。点击 [TaosData CLA](https://cla-assistant.io/taosdata/TDengine) 阅读并签署协议。如果您不接受该协议,请停止提交。 -- 请在 [GitHub issue tracker](https://github.com/taosdata/TDengine/issues) 中解决问题或添加注册功能。 -- 如果在 [GitHub issue tracker](https://github.com/taosdata/TDengine/issues) 中没有找到相应的问题或功能,请**创建一个新的 issue**。 -- 将代码提交到我们的存储库时,请创建**包含问题编号的 PR**。 +1. 在提交代码之前,需要**同意贡献者许可协议(CLA)**。点击 [TaosData CLA](https://cla-assistant.io/taosdata/TDengine) 阅读并签署协议。如果您不接受该协议,请停止提交。 +2. 请在 [GitHub issue tracker](https://github.com/taosdata/TDengine/issues) 中解决问题或添加注册功能。 + 如果在 [GitHub issue tracker](https://github.com/taosdata/TDengine/issues) 中没有找到相应的问题或功能,请**创建一个新的 issue**。 + 将代码提交到我们的存储库时,请创建**包含问题编号的 PR**。 +3. 将TDengine仓库库fork到自己的账户中并创建分支(branch)。 + 注意:默认分支`main`不能直接接受PR,请基于开发分支`3.0`创建自己的分支。 + 注意:修改文档的分支要以`docs/`为开头,以免进行不必要的测试。 +4. 
创建pull request,将自己的分支合并到开发分支`3.0`,我们开发团队将尽快审核。 -## 贡献指南 - -1. 请用友好的语气书写。 - -2. **主动语态**总体上优于被动语态。主动语态中的句子会突出执行动作的人,而不是被动语态突出动作的接受者。 - -3. 文档写作建议 - -- 正确拼写产品名称 “TDengine”。 “TD” 用大写字母,“TD” 和 “engine” 之间没有空格 **(正确拼写:TDengine)**。 -- 在句号或其他标点符号后只留一个空格。 - -4. 尽量**使用简单句**,而不是复杂句。 +如遇任何问题,请添加官方微信TDengineECO。我们的团队会帮忙解决。 ## 给贡献者的礼品 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5be84bec34..058c624e10 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,40 +1,36 @@ -# Contributing +# Contributing to TDengine -We appreciate contributions from all developers. Feel free to follow us, fork the repository, report bugs, and even submit your code on GitHub. However, we would like developers to follow the guidelines in this document to ensure effective cooperation. +TDengine Community Edition is free, open-source software. Its development is led by the TDengine Team, but we welcome contributions from all community members and open-source developers. This document describes how you can contribute, no matter whether you're a user or a developer yourself. -## Reporting a bug +## Bug reports -- Any users can report bugs to us through the **[GitHub issue tracker](https://github.com/taosdata/TDengine/issues)**. We would appreciate if you could provide **a detailed description** of the problem you encountered, including steps to reproduce it. +All users can report bugs to us through the **[GitHub issue tracker](https://github.com/taosdata/TDengine/issues)**. To ensure that the development team can locate and resolve the issue that you experienced, please include the following in your bug report: -- Attaching log files caused by the bug is really appreciated. +- A detailed description of the issue, including the steps to reproduce it. +- Any log files that may be relevant to the issue. -## Guidelines for committing code +## Code contributions -- You must agree to the **Contributor License Agreement(CLA) before submitting your code patch**. Follow the **[TAOSData CLA](https://cla-assistant.io/taosdata/TDengine)** link to read through and sign the agreement. If you do not accept the agreement, your contributions cannot be accepted. +Developers are encouraged to submit patches to the project, and all contributions, from minor documentation changes to bug fixes, are appreciated by our team. To ensure that your code can be merged successfully and improve the experience for other community members, we ask that you go through the following procedure before submitting a pull request: -- Please solve an issue or add a feature registered in the **[GitHub issue tracker](https://github.com/taosdata/TDengine/issues)**. -- If no corresponding issue or feature is found in the issue tracker, please **create one**. -- When submitting your code to our repository, please create a pull request with the **issue number** included. +1. Read and accept the terms of the TAOS Data Contributor License Agreement (CLA) located at [https://cla-assistant.io/taosdata/TDengine](https://cla-assistant.io/taosdata/TDengine). -## Guidelines for communicating +2. For bug fixes, search the [GitHub issue tracker](https://github.com/taosdata/TDengine/issues) to check whether the bug has already been filed. + - If the bug that you want to fix already exists in the issue tracker, review the previous discussion before submitting your patch. + - If the bug that you want to fix does not exist in the issue tracker, click **New issue** and file a report. + - Ensure that you note the issue number in your pull request when you submit your patch. + +3. 
Fork our repository to your GitHub account and create a branch for your patch. + **Important:** The `main` branch is for stable versions and cannot accept patches directly. For all code and documentation changes, create your own branch from the development branch `3.0` and not from `main`. + Note: For a documentation change, ensure that the branch name starts with `docs/` so that the change can be merged without running tests. + +4. Create a pull request to merge your changes into the development branch `3.0`, and our team members will review the request as soon as possible. -1. Please be **nice and polite** in the description. -2. **Active voice is better than passive voice in general**. Sentences in the active voice will highlight who is performing the action rather than the recipient of the action highlighted by the passive voice. -3. Documentation writing advice +If you encounter any difficulties or problems in contributing your code, you can join our [Discord server](https://discord.com/invite/VZdSuUg4pS) and receive assistance from members of the TDengine Team. -- Spell the product name "TDengine" correctly. "TD" is written in capital letters, and there is no space between "TD" and "engine" (**Correct spelling: TDengine**). -- Please **capitalize the first letter** of every sentence. -- Leave **only one space** after periods or other punctuation marks. -- Use **American spelling**. -- When possible, **use second person** rather than first person (e.g.“You are recommended to use a reverse proxy such as Nginx.” rather than “We recommend to use a reverse proxy such as Nginx.”). +## Expressing our thanks -5. Use **simple sentences**, rather than complex sentences. - -## Gifts for the contributors - -Developers, as long as you contribute to TDengine, whether it's code contributions to fix bugs or feature requests, or documentation changes, **you are eligible for a very special Contributor Souvenir Gift!** - -**You can choose one of the following gifts:** +To thank community members for your support, we are offering a free gift to any developer who submits at least one contribution. You can choose one of the following items:

-The TDengine community is committed to making TDengine accepted and used by more developers. +If you would like to claim your gift, send an email to [developer@tdengine.com](mailto:developer@tdengine.com?subject=Claiming&20my%20developer%20gift) including the following information: -Just fill out the **Contributor Submission Form** to choose your desired gift. +- Your GitHub account name +- Your name and mailing address +- Your preferred gift -- [Contributor Submission Form](https://page.ma.scrmtech.com/form/index?pf_uid=27715_2095&id=12100) - -## Contact us - -If you have any problems or questions that need help from us, please feel free to add our WeChat account: TDengineECO. +Note: Limit one per person. \ No newline at end of file From 9c7913b00b1100cb48845af2cb640fd594a65f71 Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Wed, 16 Nov 2022 10:39:37 +0800 Subject: [PATCH 18/69] Update crash_gen_main.py --- tests/pytest/crash_gen/crash_gen_main.py | 244 +++++------------------ 1 file changed, 48 insertions(+), 196 deletions(-) diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 4fa19f543d..90d27c3b8d 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -1415,6 +1415,7 @@ class Task(): 0x03E1, # failed on tmq_subscribe ,topic not exist 0x03ed , # Topic must be dropped first, SQL: drop database db_0 0x0203 , # Invalid value + 0x03f0 , # Stream already exist , topic already exists @@ -1732,12 +1733,6 @@ class TaskDropDb(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - # drop topics before drop db - - # if self._db.getFixedSuperTable().hasTopics(wt.getDbConn()): - - # self._db.getFixedSuperTable().dropTopics(wt.getDbConn(),self._db.getName(),None) - # self._db.getFixedSuperTable().dropTopics(wt.getDbConn(),self._db.getName(),self._db.getFixedSuperTableName) try: self.queryWtSql(wt, "drop database {}".format(self._db.getName())) # drop database maybe failed ,because topic exists except taos.error.ProgrammingError as err: @@ -1748,10 +1743,6 @@ class TaskDropDb(StateTransitionTask): Logging.debug("[OPS] database dropped at {}".format(time.time())) - -# Streams will generator TD-20237 (it will crash taosd , start this task when this issue fixed ) - - class TaskCreateStream(StateTransitionTask): @classmethod @@ -1775,60 +1766,25 @@ class TaskCreateStream(StateTransitionTask): sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place - - # create stream - - # CREATE STREAM avg_vol_s INTO avg_vol AS SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s); - stbname =sTable.getName() sub_tables = sTable.getRegTables(wt.getDbConn()) aggExpr = Dice.choice([ - 'count(*)', - 'avg(speed)', - # 'twa(speed)', # TODO: this one REQUIRES a where statement, not reasonable - 'sum(speed)', - 'stddev(speed)', - # SELECTOR functions - 'min(speed)', - 'max(speed)', - 'first(speed)', - 'last(speed)', - 'apercentile(speed, 10)', # TODO: TD-1316 - 'last_row(*)', # TODO: commented out per TD-3231, we should re-create - # Transformation Functions - 'twa(speed)' - - ]) # TODO: add more from 'top' + 'count(*)', 'avg(speed)', 'sum(speed)', 'stddev(speed)','min(speed)', 'max(speed)', 'first(speed)', 'last(speed)', + 'apercentile(speed, 10)', 'last_row(*)', 'twa(speed)']) + + stream_sql = '' # set default value if sub_tables: - - if sub_tables: # if not empty - sub_tbname = sub_tables[0] - # 
create stream with query above sub_table - stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.format(sub_stream_name,dbname,sub_stream_tb_name ,aggExpr,dbname,sub_tbname) - try: - self.execWtSql(wt, stream_sql) - Logging.debug("[OPS] stream is creating at {}".format(time.time())) - except taos.error.ProgrammingError as err: - errno = Helper.convertErrno(err.errno) - if errno in [0x03f0]: # stream already exists - # stream need drop before drop table - pass - - else: - pass - + sub_tbname = sub_tables[0] + # create stream with query above sub_table + stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.\ + format(sub_stream_name,dbname,sub_stream_tb_name ,aggExpr,dbname,sub_tbname) else: - stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.format(super_stream_name,dbname,super_stream_tb_name,aggExpr, dbname,stbname) + stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.\ + format(super_stream_name,dbname,super_stream_tb_name,aggExpr, dbname,stbname) + self.execWtSql(wt, stream_sql) + Logging.debug("[OPS] stream is creating at {}".format(time.time())) - try: - self.execWtSql(wt, stream_sql) - Logging.debug("[OPS] stream is creating at {}".format(time.time())) - except taos.error.ProgrammingError as err: - errno = Helper.convertErrno(err.errno) - if errno in [0x03f0]: # stream already exists - # stream need drop before drop table - pass class TaskCreateTopic(StateTransitionTask): @@ -1853,96 +1809,37 @@ class TaskCreateTopic(StateTransitionTask): sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place - - # create topic - # create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1; stbname =sTable.getName() sub_tables = sTable.getRegTables(wt.getDbConn()) - scalarExpr = Dice.choice([ '*','speed','color', - 'abs(speed)', - 'acos(speed)', - 'asin(speed)', - 'atan(speed)', - 'ceil(speed)', - 'cos(speed)', - 'cos(speed)', - 'floor(speed)', - 'log(speed,2)', - 'pow(speed,2)', - 'round(speed)', - 'sin(speed)', - 'sqrt(speed)', - 'char_length(color)', - 'concat(color,color)', - 'concat_ws(" ", color,color," ")', - 'length(color)', - 'lower(color)', - 'ltrim(color)', - 'substr(color , 2)', - 'upper(color)', - 'cast(speed as double)', - 'cast(ts as bigint)', - - ]) # TODO: add more from 'top' - if Dice.throw(3)==0: - if sub_tables: - - if sub_tables: # if not empty - sub_tbname = sub_tables[0] - # create stream with query above sub_table - topic_sql = 'create topic {} as select {} FROM {}.{} ; '.format(sub_topic_name,scalarExpr,dbname,sub_tbname) - try: - self.execWtSql(wt, "use {}".format(dbname)) - self.execWtSql(wt, topic_sql) - Logging.debug("[OPS] topic is creating at {}".format(time.time())) - except taos.error.ProgrammingError as err: - errno = Helper.convertErrno(err.errno) - if errno in [0x03f0 ]: # topic already exists - # topic need drop before drop table - pass - - else: - pass - - else: + scalarExpr = Dice.choice([ '*','speed','color','abs(speed)','acos(speed)','asin(speed)','atan(speed)','ceil(speed)','cos(speed)','cos(speed)', + 'floor(speed)','log(speed,2)','pow(speed,2)','round(speed)','sin(speed)','sqrt(speed)','char_length(color)','concat(color,color)', + 'concat_ws(" ", color,color," ")','length(color)', 'lower(color)', 
'ltrim(color)','substr(color , 2)','upper(color)','cast(speed as double)', + 'cast(ts as bigint)']) + topic_sql = '' # set default value + if Dice.throw(3)==0: # create topic : source data from sub query + if sub_tables: # if not empty + sub_tbname = sub_tables[0] + # create topic : source data from sub query of sub stable + topic_sql = 'create topic {} as select {} FROM {}.{} ; '.format(sub_topic_name,scalarExpr,dbname,sub_tbname) + + else: # create topic : source data from sub query of stable topic_sql = 'create topic {} as select {} FROM {}.{} '.format(super_topic_name,scalarExpr, dbname,stbname) - try: - self.execWtSql(wt, "use {}".format(dbname)) - self.execWtSql(wt, topic_sql) - Logging.debug("[OPS] subquery topic is creating at {}".format(time.time())) - except taos.error.ProgrammingError as err: - errno = Helper.convertErrno(err.errno) - if errno in [0x03f0]: # topic already exists - # topic need drop before drop table - pass - elif Dice.throw(3)==1: + elif Dice.throw(3)==1: # create topic : source data from super table topic_sql = 'create topic {} AS STABLE {}.{} '.format(stable_topic,dbname,stbname) - try: - self.execWtSql(wt, "use {}".format(dbname)) - self.queryWtSql(wt, topic_sql) - Logging.debug("[OPS] stable topic is creating at {}".format(time.time())) - except taos.error.ProgrammingError as err: - errno = Helper.convertErrno(err.errno) - if errno in [0x03f0]: # topic already exists - # topic need drop before drop table - pass - elif Dice.throw(3)==2: - topic_sql = 'create topic {} AS DATABASE {} '.format(db_topic,dbname) - try: - self.execWtSql(wt, "use {}".format(dbname)) - self.execWtSql(wt, topic_sql) - Logging.debug("[OPS] db topic is creating at {}".format(time.time())) - except taos.error.ProgrammingError as err: - errno = Helper.convertErrno(err.errno) - if errno in [0x03f0]: # topic already exists - # topic need drop before drop table - pass + + elif Dice.throw(3)==2: # create topic : source data from whole database + topic_sql = 'create topic {} AS DATABASE {} '.format(db_topic,dbname) else: pass + # exec create topics + self.execWtSql(wt, "use {}".format(dbname)) + self.execWtSql(wt, topic_sql) + Logging.debug("[OPS] db topic is creating at {}".format(time.time())) + class TaskDropTopics(StateTransitionTask): @classmethod @@ -1991,7 +1888,6 @@ class TaskDropStreams(StateTransitionTask): # tblName = sTable.getName() if sTable.hasStreams(wt.getDbConn()): sTable.dropStreams(wt.getDbConn()) # drop stream of database - # sTable.dropStreamTables(wt.getDbConn()) # drop streamtables of stable class TaskDropStreamTables(StateTransitionTask): @@ -2012,10 +1908,9 @@ class TaskDropStreamTables(StateTransitionTask): return sTable = self._db.getFixedSuperTable() # type: TdSuperTable - # wt.execSql("use db") # should always be in place + wt.execSql("use db") # should always be in place # tblName = sTable.getName() if sTable.hasStreamTables(wt.getDbConn()): - # sTable.dropStreams(wt.getDbConn()) sTable.dropStreamTables(wt.getDbConn()) # drop stream tables class TaskCreateConsumers(StateTransitionTask): @@ -2031,13 +1926,9 @@ class TaskCreateConsumers(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): if Config.getConfig().connector_type == 'native': - dbname = self._db.getName() - + sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place - - # create Consumers - # if Dice.throw(50)==0: # because subscribe is cost so much time , Reduce frequency of this task if 
sTable.hasTopics(wt.getDbConn()): sTable.createConsumer(wt.getDbConn(),random.randint(1,10)) pass @@ -2088,30 +1979,13 @@ class TdSuperTable: dbName = self._dbName if self.exists(dbc) : # if myself exists fullTableName = dbName + '.' + self._stName - # if self.hasStreams(dbc): - # self.dropStreams(dbc) - # self.dropStreamTables(dbc) - # if self.hasTopics(dbc): - # self.dropTopics(dbName,None) - # self.dropTopics(dbName,self._stName) + try: dbc.execute("DROP TABLE {}".format(fullTableName)) except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) if errno in [1011,0x3F3,0x03f3,0x2662]: # table doesn't exist # Stream must be dropped first, SQL: DROP TABLE db_0.fs_table pass - # # stream need drop before drop table - # for stream in self.getStreamName(): - # drop_stream_sql = 'drop stream {}'.format(stream) - # try: - # dbc.execute(drop_stream_sql) - # except taos.error.ProgrammingError as err: - # # correcting for strange error number scheme - # errno3 = Helper.convertErrno(err.errno) - # if errno3 in [1011,0x3F3,0x03f3,0x2662,0x03f1]: # stream not exists - # pass - # dbc.execute("DROP TABLE {}".format(fullTableName)) - # pass else: if not skipCheck: @@ -2131,15 +2005,6 @@ class TdSuperTable: if dbc.existsSuperTable(self._stName): if dropIfExists: - # if self.hasStreams(dbc): - # self.dropStreams(dbc) - # self.dropStreamTables(dbc) - - # # drop topics before drop stables - # if self.hasTopics(dbc): - # self.dropTopics(dbc,self._dbName,None) - # self.dropTopics(dbc,self._dbName,self._stName ) - try: dbc.execute("DROP TABLE {}".format(fullTableName)) except taos.error.ProgrammingError as err: @@ -2164,7 +2029,7 @@ class TdSuperTable: sql += " TAGS (dummy int) " dbc.execute(sql) - def createConsumer(self, dbc: DbConn , Consumer_nums): + def createConsumer(self, dbc,Consumer_nums): def generateConsumer(current_topic_list): conf = TaosTmqConf() @@ -2183,7 +2048,14 @@ class TdSuperTable: consumer.subscribe(topic_list) except TmqError as e : pass - time.sleep(5) # consumer work only 5 sec ,and then it will exit + + # consumer work only 30 sec + time_start = time.time() + while 1: + res = consumer.poll(1000) + if time.time() - time_start >5 : + break + # time.sleep(10) try: consumer.unsubscribe() except TmqError as e : @@ -2579,29 +2451,9 @@ class TaskDropSuperTable(StateTransitionTask): # Drop the super table itself tblName = self._db.getFixedSuperTableName() - - # # drop streams before drop stables - # if self._db.getFixedSuperTable().hasStreams(wt.getDbConn()): - # self._db.getFixedSuperTable().dropStreams(wt.getDbConn()) - # self._db.getFixedSuperTable().dropStreamTables(wt.getDbConn()) + self.execWtSql(wt, "drop table {}.{}".format(self._db.getName(), tblName)) + - # # drop topics before drop stables - # if self._db.getFixedSuperTable().hasTopics(wt.getDbConn()): - # self._db.getFixedSuperTable().dropTopics(wt.getDbConn(),self._db.getName(),None) - # self._db.getFixedSuperTable().dropTopics(wt.getDbConn(),self._db.getName(),tblName) - - - try: - self.execWtSql(wt, "drop table {}.{}".format(self._db.getName(), tblName)) - except taos.error.ProgrammingError as err: - # correcting for strange error number scheme - errno2 = Helper.convertErrno(err.errno) - if (errno2 in [0x362]): # mnode invalid table name - isSuccess = False - Logging.debug("[DB] Acceptable error when dropping a table") - elif errno2 in [1011,0x3F3,0x03f3]: # table doesn't exist - - pass class TaskAlterTags(StateTransitionTask): From b600b4c8bc088582acd2e7d6fe1fdf877c11fa24 Mon Sep 17 00:00:00 2001 From: 
Minghao Li Date: Mon, 21 Nov 2022 16:18:50 +0800 Subject: [PATCH 19/69] refactor(sync): modify wal pInfo++, to find which case can not pass --- source/libs/wal/src/walWrite.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index 6cd127fed4..caae669e4a 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -325,7 +325,8 @@ int32_t walEndSnapshot(SWal *pWal) { SWalFileInfo *pInfo = taosArraySearch(pWal->fileInfoSet, &tmp, compareWalFileInfo, TD_LE); if (pInfo) { if (ver >= pInfo->lastVer) { - pInfo--; + //pInfo--; + pInfo++; } if (POINTER_DISTANCE(pInfo, pWal->fileInfoSet->pData) > 0) { wDebug("vgId:%d, wal end remove for %" PRId64, pWal->cfg.vgId, pInfo->firstVer); From 6d5a2567749750728111f6ad029c921d21ca893c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 21 Nov 2022 22:40:20 +0800 Subject: [PATCH 20/69] fix(query): fix some memory leaks. --- source/dnode/vnode/src/tsdb/tsdbRead.c | 26 +++++++++---- source/libs/executor/src/cachescanoperator.c | 1 + source/libs/executor/src/executorimpl.c | 40 -------------------- 3 files changed, 19 insertions(+), 48 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 986cba8b17..fbafc63382 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -361,11 +361,17 @@ static void resetAllDataBlockScanInfo(SHashObj* pTableMap, int64_t ts) { STableBlockScanInfo* pInfo = *(STableBlockScanInfo**)p; pInfo->iterInit = false; + pInfo->iter.hasVal = false; pInfo->iiter.hasVal = false; + if (pInfo->iter.iter != NULL) { pInfo->iter.iter = tsdbTbDataIterDestroy(pInfo->iter.iter); } + if (pInfo->iiter.iter != NULL) { + pInfo->iiter.iter = tsdbTbDataIterDestroy(pInfo->iiter.iter); + } + pInfo->delSkyline = taosArrayDestroy(pInfo->delSkyline); pInfo->lastKey = ts; } @@ -373,6 +379,8 @@ static void resetAllDataBlockScanInfo(SHashObj* pTableMap, int64_t ts) { static void clearBlockScanInfo(STableBlockScanInfo* p) { p->iterInit = false; + + p->iter.hasVal = false; p->iiter.hasVal = false; if (p->iter.iter != NULL) { @@ -388,9 +396,9 @@ static void clearBlockScanInfo(STableBlockScanInfo* p) { tMapDataClear(&p->mapData); } -static void destroyAllBlockScanInfo(SHashObj* pTableMap, bool clearEntry) { +static void destroyAllBlockScanInfo(SHashObj* pTableMap) { void* p = NULL; - while (clearEntry && ((p = taosHashIterate(pTableMap, p)) != NULL)) { + while ((p = taosHashIterate(pTableMap, p)) != NULL) { clearBlockScanInfo(*(STableBlockScanInfo**)p); } @@ -2226,6 +2234,7 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea if (pReader->pReadSnap->pMem != NULL) { d = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pMem, pReader->suid, pBlockScanInfo->uid); if (d != NULL) { + ASSERT(pBlockScanInfo->iter.iter == NULL); code = tsdbTbDataIterCreate(d, &startKey, backward, &pBlockScanInfo->iter.iter); if (code == TSDB_CODE_SUCCESS) { pBlockScanInfo->iter.hasVal = (tsdbTbDataIterGet(pBlockScanInfo->iter.iter) != NULL); @@ -3789,9 +3798,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL updateBlockSMAInfo(pReader->pSchema, &pReader->suppInfo); } - STsdbReader* p = pReader->innerReader[0] != NULL ? 
pReader->innerReader[0] : pReader; - - pReader->status.pTableMap = createDataBlockScanInfo(p, pTableList, numOfTables); + pReader->status.pTableMap = createDataBlockScanInfo(pReader, pTableList, numOfTables); if (pReader->status.pTableMap == NULL) { tsdbReaderClose(pReader); *ppReader = NULL; @@ -3849,7 +3856,7 @@ void tsdbReaderClose(STsdbReader* pReader) { } { - if (pReader->innerReader[0] != NULL) { + if (pReader->innerReader[0] != NULL || pReader->innerReader[1] != NULL) { STsdbReader* p = pReader->innerReader[0]; p->status.pTableMap = NULL; @@ -3887,9 +3894,12 @@ void tsdbReaderClose(STsdbReader* pReader) { cleanupDataBlockIterator(&pReader->status.blockIter); size_t numOfTables = taosHashGetSize(pReader->status.pTableMap); - destroyAllBlockScanInfo(pReader->status.pTableMap, (pReader->innerReader[0] == NULL) ? true : false); + if (pReader->status.pTableMap != NULL) { + destroyAllBlockScanInfo(pReader->status.pTableMap); + clearBlockScanInfoBuf(&pReader->blockInfoBuf); + } + blockDataDestroy(pReader->pResBlock); - clearBlockScanInfoBuf(&pReader->blockInfoBuf); if (pReader->pFileReader != NULL) { tsdbDataFReaderClose(&pReader->pFileReader); diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c index b78ba8ac0a..922ed05653 100644 --- a/source/libs/executor/src/cachescanoperator.c +++ b/source/libs/executor/src/cachescanoperator.c @@ -246,6 +246,7 @@ void destroyLastrowScanOperator(void* param) { pInfo->pLastrowReader = tsdbCacherowsReaderClose(pInfo->pLastrowReader); } + cleanupExprSupp(&pInfo->pseudoExprSup); taosMemoryFreeClear(param); } diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 0dd5765aa4..a7e955100c 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -20,7 +20,6 @@ #include "querynodes.h" #include "tfill.h" #include "tname.h" -#include "tref.h" #include "tdatablock.h" #include "tglobal.h" @@ -134,45 +133,6 @@ static int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, static void initCtxOutputBuffer(SqlFunctionCtx* pCtx, int32_t size); static void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId); -#if 0 -static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, char* pData, - int16_t bytes, bool masterscan, uint64_t uid) { - bool existed = false; - SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, uid); - - SResultRow** p1 = - (SResultRow**)taosHashGet(pRuntimeEnv->pResultRowHashTable, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); - - // in case of repeat scan/reverse scan, no new time window added. - if (QUERY_IS_INTERVAL_QUERY(pRuntimeEnv->pQueryAttr)) { - if (!masterscan) { // the *p1 may be NULL in case of sliding+offset exists. 
- return p1 != NULL; - } - - if (p1 != NULL) { - if (pResultRowInfo->size == 0) { - existed = false; - } else if (pResultRowInfo->size == 1) { - // existed = (pResultRowInfo->pResult[0] == (*p1)); - } else { // check if current pResultRowInfo contains the existed pResultRow - SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, uid, pResultRowInfo); - int64_t* index = - taosHashGet(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes)); - if (index != NULL) { - existed = true; - } else { - existed = false; - } - } - } - - return existed; - } - - return p1 != NULL; -} -#endif - SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize) { SFilePage* pData = NULL; From bb51d3cac774def54dda8ec7a5eefee8f7626333 Mon Sep 17 00:00:00 2001 From: slzhou Date: Mon, 21 Nov 2022 23:11:00 +0800 Subject: [PATCH 21/69] fix: fix error for select count(c2),count(ts) from table caused by invalid ts column aggregation --- source/dnode/vnode/src/tsdb/tsdbRead.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 986cba8b17..bb2de2ec93 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -4118,9 +4118,13 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS } else if (pAgg->colId < pSup->colIds[j]) { i += 1; } else if (pSup->colIds[j] < pAgg->colId) { + if (pSup->colIds[j] == PRIMARYKEY_TIMESTAMP_COL_ID) { + taosArrayPush(pNewAggList, &pSup->tsColAgg); + } else { // all date in this block are null - SColumnDataAgg nullColAgg = {.colId = pSup->colIds[j], .numOfNull = pBlock->nRow}; - taosArrayPush(pNewAggList, &nullColAgg); + SColumnDataAgg nullColAgg = {.colId = pSup->colIds[j], .numOfNull = pBlock->nRow}; + taosArrayPush(pNewAggList, &nullColAgg); + } j += 1; } } From ae0bd036a07fa0d0a36e12aba9b0ae5d6c98d061 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 21 Nov 2022 23:11:00 +0800 Subject: [PATCH 22/69] fix: more log --- source/dnode/mgmt/mgmt_vnode/src/vmWorker.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index 24240e82ef..4aa07cad98 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -148,8 +148,8 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp SVnodeObj *pVnode = vmAcquireVnode(pMgmt, pHead->vgId); if (pVnode == NULL) { - dGError("vgId:%d, msg:%p failed to put into vnode queue since %s, type:%s qtype:%d", pHead->vgId, pMsg, terrstr(), - TMSG_INFO(pMsg->msgType), qtype); + dGError("vgId:%d, msg:%p failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg, terrstr(), + TMSG_INFO(pMsg->msgType), qtype, pHead->contLen); return terrno != 0 ? 
terrno : -1; } From 514ad921c74b4d20c818340304db65b574b70e04 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 22 Nov 2022 01:16:38 +0800 Subject: [PATCH 23/69] test:add testcase of enterprise installPackages --- packaging/MPtestJenkinsfile | 197 +++++++++++++++++++++++------------- packaging/testpackage.sh | 107 +++++++++++++++++--- 2 files changed, 218 insertions(+), 86 deletions(-) diff --git a/packaging/MPtestJenkinsfile b/packaging/MPtestJenkinsfile index 5dc8024f29..0e958e8384 100644 --- a/packaging/MPtestJenkinsfile +++ b/packaging/MPtestJenkinsfile @@ -40,27 +40,32 @@ pipeline { choice( name: 'sourcePath', choices: ['nas','web'], - description: 'choice which way to download the installation pacakge;web is Office Web and nas means taos nas server ' + description: 'Choice which way to download the installation pacakge;web is Office Web and nas means taos nas server ' + ) + choice( + name: 'verMode', + choices: ['all','community','enterprise'], + description: 'Choice which types of package you want do check ' ) string ( name:'version', - defaultValue:'3.0.1.6', - description: 'release version number,eg: 3.0.0.1 or 3.0.0.' + defaultValue:'3.0.1.7', + description: 'Release version number,eg: 3.0.0.1 or 3.0.0.' ) string ( name:'baseVersion', - defaultValue:'3.0.1.6', - description: 'This number of baseVerison is generally not modified.Now it is 3.0.0.1' + defaultValue:'3.0.1.7', + description: 'The number of baseVerison is generally not modified.Now it is 3.0.0.1' ) string ( name:'toolsVersion', defaultValue:'2.2.7', - description: 'This number of baseVerison is generally not modified.Now it is 3.0.0.1' + description: 'Release version number,eg:2.2.0' ) string ( name:'toolsBaseVersion', defaultValue:'2.1.2', - description: 'This number of baseVerison is generally not modified.Now it is 3.0.0.1' + description: 'The number of baseVerison is generally not modified.Now it is 2.1.2' ) } environment{ @@ -68,10 +73,10 @@ pipeline { TDINTERNAL_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal' TDENGINE_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal/community' BRANCH_NAME = '3.0' - - TD_SERVER_TAR = "TDengine-server-${version}-Linux-x64.tar.gz" + + TD_SERVER_TAR = "${preServerPackag}-${version}-Linux-x64.tar.gz" BASE_TD_SERVER_TAR = "TDengine-server-${baseVersion}-Linux-x64.tar.gz" - + TD_SERVER_ARM_TAR = "TDengine-server-${version}-Linux-arm64.tar.gz" BASE_TD_SERVER_ARM_TAR = "TDengine-server-${baseVersion}-Linux-arm64.tar.gz" @@ -108,19 +113,24 @@ pipeline { timeout(time: 30, unit: 'MINUTES'){ sync_source("${BRANCH_NAME}") sh ''' + if [ "${verMode}" = "all" ];then + verMode="community enterprise" + fi + verModeList=${verMode} + for verModeSin in ${verModeList} + do + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh -f server -m ${verModeSin} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 checkPackageRuning.py + + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh -f server -m ${verModeSin} -f server -l true -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 checkPackageRuning.py + done + cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${sourcePath} - python3 checkPackageRuning.py - ''' - sh ''' - cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${sourcePath} - python3 checkPackageRuning.py - ''' - sh ''' - cd ${TDENGINE_ROOT_DIR}/packaging - 
bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${sourcePath} - python3 checkPackageRuning.py + bash testpackage.sh -f server -m ${verModeSin} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t deb + python3 checkPackageRuning.py ''' } } @@ -131,22 +141,26 @@ pipeline { timeout(time: 30, unit: 'MINUTES'){ sync_source("${BRANCH_NAME}") sh ''' - cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${sourcePath} - python3 checkPackageRuning.py - ''' - sh ''' - cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${sourcePath} - python3 checkPackageRuning.py - ''' - sh ''' - cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${sourcePath} - python3 checkPackageRuning.py - dpkg -r tdengine - ''' + if [ "${verMode}" = "all" ];then + verMode="community enterprise" + fi + verModeList=${verMode} + for verModeSin in ${verModeList} + do + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh -f server -m ${verModeSin} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 checkPackageRuning.py + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh -f server -m ${verModeSin} -f server -l true -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 checkPackageRuning.py + done + + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh -f server -m ${verModeSin} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t deb + python3 checkPackageRuning.py + dpkg -r tdengine + ''' } } } @@ -156,19 +170,24 @@ pipeline { timeout(time: 30, unit: 'MINUTES'){ sync_source("${BRANCH_NAME}") sh ''' + if [ "${verMode}" = "all" ];then + verMode="community enterprise" + fi + verModeList=${verMode} + for verModeSin in ${verModeList} + do + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh -f server -m ${verModeSin} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 checkPackageRuning.py + + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh -f server -m ${verModeSin} -f server -l true -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 checkPackageRuning.py + done + cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${sourcePath} - python3 checkPackageRuning.py - ''' - sh ''' - cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${sourcePath} - python3 checkPackageRuning.py - ''' - sh ''' - cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${sourcePath} - python3 checkPackageRuning.py + bash testpackage.sh -f server -m ${verModeSin} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t rpm + python3 checkPackageRuning.py ''' } } @@ -179,21 +198,26 @@ pipeline { timeout(time: 30, unit: 'MINUTES'){ sync_source("${BRANCH_NAME}") sh ''' - cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${sourcePath} - python3 checkPackageRuning.py - ''' - sh ''' + if [ "${verMode}" = "all" ];then + verMode="community enterprise" + fi + 
verModeList=${verMode} + for verModeSin in ${verModeList} + do + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh -f server -m ${verModeSin} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 checkPackageRuning.py + + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh -f server -m ${verModeSin} -f server -l true -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 checkPackageRuning.py + done + cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${sourcePath} - python3 checkPackageRuning.py - ''' - sh ''' - cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${sourcePath} - python3 checkPackageRuning.py + bash testpackage.sh -f server -m ${verModeSin} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t rpm + python3 checkPackageRuning.py sudo rpm -e tdengine - ''' + ''' } } } @@ -203,9 +227,16 @@ pipeline { timeout(time: 30, unit: 'MINUTES'){ sync_source("${BRANCH_NAME}") sh ''' - cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_SERVER_ARM_TAR} ${version} ${BASE_TD_SERVER_ARM_TAR} ${baseVersion} server ${sourcePath} - python3 checkPackageRuning.py + if [ "${verMode}" = "all" ];then + verMode="community enterprise" + fi + verModeList=${verMode} + for verModeSin in ${verModeList} + do + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh -f server -m ${verModeSin} -f server -l false -c arm64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 checkPackageRuning.py + done ''' } } @@ -219,8 +250,16 @@ pipeline { steps { timeout(time: 30, unit: 'MINUTES'){ sh ''' - cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_CLIENT_TAR} ${version} ${BASE_TD_CLIENT_TAR} ${baseVersion} client ${sourcePath} + if [ "${verMode}" = "all" ];then + verMode="community enterprise" + fi + verModeList=${verMode} + for verModeSin in ${verModeList} + do + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh -f server -m ${verModeSin} -f client -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 checkPackageRuning.py + done python3 checkPackageRuning.py 192.168.0.21 ''' } @@ -231,8 +270,16 @@ pipeline { steps { timeout(time: 30, unit: 'MINUTES'){ sh ''' - cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_CLIENT_LITE_TAR} ${version} ${BASE_TD_CLIENT_LITE_TAR} ${baseVersion} client ${sourcePath} + if [ "${verMode}" = "all" ];then + verMode="community enterprise" + fi + verModeList=${verMode} + for verModeSin in ${verModeList} + do + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh -f server -m ${verModeSin} -f client -l true -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 checkPackageRuning.py + done python3 checkPackageRuning.py 192.168.0.24 ''' } @@ -245,8 +292,16 @@ pipeline { steps { timeout(time: 30, unit: 'MINUTES'){ sh ''' - cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh ${TD_CLIENT_ARM_TAR} ${version} ${BASE_TD_CLIENT_ARM_TAR} ${baseVersion} client ${sourcePath} + if [ "${verMode}" = "all" ];then + verMode="community enterprise" + fi + verModeList=${verMode} + for verModeSin in ${verModeList} + do + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh -f server -m ${verModeSin} -f client -l true -c arm64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 checkPackageRuning.py + done python3 checkPackageRuning.py 
192.168.0.21 ''' } diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh index b0e33835dd..846c8d160f 100755 --- a/packaging/testpackage.sh +++ b/packaging/testpackage.sh @@ -1,15 +1,72 @@ #!/bin/sh + + +function usage() { + echo "$0" + echo -e "\t -f test file type,server/client/tools/" + echo -e "\t -m pacakage version Type,community/enterprise" + echo -e "\t -l package type,lite or not" + echo -e "\t -c operation type,x64/arm64" + echo -e "\t -v pacakage version,3.0.1.7" + echo -e "\t -o pacakage version,3.0.1.7" + echo -e "\t -s source Path,web/nas" + echo -e "\t -t package Type,tar/rpm/deb" + echo -e "\t -h help" +} + + #parameter scriptDir=$(dirname $(readlink -f $0)) -packgeName=$1 -version=$2 -originPackageName=$3 -originversion=$4 -testFile=$5 -# sourcePath:web/nas -sourcePath=$6 +version="3.0.1.7" +originversion="3.0.1.7" +testFile="server" +verMode="communtity" +sourcePath="nas" +cpuType="x64" +lite="true" +packageType="tar" subFile="taos.tar.gz" +while getopts "m:c:f:l:s:o:t:v:h" opt; do + case $opt in + m) + verMode=$OPTARG + ;; + v) + version=$OPTARG + ;; + f) + testFile=$OPTARG + ;; + l) + lite=$OPTARG + ;; + s) + sourcePath=$OPTARG + ;; + o) + originversion=$OPTARG + ;; + c) + cpuType=$OPTARG + ;; + t) + packageType=$OPTARG + ;; + h) + usage + exit 0 + ;; + ?) + echo "Invalid option: -$OPTARG" + usage + exit 0 + ;; + esac +done + + +echo "testFile:${testFile},verMode:${verMode},lite:${lite},cpuType:${cpuType},packageType:${packageType},version-${version},originversion:${originversion},sourcePath:${sourcePath}" # Color setting RED='\033[41;30m' GREEN='\033[1;32m' @@ -21,20 +78,40 @@ BLUE_DARK='\033[0;34m' GREEN_UNDERLINE='\033[4;32m' NC='\033[0m' -if [ ${testFile} = "server" ];then - tdPath="TDengine-server-${version}" - originTdpPath="TDengine-server-${originversion}" +if [[ ${verMode} = "enterprise" ]];then + prePackag="TDengine-enterprise-${testFile}" +elif [ ${verMode} = "community" ];then + prePackag="TDengine-${testFile}" +fi +if [ ${lite} = "true" ];then + packageLite="-Lite" +elif [ ${lite} = "false" ];then + packageLite="" +fi +if [[ "$packageType" = "tar" ]] ;then + packageType="tar.gz" +fi + +tdPath="${prePackag}-${version}" +originTdpPath="${prePackag}-${originversion}" + +packgeName="${tdPath}-Linux-${cpuType}${packageLite}.${packageType}" +originPackageName="${originTdpPath}-Linux-${cpuType}${packageLite}.${packageType}" + +if [ "$testFile" == "server" ] ;then installCmd="install.sh" elif [ ${testFile} = "client" ];then - tdPath="TDengine-client-${version}" - originTdpPath="TDengine-client-${originversion}" installCmd="install_client.sh" elif [ ${testFile} = "tools" ];then tdPath="taosTools-${version}" originTdpPath="taosTools-${originversion}" + packgeName="${tdPath}-Linux-${cpuType}${packageLite}.${packageType}" + originPackageName="${originTdpPath}-Linux-${cpuType}${packageLite}.${packageType}" installCmd="install-taostools.sh" fi + +echo "tdPath:${tdPath},originTdpPath:${originTdpPath},packgeName:${packgeName},originPackageName:${originPackageName}" function cmdInstall { command=$1 if command -v ${command} ;then @@ -76,16 +153,16 @@ file=$1 versionPath=$2 sourceP=$3 nasServerIP="192.168.1.131" -packagePath="/nas/TDengine/v${versionPath}/community" +packagePath="/nas/TDengine/v${versionPath}/${verMode}" if [ -f ${file} ];then echoColor YD "${file} already exists ,it will delete it and download it again " rm -rf ${file} fi -if [ ${sourceP} = 'web' ];then +if [[ ${sourceP} = 'web' ]];then echoColor BD "====download====:wget 
https://www.taosdata.com/assets-download/3.0/${file}" wget https://www.taosdata.com/assets-download/3.0/${file} -elif [ ${sourceP} = 'nas' ];then +elif [[ ${sourceP} = 'nas' ]];then echoColor BD "====download====:scp root@${nasServerIP}:${packagePath}/${file} ." scp root@${nasServerIP}:${packagePath}/${file} . fi From c3514a97a204275d034d612cae41c322762d85c3 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 22 Nov 2022 01:53:08 +0800 Subject: [PATCH 24/69] test:modify two options that downloads the installPackages --- packaging/MPtestJenkinsfile | 93 ++++++++++++++++++++++++------------- 1 file changed, 60 insertions(+), 33 deletions(-) diff --git a/packaging/MPtestJenkinsfile b/packaging/MPtestJenkinsfile index 0e958e8384..de6cace508 100644 --- a/packaging/MPtestJenkinsfile +++ b/packaging/MPtestJenkinsfile @@ -117,19 +117,26 @@ pipeline { verMode="community enterprise" fi verModeList=${verMode} + ''' + + sh ''' for verModeSin in ${verModeList} do cd ${TDENGINE_ROOT_DIR}/packaging bash testpackage.sh -f server -m ${verModeSin} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar python3 checkPackageRuning.py - - cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh -f server -m ${verModeSin} -f server -l true -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar - python3 checkPackageRuning.py done + ''' + sh ''' cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh -f server -m ${verModeSin} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t deb + bash testpackage.sh -f server -m community -f server -l true -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 checkPackageRuning.py + ''' + + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh -f server -m community -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t deb python3 checkPackageRuning.py ''' } @@ -145,19 +152,26 @@ pipeline { verMode="community enterprise" fi verModeList=${verMode} + ''' + + sh ''' for verModeSin in ${verModeList} do cd ${TDENGINE_ROOT_DIR}/packaging bash testpackage.sh -f server -m ${verModeSin} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar python3 checkPackageRuning.py - - cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh -f server -m ${verModeSin} -f server -l true -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar - python3 checkPackageRuning.py done + ''' + sh ''' cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh -f server -m ${verModeSin} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t deb + bash testpackage.sh -f server -m community -f server -l true -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 checkPackageRuning.py + ''' + + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh -f server -m community -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t deb python3 checkPackageRuning.py dpkg -r tdengine ''' @@ -174,19 +188,25 @@ pipeline { verMode="community enterprise" fi verModeList=${verMode} + ''' + sh ''' for verModeSin in ${verModeList} do cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh -f server -m ${verModeSin} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar - python3 checkPackageRuning.py - - cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh -f server -m ${verModeSin} -f server -l true -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + bash 
testpackage.sh -f server -m community -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar python3 checkPackageRuning.py done + ''' + sh ''' cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh -f server -m ${verModeSin} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t rpm + bash testpackage.sh -f server -m community -f server -l true -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 checkPackageRuning.py + ''' + + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh -f server -m community -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t rpm python3 checkPackageRuning.py ''' } @@ -202,19 +222,25 @@ pipeline { verMode="community enterprise" fi verModeList=${verMode} + ''' + sh ''' for verModeSin in ${verModeList} do cd ${TDENGINE_ROOT_DIR}/packaging bash testpackage.sh -f server -m ${verModeSin} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar python3 checkPackageRuning.py - - cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh -f server -m ${verModeSin} -f server -l true -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar - python3 checkPackageRuning.py - done + done + ''' + sh ''' cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh -f server -m ${verModeSin} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t rpm + bash testpackage.sh -f server -m community -f server -l true -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 checkPackageRuning.py + ''' + + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh -f server -m community -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t rpm python3 checkPackageRuning.py sudo rpm -e tdengine ''' @@ -231,6 +257,9 @@ pipeline { verMode="community enterprise" fi verModeList=${verMode} + ''' + + sh ''' for verModeSin in ${verModeList} do cd ${TDENGINE_ROOT_DIR}/packaging @@ -254,6 +283,8 @@ pipeline { verMode="community enterprise" fi verModeList=${verMode} + ''' + sh ''' for verModeSin in ${verModeList} do cd ${TDENGINE_ROOT_DIR}/packaging @@ -270,16 +301,10 @@ pipeline { steps { timeout(time: 30, unit: 'MINUTES'){ sh ''' - if [ "${verMode}" = "all" ];then - verMode="community enterprise" - fi - verModeList=${verMode} - for verModeSin in ${verModeList} - do - cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh -f server -m ${verModeSin} -f client -l true -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar - python3 checkPackageRuning.py - done + verModeList=community + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh -f server -m ${verModeSin} -f client -l true -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 checkPackageRuning.py python3 checkPackageRuning.py 192.168.0.24 ''' } @@ -296,10 +321,12 @@ pipeline { verMode="community enterprise" fi verModeList=${verMode} + ''' + sh ''' for verModeSin in ${verModeList} do cd ${TDENGINE_ROOT_DIR}/packaging - bash testpackage.sh -f server -m ${verModeSin} -f client -l true -c arm64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + bash testpackage.sh -f server -m ${verModeSin} -f client -l false -c arm64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar python3 checkPackageRuning.py done python3 checkPackageRuning.py 192.168.0.21 From 642202681c7ee1c4359d794b7589f9059a1af5b1 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 22 Nov 2022 09:22:13 +0800 Subject: [PATCH 25/69] test: 
add asan case --- tests/parallel_test/cases.task | 132 ++++++++++++++++----------------- 1 file changed, 66 insertions(+), 66 deletions(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 75ff515078..1741e282fb 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -431,40 +431,40 @@ ,,,system-test,python3 ./test.py -f 1-insert/time_range_wise.py ,,,system-test,python3 ./test.py -f 1-insert/block_wise.py ,,,system-test,python3 ./test.py -f 1-insert/create_retentions.py -,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/table_param_ttl.py ,,,system-test,python3 ./test.py -f 1-insert/mutil_stage.py -,,,system-test,python3 ./test.py -f 1-insert/table_param_ttl.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/table_param_ttl.py +,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/table_param_ttl.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/update_data_muti_rows.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/db_tb_name_check.py ,,,system-test,python3 ./test.py -f 1-insert/database_pre_suf.py ,,,system-test,python3 ./test.py -f 1-insert/InsertFuturets.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/show.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py -,,,system-test,python3 ./test.py -f 2-query/abs.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/and_or_for_byte.py -,,,system-test,python3 ./test.py -f 2-query/and_or_for_byte.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/and_or_for_byte.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/apercentile.py -,,,system-test,python3 ./test.py -f 2-query/apercentile.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/apercentile.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arccos.py -,,,system-test,python3 ./test.py -f 2-query/arccos.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arccos.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arcsin.py -,,,system-test,python3 ./test.py -f 2-query/arcsin.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arcsin.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arctan.py -,,,system-test,python3 ./test.py -f 2-query/arctan.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arctan.py -R ,,,system-test,python3 ./test.py -f 2-query/avg.py ,,,system-test,python3 ./test.py -f 2-query/avg.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/between.py -,,,system-test,python3 ./test.py -f 2-query/between.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/between.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/bottom.py -,,,system-test,python3 ./test.py -f 2-query/bottom.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/bottom.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cast.py -,,,system-test,python3 ./test.py -f 2-query/cast.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cast.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ceil.py -,,,system-test,python3 ./test.py -f 2-query/ceil.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ceil.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/char_length.py -,,,system-test,python3 ./test.py -f 2-query/char_length.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/char_length.py 
-R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/check_tsdb.py -,,,system-test,python3 ./test.py -f 2-query/check_tsdb.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/check_tsdb.py -R ,,,system-test,python3 ./test.py -f 2-query/concat.py ,,,system-test,python3 ./test.py -f 2-query/concat.py -R ,,,system-test,python3 ./test.py -f 2-query/concat_ws.py @@ -472,63 +472,63 @@ ,,,system-test,python3 ./test.py -f 2-query/concat_ws2.py ,,,system-test,python3 ./test.py -f 2-query/concat_ws2.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cos.py -,,,system-test,python3 ./test.py -f 2-query/cos.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cos.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -,,,system-test,python3 ./test.py -f 2-query/count_partition.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count.py -,,,system-test,python3 ./test.py -f 2-query/count.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/countAlwaysReturnValue.py -,,,system-test,python3 ./test.py -f 2-query/countAlwaysReturnValue.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/countAlwaysReturnValue.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/db.py -,,,system-test,python3 ./test.py -f 2-query/db.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/db.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/diff.py -,,,system-test,python3 ./test.py -f 2-query/diff.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/diff.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distinct.py -,,,system-test,python3 ./test.py -f 2-query/distinct.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distinct.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_apercentile.py -,,,system-test,python3 ./test.py -f 2-query/distribute_agg_apercentile.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_apercentile.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_avg.py -,,,system-test,python3 ./test.py -f 2-query/distribute_agg_avg.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_avg.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_count.py -,,,system-test,python3 ./test.py -f 2-query/distribute_agg_count.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_count.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_max.py -,,,system-test,python3 ./test.py -f 2-query/distribute_agg_max.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_max.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_min.py -,,,system-test,python3 ./test.py -f 2-query/distribute_agg_min.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_min.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_spread.py -,,,system-test,python3 ./test.py -f 2-query/distribute_agg_spread.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_spread.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_stddev.py -,,,system-test,python3 ./test.py -f 2-query/distribute_agg_stddev.py -R 
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_stddev.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_sum.py -,,,system-test,python3 ./test.py -f 2-query/distribute_agg_sum.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_sum.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/explain.py -,,,system-test,python3 ./test.py -f 2-query/explain.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/explain.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/first.py -,,,system-test,python3 ./test.py -f 2-query/first.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/first.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/floor.py -,,,system-test,python3 ./test.py -f 2-query/floor.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/floor.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -,,,system-test,python3 ./test.py -f 2-query/function_null.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_stateduration.py -,,,system-test,python3 ./test.py -f 2-query/function_stateduration.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_stateduration.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/histogram.py -,,,system-test,python3 ./test.py -f 2-query/histogram.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/histogram.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/hyperloglog.py -,,,system-test,python3 ./test.py -f 2-query/hyperloglog.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/hyperloglog.py -R ,,,system-test,python3 ./test.py -f 2-query/interp.py ,,,system-test,python3 ./test.py -f 2-query/interp.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/irate.py -,,,system-test,python3 ./test.py -f 2-query/irate.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/irate.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/join.py -,,,system-test,python3 ./test.py -f 2-query/join.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/join.py -R ,,,system-test,python3 ./test.py -f 2-query/last_row.py ,,,system-test,python3 ./test.py -f 2-query/last_row.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last.py -,,,system-test,python3 ./test.py -f 2-query/last.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/leastsquares.py -,,,system-test,python3 ./test.py -f 2-query/leastsquares.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/leastsquares.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/length.py -,,,system-test,python3 ./test.py -f 2-query/length.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/length.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/log.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/log.py -R ,,,system-test,python3 ./test.py -f 2-query/lower.py @@ -536,25 +536,25 @@ ,,,system-test,python3 ./test.py -f 2-query/ltrim.py ,,,system-test,python3 ./test.py -f 2-query/ltrim.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -,,,system-test,python3 ./test.py -f 2-query/mavg.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 
2-query/max_partition.py -,,,system-test,python3 ./test.py -f 2-query/max_partition.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max.py -,,,system-test,python3 ./test.py -f 2-query/max.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/min.py -,,,system-test,python3 ./test.py -f 2-query/min.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/min.py -R ,,,system-test,python3 ./test.py -f 2-query/mode.py ,,,system-test,python3 ./test.py -f 2-query/mode.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Now.py -,,,system-test,python3 ./test.py -f 2-query/Now.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Now.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/percentile.py -,,,system-test,python3 ./test.py -f 2-query/percentile.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/percentile.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/pow.py -,,,system-test,python3 ./test.py -f 2-query/pow.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/pow.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/query_cols_tags_and_or.py -,,,system-test,python3 ./test.py -f 2-query/query_cols_tags_and_or.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/query_cols_tags_and_or.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/round.py -,,,system-test,python3 ./test.py -f 2-query/round.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/round.py -R ,,,system-test,python3 ./test.py -f 2-query/rtrim.py ,,,system-test,python3 ./test.py -f 2-query/rtrim.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sample.py @@ -566,51 +566,51 @@ ,,,system-test,python3 ./test.py -f 2-query/sml.py ,,,system-test,python3 ./test.py -f 2-query/sml.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/spread.py -,,,system-test,python3 ./test.py -f 2-query/spread.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/spread.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sqrt.py -,,,system-test,python3 ./test.py -f 2-query/sqrt.py -R +,,y,system-test,./pytest.sh python3./test.py -f 2-query/sqrt.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/statecount.py -,,,system-test,python3 ./test.py -f 2-query/statecount.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/statecount.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py ,,,system-test,python3 ./test.py -f 2-query/stateduration.py -R ,,,system-test,python3 ./test.py -f 2-query/substr.py ,,,system-test,python3 ./test.py -f 2-query/substr.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sum.py -,,,system-test,python3 ./test.py -f 2-query/sum.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sum.py -R ,,,system-test,python3 ./test.py -f 2-query/tail.py ,,,system-test,python3 ./test.py -f 2-query/tail.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tan.py -,,,system-test,python3 ./test.py -f 2-query/tan.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tan.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Timediff.py -,,,system-test,python3 ./test.py -f 2-query/Timediff.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Timediff.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 
2-query/timetruncate.py -,,,system-test,python3 ./test.py -f 2-query/timetruncate.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/timetruncate.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/timezone.py -,,,system-test,python3 ./test.py -f 2-query/timezone.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/timezone.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/To_iso8601.py -,,,system-test,python3 ./test.py -f 2-query/To_iso8601.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/To_iso8601.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/To_unixtimestamp.py -,,,system-test,python3 ./test.py -f 2-query/To_unixtimestamp.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/To_unixtimestamp.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Today.py -,,,system-test,python3 ./test.py -f 2-query/Today.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Today.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/top.py -,,,system-test,python3 ./test.py -f 2-query/top.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/top.py -R ,,,system-test,python3 ./test.py -f 2-query/tsbsQuery.py ,,,system-test,python3 ./test.py -f 2-query/tsbsQuery.py -R ,,,system-test,python3 ./test.py -f 2-query/ttl_comment.py ,,,system-test,python3 ./test.py -f 2-query/ttl_comment.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/twa.py -,,,system-test,python3 ./test.py -f 2-query/twa.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/twa.py -R ,,,system-test,python3 ./test.py -f 2-query/union.py ,,,system-test,python3 ./test.py -f 2-query/union.py -R ,,,system-test,python3 ./test.py -f 2-query/unique.py ,,,system-test,python3 ./test.py -f 2-query/unique.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/upper.py -,,,system-test,python3 ./test.py -f 2-query/upper.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/upper.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/varchar.py -,,,system-test,python3 ./test.py -f 2-query/varchar.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/varchar.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/case_when.py -,,,system-test,python3 ./test.py -f 2-query/case_when.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/case_when.py -R ,,,system-test,python3 ./test.py -f 1-insert/update_data.py ,,,system-test,python3 ./test.py -f 1-insert/tb_100w_data_order.py ,,,system-test,python3 ./test.py -f 1-insert/delete_stable.py From 726d4d69aad90bca7d517aa1a14d4de4d902677e Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 22 Nov 2022 09:55:36 +0800 Subject: [PATCH 26/69] test: add asan case --- tests/parallel_test/cases.task | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 1741e282fb..b91d53bcf2 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -572,7 +572,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/statecount.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/statecount.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -,,,system-test,python3 ./test.py -f 2-query/stateduration.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -R ,,,system-test,python3 ./test.py -f 2-query/substr.py ,,,system-test,python3 ./test.py -f 
2-query/substr.py -R
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sum.py
@@ -601,8 +601,8 @@
 ,,,system-test,python3 ./test.py -f 2-query/ttl_comment.py -R
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/twa.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/twa.py -R
-,,,system-test,python3 ./test.py -f 2-query/union.py
-,,,system-test,python3 ./test.py -f 2-query/union.py -R
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/union.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/union.py -R
 ,,,system-test,python3 ./test.py -f 2-query/unique.py
 ,,,system-test,python3 ./test.py -f 2-query/unique.py -R
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/upper.py

From b9f7d181b64255a913a06d29abecb7727324a501 Mon Sep 17 00:00:00 2001
From: Minglei Jin
Date: Tue, 22 Nov 2022 10:47:50 +0800
Subject: [PATCH 27/69] fix: free the batch create table reqs' decoded comment field

---
 source/dnode/vnode/src/vnd/vnodeSvr.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index 5c8c166833..c75d1ffded 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -609,6 +609,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR
 _exit:
   for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
     pCreateReq = req.pReqs + iReq;
+    taosMemoryFree(pCreateReq->comment);
     taosArrayDestroy(pCreateReq->ctb.tagName);
   }
   taosArrayDestroyEx(rsp.pArray, tFreeSVCreateTbRsp);
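[Editor's note, not part of the patch stream] Patch 27 above plugs a decode-side memory leak in the batch create-table path: the request decoder heap-allocates each request's optional comment field, but the _exit cleanup loop previously released only the tag-name array, so the comment buffer leaked for every request that carried one. The stand-alone C sketch below illustrates the ownership rule the fix enforces -- whoever decodes the message owns, and must free, every decoded member, even on early-exit paths. The struct and function names here are invented for the example and are not TDengine APIs; only the `comment` member and the free-per-request loop mirror the real code.

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical decoded create-table request; pointer members are heap-owned. */
    typedef struct {
      char *comment;  /* optional: stays NULL when absent from the wire */
      char *tagName;  /* stand-in for the tag-name array in the real code */
    } CreateTbReq;

    /* A decoder in the spirit of the vnode one: it allocates member buffers. */
    static int decodeReq(CreateTbReq *req, const char *wireComment) {
      req->comment = wireComment ? strdup(wireComment) : NULL;
      req->tagName = strdup("t0");
      return req->tagName ? 0 : -1;
    }

    /* One cleanup point that frees every owned member, like the patched _exit loop. */
    static void freeReq(CreateTbReq *req) {
      free(req->comment); /* the member patch 27 started freeing; free(NULL) is safe */
      free(req->tagName);
    }

    int main(void) {
      CreateTbReq reqs[2] = {{0}};
      decodeReq(&reqs[0], "table comment");
      decodeReq(&reqs[1], NULL);
      for (int i = 0; i < 2; i++) freeReq(&reqs[i]);
      return 0;
    }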
From fbb0a1e921dbc12aff0e3abdaae054d603834512 Mon Sep 17 00:00:00 2001
From: Haojun Liao
Date: Tue, 22 Nov 2022 11:08:55 +0800
Subject: [PATCH 28/69] fix(query): set correct tsdbreader during the creation of tableblockscan info

---
 source/dnode/vnode/src/tsdb/tsdbRead.c | 17 ++---------------
 1 file changed, 2 insertions(+), 15 deletions(-)

diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index fbafc63382..69fae4d481 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -329,20 +329,6 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, const STableK
   }
 
   taosHashPut(pTableMap, &pScanInfo->uid, sizeof(uint64_t), &pScanInfo, POINTER_BYTES);
-
-#if 0
-//    STableBlockScanInfo info = {.lastKey = 0, .uid = idList[j].uid};
-    if (ASCENDING_TRAVERSE(pTsdbReader->order)) {
-      int64_t skey = pTsdbReader->window.skey;
-      info.lastKey = (skey > INT64_MIN) ? (skey - 1) : skey;
-    } else {
-      int64_t ekey = pTsdbReader->window.ekey;
-      info.lastKey = (ekey < INT64_MAX) ? (ekey + 1) : ekey;
-    }
-
-    taosHashPut(pTableMap, &info.uid, sizeof(uint64_t), &info, sizeof(info));
-#endif
-
   tsdbTrace("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pTsdbReader, pScanInfo->uid,
             pScanInfo->lastKey, pTsdbReader->idStr);
 }
@@ -3798,7 +3784,8 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
     updateBlockSMAInfo(pReader->pSchema, &pReader->suppInfo);
   }
 
-  pReader->status.pTableMap = createDataBlockScanInfo(pReader, pTableList, numOfTables);
+  STsdbReader* p = (pReader->innerReader[0] != NULL)? pReader->innerReader[0]:pReader;
+  pReader->status.pTableMap = createDataBlockScanInfo(p, pTableList, numOfTables);
   if (pReader->status.pTableMap == NULL) {
     tsdbReaderClose(pReader);
     *ppReader = NULL;

From 96f691cd5c04ac2eb32cdd13e992891962d21dd9 Mon Sep 17 00:00:00 2001
From: yihaoDeng
Date: Tue, 22 Nov 2022 11:12:20 +0800
Subject: [PATCH 29/69] avoid invalid error

---
 source/common/src/tglobal.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index 27dcbd5be3..f2d8b9aa7c 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -277,7 +277,9 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) {
 static int32_t taosAddClientCfg(SConfig *pCfg) {
   char    defaultFqdn[TSDB_FQDN_LEN] = {0};
   int32_t defaultServerPort = 6030;
-  if (taosGetFqdn(defaultFqdn) != 0) return -1;
+  if (taosGetFqdn(defaultFqdn) != 0) {
+    strcpy(defaultFqdn, "localhost");
+  }
 
   if (cfgAddString(pCfg, "firstEp", "", 1) != 0) return -1;
   if (cfgAddString(pCfg, "secondEp", "", 1) != 0) return -1;

From 33538e39e24715cbb8f68497a9eaad86dcc9edba Mon Sep 17 00:00:00 2001
From: Shengliang Guan
Date: Tue, 22 Nov 2022 12:21:18 +0800
Subject: [PATCH 30/69] test: add asan case

---
 tests/parallel_test/cases.task | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task
index b91d53bcf2..1e5f2fad44 100644
--- a/tests/parallel_test/cases.task
+++ b/tests/parallel_test/cases.task
@@ -568,7 +568,7 @@
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/spread.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/spread.py -R
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sqrt.py
-,,y,system-test,./pytest.sh python3./test.py -f 2-query/sqrt.py -R
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sqrt.py -R
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/statecount.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/statecount.py -R
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py
@@ -597,8 +597,8 @@
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/top.py -R
 ,,,system-test,python3 ./test.py -f 2-query/tsbsQuery.py
 ,,,system-test,python3 ./test.py -f 2-query/tsbsQuery.py -R
-,,,system-test,python3 ./test.py -f 2-query/ttl_comment.py
-,,,system-test,python3 ./test.py -f 2-query/ttl_comment.py -R
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ttl_comment.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ttl_comment.py -R
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/twa.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/twa.py -R
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/union.py

From 1d48426e94ddb732bc132dc8d17e0262598fcbd6 Mon Sep 17 00:00:00 2001
From: Minghao Li
Date: Tue, 22 Nov 2022 12:45:31 +0800
Subject: [PATCH 31/69] refactor(sync): del wal in multi-replicas

---
 include/libs/sync/sync.h                      |  2 +-
 source/libs/sync/src/syncAppendEntriesReply.c |  3 ++
 source/libs/sync/src/syncMain.c               | 49 +++++++++++++------
 source/libs/sync/src/syncRaftLog.c            | 12 ++++-
 source/libs/sync/src/syncTimeout.c            | 24 ++++-----
 source/libs/sync/src/syncUtil.c               |  4 +-
 6 files changed, 63 insertions(+), 31 deletions(-)

diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h
index 3a808ac6f3..513ba8cb34 100644
--- a/include/libs/sync/sync.h
+++ b/include/libs/sync/sync.h
@@ -36,7 +36,7 @@ extern "C" {
 #define SYNC_DEL_WAL_MS (1000 * 60)
 #define
SYNC_ADD_QUORUM_COUNT 3 #define SYNC_MNODE_LOG_RETENTION 10000 -#define SYNC_VNODE_LOG_RETENTION 100 +#define SYNC_VNODE_LOG_RETENTION 20 #define SNAPSHOT_MAX_CLOCK_SKEW_MS 1000 * 10 #define SNAPSHOT_WAIT_MS 1000 * 30 diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c index 13ea250155..c602788b19 100644 --- a/source/libs/sync/src/syncAppendEntriesReply.c +++ b/source/libs/sync/src/syncAppendEntriesReply.c @@ -67,6 +67,9 @@ int32_t syncNodeOnAppendEntriesReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) { if (pMsg->matchIndex > oldMatchIndex) { syncIndexMgrSetIndex(ths->pMatchIndex, &(pMsg->srcId), pMsg->matchIndex); syncMaybeAdvanceCommitIndex(ths); + + // maybe update minMatchIndex + ths->minMatchIndex = syncMinMatchIndex(ths); } syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), pMsg->matchIndex + 1); diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index a427d7aa0c..88b8ba7e25 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -243,6 +243,18 @@ int32_t syncBeginSnapshot(int64_t rid, int64_t lastApplyIndex) { goto _DEL_WAL; } else { + lastApplyIndex -= SYNC_VNODE_LOG_RETENTION; + + SyncIndex beginIndex = pSyncNode->pLogStore->syncLogBeginIndex(pSyncNode->pLogStore); + SyncIndex endIndex = pSyncNode->pLogStore->syncLogEndIndex(pSyncNode->pLogStore); + bool isEmpty = pSyncNode->pLogStore->syncLogIsEmpty(pSyncNode->pLogStore); + + if (isEmpty || !(lastApplyIndex >= beginIndex && lastApplyIndex <= endIndex)) { + sNTrace(pSyncNode, "new-snapshot-index:%" PRId64 ", empty:%d, do not delete wal", lastApplyIndex, isEmpty); + syncNodeRelease(pSyncNode); + return 0; + } + // vnode if (pSyncNode->replicaNum > 1) { // multi replicas @@ -300,26 +312,31 @@ int32_t syncBeginSnapshot(int64_t rid, int64_t lastApplyIndex) { _DEL_WAL: do { - SyncIndex snapshottingIndex = atomic_load_64(&pSyncNode->snapshottingIndex); + SSyncLogStoreData* pData = pSyncNode->pLogStore->data; + SyncIndex snapshotVer = walGetSnapshotVer(pData->pWal); + SyncIndex walCommitVer = walGetCommittedVer(pData->pWal); + SyncIndex wallastVer = walGetLastVer(pData->pWal); + if (lastApplyIndex <= walCommitVer) { + SyncIndex snapshottingIndex = atomic_load_64(&pSyncNode->snapshottingIndex); - if (snapshottingIndex == SYNC_INDEX_INVALID) { - atomic_store_64(&pSyncNode->snapshottingIndex, lastApplyIndex); - pSyncNode->snapshottingTime = taosGetTimestampMs(); + if (snapshottingIndex == SYNC_INDEX_INVALID) { + atomic_store_64(&pSyncNode->snapshottingIndex, lastApplyIndex); + pSyncNode->snapshottingTime = taosGetTimestampMs(); + + code = walBeginSnapshot(pData->pWal, lastApplyIndex); + if (code == 0) { + sNTrace(pSyncNode, "wal snapshot begin, index:%" PRId64 ", last apply index:%" PRId64, + pSyncNode->snapshottingIndex, lastApplyIndex); + } else { + sNError(pSyncNode, "wal snapshot begin error since:%s, index:%" PRId64 ", last apply index:%" PRId64, + terrstr(terrno), pSyncNode->snapshottingIndex, lastApplyIndex); + atomic_store_64(&pSyncNode->snapshottingIndex, SYNC_INDEX_INVALID); + } - SSyncLogStoreData* pData = pSyncNode->pLogStore->data; - code = walBeginSnapshot(pData->pWal, lastApplyIndex); - if (code == 0) { - sNTrace(pSyncNode, "wal snapshot begin, index:%" PRId64 ", last apply index:%" PRId64, - pSyncNode->snapshottingIndex, lastApplyIndex); } else { - sNError(pSyncNode, "wal snapshot begin error since:%s, index:%" PRId64 ", last apply index:%" PRId64, - terrstr(terrno), pSyncNode->snapshottingIndex, lastApplyIndex); 
- atomic_store_64(&pSyncNode->snapshottingIndex, SYNC_INDEX_INVALID); + sNTrace(pSyncNode, "snapshotting for %" PRId64 ", do not delete wal for new-snapshot-index:%" PRId64, + snapshottingIndex, lastApplyIndex); } - - } else { - sNTrace(pSyncNode, "snapshotting for %" PRId64 ", do not delete wal for new-snapshot-index:%" PRId64, - snapshottingIndex, lastApplyIndex); } } while (0); diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c index db0b6d1d02..2f824b6b3b 100644 --- a/source/libs/sync/src/syncRaftLog.c +++ b/source/libs/sync/src/syncRaftLog.c @@ -375,7 +375,17 @@ static int32_t raftLogGetLastEntry(SSyncLogStore* pLogStore, SSyncRaftEntry** pp int32_t raftLogUpdateCommitIndex(SSyncLogStore* pLogStore, SyncIndex index) { SSyncLogStoreData* pData = pLogStore->data; SWal* pWal = pData->pWal; - // ASSERT(walCommit(pWal, index) == 0); + + // need not update + SyncIndex snapshotVer = walGetSnapshotVer(pWal); + SyncIndex walCommitVer = walGetCommittedVer(pWal); + SyncIndex wallastVer = walGetLastVer(pWal); + + if (index < snapshotVer || index > wallastVer) { + // ignore + return 0; + } + int32_t code = walCommit(pWal, index); if (code != 0) { int32_t err = terrno; diff --git a/source/libs/sync/src/syncTimeout.c b/source/libs/sync/src/syncTimeout.c index 3d4583aadb..151e5cdf46 100644 --- a/source/libs/sync/src/syncTimeout.c +++ b/source/libs/sync/src/syncTimeout.c @@ -62,18 +62,20 @@ static int32_t syncNodeTimerRoutine(SSyncNode* ths) { syncNodeCleanConfigIndex(ths); } - // end timeout wal snapshot int64_t timeNow = taosGetTimestampMs(); - if (timeNow - ths->snapshottingIndex > SYNC_DEL_WAL_MS && - atomic_load_64(&ths->snapshottingIndex) != SYNC_INDEX_INVALID) { - SSyncLogStoreData* pData = ths->pLogStore->data; - int32_t code = walEndSnapshot(pData->pWal); - if (code != 0) { - sNError(ths, "timer wal snapshot end error since:%s", terrstr()); - return -1; - } else { - sNTrace(ths, "wal snapshot end, index:%" PRId64, atomic_load_64(&ths->snapshottingIndex)); - atomic_store_64(&ths->snapshottingIndex, SYNC_INDEX_INVALID); + if (atomic_load_64(&ths->snapshottingIndex) != SYNC_INDEX_INVALID) { + // end timeout wal snapshot + if (timeNow - ths->snapshottingTime > SYNC_DEL_WAL_MS && + atomic_load_64(&ths->snapshottingIndex) != SYNC_INDEX_INVALID) { + SSyncLogStoreData* pData = ths->pLogStore->data; + int32_t code = walEndSnapshot(pData->pWal); + if (code != 0) { + sNError(ths, "timer wal snapshot end error since:%s", terrstr()); + return -1; + } else { + sNTrace(ths, "wal snapshot end, index:%" PRId64, atomic_load_64(&ths->snapshottingIndex)); + atomic_store_64(&ths->snapshottingIndex, SYNC_INDEX_INVALID); + } } } diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index b50336cd63..4fc7dd245d 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -239,11 +239,11 @@ void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNo "vgId:%d, sync %s " "%s" ", tm:%" PRIu64 ", cmt:%" PRId64 ", fst:%" PRId64 ", lst:%" PRId64 ", min:%" PRId64 ", snap:%" PRId64 - ", snap-tm:%" PRIu64 ", sby:%d, aq:%d, bch:%d, r-num:%d, lcfg:%" PRId64 + ", snap-tm:%" PRIu64 ", sby:%d, aq:%d, snaping:%" PRId64 ", r-num:%d, lcfg:%" PRId64 ", chging:%d, rsto:%d, dquorum:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s, %s", pNode->vgId, syncStr(pNode->state), eventLog, currentTerm, pNode->commitIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm, - pNode->pRaftCfg->isStandBy, 
aqItems, pNode->pRaftCfg->batchSize, pNode->replicaNum, + pNode->pRaftCfg->isStandBy, aqItems, pNode->snapshottingIndex, pNode->replicaNum, pNode->pRaftCfg->lastConfigIndex, pNode->changing, pNode->restoreFinish, quorum, pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, peerStr, cfgStr); } From cd49e5b4e5252791bcb99dfdda6436ab1011276c Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 22 Nov 2022 13:40:25 +0800 Subject: [PATCH 32/69] test:add testcase of enterprise installPackages --- packaging/MPtestJenkinsfile | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/packaging/MPtestJenkinsfile b/packaging/MPtestJenkinsfile index de6cace508..dad5b7f129 100644 --- a/packaging/MPtestJenkinsfile +++ b/packaging/MPtestJenkinsfile @@ -116,10 +116,7 @@ pipeline { if [ "${verMode}" = "all" ];then verMode="community enterprise" fi - verModeList=${verMode} - ''' - - sh ''' + verModeList=${verMode} for verModeSin in ${verModeList} do cd ${TDENGINE_ROOT_DIR}/packaging @@ -152,9 +149,6 @@ pipeline { verMode="community enterprise" fi verModeList=${verMode} - ''' - - sh ''' for verModeSin in ${verModeList} do cd ${TDENGINE_ROOT_DIR}/packaging @@ -188,8 +182,6 @@ pipeline { verMode="community enterprise" fi verModeList=${verMode} - ''' - sh ''' for verModeSin in ${verModeList} do cd ${TDENGINE_ROOT_DIR}/packaging @@ -222,8 +214,6 @@ pipeline { verMode="community enterprise" fi verModeList=${verMode} - ''' - sh ''' for verModeSin in ${verModeList} do cd ${TDENGINE_ROOT_DIR}/packaging @@ -257,9 +247,6 @@ pipeline { verMode="community enterprise" fi verModeList=${verMode} - ''' - - sh ''' for verModeSin in ${verModeList} do cd ${TDENGINE_ROOT_DIR}/packaging @@ -283,8 +270,6 @@ pipeline { verMode="community enterprise" fi verModeList=${verMode} - ''' - sh ''' for verModeSin in ${verModeList} do cd ${TDENGINE_ROOT_DIR}/packaging @@ -321,8 +306,6 @@ pipeline { verMode="community enterprise" fi verModeList=${verMode} - ''' - sh ''' for verModeSin in ${verModeList} do cd ${TDENGINE_ROOT_DIR}/packaging From 4d4ba0ff110781487e8e01a877b5fffbee6b16fb Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 22 Nov 2022 13:55:11 +0800 Subject: [PATCH 33/69] docs: update jdbc version in connector matrix for3.0 (#18332) * docs: update csharp connector status * docs: fix csharp ws bulk pulling * docs: clarify database param is optional to websocket dsn * docs: fix python version and a few typos * docs: fix jdbc version in connector matrix --- docs/en/14-reference/03-connector/index.mdx | 15 +++++++-------- docs/zh/08-connector/index.md | 15 +++++++-------- 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/docs/en/14-reference/03-connector/index.mdx b/docs/en/14-reference/03-connector/index.mdx index 4fd9c452d8..54031db618 100644 --- a/docs/en/14-reference/03-connector/index.mdx +++ b/docs/en/14-reference/03-connector/index.mdx @@ -26,14 +26,13 @@ Using REST connection can support a broader range of operating systems as it doe TDengine version updates often add new features, and the connector versions in the list are the best-fit versions of the connector. 
-| **TDengine Versions** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** | -| --------------------- | -------- | ---------- | ------------ | ------------- | --------------- | -------- | -| **3.0.0.0 and later** | 3.0.0 | current version | 3.0 branch | 3.0.0 | 3.0.0 | current version | -| **2.4.0.14 and up** | 2.0.38 | current version | develop branch | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | current version | -| **2.4.0.6 and up** | 2.0.37 | current version | develop branch | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | current version | -| **2.4.0.4 - 2.4.0.5** | 2.0.37 | current version | develop branch | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | current version | -| **2.2.x.x ** | 2.0.36 | current version | master branch | n/a | 2.0.7 - 2.0.9 | current version | -| **2.0.x.x ** | 2.0.34 | current version | master branch | n/a | 2.0.1 - 2.0.6 | current version | +| **TDengine Versions** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** | +| --------------------------- | -------------- | -------------- | -------------- | ------------- | --------------- | --------------- | +| **3.0.0.0 and later** | 3.0.2 + | latest version | 3.0 branch | 3.0.0 | 3.0.0 | current version | +| **2.4.0.14 and up ** | 2.0.38 | latest version | develop branch | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | current version | +| **2.4.0.4 - 2.4.0.13 ** | 2.0.37 | latest version | develop branch | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | current version | +| **2.2.x.x ** | 2.0.36 | latest version | master branch | n/a | 2.0.7 - 2.0.9 | current version | +| **2.0.x.x ** | 2.0.34 | latest version | master branch | n/a | 2.0.1 - 2.0.6 | current version | ## Functional Features diff --git a/docs/zh/08-connector/index.md b/docs/zh/08-connector/index.md index e00e0b2fa2..eecf564b90 100644 --- a/docs/zh/08-connector/index.md +++ b/docs/zh/08-connector/index.md @@ -26,14 +26,13 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速 TDengine 版本更新往往会增加新的功能特性,列表中的连接器版本为连接器最佳适配版本。 -| **TDengine 版本** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** | -| --------------------- | -------- | ---------- | ------------ | ------------- | --------------- | -------- | -| **3.0.0.0 及以上** | 3.0.0 | 当前版本 | 3.0 分支 | 3.0.0 | 3.0.0 | 当前版本 | -| **2.4.0.14 及以上** | 2.0.38 | 当前版本 | develop 分支 | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | 当前版本 | -| **2.4.0.6 及以上** | 2.0.37 | 当前版本 | develop 分支 | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | 当前版本 | -| **2.4.0.4 - 2.4.0.5** | 2.0.37 | 当前版本 | develop 分支 | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | 当前版本 | -| **2.2.x.x ** | 2.0.36 | 当前版本 | master 分支 | n/a | 2.0.7 - 2.0.9 | 当前版本 | -| **2.0.x.x ** | 2.0.34 | 当前版本 | master 分支 | n/a | 2.0.1 - 2.0.6 | 当前版本 | +| **TDengine 版本** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** | +| ---------------------- | --------- | ---------- | ------------ | ------------- | --------------- | -------- | +| **3.0.0.0 及以上** | 3.0.2以上 | 当前版本 | 3.0 分支 | 3.0.0 | 3.0.0 | 当前版本 | +| **2.4.0.14 及以上** | 2.0.38 | 当前版本 | develop 分支 | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | 当前版本 | +| **2.4.0.4 - 2.4.0.13** | 2.0.37 | 当前版本 | develop 分支 | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | 当前版本 | +| **2.2.x.x ** | 2.0.36 | 当前版本 | master 分支 | n/a | 2.0.7 - 2.0.9 | 当前版本 | +| **2.0.x.x ** | 2.0.34 | 当前版本 | master 分支 | n/a | 2.0.1 - 2.0.6 | 当前版本 | ## 功能特性 From 886e84c3150199058b6f6f2955b2d6d87dc63759 Mon Sep 17 00:00:00 2001 From: Xuefeng Tan <1172915550@qq.com> Date: Tue, 22 Nov 2022 13:56:35 +0800 Subject: [PATCH 34/69] enh(taosAdapter): keep debug information (#18324) --- tools/CMakeLists.txt | 26 ++++++++++++++------------ 1 file 
changed, 14 insertions(+), 12 deletions(-) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 0f3c383b9e..d61d25602b 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -101,19 +101,20 @@ ELSE () BUILD_COMMAND COMMAND set CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client COMMAND set CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib - COMMAND go build -a -o taosadapter.exe -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" - COMMAND go build -a -o taosadapter-debug.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" + COMMAND go build -a -o taosadapter.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" +# COMMAND go build -a -o taosadapter.exe -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" +# COMMAND go build -a -o taosadapter-debug.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" INSTALL_COMMAND - COMMAND cmake -E echo "Comparessing taosadapter.exe" - COMMAND cmake -E time upx taosadapter.exe +# COMMAND cmake -E echo "Comparessing taosadapter.exe" +# COMMAND cmake -E time upx taosadapter.exe COMMAND cmake -E echo "Copy taosadapter.exe" COMMAND cmake -E copy taosadapter.exe ${CMAKE_BINARY_DIR}/build/bin/taosadapter.exe COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E echo "Copy taosadapter.toml" COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/ - COMMAND cmake -E echo "Copy taosadapter-debug.exe" - COMMAND cmake -E copy taosadapter-debug.exe ${CMAKE_BINARY_DIR}/build/bin +# COMMAND cmake -E echo "Copy taosadapter-debug.exe" +# COMMAND cmake -E copy taosadapter-debug.exe ${CMAKE_BINARY_DIR}/build/bin ) ELSE (TD_WINDOWS) MESSAGE("Building taosAdapter on non-Windows") @@ -128,19 +129,20 @@ ELSE () PATCH_COMMAND COMMAND git clean -f -d BUILD_COMMAND - COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" - COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" + COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" +# COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" +# COMMAND 
CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" INSTALL_COMMAND - COMMAND cmake -E echo "Comparessing taosadapter.exe" - COMMAND upx taosadapter || : +# COMMAND cmake -E echo "Comparessing taosadapter.exe" +# COMMAND upx taosadapter || : COMMAND cmake -E echo "Copy taosadapter" COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E echo "Copy taosadapter.toml" COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/ - COMMAND cmake -E echo "Copy taosadapter-debug" - COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin +# COMMAND cmake -E echo "Copy taosadapter-debug" +# COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin ) ENDIF (TD_WINDOWS) ENDIF () From d3fe6f0ba38a04ff68c8c65330c5dbc54c170e83 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Tue, 22 Nov 2022 14:36:27 +0800 Subject: [PATCH 35/69] fix [ASAN] memory leak while tdbRealloc --- source/libs/executor/src/timewindowoperator.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index fc78f8a20d..1ec7d6d26b 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -2703,6 +2703,7 @@ static void rebuildIntervalWindow(SOperatorInfo* pOperator, SArray* pWinArray, S pChildSup->rowEntryInfoOffset, &pChInfo->aggSup); updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &parentWin, true); compactFunctions(pSup->pCtx, pChildSup->pCtx, numOfOutput, pTaskInfo, &pInfo->twAggSup.timeWindowData); + releaseOutputBuf(pChInfo->pState, pWinRes, pChResult); } if (num > 0 && pUpdatedMap) { saveWinResultInfo(pCurResult->win.skey, pWinRes->groupId, pUpdatedMap); From fec994e12444340e974bcf626e4d3d0beaedfd04 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 22 Nov 2022 15:04:07 +0800 Subject: [PATCH 36/69] avoid double compress --- source/libs/transport/src/transCli.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 4fb00b1a6d..eaef8184b0 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -820,7 +820,7 @@ void cliSend(SCliConn* pConn) { uv_timer_start((uv_timer_t*)pConn->timer, cliReadTimeoutCb, TRANS_READ_TIMEOUT, 0); } - if (pTransInst->compressSize != -1 && pTransInst->compressSize < pMsg->contLen) { + if (pHead->comp == 0 && pTransInst->compressSize != -1 && pTransInst->compressSize < pMsg->contLen) { msgLen = transCompressMsg(pMsg->pCont, pMsg->contLen) + sizeof(STransMsgHead); pHead->msgLen = (int32_t)htonl((uint32_t)msgLen); } From 865332d960862fbfee3ee6074b254731027292fc Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 22 Nov 2022 15:27:07 +0800 Subject: [PATCH 37/69] avoid double compress --- source/libs/transport/src/transCli.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index eaef8184b0..5b8f059e04 100644 --- a/source/libs/transport/src/transCli.c +++ 
b/source/libs/transport/src/transCli.c
@@ -791,6 +791,7 @@ void cliSend(SCliConn* pConn) {
 
   int msgLen = transMsgLenFromCont(pMsg->contLen);
   STransMsgHead* pHead = transHeadFromCont(pMsg->pCont);
+  pHead->ahandle = pCtx != NULL ? (uint64_t)pCtx->ahandle : 0;
   pHead->noResp = REQUEST_NO_RESP(pMsg) ? 1 : 0;
   pHead->persist = REQUEST_PERSIS_HANDLE(pMsg) ? 1 : 0;
@@ -820,10 +821,15 @@
     uv_timer_start((uv_timer_t*)pConn->timer, cliReadTimeoutCb, TRANS_READ_TIMEOUT, 0);
   }
 
-  if (pHead->comp == 0 && pTransInst->compressSize != -1 && pTransInst->compressSize < pMsg->contLen) {
-    msgLen = transCompressMsg(pMsg->pCont, pMsg->contLen) + sizeof(STransMsgHead);
-    pHead->msgLen = (int32_t)htonl((uint32_t)msgLen);
+  if (pHead->comp == 0) {
+    if (pTransInst->compressSize != -1 && pTransInst->compressSize < pMsg->contLen) {
+      msgLen = transCompressMsg(pMsg->pCont, pMsg->contLen) + sizeof(STransMsgHead);
+      pHead->msgLen = (int32_t)htonl((uint32_t)msgLen);
+    }
+  } else {
+    msgLen = (int32_t)ntohl((uint32_t)(pHead->msgLen));
   }
+
   tGDebug("%s conn %p %s is sent to %s, local info %s, len:%d", CONN_GET_INST_LABEL(pConn), pConn,
           TMSG_INFO(pHead->msgType), pConn->dst, pConn->src, msgLen);
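[Editor's note, not part of the patch stream] Patches 36 and 37 together make client-side compression idempotent: a payload is compressed only while the header's `comp` flag is still clear, so a message that passes through `cliSend` more than once (for example on a retry) is not compressed a second time, and on the already-compressed path the length used for logging is recovered from the encoded header instead of being recomputed. Below is a minimal, self-contained sketch of that flag-guarded, compress-once pattern; the `MsgHead` type and the `compressPayload` stub are illustrative stand-ins, not the transport module's real API.

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative wire header: comp marks the payload as already compressed. */
    typedef struct {
      uint8_t  comp;
      uint32_t msgLen; /* kept in network byte order, as in the patch */
    } MsgHead;

    /* Stand-in for the real compressor; returns the compressed payload size. */
    static int compressPayload(char *buf, int len) {
      (void)buf;
      return len / 2; /* pretend the payload halved */
    }

    /* Compress at most once, no matter how many times the message is handed to send. */
    static int prepareSend(MsgHead *head, char *payload, int contLen, int threshold) {
      int msgLen;
      if (head->comp == 0 && threshold != -1 && threshold < contLen) {
        msgLen = compressPayload(payload, contLen) + (int)sizeof(MsgHead);
        head->msgLen = htonl((uint32_t)msgLen);
        head->comp = 1; /* in this sketch the sender records the fact itself */
      } else {
        msgLen = (int)ntohl(head->msgLen); /* trust the header on a re-send */
      }
      return msgLen;
    }

    int main(void) {
      char payload[64] = {0};
      MsgHead head = {0, htonl(64u + (uint32_t)sizeof(MsgHead))};
      printf("first send len:%d\n", prepareSend(&head, payload, 64, 16));
      printf("second send len:%d\n", prepareSend(&head, payload, 64, 16)); /* no re-compress */
      return 0;
    }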
From 2da5a6106e7b16a92752d0c71543efbad16f3e9d Mon Sep 17 00:00:00 2001
From: Haojun Liao
Date: Tue, 22 Nov 2022 15:31:20 +0800
Subject: [PATCH 38/69] refactor: do some internal refactor.

---
 source/libs/executor/src/cachescanoperator.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c
index 922ed05653..0c97ce3f30 100644
--- a/source/libs/executor/src/cachescanoperator.c
+++ b/source/libs/executor/src/cachescanoperator.c
@@ -26,7 +26,7 @@
 #include "ttypes.h"
 
 static SSDataBlock* doScanCache(SOperatorInfo* pOperator);
-static void destroyLastrowScanOperator(void* param);
+static void destroyCacheScanOperator(void* param);
 static int32_t extractCacheScanSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInfo, int32_t** pSlotIds);
 static int32_t removeRedundantTsCol(SLastRowScanPhysiNode* pScanNode, SColMatchInfo* pColMatchInfo);
@@ -97,14 +97,14 @@ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SRe
   pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock);
 
   pOperator->fpSet =
-      createOperatorFpSet(operatorDummyOpenFn, doScanCache, NULL, destroyLastrowScanOperator, NULL);
+      createOperatorFpSet(operatorDummyOpenFn, doScanCache, NULL, destroyCacheScanOperator, NULL);
 
   pOperator->cost.openCost = 0;
   return pOperator;
 
 _error:
   pTaskInfo->code = code;
-  destroyLastrowScanOperator(pInfo);
+  destroyCacheScanOperator(pInfo);
   taosMemoryFree(pOperator);
   return NULL;
 }
@@ -234,7 +234,7 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
   }
 }
 
-void destroyLastrowScanOperator(void* param) {
+void destroyCacheScanOperator(void* param) {
   SLastrowScanInfo* pInfo = (SLastrowScanInfo*)param;
   blockDataDestroy(pInfo->pRes);
   blockDataDestroy(pInfo->pBufferredRes);

From bcac60ae3510f6950288a8054a5b6f905f5f158d Mon Sep 17 00:00:00 2001
From: kailixu
Date: Tue, 22 Nov 2022 15:32:51 +0800
Subject: [PATCH 39/69] fix: init/free SBatchDeleteReq for tsma

---
 source/dnode/vnode/src/sma/smaTimeRange.c | 5 ++++-
 source/dnode/vnode/src/tq/tqSink.c        | 3 +++
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/source/dnode/vnode/src/sma/smaTimeRange.c b/source/dnode/vnode/src/sma/smaTimeRange.c
index f5ba7b5014..062a678e0a 100644
--- a/source/dnode/vnode/src/sma/smaTimeRange.c
+++ b/source/dnode/vnode/src/sma/smaTimeRange.c
@@ -203,10 +203,13 @@ static int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char
     goto _err;
   }
 
-  SBatchDeleteReq deleteReq;
+  SBatchDeleteReq deleteReq = {0};
   SSubmitReq     *pSubmitReq = tqBlockToSubmit(pSma->pVnode, (const SArray *)msg, pTsmaStat->pTSchema,
                                                &pTsmaStat->pTSma->schemaTag, true, pTsmaStat->pTSma->dstTbUid,
                                                pTsmaStat->pTSma->dstTbName, &deleteReq);
+
+  taosArrayDestroy(deleteReq.deleteReqs);
+
   if (!pSubmitReq) {
     smaError("vgId:%d, failed to gen submit blk while tsma insert for smaIndex %" PRIi64 " since %s", SMA_VID(pSma),
diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c
index 27bfea0534..4eff52d4a2 100644
--- a/source/dnode/vnode/src/tq/tqSink.c
+++ b/source/dnode/vnode/src/tq/tqSink.c
@@ -25,6 +25,8 @@ int32_t tqBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBl
   SColumnInfoData* pGidCol = taosArrayGet(pDataBlock->pDataBlock, GROUPID_COLUMN_INDEX);
   SColumnInfoData* pTbNameCol = taosArrayGet(pDataBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX);
 
+
+  tqDebug("stream delete msg: row %d", totRow);
   for (int32_t row = 0; row < totRow; row++) {
@@ -236,6 +238,7 @@ SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem
     SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
     if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
       pDeleteReq->suid = suid;
+      pDeleteReq->deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq));
       tqBuildDeleteReq(pVnode, stbFullName, pDataBlock, pDeleteReq);
       continue;
     }

From d1249017dda6c02506f8c618bf552af22bfee3ac Mon Sep 17 00:00:00 2001
From: kailixu
Date: Tue, 22 Nov 2022 15:35:53 +0800
Subject: [PATCH 40/69] fix: init/free SBatchDeleteReq for tsma

---
 source/dnode/vnode/src/sma/smaTimeRange.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/source/dnode/vnode/src/sma/smaTimeRange.c b/source/dnode/vnode/src/sma/smaTimeRange.c
index 062a678e0a..a2c9484693 100644
--- a/source/dnode/vnode/src/sma/smaTimeRange.c
+++ b/source/dnode/vnode/src/sma/smaTimeRange.c
@@ -207,7 +207,7 @@ static int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char
   SSubmitReq     *pSubmitReq = tqBlockToSubmit(pSma->pVnode, (const SArray *)msg, pTsmaStat->pTSchema,
                                                &pTsmaStat->pTSma->schemaTag, true, pTsmaStat->pTSma->dstTbUid,
                                                pTsmaStat->pTSma->dstTbName, &deleteReq);
-
+  // TODO deleteReq
   taosArrayDestroy(deleteReq.deleteReqs);

From da535cd04ce37c72c16ad3cec1bd393c8737634b Mon Sep 17 00:00:00 2001
From: kailixu
Date: Tue, 22 Nov 2022 15:38:21 +0800
Subject: [PATCH 41/69] fix: init/free SBatchDeleteReq for tsma

---
 source/dnode/vnode/src/tq/tqSink.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c
index 4eff52d4a2..30cde5d475 100644
--- a/source/dnode/vnode/src/tq/tqSink.c
+++ b/source/dnode/vnode/src/tq/tqSink.c
@@ -25,8 +25,6 @@ int32_t tqBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBl
   SColumnInfoData* pGidCol = taosArrayGet(pDataBlock->pDataBlock, GROUPID_COLUMN_INDEX);
   SColumnInfoData* pTbNameCol = taosArrayGet(pDataBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX);
 
-
-  tqDebug("stream delete msg: row %d", totRow);
   for (int32_t row = 0; row < totRow; row++) {

From ea591977bcd22ed2163bf53bb22e348cbbb3d5ca Mon Sep 17 00:00:00 2001
From: wenzhouwww
Date: Tue, 22 Nov 2022 15:48:47 +0800
Subject: [PATCH 42/69] Update crash_gen_main.py

---
 tests/pytest/crash_gen/crash_gen_main.py | 39
+++++++++--------------- 1 file changed, 15 insertions(+), 24 deletions(-) diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 90d27c3b8d..f8c5f970c5 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -1978,15 +1978,8 @@ class TdSuperTable: def drop(self, dbc, skipCheck = False): dbName = self._dbName if self.exists(dbc) : # if myself exists - fullTableName = dbName + '.' + self._stName - - try: - dbc.execute("DROP TABLE {}".format(fullTableName)) - except taos.error.ProgrammingError as err: - errno = Helper.convertErrno(err.errno) - if errno in [1011,0x3F3,0x03f3,0x2662]: # table doesn't exist # Stream must be dropped first, SQL: DROP TABLE db_0.fs_table - pass - + fullTableName = dbName + '.' + self._stName + dbc.execute("DROP TABLE {}".format(fullTableName)) else: if not skipCheck: raise CrashGenError("Cannot drop non-existant super table: {}".format(self._stName)) @@ -2004,16 +1997,9 @@ class TdSuperTable: fullTableName = dbName + '.' + self._stName if dbc.existsSuperTable(self._stName): - if dropIfExists: - try: - dbc.execute("DROP TABLE {}".format(fullTableName)) - except taos.error.ProgrammingError as err: - errno = Helper.convertErrno(err.errno) - if errno in [1011,0x3F3,0x03f3,0x2662]: # table doesn't exist # Stream must be dropped first, SQL: DROP TABLE db_0.fs_table - pass - - pass - # dbc.execute("DROP TABLE {}".format(fullTableName)) + if dropIfExists: + dbc.execute("DROP TABLE {}".format(fullTableName)) + else: # error raise CrashGenError("Cannot create super table, already exists: {}".format(self._stName)) @@ -2049,13 +2035,12 @@ class TdSuperTable: except TmqError as e : pass - # consumer work only 30 sec + # consumer with random work life time_start = time.time() while 1: res = consumer.poll(1000) - if time.time() - time_start >5 : + if time.time() - time_start >random.randint(5,50) : break - # time.sleep(10) try: consumer.unsubscribe() except TmqError as e : @@ -2435,11 +2420,15 @@ class TaskDropSuperTable(StateTransitionTask): for i in tblSeq: regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i) try: - self.execWtSql(wt, "drop table {}.{}". 
format(self._db.getName(), regTableName)) # nRows always 0, like MySQL except taos.error.ProgrammingError as err: - pass + # correcting for strange error number scheme + errno2 = Helper.convertErrno(err.errno) + if (errno2 in [0x362]): # mnode invalid table name + isSuccess = False + Logging.debug("[DB] Acceptable error when dropping a table") + continue # try to delete next regular table if (not tickOutput): @@ -2957,6 +2946,8 @@ class ThreadStacks: # stack info for all threads print(" {}".format(frame.line)) stackFrame += 1 print("-----> End of Thread Info ----->\n") + if self.current_time-last_sql_commit_time >100: # dead lock occured + print("maybe dead locked of thread {} ".format(shortTid)) class ClientManager: def __init__(self): From 7a81c2e2d0de73a45eff5e0a79204d6ae51c7227 Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Tue, 22 Nov 2022 15:49:18 +0800 Subject: [PATCH 43/69] Update db.py From a6ac48700941b1084f386ac034b9f47ec8f1ccf5 Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Tue, 22 Nov 2022 15:50:32 +0800 Subject: [PATCH 44/69] Update crash_gen_main.py From a8e65a6a2b1b92c6048fbe9f9190af6d1b557fdd Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 22 Nov 2022 15:56:06 +0800 Subject: [PATCH 45/69] test: add asan case --- tests/parallel_test/cases.task | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index e4c6a40be8..25926e7da7 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -216,7 +216,7 @@ ,,y,script,./test.sh -f tsim/stream/drop_stream.sim ,,y,script,./test.sh -f tsim/stream/fillHistoryBasic1.sim ,,y,script,./test.sh -f tsim/stream/fillHistoryBasic2.sim -,,n,script,./test.sh -f tsim/stream/fillHistoryBasic3.sim +,,y,script,./test.sh -f tsim/stream/fillHistoryBasic3.sim ,,y,script,./test.sh -f tsim/stream/distributeInterval0.sim ,,y,script,./test.sh -f tsim/stream/distributeIntervalRetrive0.sim ,,y,script,./test.sh -f tsim/stream/distributeSession0.sim @@ -227,7 +227,7 @@ ,,y,script,./test.sh -f tsim/stream/triggerSession0.sim ,,y,script,./test.sh -f tsim/stream/partitionby.sim ,,y,script,./test.sh -f tsim/stream/partitionby1.sim -,,n,script,./test.sh -f tsim/stream/schedSnode.sim +,,y,script,./test.sh -f tsim/stream/schedSnode.sim ,,y,script,./test.sh -f tsim/stream/windowClose.sim ,,y,script,./test.sh -f tsim/stream/ignoreExpiredData.sim ,,y,script,./test.sh -f tsim/stream/sliding.sim @@ -800,7 +800,7 @@ ,,,system-test,python3 ./test.py -f 2-query/function_stateduration.py -Q 2 ,,,system-test,python3 ./test.py -f 2-query/statecount.py -Q 2 ,,,system-test,python3 ./test.py -f 2-query/tail.py -Q 2 -,,,system-test,python3 ./test.py -f 2-query/ttl_comment.py -Q 2 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ttl_comment.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_count.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_max.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_min.py -Q 2 @@ -813,7 +813,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/irate.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -Q 2 -,,,system-test,python3 ./test.py -f 2-query/max_partition.py -Q 2 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -Q 2 ,,,system-test,python3 ./test.py -f 
2-query/last_row.py -Q 2 ,,,system-test,python3 ./test.py -f 2-query/tsbsQuery.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml.py -Q 2 @@ -860,8 +860,8 @@ ,,,system-test,python3 ./test.py -f 2-query/json_tag.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/top.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/bottom.py -Q 3 -,,,system-test,python3 ./test.py -f 2-query/percentile.py -Q 3 -,,,system-test,python3 ./test.py -f 2-query/apercentile.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/percentile.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/apercentile.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ceil.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/floor.py -Q 3 @@ -874,7 +874,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tan.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arcsin.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arccos.py -Q 3 -,,,system-test,python3 ./test.py -f 2-query/arctan.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arctan.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 3 ,,,system-test,python3 ./test.py -f 2-query/nestedQuery.py -Q 3 ,,,system-test,python3 ./test.py -f 2-query/nestedQuery_str.py -Q 3 @@ -893,11 +893,11 @@ ,,,system-test,python3 ./test.py -f 2-query/function_stateduration.py -Q 3 ,,,system-test,python3 ./test.py -f 2-query/statecount.py -Q 3 ,,,system-test,python3 ./test.py -f 2-query/tail.py -Q 3 -,,,system-test,python3 ./test.py -f 2-query/ttl_comment.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ttl_comment.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_count.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_max.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_min.py -Q 3 -,,,system-test,python3 ./test.py -f 2-query/distribute_agg_sum.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_sum.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_spread.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_apercentile.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_avg.py -Q 3 @@ -919,7 +919,7 @@ ,,,system-test,python3 ./test.py -f 2-query/rtrim.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/length.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/char_length.py -Q 4 -,,,system-test,python3 ./test.py -f 2-query/upper.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/upper.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/lower.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/join.py -Q 4 ,,,system-test,python3 ./test.py -f 2-query/join2.py -Q 4 @@ -980,14 +980,14 @@ ,,,system-test,python3 ./test.py -f 2-query/csum.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -Q 4 ,,,system-test,python3 ./test.py -f 2-query/sample.py -Q 4 -,,,system-test,python3 ./test.py -f 2-query/cast.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cast.py -Q 4 ,,,system-test,python3 ./test.py -f 2-query/function_diff.py -Q 4 ,,,system-test,python3 ./test.py -f 2-query/unique.py -Q 4 ,,y,system-test,./pytest.sh python3 
./test.py -f 2-query/stateduration.py -Q 4 ,,,system-test,python3 ./test.py -f 2-query/function_stateduration.py -Q 4 ,,,system-test,python3 ./test.py -f 2-query/statecount.py -Q 4 ,,,system-test,python3 ./test.py -f 2-query/tail.py -Q 4 -,,,system-test,python3 ./test.py -f 2-query/ttl_comment.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ttl_comment.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_count.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_max.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_min.py -Q 4 @@ -995,7 +995,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_spread.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_apercentile.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_avg.py -Q 4 -,,,system-test,python3 ./test.py -f 2-query/distribute_agg_stddev.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_stddev.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/twa.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/irate.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 4 From e42f895fea91dfcd3f2ab677b3f629378e3bdfd6 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 22 Nov 2022 16:07:04 +0800 Subject: [PATCH 46/69] fix: fix select null crash issue and asan issues --- source/common/src/tdatablock.c | 2 +- source/libs/executor/inc/tsort.h | 11 ++++--- source/libs/executor/src/scanoperator.c | 1 + source/libs/executor/src/sortoperator.c | 8 +++-- source/libs/executor/src/tsort.c | 44 +++++++++++++++++++++---- source/util/src/tpagedbuf.c | 2 +- 6 files changed, 52 insertions(+), 16 deletions(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 3c2a5377e3..2caa4ed7a4 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -36,7 +36,7 @@ int32_t colDataGetFullLength(const SColumnInfoData* pColumnInfoData, int32_t num if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { return pColumnInfoData->varmeta.length + sizeof(int32_t) * numOfRows; } else { - return pColumnInfoData->info.bytes * numOfRows + BitmapLen(numOfRows); + return ((pColumnInfoData->info.type == TSDB_DATA_TYPE_NULL) ? 
0 : pColumnInfoData->info.bytes * numOfRows) + BitmapLen(numOfRows); } } diff --git a/source/libs/executor/inc/tsort.h b/source/libs/executor/inc/tsort.h index 51440a7f59..cff568aebc 100644 --- a/source/libs/executor/inc/tsort.h +++ b/source/libs/executor/inc/tsort.h @@ -36,12 +36,13 @@ typedef struct SMultiMergeSource { typedef struct SSortSource { SMultiMergeSource src; - union { - struct { - SArray* pageIdList; - int32_t pageIndex; - }; + struct { + SArray* pageIdList; + int32_t pageIndex; + }; + struct { void* param; + bool onlyRef; }; } SSortSource; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 916b6df969..7128e0a2c9 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -4532,6 +4532,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { SSortSource* ps = taosMemoryCalloc(1, sizeof(SSortSource)); STableMergeScanSortSourceParam* param = taosArrayGet(pInfo->sortSourceParams, i); ps->param = param; + ps->onlyRef = true; tsortAddSource(pInfo->pSortHandle, ps); } diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index 14e3163455..0e7644151a 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -176,10 +176,10 @@ int32_t doOpenSortOperator(SOperatorInfo* pOperator) { SSortSource* ps = taosMemoryCalloc(1, sizeof(SSortSource)); ps->param = pOperator->pDownstream[0]; + ps->onlyRef = true; tsortAddSource(pInfo->pSortHandle, ps); int32_t code = tsortOpen(pInfo->pSortHandle); - taosMemoryFreeClear(ps); if (code != TSDB_CODE_SUCCESS) { T_LONG_JMP(pTaskInfo->env, terrno); @@ -377,10 +377,10 @@ int32_t beginSortGroup(SOperatorInfo* pOperator) { param->childOpInfo = pOperator->pDownstream[0]; param->grpSortOpInfo = pInfo; ps->param = param; + ps->onlyRef = false; tsortAddSource(pInfo->pCurrSortHandle, ps); int32_t code = tsortOpen(pInfo->pCurrSortHandle); - taosMemoryFreeClear(ps); if (code != TSDB_CODE_SUCCESS) { T_LONG_JMP(pTaskInfo->env, terrno); @@ -471,6 +471,9 @@ void destroyGroupSortOperatorInfo(void* param) { taosArrayDestroy(pInfo->pSortInfo); taosArrayDestroy(pInfo->matchInfo.pList); + tsortDestroySortHandle(pInfo->pCurrSortHandle); + pInfo->pCurrSortHandle = NULL; + taosMemoryFreeClear(param); } @@ -563,6 +566,7 @@ int32_t doOpenMultiwayMergeOperator(SOperatorInfo* pOperator) { for (int32_t i = 0; i < pOperator->numOfDownstream; ++i) { SSortSource* ps = taosMemoryCalloc(1, sizeof(SSortSource)); ps->param = pOperator->pDownstream[i]; + ps->onlyRef = true; tsortAddSource(pInfo->pSortHandle, ps); } diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index 9c10b51b1f..1c31b550c6 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -110,6 +110,22 @@ static int32_t sortComparCleanup(SMsortComparParam* cmpParam) { return TSDB_CODE_SUCCESS; } +void tsortClearOrderdSource(SArray *pOrderedSource) { + for (size_t i = 0; i < taosArrayGetSize(pOrderedSource); i++) { + SSortSource** pSource = taosArrayGet(pOrderedSource, i); + if (NULL == *pSource) { + continue; + } + + if ((*pSource)->param && !(*pSource)->onlyRef) { + taosMemoryFree((*pSource)->param); + } + taosMemoryFreeClear(*pSource); + } + + taosArrayClear(pOrderedSource); +} + void tsortDestroySortHandle(SSortHandle* pSortHandle) { if (pSortHandle == NULL) { return; @@ -123,10 +139,8 @@ void tsortDestroySortHandle(SSortHandle* pSortHandle) { 
destroyDiskbasedBuf(pSortHandle->pBuf); taosMemoryFreeClear(pSortHandle->idStr); blockDataDestroy(pSortHandle->pDataBlock); - for (size_t i = 0; i < taosArrayGetSize(pSortHandle->pOrderedSource); i++) { - SSortSource** pSource = taosArrayGet(pSortHandle->pOrderedSource, i); - taosMemoryFreeClear(*pSource); - } + + tsortClearOrderdSource(pSortHandle->pOrderedSource); taosArrayDestroy(pSortHandle->pOrderedSource); taosMemoryFreeClear(pSortHandle); } @@ -561,7 +575,7 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) { } } - taosArrayClear(pHandle->pOrderedSource); + tsortClearOrderdSource(pHandle->pOrderedSource); taosArrayAddAll(pHandle->pOrderedSource, pResList); taosArrayDestroy(pResList); @@ -598,8 +612,11 @@ static int32_t createInitialSources(SSortHandle* pHandle) { size_t sortBufSize = pHandle->numOfPages * pHandle->pageSize; if (pHandle->type == SORT_SINGLESOURCE_SORT) { - SSortSource* source = taosArrayGetP(pHandle->pOrderedSource, 0); - taosArrayClear(pHandle->pOrderedSource); + SSortSource** pSource = taosArrayGet(pHandle->pOrderedSource, 0); + SSortSource* source = *pSource; + *pSource = NULL; + + tsortClearOrderdSource(pHandle->pOrderedSource); while (1) { SSDataBlock* pBlock = pHandle->fetchfp(source->param); @@ -623,6 +640,10 @@ static int32_t createInitialSources(SSortHandle* pHandle) { int32_t code = blockDataMerge(pHandle->pDataBlock, pBlock); if (code != 0) { + if (source->param && !source->onlyRef) { + taosMemoryFree(source->param); + } + taosMemoryFree(source); return code; } @@ -632,6 +653,10 @@ static int32_t createInitialSources(SSortHandle* pHandle) { int64_t p = taosGetTimestampUs(); code = blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo); if (code != 0) { + if (source->param && !source->onlyRef) { + taosMemoryFree(source->param); + } + taosMemoryFree(source); return code; } @@ -642,6 +667,11 @@ static int32_t createInitialSources(SSortHandle* pHandle) { } } + if (source->param && !source->onlyRef) { + taosMemoryFree(source->param); + } + taosMemoryFree(source); + if (pHandle->pDataBlock != NULL && pHandle->pDataBlock->info.rows > 0) { size_t size = blockDataGetSize(pHandle->pDataBlock); diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c index c81888eb95..79ea10552c 100644 --- a/source/util/src/tpagedbuf.c +++ b/source/util/src/tpagedbuf.c @@ -107,7 +107,7 @@ static uint64_t allocatePositionInFile(SDiskbasedBuf* pBuf, size_t size) { static void setPageNotInBuf(SPageInfo* pPageInfo) { pPageInfo->pData = NULL; } -static FORCE_INLINE size_t getAllocPageSize(int32_t pageSize) { return pageSize + POINTER_BYTES + 2; } +static FORCE_INLINE size_t getAllocPageSize(int32_t pageSize) { return pageSize + POINTER_BYTES + sizeof(SFilePage); } /** * +--------------------------+-------------------+--------------+ From ca3c1f714749851f99f26e8d4f6483c57f30a041 Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Tue, 22 Nov 2022 16:08:14 +0800 Subject: [PATCH 47/69] add docker run exec script --- tests/pytest/docker_exec_service.sh | 31 +++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 tests/pytest/docker_exec_service.sh diff --git a/tests/pytest/docker_exec_service.sh b/tests/pytest/docker_exec_service.sh new file mode 100644 index 0000000000..4156a0bae5 --- /dev/null +++ b/tests/pytest/docker_exec_service.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +mkdir -p /data/wz/crash_gen_logs/ +logdir='/data/wz/crash_gen_logs/' +date_tag=`date +%Y%m%d-%H%M%S` +hostname='vm_valgrind_' + +for i in {1..50} +do + echo $i + # create docker and 
start crash_gen + log_dir=${logdir}${hostname}${date_tag}_${i} + docker run -d --hostname=${hostname}${date_tag}_${i} --name ${hostname}${date_tag}_${i} --privileged -v ${log_dir}:/corefile/ -- crash_gen:v1.0 sleep 99999999999999 + echo create docker ${hostname}${date_tag}_${i} + docker exec -d ${hostname}${date_tag}_${i} sh -c 'rm -rf /home/taos-connector-python' + docker cp /data/wz/TDengine ${hostname}${date_tag}_${i}:/home/TDengine + docker cp /data/wz/taos-connector-python ${hostname}${date_tag}_${i}:/home/taos-connector-python + echo copy TDengine in container done! + docker exec ${hostname}${date_tag}_${i} sh -c 'sh /home/TDengine/tests/pytest/auto_run_valgrind.sh ' + if [ $? -eq 0 ] + then + echo crash_gen exit as expect , run success + + # # clear docker which success + docker stop ${hostname}${date_tag}_${i} + docker rm -f ${hostname}${date_tag}_${i} + else + docker stop ${hostname}${date_tag}_${i} + echo crash_gen exit error , run failed + fi +done \ No newline at end of file From 0937a2f6b928a408f460716934e8263aad1fb96c Mon Sep 17 00:00:00 2001 From: Pan YANG Date: Tue, 22 Nov 2022 16:23:49 +0800 Subject: [PATCH 48/69] docs: add TDengine Knowledge Map --- docs/en/05-get-started/index.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/en/05-get-started/index.md b/docs/en/05-get-started/index.md index d80ec02268..e42c9b5a41 100644 --- a/docs/en/05-get-started/index.md +++ b/docs/en/05-get-started/index.md @@ -20,6 +20,17 @@ import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; ``` +## Study TDengine Knowledge Map + +The TDengine Knowledge Map covers the various knowledge points of TDengine, revealing the invocation relationships and data flow between various conceptual entities. Learning and understanding the TDengine Knowledge Map will help you quickly master the TDengine knowledge system. + +

+[figure omitted: TDengine Knowledge Map. Caption: Diagram 1. TDengine Knowledge Map]
+ ### Join TDengine Community From 2020ea0a7683d96bedb6361d44737518707abcaf Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Sun, 20 Nov 2022 10:15:26 +0800 Subject: [PATCH 49/69] fix:stream fill crash --- source/common/src/tdatablock.c | 1 + source/libs/executor/inc/executorimpl.h | 17 ++++++++++++++ source/libs/executor/inc/tfill.h | 16 ------------- source/libs/executor/src/executorimpl.c | 2 ++ source/libs/executor/src/tfill.c | 31 +++++++++++-------------- 5 files changed, 34 insertions(+), 33 deletions(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 3c2a5377e3..c2cdfc056a 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1962,6 +1962,7 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf) memset(pBuf, 0, sizeof(pBuf)); char* pData = colDataGetVarData(pColInfoData, j); int32_t dataSize = TMIN(sizeof(pBuf), varDataLen(pData)); + dataSize = TMIN(dataSize, 50); memcpy(pBuf, varDataVal(pData), dataSize); len += snprintf(dumpBuf + len, size - len, " %15s |", pBuf); if (len >= size - 1) return dumpBuf; diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index f179c7bd41..7dc9e9ab03 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -754,6 +754,23 @@ typedef struct SStreamPartitionOperatorInfo { SSDataBlock* pDelRes; } SStreamPartitionOperatorInfo; +typedef struct SStreamFillSupporter { + int32_t type; // fill type + SInterval interval; + SResultRowData prev; + SResultRowData cur; + SResultRowData next; + SResultRowData nextNext; + SFillColInfo* pAllColInfo; // fill exprs and not fill exprs + SExprSupp notFillExprSup; + int32_t numOfAllCols; // number of all exprs, including the tags columns + int32_t numOfFillCols; + int32_t numOfNotFillCols; + int32_t rowSize; + SSHashObj* pResMap; + bool hasDelete; +} SStreamFillSupporter; + typedef struct SStreamFillOperatorInfo { SStreamFillSupporter* pFillSup; SSDataBlock* pRes; diff --git a/source/libs/executor/inc/tfill.h b/source/libs/executor/inc/tfill.h index 2d8df81dbd..b0017fef50 100644 --- a/source/libs/executor/inc/tfill.h +++ b/source/libs/executor/inc/tfill.h @@ -111,22 +111,6 @@ typedef struct SStreamFillInfo { int32_t delIndex; } SStreamFillInfo; -typedef struct SStreamFillSupporter { - int32_t type; // fill type - SInterval interval; - SResultRowData prev; - SResultRowData cur; - SResultRowData next; - SResultRowData nextNext; - SFillColInfo* pAllColInfo; // fill exprs and not fill exprs - int32_t numOfAllCols; // number of all exprs, including the tags columns - int32_t numOfFillCols; - int32_t numOfNotFillCols; - int32_t rowSize; - SSHashObj* pResMap; - bool hasDelete; -} SStreamFillSupporter; - int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, int64_t ekey, int32_t maxNumOfRows); void taosFillSetStartInfo(struct SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey); diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index a7e955100c..20c39ec921 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -2056,6 +2056,8 @@ void destroyExprInfo(SExprInfo* pExpr, int32_t numOfExprs) { for (int32_t j = 0; j < pExprInfo->base.numOfParams; ++j) { if (pExprInfo->base.pParam[j].type == FUNC_PARAM_TYPE_COLUMN) { taosMemoryFreeClear(pExprInfo->base.pParam[j].pCol); + } else if (pExprInfo->base.pParam[j].type == FUNC_PARAM_TYPE_VALUE) { + 
taosVariantDestroy(&pExprInfo->base.pParam[j].param); } } diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c index c41376b2dc..9908f35818 100644 --- a/source/libs/executor/src/tfill.c +++ b/source/libs/executor/src/tfill.c @@ -709,6 +709,7 @@ void* destroyStreamFillSupporter(SStreamFillSupporter* pFillSup) { pFillSup->pResMap = NULL; releaseOutputBuf(NULL, NULL, (SResultRow*)pFillSup->cur.pRowVal); pFillSup->cur.pRowVal = NULL; + cleanupExprSupp(&pFillSup->notFillExprSup); taosMemoryFree(pFillSup); return NULL; @@ -1417,25 +1418,13 @@ static void doApplyStreamScalarCalculation(SOperatorInfo* pOperator, SSDataBlock blockDataEnsureCapacity(pDstBlock, pSrcBlock->info.rows); setInputDataBlock(pSup, pSrcBlock, TSDB_ORDER_ASC, MAIN_SCAN, false); projectApplyFunctions(pSup->pExprInfo, pDstBlock, pSrcBlock, pSup->pCtx, pSup->numOfExprs, NULL); + + pDstBlock->info.rows = 0; + pSup = &pInfo->pFillSup->notFillExprSup; + setInputDataBlock(pSup, pSrcBlock, TSDB_ORDER_ASC, MAIN_SCAN, false); + projectApplyFunctions(pSup->pExprInfo, pDstBlock, pSrcBlock, pSup->pCtx, pSup->numOfExprs, NULL); pDstBlock->info.groupId = pSrcBlock->info.groupId; - SColumnInfoData* pDst = taosArrayGet(pDstBlock->pDataBlock, pInfo->primaryTsCol); - SColumnInfoData* pSrc = taosArrayGet(pSrcBlock->pDataBlock, pInfo->primarySrcSlotId); - colDataAssign(pDst, pSrc, pDstBlock->info.rows, &pDstBlock->info); - - int32_t numOfNotFill = pInfo->pFillSup->numOfAllCols - pInfo->pFillSup->numOfFillCols; - for (int32_t i = 0; i < numOfNotFill; ++i) { - SFillColInfo* pCol = &pInfo->pFillSup->pAllColInfo[i + pInfo->pFillSup->numOfFillCols]; - ASSERT(pCol->notFillCol); - - SExprInfo* pExpr = pCol->pExpr; - int32_t srcSlotId = pExpr->base.pParam[0].pCol->slotId; - int32_t dstSlotId = pExpr->base.resSchema.slotId; - - SColumnInfoData* pDst1 = taosArrayGet(pDstBlock->pDataBlock, dstSlotId); - SColumnInfoData* pSrc1 = taosArrayGet(pSrcBlock->pDataBlock, srcSlotId); - colDataAssign(pDst1, pSrc1, pDstBlock->info.rows, &pDstBlock->info); - } blockDataUpdateTsWindow(pDstBlock, pInfo->primaryTsCol); } @@ -1577,6 +1566,14 @@ static SStreamFillSupporter* initStreamFillSup(SStreamFillPhysiNode* pPhyFillNod destroyStreamFillSupporter(pFillSup); return NULL; } + + SExprInfo* noFillExpr = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &numOfNotFillCols); + code = initExprSupp(&pFillSup->notFillExprSup, noFillExpr, numOfNotFillCols); + if (code != TSDB_CODE_SUCCESS) { + destroyStreamFillSupporter(pFillSup); + return NULL; + } + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); pFillSup->pResMap = tSimpleHashInit(16, hashFn); pFillSup->hasDelete = false; From ac0ba146b320cc3c6003c7d12f6106be99ac9445 Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Tue, 22 Nov 2022 16:33:10 +0800 Subject: [PATCH 50/69] fix/release add taosx --- packaging/release.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/release.sh b/packaging/release.sh index c07331a0df..a3334e734d 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -221,12 +221,12 @@ if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" = # community-version compile cmake ../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} 
${allocator_macro} elif [ "$verMode" == "cloud" ]; then - cmake ../../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DBUILD_CLOUD=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro} + cmake ../../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DBUILD_TAOSX=true -DBUILD_CLOUD=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro} elif [ "$verMode" == "cluster" ]; then if [[ "$dbName" != "taos" ]]; then replace_enterprise_$dbName fi - cmake ../../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro} + cmake ../../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DBUILD_TAOSX=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro} fi else echo "input cpuType=${cpuType} error!!!" From 6f5d90658ef0ccfad5c0aeefe02369539581d76d Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Tue, 22 Nov 2022 16:40:18 +0800 Subject: [PATCH 51/69] fix(wal): add debug log --- source/libs/wal/src/walWrite.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index caae669e4a..30bdc4eddc 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -324,12 +324,17 @@ int32_t walEndSnapshot(SWal *pWal) { // find files safe to delete SWalFileInfo *pInfo = taosArraySearch(pWal->fileInfoSet, &tmp, compareWalFileInfo, TD_LE); if (pInfo) { + SWalFileInfo *pLastFileInfo = taosArrayGetLast(pWal->fileInfoSet); + wDebug("vgId:%d, wal search found file info: first:%" PRId64 "last:%" PRId64, pWal->cfg.vgId, pInfo->firstVer, + pInfo->lastVer); if (ver >= pInfo->lastVer) { - //pInfo--; pInfo++; + wDebug("vgId:%d, wal remove advance one file: first:%" PRId64 "last:%" PRId64, pWal->cfg.vgId, pInfo->firstVer, + pInfo->lastVer); } - if (POINTER_DISTANCE(pInfo, pWal->fileInfoSet->pData) > 0) { - wDebug("vgId:%d, wal end remove for %" PRId64, pWal->cfg.vgId, pInfo->firstVer); + if (pInfo <= pLastFileInfo) { + wDebug("vgId:%d, wal end remove for first:%" PRId64 "last:%" PRId64, pWal->cfg.vgId, pInfo->firstVer, + pInfo->lastVer); } else { wDebug("vgId:%d, wal no remove", pWal->cfg.vgId); } From 42e6f59e49ec6bd9869ac15919dec9efe367c1d1 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 22 Nov 2022 16:53:58 +0800 Subject: [PATCH 52/69] avoid invalid vgroup id --- source/libs/transport/src/transSvr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index 8dd3628c5f..f093d84db6 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -398,7 +398,7 @@ 
static int uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) { pHead->magicNum = htonl(TRANS_MAGIC_NUM); // handle invalid drop_task resp, TD-20098 - if (pMsg->msgType == TDMT_SCH_DROP_TASK && pMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) { + if (pConn->inType == TDMT_SCH_DROP_TASK && pMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) { transQueuePop(&pConn->srvMsgs); destroySmsg(smsg); return -1; From ee18f991aa3b108bb23e19df4401a2f6406533e3 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Tue, 22 Nov 2022 16:54:42 +0800 Subject: [PATCH 53/69] test: add test case for tmq --- tests/parallel_test/cases.task | 1 + tests/system-test/7-tmq/tmqDnodeRestart1.py | 10 +++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index e4c6a40be8..835ca61fd6 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -704,6 +704,7 @@ ,,,system-test,python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py ,,,system-test,python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py ,,,system-test,python3 ./test.py -f 7-tmq/tmqDnodeRestart.py +,,,system-test,python3 ./test.py -f 7-tmq/tmqDnodeRestart1.py ,,,system-test,python3 ./test.py -f 7-tmq/tmqUpdate-1ctb.py ,,,system-test,python3 ./test.py -f 7-tmq/tmqUpdateWithConsume.py ,,,system-test,python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot0.py diff --git a/tests/system-test/7-tmq/tmqDnodeRestart1.py b/tests/system-test/7-tmq/tmqDnodeRestart1.py index 714f10b362..982cc0a631 100644 --- a/tests/system-test/7-tmq/tmqDnodeRestart1.py +++ b/tests/system-test/7-tmq/tmqDnodeRestart1.py @@ -149,6 +149,8 @@ class TDTestCase: tmqCom.waitSubscriptionExit(tdSql, topicFromStb) tdSql.query("drop topic %s"%topicFromStb) + + tmqCom.stopTmqSimProcess(processorName="tmq_sim") tdLog.printNoPrefix("======== test case 1 end ...... ") @@ -178,6 +180,8 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() tdLog.info("create topics from stb") topicFromDb = 'topic_db' @@ -203,10 +207,10 @@ class TDTestCase: tmqCom.getStartCommitNotifyFromTmqsim('cdb',1) tdLog.info("create some new child table and insert data for latest mode") - paraDict["batchNum"] = 100 + paraDict["batchNum"] = 10 paraDict["ctbPrefix"] = 'newCtb' - paraDict["ctbNum"] = 10 - paraDict["rowsPerTbl"] = 10 + paraDict["ctbNum"] = 100 + paraDict["rowsPerTbl"] = 100 tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"]) tdLog.info("================= restart dnode ===========================") From 067a568d85e0487212c21f436d281cb8b07456ae Mon Sep 17 00:00:00 2001 From: Pan YANG Date: Tue, 22 Nov 2022 17:01:46 +0800 Subject: [PATCH 54/69] docs: fix heading level --- docs/en/05-get-started/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/05-get-started/index.md b/docs/en/05-get-started/index.md index e42c9b5a41..251581e98f 100644 --- a/docs/en/05-get-started/index.md +++ b/docs/en/05-get-started/index.md @@ -31,7 +31,7 @@ The TDengine Knowledge Map covers the various knowledge points of TDengine, reve -### Join TDengine Community +## Join TDengine Community
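A note on the transport change in PATCH 52 above: the old guard keyed on pMsg->msgType, but the type stamped on an outgoing response is not guaranteed to match the request that produced it, so invalid drop-task responses could slip past the drop logic. Keying on the request type remembered on the connection (pConn->inType) makes the check reliable. Below is a minimal sketch of that idea, using placeholder type names and constant values rather than the real transport structures:

```c
#include <stdbool.h>
#include <stdint.h>

/* illustrative stand-ins, not TDengine definitions */
enum { TDMT_SCH_DROP_TASK = 1 };
enum { TSDB_CODE_VND_INVALID_VGROUP_ID = 2 };

typedef struct { int32_t inType; } SConnSketch;                /* request type seen on arrival */
typedef struct { int32_t msgType; int32_t code; } SMsgSketch;  /* outgoing response */

/* drop the response instead of sending it when a drop-task request
   failed only because the vgroup id was already gone (TD-20098) */
static bool shouldDropResp(const SConnSketch *pConn, const SMsgSketch *pMsg) {
  return pConn->inType == TDMT_SCH_DROP_TASK &&
         pMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID;
}
```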
From 5b8d3d57bb2caa718cc61477e596d6b34711474d Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Tue, 22 Nov 2022 17:01:59 +0800 Subject: [PATCH 55/69] fix:Direct leak in timewindowoperator.c --- source/libs/executor/src/timewindowoperator.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 1ec7d6d26b..dd02ce9cd4 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1618,6 +1618,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) { nodesDestroyNode((SNode*)pInfo->pPhyNode); colDataDestroy(&pInfo->twAggSup.timeWindowData); cleanupGroupResInfo(&pInfo->groupResInfo); + cleanupExprSupp(&pInfo->scalarSupp); taosMemoryFreeClear(param); } @@ -5363,15 +5364,6 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys pInfo->primaryTsIndex = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId; initResultSizeInfo(&pOperator->resultInfo, 4096); - if (pIntervalPhyNode->window.pExprs != NULL) { - int32_t numOfScalar = 0; - SExprInfo* pScalarExprInfo = createExprInfo(pIntervalPhyNode->window.pExprs, NULL, &numOfScalar); - code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - } - size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; code = initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str); if (code != TSDB_CODE_SUCCESS) { From 13446718697bb51ac11ba31f3a2ef08187fd6bef Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Tue, 22 Nov 2022 17:07:39 +0800 Subject: [PATCH 56/69] fix(wal): add debug log --- source/libs/wal/src/walWrite.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index 30bdc4eddc..c5c8173f63 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -325,28 +325,35 @@ int32_t walEndSnapshot(SWal *pWal) { SWalFileInfo *pInfo = taosArraySearch(pWal->fileInfoSet, &tmp, compareWalFileInfo, TD_LE); if (pInfo) { SWalFileInfo *pLastFileInfo = taosArrayGetLast(pWal->fileInfoSet); - wDebug("vgId:%d, wal search found file info: first:%" PRId64 "last:%" PRId64, pWal->cfg.vgId, pInfo->firstVer, + wDebug("vgId:%d, wal search found file info: first:%" PRId64 " last:%" PRId64, pWal->cfg.vgId, pInfo->firstVer, pInfo->lastVer); if (ver >= pInfo->lastVer) { pInfo++; - wDebug("vgId:%d, wal remove advance one file: first:%" PRId64 "last:%" PRId64, pWal->cfg.vgId, pInfo->firstVer, + wDebug("vgId:%d, wal remove advance one file: first:%" PRId64 " last:%" PRId64, pWal->cfg.vgId, pInfo->firstVer, pInfo->lastVer); } if (pInfo <= pLastFileInfo) { - wDebug("vgId:%d, wal end remove for first:%" PRId64 "last:%" PRId64, pWal->cfg.vgId, pInfo->firstVer, + wDebug("vgId:%d, wal end remove for first:%" PRId64 " last:%" PRId64, pWal->cfg.vgId, pInfo->firstVer, pInfo->lastVer); } else { wDebug("vgId:%d, wal no remove", pWal->cfg.vgId); } // iterate files, until the searched result for (SWalFileInfo *iter = pWal->fileInfoSet->pData; iter < pInfo; iter++) { - if ((pWal->cfg.retentionSize != -1 && newTotSize > pWal->cfg.retentionSize) || - (pWal->cfg.retentionPeriod != -1 && iter->closeTs + pWal->cfg.retentionPeriod > ts)) { + wDebug("vgId:%d, wal check remove file %" PRId64 "(file size %" PRId64 " close ts %" PRId64 + "), new tot size %" PRId64, + 
pWal->cfg.vgId, iter->firstVer, iter->fileSize, iter->closeTs, newTotSize); + if (((pWal->cfg.retentionSize == 0) || (pWal->cfg.retentionSize != -1 && newTotSize > pWal->cfg.retentionSize)) || + ((pWal->cfg.retentionPeriod == 0) || + (pWal->cfg.retentionPeriod != -1 && iter->closeTs + pWal->cfg.retentionPeriod > ts))) { // delete according to file size or close time + wDebug("vgId:%d, check pass", pWal->cfg.vgId); deleteCnt++; newTotSize -= iter->fileSize; } + wDebug("vgId:%d, check not pass", pWal->cfg.vgId); } + wDebug("vgId:%d, wal should delete %d files", pWal->cfg.vgId, deleteCnt); int32_t actualDelete = 0; char fnameStr[WAL_FILE_LEN]; // remove file From 2c37b1eb6537e4b3bb43b6bfbad5d1361f2637b7 Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Tue, 22 Nov 2022 17:30:32 +0800 Subject: [PATCH 57/69] Update 06-select.md --- docs/zh/12-taos-sql/06-select.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md index 9d4faae23a..3b681f401c 100644 --- a/docs/zh/12-taos-sql/06-select.md +++ b/docs/zh/12-taos-sql/06-select.md @@ -302,7 +302,7 @@ SELECT TIMEZONE(); ### 语法 ```txt -WHERE (column|tbname) **match/MATCH/nmatch/NMATCH** _regex_ +WHERE (column|tbname) match/MATCH/nmatch/NMATCH _regex_ ``` ### 正则表达式规范 From 7715dab468c61602acefca092b4cfa64f2d9561a Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Tue, 22 Nov 2022 17:31:07 +0800 Subject: [PATCH 58/69] Update 06-select.md --- docs/en/12-taos-sql/06-select.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/12-taos-sql/06-select.md b/docs/en/12-taos-sql/06-select.md index 0c55578efa..c087a9e9fb 100644 --- a/docs/en/12-taos-sql/06-select.md +++ b/docs/en/12-taos-sql/06-select.md @@ -301,7 +301,7 @@ SELECT TIMEZONE(); ### Syntax ```txt -WHERE (column|tbname) **match/MATCH/nmatch/NMATCH** _regex_ +WHERE (column|tbname) match/MATCH/nmatch/NMATCH _regex_ ``` ### Specification From 7c7b2f1cef5539dc2d8f58bfba9189f0ed98cbaa Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 22 Nov 2022 18:06:01 +0800 Subject: [PATCH 59/69] fix(query): fix memory leak. 
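The leak fixed here came from struct-assigning a source column value over a destination that owns a preallocated variable-length buffer: the assignment clobbered the destination's pData pointer, so the owned buffer could leak and the result could alias memory owned by the reader. The sketch below illustrates the copy discipline the diff adopts; SValueSketch and isVarType are simplified stand-ins for the real SColVal machinery, not TDengine APIs.

```c
#include <stdint.h>
#include <string.h>

typedef struct {
  int16_t  type;
  int8_t   flag;
  int16_t  cid;
  uint32_t nData;  /* payload length for variable-length types */
  uint8_t *pData;  /* preallocated buffer owned by the destination */
} SValueSketch;

/* placeholder predicate; the real code uses IS_VAR_DATA_TYPE() */
static int isVarType(int16_t type) { return type >= 100; }

static void copyColVal(SValueSketch *dst, const SValueSketch *src) {
  if (!isVarType(src->type)) {
    /* fixed-size values carry no heap payload in this sketch,
       so plain assignment is safe */
    *dst = *src;
  } else {
    /* copy the payload into dst's own buffer instead of adopting
       src->pData, then mirror the metadata field by field */
    memcpy(dst->pData, src->pData, src->nData);
    dst->nData = src->nData;
    dst->type  = src->type;
    dst->flag  = src->flag;
    dst->cid   = src->cid;
  }
}
```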
--- source/dnode/vnode/src/tsdb/tsdbCacheRead.c | 15 ++++++++++----- source/dnode/vnode/src/tsdb/tsdbRead.c | 10 +++++----- source/libs/executor/src/cachescanoperator.c | 11 ++++++++--- source/libs/function/src/builtinsimpl.c | 4 +++- 4 files changed, 26 insertions(+), 14 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index 2ae3115c0a..b87e5d5503 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -287,12 +287,17 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32 hasRes = true; p->ts = pColVal->ts; - uint8_t* px = p->colVal.value.pData; - p->colVal = pColVal->colVal; + if (!IS_VAR_DATA_TYPE(pColVal->colVal.type)) { + p->colVal = pColVal->colVal; + } else { + if (COL_VAL_IS_VALUE(&pColVal->colVal)) { + memcpy(p->colVal.value.pData, pColVal->colVal.value.pData, pColVal->colVal.value.nData); + } - if (COL_VAL_IS_VALUE(&pColVal->colVal) && IS_VAR_DATA_TYPE(pColVal->colVal.type)) { - p->colVal.value.pData = px; - memcpy(px, pColVal->colVal.value.pData, pColVal->colVal.value.nData); + p->colVal.value.nData = pColVal->colVal.value.nData; + p->colVal.type = pColVal->colVal.type; + p->colVal.flag = pColVal->colVal.flag; + p->colVal.cid = pColVal->colVal.cid; } } } diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 69fae4d481..117729b3bd 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -305,7 +305,7 @@ static void* getPosInBlockInfoBuf(SBlockInfoBuf* pBuf, int32_t index) { } // NOTE: speedup the whole processing by preparing the buffer for STableBlockScanInfo in batch model -static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, const STableKeyInfo* idList, int32_t numOfTables) { +static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf* pBuf, const STableKeyInfo* idList, int32_t numOfTables) { // allocate buffer in order to load data blocks from file // todo use simple hash instead, optimize the memory consumption SHashObj* pTableMap = @@ -315,10 +315,10 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, const STableK } int64_t st = taosGetTimestampUs(); - initBlockScanInfoBuf(&pTsdbReader->blockInfoBuf, numOfTables); + initBlockScanInfoBuf(pBuf, numOfTables); for (int32_t j = 0; j < numOfTables; ++j) { - STableBlockScanInfo* pScanInfo = getPosInBlockInfoBuf(&pTsdbReader->blockInfoBuf, j); + STableBlockScanInfo* pScanInfo = getPosInBlockInfoBuf(pBuf, j); pScanInfo->uid = idList[j].uid; if (ASCENDING_TRAVERSE(pTsdbReader->order)) { int64_t skey = pTsdbReader->window.skey; @@ -3785,9 +3785,9 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL } STsdbReader* p = (pReader->innerReader[0] != NULL)? 
pReader->innerReader[0]:pReader; - pReader->status.pTableMap = createDataBlockScanInfo(p, pTableList, numOfTables); + pReader->status.pTableMap = createDataBlockScanInfo(p, &pReader->blockInfoBuf, pTableList, numOfTables); if (pReader->status.pTableMap == NULL) { - tsdbReaderClose(pReader); + tsdbReaderClose(p); *ppReader = NULL; code = TSDB_CODE_TDB_OUT_OF_MEMORY; diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c index 0c97ce3f30..5e5c01201f 100644 --- a/source/libs/executor/src/cachescanoperator.c +++ b/source/libs/executor/src/cachescanoperator.c @@ -157,9 +157,12 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) { SColumnInfoData* pSrc = taosArrayGet(pInfo->pBufferredRes->pDataBlock, slotId); SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, slotId); - char* p = colDataGetData(pSrc, pInfo->indexOfBufferedRes); - bool isNull = colDataIsNull_s(pSrc, pInfo->indexOfBufferedRes); - colDataAppend(pDst, 0, p, isNull); + if (colDataIsNull_s(pSrc, pInfo->indexOfBufferedRes)) { + colDataAppendNULL(pDst, 0); + } else { + char* p = colDataGetData(pSrc, pInfo->indexOfBufferedRes); + colDataAppend(pDst, 0, p, false); + } } pRes->info.uid = *(tb_uid_t*)taosArrayGet(pInfo->pUidList, pInfo->indexOfBufferedRes); @@ -226,6 +229,8 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) { pInfo->pLastrowReader = tsdbCacherowsReaderClose(pInfo->pLastrowReader); return pInfo->pRes; + } else { + pInfo->pLastrowReader = tsdbCacherowsReaderClose(pInfo->pLastrowReader); } } diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 87e15370e4..4d69b4d45c 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -6568,7 +6568,9 @@ int32_t cachedLastRowFunction(SqlFunctionCtx* pCtx) { for (int32_t i = pInput->numOfRows + pInput->startRowIndex - 1; i >= pInput->startRowIndex; --i) { numOfElems++; - char* data = colDataGetData(pInputCol, i); + bool isNull = colDataIsNull(pInputCol, pInput->numOfRows, i, NULL); + char* data = isNull ? 
NULL : colDataGetData(pInputCol, i); + TSKEY cts = getRowPTs(pInput->pPTS, i); if (pResInfo->numOfRes == 0 || pInfo->ts < cts) { doSaveLastrow(pCtx, data, i, cts, pInfo); From f2e2aa77227d57b3655e6f82b80c7b5906152bc7 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 22 Nov 2022 18:18:31 +0800 Subject: [PATCH 60/69] fix: insert stable error --- source/libs/parser/src/parInsertSql.c | 33 ++++++++++++++++----------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index 411adc680c..61951b171b 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -792,6 +792,8 @@ static int32_t getTableMeta(SInsertParseContext* pCxt, SName* pTbName, bool isSt *pMissCache = true; } else if (isStb && TSDB_SUPER_TABLE != (*pTableMeta)->tableType) { code = buildInvalidOperationMsg(&pCxt->msg, "create table only from super table is allowed"); + } else if (!isStb && TSDB_SUPER_TABLE == (*pTableMeta)->tableType) { + code = buildInvalidOperationMsg(&pCxt->msg, "insert data into super table is not supported"); } } return code; @@ -1571,16 +1573,16 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt, SVnodeModifOpStmt* pSt static void destroySubTableHashElem(void* p) { taosMemoryFree(*(STableMeta**)p); } -static int32_t createVnodeModifOpStmt(SParseContext* pCxt, bool reentry, SNode** pOutput) { +static int32_t createVnodeModifOpStmt(SInsertParseContext* pCxt, bool reentry, SNode** pOutput) { SVnodeModifOpStmt* pStmt = (SVnodeModifOpStmt*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT); if (NULL == pStmt) { return TSDB_CODE_OUT_OF_MEMORY; } - if (pCxt->pStmtCb) { + if (pCxt->pComCxt->pStmtCb) { TSDB_QUERY_SET_TYPE(pStmt->insertType, TSDB_QUERY_TYPE_STMT_INSERT); } - pStmt->pSql = pCxt->pSql; + pStmt->pSql = pCxt->pComCxt->pSql; pStmt->freeHashFunc = insDestroyBlockHashmap; pStmt->freeArrayFunc = insDestroyBlockArrayList; @@ -1604,7 +1606,7 @@ static int32_t createVnodeModifOpStmt(SParseContext* pCxt, bool reentry, SNode** return TSDB_CODE_SUCCESS; } -static int32_t createInsertQuery(SParseContext* pCxt, SQuery** pOutput) { +static int32_t createInsertQuery(SInsertParseContext* pCxt, SQuery** pOutput) { SQuery* pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY); if (NULL == pQuery) { return TSDB_CODE_OUT_OF_MEMORY; @@ -1667,11 +1669,15 @@ static int32_t getTableVgroupFromMetaData(const SArray* pTables, SVnodeModifOpSt sizeof(SVgroupInfo)); } -static int32_t getTableSchemaFromMetaData(const SMetaData* pMetaData, SVnodeModifOpStmt* pStmt, bool isStb) { +static int32_t getTableSchemaFromMetaData(SInsertParseContext* pCxt, const SMetaData* pMetaData, + SVnodeModifOpStmt* pStmt, bool isStb) { int32_t code = checkAuthFromMetaData(pMetaData->pUser); if (TSDB_CODE_SUCCESS == code) { code = getTableMetaFromMetaData(pMetaData->pTableMeta, &pStmt->pTableMeta); } + if (TSDB_CODE_SUCCESS == code && !isStb && TSDB_SUPER_TABLE == pStmt->pTableMeta->tableType) { + code = buildInvalidOperationMsg(&pCxt->msg, "insert data into super table is not supported"); + } if (TSDB_CODE_SUCCESS == code) { code = getTableVgroupFromMetaData(pMetaData->pTableHash, pStmt, isStb); } @@ -1696,24 +1702,25 @@ static void clearCatalogReq(SCatalogReq* pCatalogReq) { pCatalogReq->pUser = NULL; } -static int32_t setVnodeModifOpStmt(SParseContext* pCxt, SCatalogReq* pCatalogReq, const SMetaData* pMetaData, +static int32_t setVnodeModifOpStmt(SInsertParseContext* pCxt, SCatalogReq* pCatalogReq, const SMetaData* pMetaData, 
SVnodeModifOpStmt* pStmt) { clearCatalogReq(pCatalogReq); if (pStmt->usingTableProcessing) { - return getTableSchemaFromMetaData(pMetaData, pStmt, true); + return getTableSchemaFromMetaData(pCxt, pMetaData, pStmt, true); } - return getTableSchemaFromMetaData(pMetaData, pStmt, false); + return getTableSchemaFromMetaData(pCxt, pMetaData, pStmt, false); } -static int32_t resetVnodeModifOpStmt(SParseContext* pCxt, SQuery* pQuery) { +static int32_t resetVnodeModifOpStmt(SInsertParseContext* pCxt, SQuery* pQuery) { nodesDestroyNode(pQuery->pRoot); int32_t code = createVnodeModifOpStmt(pCxt, true, &pQuery->pRoot); if (TSDB_CODE_SUCCESS == code) { SVnodeModifOpStmt* pStmt = (SVnodeModifOpStmt*)pQuery->pRoot; - (*pCxt->pStmtCb->getExecInfoFn)(pCxt->pStmtCb->pStmt, &pStmt->pVgroupsHashObj, &pStmt->pTableBlockHashObj); + (*pCxt->pComCxt->pStmtCb->getExecInfoFn)(pCxt->pComCxt->pStmtCb->pStmt, &pStmt->pVgroupsHashObj, + &pStmt->pTableBlockHashObj); if (NULL == pStmt->pVgroupsHashObj) { pStmt->pVgroupsHashObj = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); } @@ -1729,13 +1736,13 @@ static int32_t resetVnodeModifOpStmt(SParseContext* pCxt, SQuery* pQuery) { return code; } -static int32_t initInsertQuery(SParseContext* pCxt, SCatalogReq* pCatalogReq, const SMetaData* pMetaData, +static int32_t initInsertQuery(SInsertParseContext* pCxt, SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SQuery** pQuery) { if (NULL == *pQuery) { return createInsertQuery(pCxt, pQuery); } - if (NULL != pCxt->pStmtCb) { + if (NULL != pCxt->pComCxt->pStmtCb) { return resetVnodeModifOpStmt(pCxt, *pQuery); } @@ -1896,7 +1903,7 @@ int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatal .usingDuplicateTable = false, }; - int32_t code = initInsertQuery(pCxt, pCatalogReq, pMetaData, pQuery); + int32_t code = initInsertQuery(&context, pCatalogReq, pMetaData, pQuery); if (TSDB_CODE_SUCCESS == code) { code = parseInsertSqlImpl(&context, (SVnodeModifOpStmt*)(*pQuery)->pRoot); } From b6b00cf3a4ced5b23ac9c211b5aa435e17c9ca2d Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 22 Nov 2022 18:30:57 +0800 Subject: [PATCH 61/69] fix: remove double free one-liners --- source/dnode/vnode/src/tq/tqSink.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index b2624d1bc1..f54ed3fc63 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -349,7 +349,6 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d .contLen = len + sizeof(SMsgHead), }; if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) { - rpcFreeCont(serializedDeleteReq); tqDebug("failed to put delete req into write-queue since %s", terrstr()); } } else { @@ -541,7 +540,6 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d }; if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) { - rpcFreeCont(ret); tqDebug("failed to put into write-queue since %s", terrstr()); } } From 842217e77d7848c754b522c074b7dec99b9886a6 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 22 Nov 2022 18:53:48 +0800 Subject: [PATCH 62/69] fix: crash issue caused by insert from query clause --- source/client/src/clientMain.c | 2 ++ source/libs/executor/src/dataInserter.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 0aa88382fe..423cfcdc5b 100644 --- a/source/client/src/clientMain.c +++ 
b/source/client/src/clientMain.c @@ -1106,6 +1106,8 @@ int taos_get_table_vgId(TAOS *taos, const char *db, const char *table, int *vgId return terrno; } + pRequest->syncQuery = true; + STscObj *pTscObj = pRequest->pTscObj; SCatalog *pCtg = NULL; code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCtg); diff --git a/source/libs/executor/src/dataInserter.c b/source/libs/executor/src/dataInserter.c index 78afdd16b7..09ca1d27b9 100644 --- a/source/libs/executor/src/dataInserter.c +++ b/source/libs/executor/src/dataInserter.c @@ -250,6 +250,8 @@ static int32_t putDataBlock(SDataSinkHandle* pHandle, const SInputData* pInput, return code; } + taosArrayClear(pInserter->pDataBlocks); + code = sendSubmitRequest(pInserter, pMsg, pInserter->pParam->readHandle->pMsgCb->clientRpc, &pInserter->pNode->epSet); if (code) { return code; From 5922c725b1a7d589c4f78afbadd4d98c806e2309 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 22 Nov 2022 20:55:24 +0800 Subject: [PATCH 63/69] fix: insert stable error --- source/libs/parser/src/parInsertSql.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index 61951b171b..9c16bf645f 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -227,6 +227,10 @@ static int32_t parseBoundColumns(SInsertParseContext* pCxt, const char** pSql, S } } + if (pColList->cols[0].valStat == VAL_STAT_NONE) { + return buildInvalidOperationMsg(&pCxt->msg, "primary timestamp column can not be null"); + } + pColList->orderStatus = isOrdered ? ORDER_STATUS_ORDERED : ORDER_STATUS_DISORDERED; if (!isOrdered) { From d4c739414300a31e681fe236974ad5bcc47f01f5 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Tue, 22 Nov 2022 22:03:08 +0800 Subject: [PATCH 64/69] fix(stream): memory leak --- source/dnode/vnode/src/tq/tqPush.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c index 12d5b4112b..f89bc20362 100644 --- a/source/dnode/vnode/src/tq/tqPush.c +++ b/source/dnode/vnode/src/tq/tqPush.c @@ -308,9 +308,8 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) } if (vnodeIsRoleLeader(pTq->pVnode)) { + if (taosHashGetSize(pTq->pStreamMeta->pTasks) == 0) return 0; if (msgType == TDMT_VND_SUBMIT) { - if (taosHashGetSize(pTq->pStreamMeta->pTasks) == 0) return 0; - void* data = taosMemoryMalloc(msgLen); if (data == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; From 0d60f24db59ddcbb2f9d0701ad08c15d127361db Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 22 Nov 2022 22:42:44 +0800 Subject: [PATCH 65/69] refactor: do some internal refactor. 
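The bulk of this refactor is information hiding: operator-private state structs (SCacheRowsScanInfo, SSourceDataInfo, SSortOperatorInfo, and so on) move out of the shared executorimpl.h into the single .c file that uses them, and the system-table scan gets its own sysscanoperator.c. A minimal sketch of the pattern follows, with illustrative names rather than the real executor API:

```c
/* ---- cachescan.h: only an opaque handle is published ---- */
typedef struct SCacheScan SCacheScan;   /* forward declaration, no fields */
SCacheScan *cacheScanCreate(void);
void        cacheScanDestroy(SCacheScan *p);

/* ---- cachescan.c: the full definition stays private ---- */
#include <stdint.h>
#include <stdlib.h>

struct SCacheScan {
  int32_t indexOfBufferedRes;  /* cursor into the buffered result */
  void   *pLastrowReader;      /* underlying storage reader */
};

SCacheScan *cacheScanCreate(void) { return calloc(1, sizeof(SCacheScan)); }
void cacheScanDestroy(SCacheScan *p) { free(p); }
```

Callers outside cachescan.c can hold and pass the handle but cannot touch its fields, so later layout changes (like the SLastrowScanInfo to SCacheRowsScanInfo rename in the hunks below) stay local to one file.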
--- source/common/src/tdatablock.c | 2 - source/libs/executor/CMakeLists.txt | 4 - source/libs/executor/inc/executorimpl.h | 146 +- source/libs/executor/src/cachescanoperator.c | 20 +- source/libs/executor/src/exchangeoperator.c | 88 + source/libs/executor/src/executorimpl.c | 192 -- source/libs/executor/src/groupoperator.c | 30 + source/libs/executor/src/joinoperator.c | 15 + source/libs/executor/src/projectoperator.c | 18 + source/libs/executor/src/scanoperator.c | 1710 +---------------- source/libs/executor/src/sortoperator.c | 12 + source/libs/executor/src/sysscanoperator.c | 1774 ++++++++++++++++++ 12 files changed, 1964 insertions(+), 2047 deletions(-) create mode 100644 source/libs/executor/src/sysscanoperator.c diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 3c2a5377e3..b5fffea73d 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1084,8 +1084,6 @@ int32_t dataBlockCompar_rv(const void* p1, const void* p2, const void* param) { return 0; } -int32_t varColSort(SColumnInfoData* pColumnInfoData, SBlockOrderInfo* pOrder) { return 0; } - int32_t blockDataSort_rv(SSDataBlock* pDataBlock, SArray* pOrderInfo, bool nullFirst) { // Allocate the additional buffer. int64_t p0 = taosGetTimestampUs(); diff --git a/source/libs/executor/CMakeLists.txt b/source/libs/executor/CMakeLists.txt index 89d08b3078..8b3d04e32c 100644 --- a/source/libs/executor/CMakeLists.txt +++ b/source/libs/executor/CMakeLists.txt @@ -2,10 +2,6 @@ aux_source_directory(src EXECUTOR_SRC) #add_library(executor ${EXECUTOR_SRC}) add_library(executor STATIC ${EXECUTOR_SRC}) -#set_target_properties(executor PROPERTIES -# IMPORTED_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}/libexecutor.a" -# INTERFACE_INCLUDE_DIRECTORIES "${TD_SOURCE_DIR}/include/libs/executor" -# ) target_link_libraries(executor PRIVATE os util common function parser planner qcom vnode scalar nodes index stream diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index f179c7bd41..f85bddcc31 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -235,16 +235,6 @@ typedef enum { #define COL_MATCH_FROM_COL_ID 0x1 #define COL_MATCH_FROM_SLOT_ID 0x2 -typedef struct SSourceDataInfo { - int32_t index; - SRetrieveTableRsp* pRsp; - uint64_t totalRows; - int64_t startTime; - int32_t code; - EX_SOURCE_STATUS status; - const char* taskId; -} SSourceDataInfo; - typedef struct SLoadRemoteDataInfo { uint64_t totalSize; // total load bytes from remote uint64_t totalRows; // total number of rows @@ -371,23 +361,8 @@ typedef struct STagScanInfo { SColMatchInfo matchInfo; int32_t curPos; SReadHandle readHandle; - STableListInfo* pTableList; } STagScanInfo; -typedef struct SLastrowScanInfo { - SSDataBlock* pRes; - SReadHandle readHandle; - void* pLastrowReader; - SColMatchInfo matchInfo; - int32_t* pSlotIds; - SExprSupp pseudoExprSup; - int32_t retrieveType; - int32_t currentGroupIndex; - SSDataBlock* pBufferredRes; - SArray* pUidList; - int32_t indexOfBufferedRes; -} SLastrowScanInfo; - typedef enum EStreamScanMode { STREAM_SCAN_FROM_READERHANDLE = 1, STREAM_SCAN_FROM_RES, @@ -504,40 +479,6 @@ typedef struct { SSnapContext* sContext; } SStreamRawScanInfo; -typedef struct SSysTableIndex { - int8_t init; - SArray* uids; - int32_t lastIdx; -} SSysTableIndex; - -typedef struct SSysTableScanInfo { - SRetrieveMetaTableRsp* pRsp; - SRetrieveTableReq req; - SEpSet epSet; - tsem_t ready; - SReadHandle readHandle; - int32_t accountId; - 
const char* pUser; - bool sysInfo; - bool showRewrite; - SNode* pCondition; // db_name filter condition, to discard data that are not in current database - SMTbCursor* pCur; // cursor for iterate the local table meta store. - SSysTableIndex* pIdx; // idx for local table meta - SColMatchInfo matchInfo; - SName name; - SSDataBlock* pRes; - int64_t numOfBlocks; // extract basic running information. - SLoadRemoteDataInfo loadInfo; -} SSysTableScanInfo; - -typedef struct SBlockDistInfo { - SSDataBlock* pResBlock; - STsdbReader* pHandle; - SReadHandle readHandle; - uint64_t uid; // table uid -} SBlockDistInfo; - -// todo remove this typedef struct SOptrBasicInfo { SResultRowInfo resultRowInfo; SSDataBlock* pRes; @@ -603,24 +544,6 @@ typedef struct SAggOperatorInfo { SExprSupp scalarExprSup; } SAggOperatorInfo; -typedef struct SProjectOperatorInfo { - SOptrBasicInfo binfo; - SAggSupporter aggSup; - SArray* pPseudoColInfo; - SLimitInfo limitInfo; - bool mergeDataBlocks; - SSDataBlock* pFinalRes; -} SProjectOperatorInfo; - -typedef struct SIndefOperatorInfo { - SOptrBasicInfo binfo; - SAggSupporter aggSup; - SArray* pPseudoColInfo; - SExprSupp scalarSup; - uint64_t groupId; - SSDataBlock* pNextGroupRes; -} SIndefOperatorInfo; - typedef struct SFillOperatorInfo { struct SFillInfo* pFillInfo; SSDataBlock* pRes; @@ -638,42 +561,12 @@ typedef struct SFillOperatorInfo { SExprSupp noFillExprSupp; } SFillOperatorInfo; -typedef struct SGroupbyOperatorInfo { - SOptrBasicInfo binfo; - SAggSupporter aggSup; - SArray* pGroupCols; // group by columns, SArray - SArray* pGroupColVals; // current group column values, SArray - bool isInit; // denote if current val is initialized or not - char* keyBuf; // group by keys for hash - int32_t groupKeyLen; // total group by column width - SGroupResInfo groupResInfo; - SExprSupp scalarSup; -} SGroupbyOperatorInfo; - typedef struct SDataGroupInfo { uint64_t groupId; int64_t numOfRows; SArray* pPageList; } SDataGroupInfo; -// The sort in partition may be needed later. -typedef struct SPartitionOperatorInfo { - SOptrBasicInfo binfo; - SArray* pGroupCols; - SArray* pGroupColVals; // current group column values, SArray - char* keyBuf; // group by keys for hash - int32_t groupKeyLen; // total group by column width - SHashObj* pGroupSet; // quick locate the window object for each result - - SDiskbasedBuf* pBuf; // query result buffer based on blocked-wised disk file - int32_t rowCapacity; // maximum number of rows for each buffer page - int32_t* columnOffset; // start position for each column data - SArray* sortedGroupArray; // SDataGroupInfo sorted by group id - int32_t groupIndex; // group index - int32_t pageIndex; // page index of current group - SExprSupp scalarSup; -} SPartitionOperatorInfo; - typedef struct SWindowRowsSup { STimeWindow win; TSKEY prevTs; @@ -800,33 +693,6 @@ typedef struct SStateWindowOperatorInfo { STimeWindowAggSupp twAggSup; } SStateWindowOperatorInfo; -typedef struct SSortOperatorInfo { - SOptrBasicInfo binfo; - uint32_t sortBufSize; // max buffer size for in-memory sort - SArray* pSortInfo; - SSortHandle* pSortHandle; - SColMatchInfo matchInfo; - int32_t bufPageSize; - int64_t startTs; // sort start time - uint64_t sortElapsed; // sort elapsed time, time to flush to disk not included. 
- SLimitInfo limitInfo; -} SSortOperatorInfo; - -typedef struct SJoinOperatorInfo { - SSDataBlock* pRes; - int32_t joinType; - int32_t inputOrder; - - SSDataBlock* pLeft; - int32_t leftPos; - SColumnInfo leftCol; - - SSDataBlock* pRight; - int32_t rightPos; - SColumnInfo rightCol; - SNode* pCondAfterMerge; -} SJoinOperatorInfo; - #define OPTR_IS_OPENED(_optr) (((_optr)->status & OP_OPENED) == OP_OPENED) #define OPTR_SET_OPENED(_optr) ((_optr)->status |= OP_OPENED) @@ -850,7 +716,6 @@ void doBuildStreamResBlock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGr void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo, SDiskbasedBuf* pBuf); -int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf); bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo); void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimitInfo); void applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, SOperatorInfo* pOperator); @@ -880,9 +745,6 @@ void cleanupAggSup(SAggSupporter* pAggSup); void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle); void setTbNameColData(const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, int32_t functionId, const char* name); -int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts); -int32_t doGetScanStatus(SOperatorInfo* pOperator, uint64_t* uid, int64_t* ts); - SSDataBlock* loadNextDataBlock(void* param); void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset); @@ -965,9 +827,8 @@ void setInputDataBlock(SExprSupp* pExprSupp, SSDataBlock* pBlock, int32_t order, bool isTaskKilled(SExecTaskInfo* pTaskInfo); int32_t checkForQueryBuf(size_t numOfTables); -void setTaskKilled(SExecTaskInfo* pTaskInfo); -void queryCostStatis(SExecTaskInfo* pTaskInfo); - +void setTaskKilled(SExecTaskInfo* pTaskInfo); +void queryCostStatis(SExecTaskInfo* pTaskInfo); void doDestroyTask(SExecTaskInfo* pTaskInfo); void destroyOperatorInfo(SOperatorInfo* pOperator); int32_t getMaximumIdleDurationSec(); @@ -995,9 +856,6 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead int32_t createDataSinkParam(SDataSinkNode* pNode, void** pParam, qTaskInfo_t* pTaskInfo, SReadHandle* readHandle); int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInfoList); -int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result); -int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* length); - STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowInfo, int64_t ts, SInterval* pInterval, int32_t order); int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn, int32_t startPos, TSKEY ekey, diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c index 5e5c01201f..6b5f773fe3 100644 --- a/source/libs/executor/src/cachescanoperator.c +++ b/source/libs/executor/src/cachescanoperator.c @@ -25,6 +25,20 @@ #include "thash.h" #include "ttypes.h" +typedef struct SCacheRowsScanInfo { + SSDataBlock* pRes; + SReadHandle readHandle; + void* pLastrowReader; + SColMatchInfo matchInfo; + int32_t* pSlotIds; + SExprSupp pseudoExprSup; + int32_t retrieveType; + int32_t currentGroupIndex; + SSDataBlock* pBufferredRes; + SArray* pUidList; + int32_t indexOfBufferedRes; +} SCacheRowsScanInfo; + static 
SSDataBlock* doScanCache(SOperatorInfo* pOperator); static void destroyCacheScanOperator(void* param); static int32_t extractCacheScanSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInfo, int32_t** pSlotIds); @@ -33,7 +47,7 @@ static int32_t removeRedundantTsCol(SLastRowScanPhysiNode* pScanNode, SColM SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SReadHandle* readHandle, SExecTaskInfo* pTaskInfo) { int32_t code = TSDB_CODE_SUCCESS; - SLastrowScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SLastrowScanInfo)); + SCacheRowsScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SCacheRowsScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; @@ -114,7 +128,7 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) { return NULL; } - SLastrowScanInfo* pInfo = pOperator->info; + SCacheRowsScanInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; STableListInfo* pTableList = pTaskInfo->pTableInfoList; @@ -240,7 +254,7 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) { } void destroyCacheScanOperator(void* param) { - SLastrowScanInfo* pInfo = (SLastrowScanInfo*)param; + SCacheRowsScanInfo* pInfo = (SCacheRowsScanInfo*)param; blockDataDestroy(pInfo->pRes); blockDataDestroy(pInfo->pBufferredRes); taosMemoryFree(pInfo->pSlotIds); diff --git a/source/libs/executor/src/exchangeoperator.c b/source/libs/executor/src/exchangeoperator.c index a28066003a..c858536bb1 100644 --- a/source/libs/executor/src/exchangeoperator.c +++ b/source/libs/executor/src/exchangeoperator.c @@ -41,6 +41,16 @@ typedef struct SFetchRspHandleWrapper { int32_t sourceIndex; } SFetchRspHandleWrapper; +typedef struct SSourceDataInfo { + int32_t index; + SRetrieveTableRsp* pRsp; + uint64_t totalRows; + int64_t startTime; + int32_t code; + EX_SOURCE_STATUS status; + const char* taskId; +} SSourceDataInfo; + static void destroyExchangeOperatorInfo(void* param); static void freeBlock(void* pParam); static void freeSourceDataInfo(void* param); @@ -52,6 +62,7 @@ static int32_t getCompletedSources(const SArray* pArray); static int32_t prepareConcurrentlyLoad(SOperatorInfo* pOperator); static int32_t seqLoadRemoteData(SOperatorInfo* pOperator); static int32_t prepareLoadRemoteData(SOperatorInfo* pOperator); +static int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf); static void concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SExchangeInfo* pExchangeInfo, SExecTaskInfo* pTaskInfo) { @@ -647,3 +658,80 @@ int32_t prepareLoadRemoteData(SOperatorInfo* pOperator) { pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0; return TSDB_CODE_SUCCESS; } + +int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf) { + if (pLimitInfo->remainGroupOffset > 0) { + if (pLimitInfo->currentGroupId == 0) { // it is the first group + pLimitInfo->currentGroupId = pBlock->info.groupId; + blockDataCleanup(pBlock); + return PROJECT_RETRIEVE_CONTINUE; + } else if (pLimitInfo->currentGroupId != pBlock->info.groupId) { + // now it is the data from a new group + pLimitInfo->remainGroupOffset -= 1; + + // ignore data block in current group + if (pLimitInfo->remainGroupOffset > 0) { + blockDataCleanup(pBlock); + return PROJECT_RETRIEVE_CONTINUE; + } + } + + // set current group id of the project operator + pLimitInfo->currentGroupId = pBlock->info.groupId; + } + + // here check 
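for data from a new group; when found, the remaining data of the previous group must be handled first.
+  //
+  // Worked example (slimit/soffset values assumed purely for illustration): with
+  // soffset=1 and slimit=2, the remainGroupOffset branch above drops every block of
+  // the first group; the next two groups are then emitted, and when a third group
+  // arrives the check below flips the operator to OP_EXEC_DONE.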
+  if (pLimitInfo->currentGroupId != 0 && pLimitInfo->currentGroupId != pBlock->info.groupId) {
+    pLimitInfo->numOfOutputGroups += 1;
+    if ((pLimitInfo->slimit.limit > 0) && (pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) {
+      pOperator->status = OP_EXEC_DONE;
+      blockDataCleanup(pBlock);
+
+      return PROJECT_RETRIEVE_DONE;
+    }
+
+    // reset the values for the new group
+    pLimitInfo->numOfOutputRows = 0;
+    pLimitInfo->remainOffset = pLimitInfo->limit.offset;
+
+    // rows still in pBlock belong to the previous group; emit them first.
+    if (pBlock->info.rows > 0) {
+      return PROJECT_RETRIEVE_DONE;
+    }
+  }
+
+  // from here on, the start position required by limit/offset has been reached.
+
+  // set current group id
+  pLimitInfo->currentGroupId = pBlock->info.groupId;
+
+  if (pLimitInfo->remainOffset >= pBlock->info.rows) {
+    pLimitInfo->remainOffset -= pBlock->info.rows;
+    blockDataCleanup(pBlock);
+    return PROJECT_RETRIEVE_CONTINUE;
+  } else if (pLimitInfo->remainOffset < pBlock->info.rows && pLimitInfo->remainOffset > 0) {
+    blockDataTrimFirstNRows(pBlock, pLimitInfo->remainOffset);
+    pLimitInfo->remainOffset = 0;
+  }
+
+  // check the row limit within the current group
+  if (pLimitInfo->limit.limit >= 0 && pLimitInfo->numOfOutputRows + pBlock->info.rows >= pLimitInfo->limit.limit) {
+    int32_t keepRows = (int32_t)(pLimitInfo->limit.limit - pLimitInfo->numOfOutputRows);
+    blockDataKeepFirstNRows(pBlock, keepRows);
+    if (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups) {
+      pOperator->status = OP_EXEC_DONE;
+    }
+
+    return PROJECT_RETRIEVE_DONE;
+  }
+
+  // todo optimize performance
+  // If slimit/soffset values exist, multi-round results cannot be packed into one group, since
+  // they may not belong to the same group; the limit/offset values are not valid in that case.
+  if ((!holdDataInBuf) || (pBlock->info.rows >= pOperator->resultInfo.threshold) || pLimitInfo->slimit.offset != -1 ||
+      pLimitInfo->slimit.limit != -1) {
+    return PROJECT_RETRIEVE_DONE;
+  } else {  // not full enough; continue to accumulate the output data in the buffer.
+    return PROJECT_RETRIEVE_CONTINUE;
+  }
+}
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c
index a7e955100c..9b1bc81dbe 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorimpl.c
@@ -24,7 +24,6 @@
 #include "tdatablock.h"
 #include "tglobal.h"
 #include "tmsg.h"
-#include "tsort.h"
 #include "ttime.h"
 
 #include "executorimpl.h"
@@ -297,8 +296,6 @@ void initExecTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pQueryWindow
   colDataAppendInt64(pColData, 4, &pQueryWindow->ekey);
 }
 
-void cleanupExecTimeWindowInfo(SColumnInfoData* pColData) { colDataDestroy(pColData); }
-
 typedef struct {
   bool    hasAgg;
   int32_t numOfRows;
@@ -1347,42 +1344,6 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) {
   }
 }
 
-// static void updateOffsetVal(STaskRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBlockInfo) {
-//   STaskAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
-//   STableQueryInfo* pTableQueryInfo = pRuntimeEnv->current;
-//
-//   int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order);
-//
-//   if (pQueryAttr->limit.offset == pBlockInfo->rows) {  // current block will ignore completed
-//     pTableQueryInfo->lastKey = QUERY_IS_ASC_QUERY(pQueryAttr) ?
pBlockInfo->window.ekey + step : -// pBlockInfo->window.skey + step; pQueryAttr->limit.offset = 0; return; -// } -// -// if (QUERY_IS_ASC_QUERY(pQueryAttr)) { -// pQueryAttr->pos = (int32_t)pQueryAttr->limit.offset; -// } else { -// pQueryAttr->pos = pBlockInfo->rows - (int32_t)pQueryAttr->limit.offset - 1; -// } -// -// assert(pQueryAttr->pos >= 0 && pQueryAttr->pos <= pBlockInfo->rows - 1); -// -// SArray * pDataBlock = tsdbRetrieveDataBlock(pRuntimeEnv->pTsdbReadHandle, NULL); -// SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock, 0); -// -// // update the pQueryAttr->limit.offset value, and pQueryAttr->pos value -// TSKEY *keys = (TSKEY *) pColInfoData->pData; -// -// // update the offset value -// pTableQueryInfo->lastKey = keys[pQueryAttr->pos]; -// pQueryAttr->limit.offset = 0; -// -// int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, NULL, binarySearchForKey, pDataBlock); -// -// //qDebug("QInfo:0x%"PRIx64" check data block, brange:%" PRId64 "-%" PRId64 ", numBlocksOfStep:%d, numOfRes:%d, -// lastKey:%"PRId64, GET_TASKID(pRuntimeEnv), -// pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes, pQuery->current->lastKey); -// } - // void skipBlocks(STaskRuntimeEnv *pRuntimeEnv) { // STaskAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; // @@ -1723,159 +1684,6 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) { return (rows == 0) ? NULL : pInfo->pRes; } -int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* length) { - if (result == NULL || length == NULL) { - return TSDB_CODE_TSC_INVALID_INPUT; - } - SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info); - SAggSupporter* pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo)); - int32_t size = tSimpleHashGetSize(pSup->pResultRowHashTable); - size_t keyLen = sizeof(uint64_t) * 2; // estimate the key length - int32_t totalSize = - sizeof(int32_t) + sizeof(int32_t) + size * (sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize); - - // no result - if (getTotalBufSize(pSup->pResultBuf) == 0) { - *result = NULL; - *length = 0; - return TSDB_CODE_SUCCESS; - } - - *result = (char*)taosMemoryCalloc(1, totalSize); - if (*result == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; - } - - int32_t offset = sizeof(int32_t); - *(int32_t*)(*result + offset) = size; - offset += sizeof(int32_t); - - // prepare memory - SResultRowPosition* pos = &pInfo->resultRowInfo.cur; - void* pPage = getBufPage(pSup->pResultBuf, pos->pageId); - SResultRow* pRow = (SResultRow*)((char*)pPage + pos->offset); - setBufPageDirty(pPage, true); - releaseBufPage(pSup->pResultBuf, pPage); - - int32_t iter = 0; - void* pIter = NULL; - while ((pIter = tSimpleHashIterate(pSup->pResultRowHashTable, pIter, &iter))) { - void* key = tSimpleHashGetKey(pIter, &keyLen); - SResultRowPosition* p1 = (SResultRowPosition*)pIter; - - pPage = (SFilePage*)getBufPage(pSup->pResultBuf, p1->pageId); - pRow = (SResultRow*)((char*)pPage + p1->offset); - setBufPageDirty(pPage, true); - releaseBufPage(pSup->pResultBuf, pPage); - - // recalculate the result size - int32_t realTotalSize = offset + sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize; - if (realTotalSize > totalSize) { - char* tmp = (char*)taosMemoryRealloc(*result, realTotalSize); - if (tmp == NULL) { - taosMemoryFree(*result); - *result = NULL; - return TSDB_CODE_OUT_OF_MEMORY; - } else { - *result = tmp; - } - } - // save key - *(int32_t*)(*result + offset) = keyLen; - offset += sizeof(int32_t); - 
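-    // Buffer layout this encoder was producing, as the offset arithmetic above
-    // suggests (not a documented format): [int32 totalLen][int32 numOfEntries],
-    // then per entry [int32 keyLen][key bytes][int32 resultRowSize][row bytes].
-    // The memcpy below drops the key bytes in right behind their length prefix.
-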
memcpy(*result + offset, key, keyLen); - offset += keyLen; - - // save value - *(int32_t*)(*result + offset) = pSup->resultRowSize; - offset += sizeof(int32_t); - memcpy(*result + offset, pRow, pSup->resultRowSize); - offset += pSup->resultRowSize; - } - - *(int32_t*)(*result) = offset; - *length = offset; - - return TDB_CODE_SUCCESS; -} - -int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf) { - if (pLimitInfo->remainGroupOffset > 0) { - if (pLimitInfo->currentGroupId == 0) { // it is the first group - pLimitInfo->currentGroupId = pBlock->info.groupId; - blockDataCleanup(pBlock); - return PROJECT_RETRIEVE_CONTINUE; - } else if (pLimitInfo->currentGroupId != pBlock->info.groupId) { - // now it is the data from a new group - pLimitInfo->remainGroupOffset -= 1; - - // ignore data block in current group - if (pLimitInfo->remainGroupOffset > 0) { - blockDataCleanup(pBlock); - return PROJECT_RETRIEVE_CONTINUE; - } - } - - // set current group id of the project operator - pLimitInfo->currentGroupId = pBlock->info.groupId; - } - - // here check for a new group data, we need to handle the data of the previous group. - if (pLimitInfo->currentGroupId != 0 && pLimitInfo->currentGroupId != pBlock->info.groupId) { - pLimitInfo->numOfOutputGroups += 1; - if ((pLimitInfo->slimit.limit > 0) && (pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) { - pOperator->status = OP_EXEC_DONE; - blockDataCleanup(pBlock); - - return PROJECT_RETRIEVE_DONE; - } - - // reset the value for a new group data - pLimitInfo->numOfOutputRows = 0; - pLimitInfo->remainOffset = pLimitInfo->limit.offset; - - // existing rows that belongs to previous group. - if (pBlock->info.rows > 0) { - return PROJECT_RETRIEVE_DONE; - } - } - - // here we reach the start position, according to the limit/offset requirements. - - // set current group id - pLimitInfo->currentGroupId = pBlock->info.groupId; - - if (pLimitInfo->remainOffset >= pBlock->info.rows) { - pLimitInfo->remainOffset -= pBlock->info.rows; - blockDataCleanup(pBlock); - return PROJECT_RETRIEVE_CONTINUE; - } else if (pLimitInfo->remainOffset < pBlock->info.rows && pLimitInfo->remainOffset > 0) { - blockDataTrimFirstNRows(pBlock, pLimitInfo->remainOffset); - pLimitInfo->remainOffset = 0; - } - - // check for the limitation in each group - if (pLimitInfo->limit.limit >= 0 && pLimitInfo->numOfOutputRows + pBlock->info.rows >= pLimitInfo->limit.limit) { - int32_t keepRows = (int32_t)(pLimitInfo->limit.limit - pLimitInfo->numOfOutputRows); - blockDataKeepFirstNRows(pBlock, keepRows); - if (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups) { - pOperator->status = OP_EXEC_DONE; - } - - return PROJECT_RETRIEVE_DONE; - } - - // todo optimize performance - // If there are slimit/soffset value exists, multi-round result can not be packed into one group, since the - // they may not belong to the same group the limit/offset value is not valid in this case. - if ((!holdDataInBuf) || (pBlock->info.rows >= pOperator->resultInfo.threshold) || pLimitInfo->slimit.offset != -1 || - pLimitInfo->slimit.limit != -1) { - return PROJECT_RETRIEVE_DONE; - } else { // not full enough, continue to accumulate the output data in the buffer. 
-    return PROJECT_RETRIEVE_CONTINUE;
-  }
-}
-
 static void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t order, int32_t scanFlag);
 static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo* pOperator, SFillOperatorInfo* pInfo,
                                                SResultInfo* pResultInfo, SExecTaskInfo* pTaskInfo) {
diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c
index 26a5f6838d..fcfb79eb4b 100644
--- a/source/libs/executor/src/groupoperator.c
+++ b/source/libs/executor/src/groupoperator.c
@@ -27,6 +27,36 @@
 #include "thash.h"
 #include "ttypes.h"
 
+typedef struct SGroupbyOperatorInfo {
+  SOptrBasicInfo binfo;
+  SAggSupporter  aggSup;
+  SArray*        pGroupCols;     // group by columns, SArray
+  SArray*        pGroupColVals;  // current group column values, SArray
+  bool           isInit;         // denotes whether the current group values have been initialized
+  char*          keyBuf;         // group by keys for hash
+  int32_t        groupKeyLen;    // total group by column width
+  SGroupResInfo  groupResInfo;
+  SExprSupp      scalarSup;
+} SGroupbyOperatorInfo;
+
+// The sort in partition may be needed later.
+typedef struct SPartitionOperatorInfo {
+  SOptrBasicInfo binfo;
+  SArray*        pGroupCols;
+  SArray*        pGroupColVals;  // current group column values, SArray
+  char*          keyBuf;         // group by keys for hash
+  int32_t        groupKeyLen;    // total group by column width
+  SHashObj*      pGroupSet;      // quickly locates the result window object for each group
+
+  SDiskbasedBuf* pBuf;              // query result buffer based on a block-wise disk file
+  int32_t        rowCapacity;       // maximum number of rows for each buffer page
+  int32_t*       columnOffset;      // start position of each column's data
+  SArray*        sortedGroupArray;  // SDataGroupInfo sorted by group id
+  int32_t        groupIndex;        // group index
+  int32_t        pageIndex;         // page index of current group
+  SExprSupp      scalarSup;
+} SPartitionOperatorInfo;
+
 static void*    getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInfo** pGroupInfo, int32_t len);
 static int32_t* setupColumnOffset(const SSDataBlock* pBlock, int32_t rowCapacity);
 static int32_t  setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData,
diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c
index 4e1daac643..61dc7a1b76 100644
--- a/source/libs/executor/src/joinoperator.c
+++ b/source/libs/executor/src/joinoperator.c
@@ -24,6 +24,21 @@
 #include "tmsg.h"
 #include "ttypes.h"
 
+typedef struct SJoinOperatorInfo {
+  SSDataBlock* pRes;
+  int32_t      joinType;
+  int32_t      inputOrder;
+
+  SSDataBlock* pLeft;
+  int32_t      leftPos;
+  SColumnInfo  leftCol;
+
+  SSDataBlock* pRight;
+  int32_t      rightPos;
+  SColumnInfo  rightCol;
+  SNode*       pCondAfterMerge;
+} SJoinOperatorInfo;
+
 static void         setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode);
 static SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator);
 static void         destroyMergeJoinOperator(void* param);
diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c
index ce1d13775c..ada7964c67 100644
--- a/source/libs/executor/src/projectoperator.c
+++ b/source/libs/executor/src/projectoperator.c
@@ -17,6 +17,24 @@
 #include "executorimpl.h"
 #include "functionMgt.h"
 
+typedef struct SProjectOperatorInfo {
+  SOptrBasicInfo binfo;
+  SAggSupporter  aggSup;
+  SArray*        pPseudoColInfo;
+  SLimitInfo     limitInfo;
+  bool           mergeDataBlocks;
+  SSDataBlock*   pFinalRes;
+} SProjectOperatorInfo;
+
+typedef struct SIndefOperatorInfo {
+  SOptrBasicInfo binfo;
+  SAggSupporter  aggSup;
+  SArray*        pPseudoColInfo;
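+  // The remaining fields carry state across calls: scalarSup holds scalar expressions
+  // applied to the input before the indefinite functions run, and groupId together
+  // with pNextGroupRes buffers the next group's block so one group's output can be
+  // finished before the next begins (a reading of the struct, not a stated contract).
+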
SExprSupp scalarSup; + uint64_t groupId; + SSDataBlock* pNextGroupRes; +} SIndefOperatorInfo; + static SSDataBlock* doGenerateSourceData(SOperatorInfo* pOperator); static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator); static SSDataBlock* doApplyIndefinitFunction(SOperatorInfo* pOperator); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 916b6df969..24ca3e2eed 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -13,7 +13,6 @@ * along with this program. If not, see . */ -#include #include "executorimpl.h" #include "filter.h" #include "function.h" @@ -33,31 +32,17 @@ #include "ttypes.h" #include "vnode.h" +typedef struct SBlockDistInfo { + SSDataBlock* pResBlock; + STsdbReader* pHandle; + SReadHandle readHandle; + uint64_t uid; // table uid +} SBlockDistInfo; + #define SET_REVERSE_SCAN_FLAG(_info) ((_info)->scanFlag = REVERSE_SCAN) #define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC)) -static int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity); -static int32_t buildDbTableInfoBlock(bool sysInfo, const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, - size_t size, const char* dbName); -static char* SYSTABLE_IDX_COLUMN[] = {"table_name", "db_name", "create_time", "columns", - "ttl", "stable_name", "vgroup_id', 'uid", "type"}; - -static char* SYSTABLE_SPECIAL_COL[] = {"db_name", "vgroup_id"}; - -typedef int32_t (*__sys_filte)(void* pMeta, SNode* cond, SArray* result); -typedef int32_t (*__sys_check)(SNode* cond); - -typedef struct { - const char* name; - __sys_check chkFunc; - __sys_filte fltFunc; -} SSTabFltFuncDef; - -typedef struct { - void* pMeta; - void* pVnode; -} SSTabFltArg; typedef struct STableMergeScanExecInfo { SFileBlockLoadRecorder blockRecorder; @@ -71,54 +56,8 @@ typedef struct STableMergeScanSortSourceParam { SSDataBlock* inputBlock; } STableMergeScanSortSourceParam; -static int32_t sysChkFilter__Comm(SNode* pNode); -static int32_t sysChkFilter__DBName(SNode* pNode); -static int32_t sysChkFilter__VgroupId(SNode* pNode); -static int32_t sysChkFilter__TableName(SNode* pNode); -static int32_t sysChkFilter__CreateTime(SNode* pNode); -static int32_t sysChkFilter__Ncolumn(SNode* pNode); -static int32_t sysChkFilter__Ttl(SNode* pNode); -static int32_t sysChkFilter__STableName(SNode* pNode); -static int32_t sysChkFilter__Uid(SNode* pNode); -static int32_t sysChkFilter__Type(SNode* pNode); - -static int32_t sysFilte__DbName(void* arg, SNode* pNode, SArray* result); -static int32_t sysFilte__VgroupId(void* arg, SNode* pNode, SArray* result); -static int32_t sysFilte__TableName(void* arg, SNode* pNode, SArray* result); -static int32_t sysFilte__CreateTime(void* arg, SNode* pNode, SArray* result); -static int32_t sysFilte__Ncolumn(void* arg, SNode* pNode, SArray* result); -static int32_t sysFilte__Ttl(void* arg, SNode* pNode, SArray* result); -static int32_t sysFilte__STableName(void* arg, SNode* pNode, SArray* result); -static int32_t sysFilte__Uid(void* arg, SNode* pNode, SArray* result); -static int32_t sysFilte__Type(void* arg, SNode* pNode, SArray* result); - -const SSTabFltFuncDef filterDict[] = { - {.name = "table_name", .chkFunc = sysChkFilter__TableName, .fltFunc = sysFilte__TableName}, - {.name = "db_name", .chkFunc = sysChkFilter__DBName, .fltFunc = sysFilte__DbName}, - {.name = "create_time", .chkFunc = sysChkFilter__CreateTime, .fltFunc = sysFilte__CreateTime}, - {.name = "columns", .chkFunc = 
sysChkFilter__Ncolumn, .fltFunc = sysFilte__Ncolumn}, - {.name = "ttl", .chkFunc = sysChkFilter__Ttl, .fltFunc = sysFilte__Ttl}, - {.name = "stable_name", .chkFunc = sysChkFilter__STableName, .fltFunc = sysFilte__STableName}, - {.name = "vgroup_id", .chkFunc = sysChkFilter__VgroupId, .fltFunc = sysFilte__VgroupId}, - {.name = "uid", .chkFunc = sysChkFilter__Uid, .fltFunc = sysFilte__Uid}, - {.name = "type", .chkFunc = sysChkFilter__Type, .fltFunc = sysFilte__Type}}; - -#define SYSTAB_FILTER_DICT_SIZE (sizeof(filterDict) / sizeof(filterDict[0])) - -static int32_t optSysTabFilte(void* arg, SNode* cond, SArray* result); -static int32_t optSysTabFilteImpl(void* arg, SNode* cond, SArray* result); -static int32_t optSysCheckOper(SNode* pOpear); -static int32_t optSysMergeRslt(SArray* mRslt, SArray* rslt); - static bool processBlockWithProbability(const SSampleExecInfo* pInfo); -static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo, SMetaReader* smrSuperTable, - SMetaReader* smrChildTable, const char* dbname, const char* tableName, - int32_t* pNumOfRows, const SSDataBlock* dataBlock); - -static void relocateAndFilterSysTagsScanResult(SSysTableScanInfo* pInfo, int32_t numOfRows, SSDataBlock* dataBlock, - SFilterInfo* pFilterInfo); - bool processBlockWithProbability(const SSampleExecInfo* pInfo) { #if 0 if (pInfo->sampleRatio == 1) { @@ -2615,1638 +2554,6 @@ _error: return NULL; } -static void destroySysScanOperator(void* param) { - SSysTableScanInfo* pInfo = (SSysTableScanInfo*)param; - tsem_destroy(&pInfo->ready); - blockDataDestroy(pInfo->pRes); - - const char* name = tNameGetTableName(&pInfo->name); - if (strncasecmp(name, TSDB_INS_TABLE_TABLES, TSDB_TABLE_FNAME_LEN) == 0 || - strncasecmp(name, TSDB_INS_TABLE_TAGS, TSDB_TABLE_FNAME_LEN) == 0 || pInfo->pCur != NULL) { - metaCloseTbCursor(pInfo->pCur); - pInfo->pCur = NULL; - } - if (pInfo->pIdx) { - taosArrayDestroy(pInfo->pIdx->uids); - taosMemoryFree(pInfo->pIdx); - pInfo->pIdx = NULL; - } - - taosArrayDestroy(pInfo->matchInfo.pList); - taosMemoryFreeClear(pInfo->pUser); - - taosMemoryFreeClear(param); -} - -static int32_t getSysTableDbNameColId(const char* pTable) { - // if (0 == strcmp(TSDB_INS_TABLE_INDEXES, pTable)) { - // return 1; - // } - return TSDB_INS_USER_STABLES_DBNAME_COLID; -} - -EDealRes getDBNameFromConditionWalker(SNode* pNode, void* pContext) { - int32_t code = TSDB_CODE_SUCCESS; - ENodeType nType = nodeType(pNode); - - switch (nType) { - case QUERY_NODE_OPERATOR: { - SOperatorNode* node = (SOperatorNode*)pNode; - if (OP_TYPE_EQUAL == node->opType) { - *(int32_t*)pContext = 1; - return DEAL_RES_CONTINUE; - } - - *(int32_t*)pContext = 0; - return DEAL_RES_IGNORE_CHILD; - } - case QUERY_NODE_COLUMN: { - if (1 != *(int32_t*)pContext) { - return DEAL_RES_CONTINUE; - } - - SColumnNode* node = (SColumnNode*)pNode; - if (getSysTableDbNameColId(node->tableName) == node->colId) { - *(int32_t*)pContext = 2; - return DEAL_RES_CONTINUE; - } - - *(int32_t*)pContext = 0; - return DEAL_RES_CONTINUE; - } - case QUERY_NODE_VALUE: { - if (2 != *(int32_t*)pContext) { - return DEAL_RES_CONTINUE; - } - - SValueNode* node = (SValueNode*)pNode; - char* dbName = nodesGetValueFromNode(node); - strncpy(pContext, varDataVal(dbName), varDataLen(dbName)); - *((char*)pContext + varDataLen(dbName)) = 0; - return DEAL_RES_END; // stop walk - } - default: - break; - } - return DEAL_RES_CONTINUE; -} - -static void getDBNameFromCondition(SNode* pCondition, const char* dbName) { - if (NULL == pCondition) { - return; - } - 
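-  // The walker above keeps a tiny state machine in pContext: 1 after an OP_TYPE_EQUAL
-  // operator, 2 once the db_name column beneath it matches, and the value node then
-  // copies the database name into pContext (which doubles as the output buffer) and
-  // ends the walk.
-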
nodesWalkExpr(pCondition, getDBNameFromConditionWalker, (char*)dbName); -} - -static int32_t loadSysTableCallback(void* param, SDataBuf* pMsg, int32_t code) { - SOperatorInfo* operator=(SOperatorInfo*) param; - SSysTableScanInfo* pScanResInfo = (SSysTableScanInfo*)operator->info; - if (TSDB_CODE_SUCCESS == code) { - pScanResInfo->pRsp = pMsg->pData; - - SRetrieveMetaTableRsp* pRsp = pScanResInfo->pRsp; - pRsp->numOfRows = htonl(pRsp->numOfRows); - pRsp->useconds = htobe64(pRsp->useconds); - pRsp->handle = htobe64(pRsp->handle); - pRsp->compLen = htonl(pRsp->compLen); - } else { - operator->pTaskInfo->code = code; - } - - tsem_post(&pScanResInfo->ready); - return TSDB_CODE_SUCCESS; -} - -static SSDataBlock* doFilterResult(SSDataBlock* pDataBlock, SFilterInfo* pFilterInfo) { - if (pFilterInfo == NULL) { - return pDataBlock->info.rows == 0 ? NULL : pDataBlock; - } - - doFilter(pDataBlock, pFilterInfo, NULL); - return pDataBlock->info.rows == 0 ? NULL : pDataBlock; -} - -static SSDataBlock* buildInfoSchemaTableMetaBlock(char* tableName) { - size_t size = 0; - const SSysTableMeta* pMeta = NULL; - getInfosDbMeta(&pMeta, &size); - - int32_t index = 0; - for (int32_t i = 0; i < size; ++i) { - if (strcmp(pMeta[i].name, tableName) == 0) { - index = i; - break; - } - } - - SSDataBlock* pBlock = createDataBlock(); - for (int32_t i = 0; i < pMeta[index].colNum; ++i) { - SColumnInfoData colInfoData = - createColumnInfoData(pMeta[index].schema[i].type, pMeta[index].schema[i].bytes, i + 1); - blockDataAppendColInfo(pBlock, &colInfoData); - } - - return pBlock; -} - -int32_t convertTagDataToStr(char* str, int type, void* buf, int32_t bufSize, int32_t* len) { - int32_t n = 0; - - switch (type) { - case TSDB_DATA_TYPE_NULL: - n = sprintf(str, "null"); - break; - - case TSDB_DATA_TYPE_BOOL: - n = sprintf(str, (*(int8_t*)buf) ? 
"true" : "false"); - break; - - case TSDB_DATA_TYPE_TINYINT: - n = sprintf(str, "%d", *(int8_t*)buf); - break; - - case TSDB_DATA_TYPE_SMALLINT: - n = sprintf(str, "%d", *(int16_t*)buf); - break; - - case TSDB_DATA_TYPE_INT: - n = sprintf(str, "%d", *(int32_t*)buf); - break; - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_TIMESTAMP: - n = sprintf(str, "%" PRId64, *(int64_t*)buf); - break; - - case TSDB_DATA_TYPE_FLOAT: - n = sprintf(str, "%.5f", GET_FLOAT_VAL(buf)); - break; - - case TSDB_DATA_TYPE_DOUBLE: - n = sprintf(str, "%.9f", GET_DOUBLE_VAL(buf)); - break; - - case TSDB_DATA_TYPE_BINARY: - if (bufSize < 0) { - return TSDB_CODE_TSC_INVALID_VALUE; - } - - memcpy(str, buf, bufSize); - n = bufSize; - break; - case TSDB_DATA_TYPE_NCHAR: - if (bufSize < 0) { - return TSDB_CODE_TSC_INVALID_VALUE; - } - - int32_t length = taosUcs4ToMbs((TdUcs4*)buf, bufSize, str); - if (length <= 0) { - return TSDB_CODE_TSC_INVALID_VALUE; - } - n = length; - break; - case TSDB_DATA_TYPE_UTINYINT: - n = sprintf(str, "%u", *(uint8_t*)buf); - break; - - case TSDB_DATA_TYPE_USMALLINT: - n = sprintf(str, "%u", *(uint16_t*)buf); - break; - - case TSDB_DATA_TYPE_UINT: - n = sprintf(str, "%u", *(uint32_t*)buf); - break; - - case TSDB_DATA_TYPE_UBIGINT: - n = sprintf(str, "%" PRIu64, *(uint64_t*)buf); - break; - - default: - return TSDB_CODE_TSC_INVALID_VALUE; - } - - if (len) *len = n; - - return TSDB_CODE_SUCCESS; -} - -static bool sysTableIsOperatorCondOnOneTable(SNode* pCond, char* condTable) { - SOperatorNode* node = (SOperatorNode*)pCond; - if (node->opType == OP_TYPE_EQUAL) { - if (nodeType(node->pLeft) == QUERY_NODE_COLUMN && - strcasecmp(nodesGetNameFromColumnNode(node->pLeft), "table_name") == 0 && - nodeType(node->pRight) == QUERY_NODE_VALUE) { - SValueNode* pValue = (SValueNode*)node->pRight; - if (pValue->node.resType.type == TSDB_DATA_TYPE_NCHAR || pValue->node.resType.type == TSDB_DATA_TYPE_VARCHAR || - pValue->node.resType.type == TSDB_DATA_TYPE_BINARY) { - char* value = nodesGetValueFromNode(pValue); - strncpy(condTable, varDataVal(value), TSDB_TABLE_NAME_LEN); - return true; - } - } - } - return false; -} - -static bool sysTableIsCondOnOneTable(SNode* pCond, char* condTable) { - if (pCond == NULL) { - return false; - } - if (nodeType(pCond) == QUERY_NODE_LOGIC_CONDITION) { - SLogicConditionNode* node = (SLogicConditionNode*)pCond; - if (LOGIC_COND_TYPE_AND == node->condType) { - SNode* pChild = NULL; - FOREACH(pChild, node->pParameterList) { - if (QUERY_NODE_OPERATOR == nodeType(pChild) && sysTableIsOperatorCondOnOneTable(pChild, condTable)) { - return true; - } - } - } - } - - if (QUERY_NODE_OPERATOR == nodeType(pCond)) { - return sysTableIsOperatorCondOnOneTable(pCond, condTable); - } - - return false; -} - -static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) { - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - SSysTableScanInfo* pInfo = pOperator->info; - if (pOperator->status == OP_EXEC_DONE) { - return NULL; - } - - blockDataCleanup(pInfo->pRes); - int32_t numOfRows = 0; - - SSDataBlock* dataBlock = buildInfoSchemaTableMetaBlock(TSDB_INS_TABLE_TAGS); - blockDataEnsureCapacity(dataBlock, pOperator->resultInfo.capacity); - - const char* db = NULL; - int32_t vgId = 0; - vnodeGetInfo(pInfo->readHandle.vnode, &db, &vgId); - - SName sn = {0}; - char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; - tNameFromString(&sn, db, T_NAME_ACCT | T_NAME_DB); - - tNameGetDbName(&sn, varDataVal(dbname)); - varDataSetLen(dbname, strlen(varDataVal(dbname))); - - char 
condTableName[TSDB_TABLE_NAME_LEN] = {0}; - // optimize when sql like where table_name='tablename' and xxx. - if (pInfo->pCondition && sysTableIsCondOnOneTable(pInfo->pCondition, condTableName)) { - char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(tableName, condTableName); - - SMetaReader smrChildTable = {0}; - metaReaderInit(&smrChildTable, pInfo->readHandle.meta, 0); - int32_t code = metaGetTableEntryByName(&smrChildTable, condTableName); - if (code != TSDB_CODE_SUCCESS) { - // terrno has been set by metaGetTableEntryByName, therefore, return directly - return NULL; - } - - if (smrChildTable.me.type != TSDB_CHILD_TABLE) { - metaReaderClear(&smrChildTable); - blockDataDestroy(dataBlock); - pInfo->loadInfo.totalRows = 0; - return NULL; - } - - SMetaReader smrSuperTable = {0}; - metaReaderInit(&smrSuperTable, pInfo->readHandle.meta, META_READER_NOLOCK); - code = metaGetTableEntryByUid(&smrSuperTable, smrChildTable.me.ctbEntry.suid); - if (code != TSDB_CODE_SUCCESS) { - // terrno has been set by metaGetTableEntryByUid - return NULL; - } - - sysTableUserTagsFillOneTableTags(pInfo, &smrSuperTable, &smrChildTable, dbname, tableName, &numOfRows, dataBlock); - metaReaderClear(&smrSuperTable); - metaReaderClear(&smrChildTable); - if (numOfRows > 0) { - relocateAndFilterSysTagsScanResult(pInfo, numOfRows, dataBlock, pOperator->exprSupp.pFilterInfo); - numOfRows = 0; - } - blockDataDestroy(dataBlock); - pInfo->loadInfo.totalRows += pInfo->pRes->info.rows; - setOperatorCompleted(pOperator); - return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes; - } - - int32_t ret = 0; - if (pInfo->pCur == NULL) { - pInfo->pCur = metaOpenTbCursor(pInfo->readHandle.meta); - } - - while ((ret = metaTbCursorNext(pInfo->pCur)) == 0) { - if (pInfo->pCur->mr.me.type != TSDB_CHILD_TABLE) { - continue; - } - - char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(tableName, pInfo->pCur->mr.me.name); - - SMetaReader smrSuperTable = {0}; - metaReaderInit(&smrSuperTable, pInfo->readHandle.meta, 0); - uint64_t suid = pInfo->pCur->mr.me.ctbEntry.suid; - int32_t code = metaGetTableEntryByUid(&smrSuperTable, suid); - if (code != TSDB_CODE_SUCCESS) { - qError("failed to get super table meta, uid:0x%" PRIx64 ", code:%s, %s", suid, tstrerror(terrno), - GET_TASKID(pTaskInfo)); - metaReaderClear(&smrSuperTable); - metaCloseTbCursor(pInfo->pCur); - pInfo->pCur = NULL; - T_LONG_JMP(pTaskInfo->env, terrno); - } - - sysTableUserTagsFillOneTableTags(pInfo, &smrSuperTable, &pInfo->pCur->mr, dbname, tableName, &numOfRows, dataBlock); - - metaReaderClear(&smrSuperTable); - - if (numOfRows >= pOperator->resultInfo.capacity) { - relocateAndFilterSysTagsScanResult(pInfo, numOfRows, dataBlock, pOperator->exprSupp.pFilterInfo); - numOfRows = 0; - - if (pInfo->pRes->info.rows > 0) { - break; - } - } - } - - if (numOfRows > 0) { - relocateAndFilterSysTagsScanResult(pInfo, numOfRows, dataBlock, pOperator->exprSupp.pFilterInfo); - numOfRows = 0; - } - - blockDataDestroy(dataBlock); - if (ret != 0) { - metaCloseTbCursor(pInfo->pCur); - pInfo->pCur = NULL; - setOperatorCompleted(pOperator); - } - - pInfo->loadInfo.totalRows += pInfo->pRes->info.rows; - return (pInfo->pRes->info.rows == 0) ? 
NULL : pInfo->pRes; -} - -void relocateAndFilterSysTagsScanResult(SSysTableScanInfo* pInfo, int32_t numOfRows, SSDataBlock* dataBlock, - SFilterInfo* pFilterInfo) { - dataBlock->info.rows = numOfRows; - pInfo->pRes->info.rows = numOfRows; - - relocateColumnData(pInfo->pRes, pInfo->matchInfo.pList, dataBlock->pDataBlock, false); - doFilterResult(pInfo->pRes, pFilterInfo); - blockDataCleanup(dataBlock); -} - -static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo, SMetaReader* smrSuperTable, - SMetaReader* smrChildTable, const char* dbname, const char* tableName, - int32_t* pNumOfRows, const SSDataBlock* dataBlock) { - char stableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(stableName, (*smrSuperTable).me.name); - - int32_t numOfRows = *pNumOfRows; - - int32_t numOfTags = (*smrSuperTable).me.stbEntry.schemaTag.nCols; - for (int32_t i = 0; i < numOfTags; ++i) { - SColumnInfoData* pColInfoData = NULL; - - // table name - pColInfoData = taosArrayGet(dataBlock->pDataBlock, 0); - colDataAppend(pColInfoData, numOfRows, tableName, false); - - // database name - pColInfoData = taosArrayGet(dataBlock->pDataBlock, 1); - colDataAppend(pColInfoData, numOfRows, dbname, false); - - // super table name - pColInfoData = taosArrayGet(dataBlock->pDataBlock, 2); - colDataAppend(pColInfoData, numOfRows, stableName, false); - - // tag name - char tagName[TSDB_COL_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(tagName, (*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].name); - pColInfoData = taosArrayGet(dataBlock->pDataBlock, 3); - colDataAppend(pColInfoData, numOfRows, tagName, false); - - // tag type - int8_t tagType = (*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].type; - pColInfoData = taosArrayGet(dataBlock->pDataBlock, 4); - char tagTypeStr[VARSTR_HEADER_SIZE + 32]; - int tagTypeLen = sprintf(varDataVal(tagTypeStr), "%s", tDataTypes[tagType].name); - if (tagType == TSDB_DATA_TYPE_VARCHAR) { - tagTypeLen += sprintf(varDataVal(tagTypeStr) + tagTypeLen, "(%d)", - (int32_t)((*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].bytes - VARSTR_HEADER_SIZE)); - } else if (tagType == TSDB_DATA_TYPE_NCHAR) { - tagTypeLen += sprintf( - varDataVal(tagTypeStr) + tagTypeLen, "(%d)", - (int32_t)(((*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); - } - varDataSetLen(tagTypeStr, tagTypeLen); - colDataAppend(pColInfoData, numOfRows, (char*)tagTypeStr, false); - - STagVal tagVal = {0}; - tagVal.cid = (*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].colId; - char* tagData = NULL; - uint32_t tagLen = 0; - - if (tagType == TSDB_DATA_TYPE_JSON) { - tagData = (char*)smrChildTable->me.ctbEntry.pTags; - } else { - bool exist = tTagGet((STag*)smrChildTable->me.ctbEntry.pTags, &tagVal); - if (exist) { - if (IS_VAR_DATA_TYPE(tagType)) { - tagData = (char*)tagVal.pData; - tagLen = tagVal.nData; - } else { - tagData = (char*)&tagVal.i64; - tagLen = tDataTypes[tagType].bytes; - } - } - } - - char* tagVarChar = NULL; - if (tagData != NULL) { - if (tagType == TSDB_DATA_TYPE_JSON) { - char* tagJson = parseTagDatatoJson(tagData); - tagVarChar = taosMemoryMalloc(strlen(tagJson) + VARSTR_HEADER_SIZE); - memcpy(varDataVal(tagVarChar), tagJson, strlen(tagJson)); - varDataSetLen(tagVarChar, strlen(tagJson)); - taosMemoryFree(tagJson); - } else { - int32_t bufSize = IS_VAR_DATA_TYPE(tagType) ? 
(tagLen + VARSTR_HEADER_SIZE) - : (3 + DBL_MANT_DIG - DBL_MIN_EXP + VARSTR_HEADER_SIZE); - tagVarChar = taosMemoryMalloc(bufSize); - int32_t len = -1; - convertTagDataToStr(varDataVal(tagVarChar), tagType, tagData, tagLen, &len); - varDataSetLen(tagVarChar, len); - } - } - pColInfoData = taosArrayGet(dataBlock->pDataBlock, 5); - colDataAppend(pColInfoData, numOfRows, tagVarChar, - (tagData == NULL) || (tagType == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(tagData))); - taosMemoryFree(tagVarChar); - ++numOfRows; - } - - *pNumOfRows = numOfRows; - - return TSDB_CODE_SUCCESS; -} - -typedef int (*__optSysFilter)(void* a, void* b, int16_t dtype); - -int optSysDoCompare(__compar_fn_t func, int8_t comparType, void* a, void* b) { - int32_t cmp = func(a, b); - switch (comparType) { - case OP_TYPE_LOWER_THAN: - if (cmp < 0) return 0; - break; - case OP_TYPE_LOWER_EQUAL: { - if (cmp <= 0) return 0; - break; - } - case OP_TYPE_GREATER_THAN: { - if (cmp > 0) return 0; - break; - } - case OP_TYPE_GREATER_EQUAL: { - if (cmp >= 0) return 0; - break; - } - case OP_TYPE_EQUAL: { - if (cmp == 0) return 0; - break; - } - default: - return -1; - } - return cmp; -} - -static int optSysFilterFuncImpl__LowerThan(void* a, void* b, int16_t dtype) { - __compar_fn_t func = getComparFunc(dtype, 0); - return optSysDoCompare(func, OP_TYPE_LOWER_THAN, a, b); -} -static int optSysFilterFuncImpl__LowerEqual(void* a, void* b, int16_t dtype) { - __compar_fn_t func = getComparFunc(dtype, 0); - return optSysDoCompare(func, OP_TYPE_LOWER_EQUAL, a, b); -} -static int optSysFilterFuncImpl__GreaterThan(void* a, void* b, int16_t dtype) { - __compar_fn_t func = getComparFunc(dtype, 0); - return optSysDoCompare(func, OP_TYPE_GREATER_THAN, a, b); -} -static int optSysFilterFuncImpl__GreaterEqual(void* a, void* b, int16_t dtype) { - __compar_fn_t func = getComparFunc(dtype, 0); - return optSysDoCompare(func, OP_TYPE_GREATER_EQUAL, a, b); -} -static int optSysFilterFuncImpl__Equal(void* a, void* b, int16_t dtype) { - __compar_fn_t func = getComparFunc(dtype, 0); - return optSysDoCompare(func, OP_TYPE_EQUAL, a, b); -} - -static int optSysFilterFuncImpl__NoEqual(void* a, void* b, int16_t dtype) { - __compar_fn_t func = getComparFunc(dtype, 0); - return optSysDoCompare(func, OP_TYPE_NOT_EQUAL, a, b); -} -static __optSysFilter optSysGetFilterFunc(int32_t ctype, bool* reverse) { - if (ctype == OP_TYPE_LOWER_EQUAL || ctype == OP_TYPE_LOWER_THAN) { - *reverse = true; - } - if (ctype == OP_TYPE_LOWER_THAN) - return optSysFilterFuncImpl__LowerThan; - else if (ctype == OP_TYPE_LOWER_EQUAL) - return optSysFilterFuncImpl__LowerEqual; - else if (ctype == OP_TYPE_GREATER_THAN) - return optSysFilterFuncImpl__GreaterThan; - else if (ctype == OP_TYPE_GREATER_EQUAL) - return optSysFilterFuncImpl__GreaterEqual; - else if (ctype == OP_TYPE_EQUAL) - return optSysFilterFuncImpl__Equal; - else if (ctype == OP_TYPE_NOT_EQUAL) - return optSysFilterFuncImpl__NoEqual; - return NULL; -} -static int32_t sysFilte__DbName(void* arg, SNode* pNode, SArray* result) { - void* pVnode = ((SSTabFltArg*)arg)->pVnode; - - const char* db = NULL; - vnodeGetInfo(pVnode, &db, NULL); - - SName sn = {0}; - char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; - tNameFromString(&sn, db, T_NAME_ACCT | T_NAME_DB); - - tNameGetDbName(&sn, varDataVal(dbname)); - varDataSetLen(dbname, strlen(varDataVal(dbname))); - - SOperatorNode* pOper = (SOperatorNode*)pNode; - SValueNode* pVal = (SValueNode*)pOper->pRight; - - bool reverse = false; - __optSysFilter func = 
optSysGetFilterFunc(pOper->opType, &reverse); - if (func == NULL) return -1; - - int ret = func(dbname, pVal->datum.p, TSDB_DATA_TYPE_VARCHAR); - if (ret == 0) return 0; - - return -2; -} -static int32_t sysFilte__VgroupId(void* arg, SNode* pNode, SArray* result) { - void* pVnode = ((SSTabFltArg*)arg)->pVnode; - - int64_t vgId = 0; - vnodeGetInfo(pVnode, NULL, (int32_t*)&vgId); - - SOperatorNode* pOper = (SOperatorNode*)pNode; - SValueNode* pVal = (SValueNode*)pOper->pRight; - - bool reverse = false; - - __optSysFilter func = optSysGetFilterFunc(pOper->opType, &reverse); - if (func == NULL) return -1; - - int ret = func(&vgId, &pVal->datum.i, TSDB_DATA_TYPE_BIGINT); - if (ret == 0) return 0; - - return -1; -} -static int32_t sysFilte__TableName(void* arg, SNode* pNode, SArray* result) { - void* pMeta = ((SSTabFltArg*)arg)->pMeta; - - SOperatorNode* pOper = (SOperatorNode*)pNode; - SValueNode* pVal = (SValueNode*)pOper->pRight; - bool reverse = false; - - __optSysFilter func = optSysGetFilterFunc(pOper->opType, &reverse); - if (func == NULL) return -1; - - SMetaFltParam param = {.suid = 0, - .cid = 0, - .type = TSDB_DATA_TYPE_VARCHAR, - .val = pVal->datum.p, - .reverse = reverse, - .filterFunc = func}; - return -1; -} - -static int32_t sysFilte__CreateTime(void* arg, SNode* pNode, SArray* result) { - void* pMeta = ((SSTabFltArg*)arg)->pMeta; - - SOperatorNode* pOper = (SOperatorNode*)pNode; - SValueNode* pVal = (SValueNode*)pOper->pRight; - bool reverse = false; - - __optSysFilter func = optSysGetFilterFunc(pOper->opType, &reverse); - if (func == NULL) return -1; - - SMetaFltParam param = {.suid = 0, - .cid = 0, - .type = TSDB_DATA_TYPE_BIGINT, - .val = &pVal->datum.i, - .reverse = reverse, - .filterFunc = func}; - - int32_t ret = metaFilterCreateTime(pMeta, ¶m, result); - return ret; -} -static int32_t sysFilte__Ncolumn(void* arg, SNode* pNode, SArray* result) { - void* pMeta = ((SSTabFltArg*)arg)->pMeta; - - SOperatorNode* pOper = (SOperatorNode*)pNode; - SValueNode* pVal = (SValueNode*)pOper->pRight; - bool reverse = false; - - __optSysFilter func = optSysGetFilterFunc(pOper->opType, &reverse); - if (func == NULL) return -1; - return -1; -} - -static int32_t sysFilte__Ttl(void* arg, SNode* pNode, SArray* result) { - void* pMeta = ((SSTabFltArg*)arg)->pMeta; - - SOperatorNode* pOper = (SOperatorNode*)pNode; - SValueNode* pVal = (SValueNode*)pOper->pRight; - bool reverse = false; - - __optSysFilter func = optSysGetFilterFunc(pOper->opType, &reverse); - if (func == NULL) return -1; - return -1; -} -static int32_t sysFilte__STableName(void* arg, SNode* pNode, SArray* result) { - void* pMeta = ((SSTabFltArg*)arg)->pMeta; - - SOperatorNode* pOper = (SOperatorNode*)pNode; - SValueNode* pVal = (SValueNode*)pOper->pRight; - bool reverse = false; - - __optSysFilter func = optSysGetFilterFunc(pOper->opType, &reverse); - if (func == NULL) return -1; - return -1; -} -static int32_t sysFilte__Uid(void* arg, SNode* pNode, SArray* result) { - void* pMeta = ((SSTabFltArg*)arg)->pMeta; - - SOperatorNode* pOper = (SOperatorNode*)pNode; - SValueNode* pVal = (SValueNode*)pOper->pRight; - bool reverse = false; - - __optSysFilter func = optSysGetFilterFunc(pOper->opType, &reverse); - if (func == NULL) return -1; - return -1; -} -static int32_t sysFilte__Type(void* arg, SNode* pNode, SArray* result) { - void* pMeta = ((SSTabFltArg*)arg)->pMeta; - - SOperatorNode* pOper = (SOperatorNode*)pNode; - SValueNode* pVal = (SValueNode*)pOper->pRight; - bool reverse = false; - - __optSysFilter func = 
optSysGetFilterFunc(pOper->opType, &reverse); - if (func == NULL) return -1; - return -1; -} -static int32_t sysChkFilter__Comm(SNode* pNode) { - // impl - SOperatorNode* pOper = (SOperatorNode*)pNode; - EOperatorType opType = pOper->opType; - if (opType != OP_TYPE_EQUAL && opType != OP_TYPE_LOWER_EQUAL && opType != OP_TYPE_LOWER_THAN && - opType != OP_TYPE_GREATER_EQUAL && opType != OP_TYPE_GREATER_THAN) { - return -1; - } - return 0; -} - -static int32_t sysChkFilter__DBName(SNode* pNode) { - SOperatorNode* pOper = (SOperatorNode*)pNode; - - if (pOper->opType != OP_TYPE_EQUAL && pOper->opType != OP_TYPE_NOT_EQUAL) { - return -1; - } - - SValueNode* pVal = (SValueNode*)pOper->pRight; - if (!IS_STR_DATA_TYPE(pVal->node.resType.type)) { - return -1; - } - - return 0; -} -static int32_t sysChkFilter__VgroupId(SNode* pNode) { - SOperatorNode* pOper = (SOperatorNode*)pNode; - SValueNode* pVal = (SValueNode*)pOper->pRight; - if (!IS_INTEGER_TYPE(pVal->node.resType.type)) { - return -1; - } - return sysChkFilter__Comm(pNode); -} -static int32_t sysChkFilter__TableName(SNode* pNode) { - SOperatorNode* pOper = (SOperatorNode*)pNode; - SValueNode* pVal = (SValueNode*)pOper->pRight; - if (!IS_STR_DATA_TYPE(pVal->node.resType.type)) { - return -1; - } - return sysChkFilter__Comm(pNode); -} -static int32_t sysChkFilter__CreateTime(SNode* pNode) { - SOperatorNode* pOper = (SOperatorNode*)pNode; - SValueNode* pVal = (SValueNode*)pOper->pRight; - - if (!IS_TIMESTAMP_TYPE(pVal->node.resType.type)) { - return -1; - } - return sysChkFilter__Comm(pNode); -} - -static int32_t sysChkFilter__Ncolumn(SNode* pNode) { - SOperatorNode* pOper = (SOperatorNode*)pNode; - SValueNode* pVal = (SValueNode*)pOper->pRight; - - if (!IS_INTEGER_TYPE(pVal->node.resType.type)) { - return -1; - } - return sysChkFilter__Comm(pNode); -} -static int32_t sysChkFilter__Ttl(SNode* pNode) { - SOperatorNode* pOper = (SOperatorNode*)pNode; - SValueNode* pVal = (SValueNode*)pOper->pRight; - - if (!IS_INTEGER_TYPE(pVal->node.resType.type)) { - return -1; - } - return sysChkFilter__Comm(pNode); -} -static int32_t sysChkFilter__STableName(SNode* pNode) { - SOperatorNode* pOper = (SOperatorNode*)pNode; - SValueNode* pVal = (SValueNode*)pOper->pRight; - if (!IS_STR_DATA_TYPE(pVal->node.resType.type)) { - return -1; - } - return sysChkFilter__Comm(pNode); -} -static int32_t sysChkFilter__Uid(SNode* pNode) { - SOperatorNode* pOper = (SOperatorNode*)pNode; - SValueNode* pVal = (SValueNode*)pOper->pRight; - if (!IS_INTEGER_TYPE(pVal->node.resType.type)) { - return -1; - } - return sysChkFilter__Comm(pNode); -} -static int32_t sysChkFilter__Type(SNode* pNode) { - SOperatorNode* pOper = (SOperatorNode*)pNode; - SValueNode* pVal = (SValueNode*)pOper->pRight; - if (!IS_INTEGER_TYPE(pVal->node.resType.type)) { - return -1; - } - return sysChkFilter__Comm(pNode); -} -static int32_t optSysTabFilteImpl(void* arg, SNode* cond, SArray* result) { - if (optSysCheckOper(cond) != 0) return -1; - - SOperatorNode* pNode = (SOperatorNode*)cond; - - int8_t i = 0; - for (; i < SYSTAB_FILTER_DICT_SIZE; i++) { - if (strcmp(filterDict[i].name, ((SColumnNode*)(pNode->pLeft))->colName) == 0) { - break; - } - } - if (i >= SYSTAB_FILTER_DICT_SIZE) return -1; - - if (filterDict[i].chkFunc(cond) != 0) return -1; - - return filterDict[i].fltFunc(arg, cond, result); -} - -static int32_t optSysCheckOper(SNode* pOpear) { - if (nodeType(pOpear) != QUERY_NODE_OPERATOR) return -1; - - SOperatorNode* pOper = (SOperatorNode*)pOpear; - if (pOper->opType < OP_TYPE_GREATER_THAN || 
pOper->opType > OP_TYPE_NOT_EQUAL) { - return -1; - } - - if (nodeType(pOper->pLeft) != QUERY_NODE_COLUMN || nodeType(pOper->pRight) != QUERY_NODE_VALUE) { - return -1; - } - return 0; -} - -static int tableUidCompare(const void* a, const void* b) { - int64_t u1 = *(int64_t*)a; - int64_t u2 = *(int64_t*)b; - if (u1 == u2) { - return 0; - } - return u1 < u2 ? -1 : 1; -} - -typedef struct MergeIndex { - int idx; - int len; -} MergeIndex; - -static FORCE_INLINE int optSysBinarySearch(SArray* arr, int s, int e, uint64_t k) { - uint64_t v; - int32_t m; - while (s <= e) { - m = s + (e - s) / 2; - v = *(uint64_t*)taosArrayGet(arr, m); - if (v >= k) { - e = m - 1; - } else { - s = m + 1; - } - } - return s; -} - -void optSysIntersection(SArray* in, SArray* out) { - int32_t sz = (int32_t)taosArrayGetSize(in); - if (sz <= 0) { - return; - } - MergeIndex* mi = taosMemoryCalloc(sz, sizeof(MergeIndex)); - for (int i = 0; i < sz; i++) { - SArray* t = taosArrayGetP(in, i); - mi[i].len = (int32_t)taosArrayGetSize(t); - mi[i].idx = 0; - } - - SArray* base = taosArrayGetP(in, 0); - for (int i = 0; i < taosArrayGetSize(base); i++) { - uint64_t tgt = *(uint64_t*)taosArrayGet(base, i); - bool has = true; - for (int j = 1; j < taosArrayGetSize(in); j++) { - SArray* oth = taosArrayGetP(in, j); - int mid = optSysBinarySearch(oth, mi[j].idx, mi[j].len - 1, tgt); - if (mid >= 0 && mid < mi[j].len) { - uint64_t val = *(uint64_t*)taosArrayGet(oth, mid); - has = (val == tgt ? true : false); - mi[j].idx = mid; - } else { - has = false; - } - } - if (has == true) { - taosArrayPush(out, &tgt); - } - } - taosMemoryFreeClear(mi); -} - -static int32_t optSysMergeRslt(SArray* mRslt, SArray* rslt) { - // TODO, find comm mem from mRslt - for (int i = 0; i < taosArrayGetSize(mRslt); i++) { - SArray* arslt = taosArrayGetP(mRslt, i); - taosArraySort(arslt, tableUidCompare); - } - optSysIntersection(mRslt, rslt); - return 0; -} - -static int32_t optSysSpecialColumn(SNode* cond) { - SOperatorNode* pOper = (SOperatorNode*)cond; - SColumnNode* pCol = (SColumnNode*)pOper->pLeft; - for (int i = 0; i < sizeof(SYSTABLE_SPECIAL_COL) / sizeof(SYSTABLE_SPECIAL_COL[0]); i++) { - if (0 == strcmp(pCol->colName, SYSTABLE_SPECIAL_COL[i])) { - return 1; - } - } - return 0; -} - -static int32_t optSysTabFilte(void* arg, SNode* cond, SArray* result) { - int ret = -1; - if (nodeType(cond) == QUERY_NODE_OPERATOR) { - ret = optSysTabFilteImpl(arg, cond, result); - if (ret == 0) { - SOperatorNode* pOper = (SOperatorNode*)cond; - SColumnNode* pCol = (SColumnNode*)pOper->pLeft; - if (0 == strcmp(pCol->colName, "create_time")) { - return 0; - } - return -1; - } - return ret; - } - - if (nodeType(cond) != QUERY_NODE_LOGIC_CONDITION || ((SLogicConditionNode*)cond)->condType != LOGIC_COND_TYPE_AND) { - return ret; - } - - SLogicConditionNode* pNode = (SLogicConditionNode*)cond; - SNodeList* pList = (SNodeList*)pNode->pParameterList; - - int32_t len = LIST_LENGTH(pList); - - bool hasIdx = false; - bool hasRslt = true; - SArray* mRslt = taosArrayInit(len, POINTER_BYTES); - - SListCell* cell = pList->pHead; - for (int i = 0; i < len; i++) { - if (cell == NULL) break; - - SArray* aRslt = taosArrayInit(16, sizeof(int64_t)); - - ret = optSysTabFilteImpl(arg, cell->pNode, aRslt); - if (ret == 0) { - // has index - hasIdx = true; - if (optSysSpecialColumn(cell->pNode) == 0) { - taosArrayPush(mRslt, &aRslt); - } else { - // db_name/vgroup not result - taosArrayDestroy(aRslt); - } - } else if (ret == -2) { - // current vg - hasIdx = true; - hasRslt = false; - 
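-      // a -2 here appears to mean the db_name predicate ruled this vnode out
-      // entirely, so no uid can match and the scan can return an empty result.
-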
taosArrayDestroy(aRslt); - break; - } else { - taosArrayDestroy(aRslt); - } - cell = cell->pNext; - } - if (hasRslt && hasIdx) { - optSysMergeRslt(mRslt, result); - } - - for (int i = 0; i < taosArrayGetSize(mRslt); i++) { - SArray* aRslt = taosArrayGetP(mRslt, i); - taosArrayDestroy(aRslt); - } - taosArrayDestroy(mRslt); - if (hasRslt == false) { - return -2; - } - if (hasRslt && hasIdx) { - cell = pList->pHead; - for (int i = 0; i < len; i++) { - if (cell == NULL) break; - SOperatorNode* pOper = (SOperatorNode*)cell->pNode; - SColumnNode* pCol = (SColumnNode*)pOper->pLeft; - if (0 == strcmp(pCol->colName, "create_time")) { - return 0; - } - cell = cell->pNext; - } - return -1; - } - return -1; -} - -static SSDataBlock* sysTableBuildUserTablesByUids(SOperatorInfo* pOperator) { - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - SSysTableScanInfo* pInfo = pOperator->info; - - SSysTableIndex* pIdx = pInfo->pIdx; - blockDataCleanup(pInfo->pRes); - int32_t numOfRows = 0; - - int ret = 0; - - const char* db = NULL; - int32_t vgId = 0; - vnodeGetInfo(pInfo->readHandle.vnode, &db, &vgId); - - SName sn = {0}; - char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; - tNameFromString(&sn, db, T_NAME_ACCT | T_NAME_DB); - - tNameGetDbName(&sn, varDataVal(dbname)); - varDataSetLen(dbname, strlen(varDataVal(dbname))); - - SSDataBlock* p = buildInfoSchemaTableMetaBlock(TSDB_INS_TABLE_TABLES); - blockDataEnsureCapacity(p, pOperator->resultInfo.capacity); - - char n[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; - int32_t i = pIdx->lastIdx; - for (; i < taosArrayGetSize(pIdx->uids); i++) { - tb_uid_t* uid = taosArrayGet(pIdx->uids, i); - - SMetaReader mr = {0}; - metaReaderInit(&mr, pInfo->readHandle.meta, 0); - ret = metaGetTableEntryByUid(&mr, *uid); - if (ret < 0) { - metaReaderClear(&mr); - continue; - } - STR_TO_VARSTR(n, mr.me.name); - - // table name - SColumnInfoData* pColInfoData = taosArrayGet(p->pDataBlock, 0); - colDataAppend(pColInfoData, numOfRows, n, false); - - // database name - pColInfoData = taosArrayGet(p->pDataBlock, 1); - colDataAppend(pColInfoData, numOfRows, dbname, false); - - // vgId - pColInfoData = taosArrayGet(p->pDataBlock, 6); - colDataAppend(pColInfoData, numOfRows, (char*)&vgId, false); - - int32_t tableType = mr.me.type; - if (tableType == TSDB_CHILD_TABLE) { - // create time - int64_t ts = mr.me.ctbEntry.ctime; - pColInfoData = taosArrayGet(p->pDataBlock, 2); - colDataAppend(pColInfoData, numOfRows, (char*)&ts, false); - - SMetaReader mr1 = {0}; - metaReaderInit(&mr1, pInfo->readHandle.meta, META_READER_NOLOCK); - - int64_t suid = mr.me.ctbEntry.suid; - int32_t code = metaGetTableEntryByUid(&mr1, suid); - if (code != TSDB_CODE_SUCCESS) { - qError("failed to get super table meta, cname:%s, suid:0x%" PRIx64 ", code:%s, %s", pInfo->pCur->mr.me.name, - suid, tstrerror(terrno), GET_TASKID(pTaskInfo)); - metaReaderClear(&mr1); - metaReaderClear(&mr); - T_LONG_JMP(pTaskInfo->env, terrno); - } - pColInfoData = taosArrayGet(p->pDataBlock, 3); - colDataAppend(pColInfoData, numOfRows, (char*)&mr1.me.stbEntry.schemaRow.nCols, false); - - // super table name - STR_TO_VARSTR(n, mr1.me.name); - pColInfoData = taosArrayGet(p->pDataBlock, 4); - colDataAppend(pColInfoData, numOfRows, n, false); - metaReaderClear(&mr1); - - // table comment - pColInfoData = taosArrayGet(p->pDataBlock, 8); - if (mr.me.ctbEntry.commentLen > 0) { - char comment[TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(comment, mr.me.ctbEntry.comment); - colDataAppend(pColInfoData, numOfRows, 
comment, false); - } else if (mr.me.ctbEntry.commentLen == 0) { - char comment[VARSTR_HEADER_SIZE + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(comment, ""); - colDataAppend(pColInfoData, numOfRows, comment, false); - } else { - colDataAppendNULL(pColInfoData, numOfRows); - } - - // uid - pColInfoData = taosArrayGet(p->pDataBlock, 5); - colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.uid, false); - - // ttl - pColInfoData = taosArrayGet(p->pDataBlock, 7); - colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.ctbEntry.ttlDays, false); - - STR_TO_VARSTR(n, "CHILD_TABLE"); - - } else if (tableType == TSDB_NORMAL_TABLE) { - // create time - pColInfoData = taosArrayGet(p->pDataBlock, 2); - colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.ctime, false); - - // number of columns - pColInfoData = taosArrayGet(p->pDataBlock, 3); - colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.schemaRow.nCols, false); - - // super table name - pColInfoData = taosArrayGet(p->pDataBlock, 4); - colDataAppendNULL(pColInfoData, numOfRows); - - // table comment - pColInfoData = taosArrayGet(p->pDataBlock, 8); - if (mr.me.ntbEntry.commentLen > 0) { - char comment[TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(comment, mr.me.ntbEntry.comment); - colDataAppend(pColInfoData, numOfRows, comment, false); - } else if (mr.me.ntbEntry.commentLen == 0) { - char comment[VARSTR_HEADER_SIZE + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(comment, ""); - colDataAppend(pColInfoData, numOfRows, comment, false); - } else { - colDataAppendNULL(pColInfoData, numOfRows); - } - - // uid - pColInfoData = taosArrayGet(p->pDataBlock, 5); - colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.uid, false); - - // ttl - pColInfoData = taosArrayGet(p->pDataBlock, 7); - colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.ntbEntry.ttlDays, false); - - STR_TO_VARSTR(n, "NORMAL_TABLE"); - // impl later - } - - metaReaderClear(&mr); - - pColInfoData = taosArrayGet(p->pDataBlock, 9); - colDataAppend(pColInfoData, numOfRows, n, false); - - if (++numOfRows >= pOperator->resultInfo.capacity) { - p->info.rows = numOfRows; - pInfo->pRes->info.rows = numOfRows; - - relocateColumnData(pInfo->pRes, pInfo->matchInfo.pList, p->pDataBlock, false); - doFilterResult(pInfo->pRes, pOperator->exprSupp.pFilterInfo); - - blockDataCleanup(p); - numOfRows = 0; - - if (pInfo->pRes->info.rows > 0) { - break; - } - } - } - - if (numOfRows > 0) { - p->info.rows = numOfRows; - pInfo->pRes->info.rows = numOfRows; - - relocateColumnData(pInfo->pRes, pInfo->matchInfo.pList, p->pDataBlock, false); - doFilterResult(pInfo->pRes, pOperator->exprSupp.pFilterInfo); - - blockDataCleanup(p); - numOfRows = 0; - } - - if (i >= taosArrayGetSize(pIdx->uids)) { - setOperatorCompleted(pOperator); - } else { - pIdx->lastIdx = i + 1; - } - - blockDataDestroy(p); - - pInfo->loadInfo.totalRows += pInfo->pRes->info.rows; - return (pInfo->pRes->info.rows == 0) ? 
NULL : pInfo->pRes; -} - -static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) { - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - - SSysTableScanInfo* pInfo = pOperator->info; - if (pInfo->pCur == NULL) { - pInfo->pCur = metaOpenTbCursor(pInfo->readHandle.meta); - } - - blockDataCleanup(pInfo->pRes); - int32_t numOfRows = 0; - - const char* db = NULL; - int32_t vgId = 0; - vnodeGetInfo(pInfo->readHandle.vnode, &db, &vgId); - - SName sn = {0}; - char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; - tNameFromString(&sn, db, T_NAME_ACCT | T_NAME_DB); - - tNameGetDbName(&sn, varDataVal(dbname)); - varDataSetLen(dbname, strlen(varDataVal(dbname))); - - SSDataBlock* p = buildInfoSchemaTableMetaBlock(TSDB_INS_TABLE_TABLES); - blockDataEnsureCapacity(p, pOperator->resultInfo.capacity); - - char n[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; - - int32_t ret = 0; - while ((ret = metaTbCursorNext(pInfo->pCur)) == 0) { - STR_TO_VARSTR(n, pInfo->pCur->mr.me.name); - - // table name - SColumnInfoData* pColInfoData = taosArrayGet(p->pDataBlock, 0); - colDataAppend(pColInfoData, numOfRows, n, false); - - // database name - pColInfoData = taosArrayGet(p->pDataBlock, 1); - colDataAppend(pColInfoData, numOfRows, dbname, false); - - // vgId - pColInfoData = taosArrayGet(p->pDataBlock, 6); - colDataAppend(pColInfoData, numOfRows, (char*)&vgId, false); - - int32_t tableType = pInfo->pCur->mr.me.type; - if (tableType == TSDB_CHILD_TABLE) { - // create time - int64_t ts = pInfo->pCur->mr.me.ctbEntry.ctime; - pColInfoData = taosArrayGet(p->pDataBlock, 2); - colDataAppend(pColInfoData, numOfRows, (char*)&ts, false); - - SMetaReader mr = {0}; - metaReaderInit(&mr, pInfo->readHandle.meta, META_READER_NOLOCK); - - uint64_t suid = pInfo->pCur->mr.me.ctbEntry.suid; - int32_t code = metaGetTableEntryByUid(&mr, suid); - if (code != TSDB_CODE_SUCCESS) { - qError("failed to get super table meta, cname:%s, suid:0x%" PRIx64 ", code:%s, %s", pInfo->pCur->mr.me.name, - suid, tstrerror(terrno), GET_TASKID(pTaskInfo)); - metaReaderClear(&mr); - metaCloseTbCursor(pInfo->pCur); - pInfo->pCur = NULL; - T_LONG_JMP(pTaskInfo->env, terrno); - } - - // number of columns - pColInfoData = taosArrayGet(p->pDataBlock, 3); - colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.stbEntry.schemaRow.nCols, false); - - // super table name - STR_TO_VARSTR(n, mr.me.name); - pColInfoData = taosArrayGet(p->pDataBlock, 4); - colDataAppend(pColInfoData, numOfRows, n, false); - metaReaderClear(&mr); - - // table comment - pColInfoData = taosArrayGet(p->pDataBlock, 8); - if (pInfo->pCur->mr.me.ctbEntry.commentLen > 0) { - char comment[TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(comment, pInfo->pCur->mr.me.ctbEntry.comment); - colDataAppend(pColInfoData, numOfRows, comment, false); - } else if (pInfo->pCur->mr.me.ctbEntry.commentLen == 0) { - char comment[VARSTR_HEADER_SIZE + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(comment, ""); - colDataAppend(pColInfoData, numOfRows, comment, false); - } else { - colDataAppendNULL(pColInfoData, numOfRows); - } - - // uid - pColInfoData = taosArrayGet(p->pDataBlock, 5); - colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.uid, false); - - // ttl - pColInfoData = taosArrayGet(p->pDataBlock, 7); - colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ctbEntry.ttlDays, false); - - STR_TO_VARSTR(n, "CHILD_TABLE"); - } else if (tableType == TSDB_NORMAL_TABLE) { - // create time - pColInfoData = taosArrayGet(p->pDataBlock, 2); - 
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.ctime, false); - - // number of columns - pColInfoData = taosArrayGet(p->pDataBlock, 3); - colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.schemaRow.nCols, false); - - // super table name - pColInfoData = taosArrayGet(p->pDataBlock, 4); - colDataAppendNULL(pColInfoData, numOfRows); - - // table comment - pColInfoData = taosArrayGet(p->pDataBlock, 8); - if (pInfo->pCur->mr.me.ntbEntry.commentLen > 0) { - char comment[TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(comment, pInfo->pCur->mr.me.ntbEntry.comment); - colDataAppend(pColInfoData, numOfRows, comment, false); - } else if (pInfo->pCur->mr.me.ntbEntry.commentLen == 0) { - char comment[VARSTR_HEADER_SIZE + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(comment, ""); - colDataAppend(pColInfoData, numOfRows, comment, false); - } else { - colDataAppendNULL(pColInfoData, numOfRows); - } - - // uid - pColInfoData = taosArrayGet(p->pDataBlock, 5); - colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.uid, false); - - // ttl - pColInfoData = taosArrayGet(p->pDataBlock, 7); - colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.ttlDays, false); - - STR_TO_VARSTR(n, "NORMAL_TABLE"); - } - - pColInfoData = taosArrayGet(p->pDataBlock, 9); - colDataAppend(pColInfoData, numOfRows, n, false); - - if (++numOfRows >= pOperator->resultInfo.capacity) { - p->info.rows = numOfRows; - pInfo->pRes->info.rows = numOfRows; - - relocateColumnData(pInfo->pRes, pInfo->matchInfo.pList, p->pDataBlock, false); - doFilterResult(pInfo->pRes, pOperator->exprSupp.pFilterInfo); - - blockDataCleanup(p); - numOfRows = 0; - - if (pInfo->pRes->info.rows > 0) { - break; - } - } - } - - if (numOfRows > 0) { - p->info.rows = numOfRows; - pInfo->pRes->info.rows = numOfRows; - - relocateColumnData(pInfo->pRes, pInfo->matchInfo.pList, p->pDataBlock, false); - doFilterResult(pInfo->pRes, pOperator->exprSupp.pFilterInfo); - - blockDataCleanup(p); - numOfRows = 0; - } - - blockDataDestroy(p); - - // todo temporarily free the cursor here, the true reason why the free is not valid needs to be found - if (ret != 0) { - metaCloseTbCursor(pInfo->pCur); - pInfo->pCur = NULL; - setOperatorCompleted(pOperator); - } - - pInfo->loadInfo.totalRows += pInfo->pRes->info.rows; - return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes; -} - -static SSDataBlock* sysTableScanUserTables(SOperatorInfo* pOperator) { - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - SSysTableScanInfo* pInfo = pOperator->info; - - SNode* pCondition = pInfo->pCondition; - if (pOperator->status == OP_EXEC_DONE) { - return NULL; - } - - // the retrieve is executed on the mnode, so return tables that belongs to the information schema database. - if (pInfo->readHandle.mnd != NULL) { - buildSysDbTableInfo(pInfo, pOperator->resultInfo.capacity); - doFilterResult(pInfo->pRes, pOperator->exprSupp.pFilterInfo); - pInfo->loadInfo.totalRows += pInfo->pRes->info.rows; - - setOperatorCompleted(pOperator); - return (pInfo->pRes->info.rows == 0) ? 
NULL : pInfo->pRes; - } else { - if (pInfo->showRewrite == false) { - if (pCondition != NULL && pInfo->pIdx == NULL) { - SSTabFltArg arg = {.pMeta = pInfo->readHandle.meta, .pVnode = pInfo->readHandle.vnode}; - - SSysTableIndex* idx = taosMemoryMalloc(sizeof(SSysTableIndex)); - idx->init = 0; - idx->uids = taosArrayInit(128, sizeof(int64_t)); - idx->lastIdx = 0; - - pInfo->pIdx = idx; // set idx arg - - int flt = optSysTabFilte(&arg, pCondition, idx->uids); - if (flt == 0) { - pInfo->pIdx->init = 1; - SSDataBlock* blk = sysTableBuildUserTablesByUids(pOperator); - return blk; - } else if (flt == -2) { - qDebug("%s failed to get sys table info by idx, empty result", GET_TASKID(pTaskInfo)); - return NULL; - } else if (flt == -1) { - // not idx - qDebug("%s failed to get sys table info by idx, scan sys table one by one", GET_TASKID(pTaskInfo)); - } - } else if (pCondition != NULL && (pInfo->pIdx != NULL && pInfo->pIdx->init == 1)) { - SSDataBlock* blk = sysTableBuildUserTablesByUids(pOperator); - return blk; - } - } - - return sysTableBuildUserTables(pOperator); - } - return NULL; -} - -static SSDataBlock* sysTableScanUserSTables(SOperatorInfo* pOperator) { - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - SSysTableScanInfo* pInfo = pOperator->info; - if (pOperator->status == OP_EXEC_DONE) { - return NULL; - } - - pInfo->pRes->info.rows = 0; - pOperator->status = OP_EXEC_DONE; - - pInfo->loadInfo.totalRows += pInfo->pRes->info.rows; - return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes; -} - -static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { - // build message and send to mnode to fetch the content of system tables. - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - SSysTableScanInfo* pInfo = pOperator->info; - char dbName[TSDB_DB_NAME_LEN] = {0}; - - const char* name = tNameGetTableName(&pInfo->name); - if (pInfo->showRewrite) { - getDBNameFromCondition(pInfo->pCondition, dbName); - sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName); - } - - if (strncasecmp(name, TSDB_INS_TABLE_TABLES, TSDB_TABLE_FNAME_LEN) == 0) { - return sysTableScanUserTables(pOperator); - } else if (strncasecmp(name, TSDB_INS_TABLE_TAGS, TSDB_TABLE_FNAME_LEN) == 0) { - return sysTableScanUserTags(pOperator); - } else if (strncasecmp(name, TSDB_INS_TABLE_STABLES, TSDB_TABLE_FNAME_LEN) == 0 && pInfo->showRewrite && - IS_SYS_DBNAME(dbName)) { - return sysTableScanUserSTables(pOperator); - } else { // load the meta from mnode of the given epset - if (pOperator->status == OP_EXEC_DONE) { - return NULL; - } - - while (1) { - int64_t startTs = taosGetTimestampUs(); - tstrncpy(pInfo->req.tb, tNameGetTableName(&pInfo->name), tListLen(pInfo->req.tb)); - tstrncpy(pInfo->req.user, pInfo->pUser, tListLen(pInfo->req.user)); - - int32_t contLen = tSerializeSRetrieveTableReq(NULL, 0, &pInfo->req); - char* buf1 = taosMemoryCalloc(1, contLen); - tSerializeSRetrieveTableReq(buf1, contLen, &pInfo->req); - - // send the fetch remote task result reques - SMsgSendInfo* pMsgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); - if (NULL == pMsgSendInfo) { - qError("%s prepare message %d failed", GET_TASKID(pTaskInfo), (int32_t)sizeof(SMsgSendInfo)); - pTaskInfo->code = TSDB_CODE_QRY_OUT_OF_MEMORY; - return NULL; - } - - int32_t msgType = (strcasecmp(name, TSDB_INS_TABLE_DNODE_VARIABLES) == 0) ? 
TDMT_DND_SYSTABLE_RETRIEVE - : TDMT_MND_SYSTABLE_RETRIEVE; - - pMsgSendInfo->param = pOperator; - pMsgSendInfo->msgInfo.pData = buf1; - pMsgSendInfo->msgInfo.len = contLen; - pMsgSendInfo->msgType = msgType; - pMsgSendInfo->fp = loadSysTableCallback; - pMsgSendInfo->requestId = pTaskInfo->id.queryId; - - int64_t transporterId = 0; - int32_t code = - asyncSendMsgToServer(pInfo->readHandle.pMsgCb->clientRpc, &pInfo->epSet, &transporterId, pMsgSendInfo); - tsem_wait(&pInfo->ready); - - if (pTaskInfo->code) { - qDebug("%s load meta data from mnode failed, totalRows:%" PRIu64 ", code:%s", GET_TASKID(pTaskInfo), - pInfo->loadInfo.totalRows, tstrerror(pTaskInfo->code)); - return NULL; - } - - SRetrieveMetaTableRsp* pRsp = pInfo->pRsp; - pInfo->req.showId = pRsp->handle; - - if (pRsp->numOfRows == 0 || pRsp->completed) { - pOperator->status = OP_EXEC_DONE; - qDebug("%s load meta data from mnode completed, rowsOfSource:%d, totalRows:%" PRIu64, GET_TASKID(pTaskInfo), - pRsp->numOfRows, pInfo->loadInfo.totalRows); - - if (pRsp->numOfRows == 0) { - taosMemoryFree(pRsp); - return NULL; - } - } - - char* pStart = pRsp->data; - extractDataBlockFromFetchRsp(pInfo->pRes, pRsp->data, pInfo->matchInfo.pList, &pStart); - updateLoadRemoteInfo(&pInfo->loadInfo, pRsp->numOfRows, pRsp->compLen, startTs, pOperator); - - // todo log the filter info - doFilterResult(pInfo->pRes, pOperator->exprSupp.pFilterInfo); - taosMemoryFree(pRsp); - if (pInfo->pRes->info.rows > 0) { - return pInfo->pRes; - } else if (pOperator->status == OP_EXEC_DONE) { - return NULL; - } - } - } -} - -int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity) { - SSDataBlock* p = buildInfoSchemaTableMetaBlock(TSDB_INS_TABLE_TABLES); - blockDataEnsureCapacity(p, capacity); - - size_t size = 0; - const SSysTableMeta* pSysDbTableMeta = NULL; - - getInfosDbMeta(&pSysDbTableMeta, &size); - p->info.rows = buildDbTableInfoBlock(pInfo->sysInfo, p, pSysDbTableMeta, size, TSDB_INFORMATION_SCHEMA_DB); - - getPerfDbMeta(&pSysDbTableMeta, &size); - p->info.rows = buildDbTableInfoBlock(pInfo->sysInfo, p, pSysDbTableMeta, size, TSDB_PERFORMANCE_SCHEMA_DB); - - pInfo->pRes->info.rows = p->info.rows; - relocateColumnData(pInfo->pRes, pInfo->matchInfo.pList, p->pDataBlock, false); - blockDataDestroy(p); - - return pInfo->pRes->info.rows; -} - -int32_t buildDbTableInfoBlock(bool sysInfo, const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size, - const char* dbName) { - char n[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; - int32_t numOfRows = p->info.rows; - - for (int32_t i = 0; i < size; ++i) { - const SSysTableMeta* pm = &pSysDbTableMeta[i]; - if (!sysInfo && pm->sysInfo) { - continue; - } - - SColumnInfoData* pColInfoData = taosArrayGet(p->pDataBlock, 0); - - STR_TO_VARSTR(n, pm->name); - colDataAppend(pColInfoData, numOfRows, n, false); - - // database name - STR_TO_VARSTR(n, dbName); - pColInfoData = taosArrayGet(p->pDataBlock, 1); - colDataAppend(pColInfoData, numOfRows, n, false); - - // create time - pColInfoData = taosArrayGet(p->pDataBlock, 2); - colDataAppendNULL(pColInfoData, numOfRows); - - // number of columns - pColInfoData = taosArrayGet(p->pDataBlock, 3); - colDataAppend(pColInfoData, numOfRows, (char*)&pm->colNum, false); - - for (int32_t j = 4; j <= 8; ++j) { - pColInfoData = taosArrayGet(p->pDataBlock, j); - colDataAppendNULL(pColInfoData, numOfRows); - } - - STR_TO_VARSTR(n, "SYSTEM_TABLE"); - - pColInfoData = taosArrayGet(p->pDataBlock, 9); - colDataAppend(pColInfoData, numOfRows, n, false); - - 
numOfRows += 1; - } - - return numOfRows; -} - -SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode* pScanPhyNode, - const char* pUser, SExecTaskInfo* pTaskInfo) { - SSysTableScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SSysTableScanInfo)); - SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); - if (pInfo == NULL || pOperator == NULL) { - goto _error; - } - - SScanPhysiNode* pScanNode = &pScanPhyNode->scan; - SDataBlockDescNode* pDescNode = pScanNode->node.pOutputDataBlockDesc; - - int32_t num = 0; - int32_t code = extractColMatchInfo(pScanNode->pScanCols, pDescNode, &num, COL_MATCH_FROM_COL_ID, &pInfo->matchInfo); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - pInfo->accountId = pScanPhyNode->accountId; - pInfo->pUser = taosMemoryStrDup((void*)pUser); - pInfo->sysInfo = pScanPhyNode->sysInfo; - pInfo->showRewrite = pScanPhyNode->showRewrite; - pInfo->pRes = createResDataBlock(pDescNode); - - pInfo->pCondition = pScanNode->node.pConditions; - code = filterInitFromNode(pScanNode->node.pConditions, &pOperator->exprSupp.pFilterInfo, 0); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - initResultSizeInfo(&pOperator->resultInfo, 4096); - blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); - - tNameAssign(&pInfo->name, &pScanNode->tableName); - const char* name = tNameGetTableName(&pInfo->name); - - if (strncasecmp(name, TSDB_INS_TABLE_TABLES, TSDB_TABLE_FNAME_LEN) == 0 || - strncasecmp(name, TSDB_INS_TABLE_TAGS, TSDB_TABLE_FNAME_LEN) == 0) { - pInfo->readHandle = *(SReadHandle*)readHandle; - } else { - tsem_init(&pInfo->ready, 0, 0); - pInfo->epSet = pScanPhyNode->mgmtEpSet; - pInfo->readHandle = *(SReadHandle*)readHandle; - } - - setOperatorInfo(pOperator, "SysTableScanOperator", QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN, false, OP_NOT_OPENED, - pInfo, pTaskInfo); - pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock); - pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doSysTableScan, NULL, destroySysScanOperator, NULL); - return pOperator; - -_error: - if (pInfo != NULL) { - destroySysScanOperator(pInfo); - } - taosMemoryFreeClear(pOperator); - pTaskInfo->code = code; - return NULL; -} - static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -4271,7 +2578,7 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { metaReaderInit(&mr, pInfo->readHandle.meta, 0); while (pInfo->curPos < size && count < pOperator->resultInfo.capacity) { - STableKeyInfo* item = tableListGetInfo(pInfo->pTableList, pInfo->curPos); + STableKeyInfo* item = tableListGetInfo(pTaskInfo->pTableInfoList, pInfo->curPos); int32_t code = metaGetTableEntryByUid(&mr, item->uid); tDecoderClear(&mr.coder); if (code != TSDB_CODE_SUCCESS) { @@ -4358,7 +2665,6 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi goto _error; } - pInfo->pTableList = pTableListInfo; pInfo->pRes = createResDataBlock(pDescNode); pInfo->readHandle = *pReadHandle; pInfo->curPos = 0; diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index 14e3163455..0efa27dd46 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -17,6 +17,18 @@ #include "executorimpl.h" #include "tdatablock.h" +typedef struct SSortOperatorInfo { + SOptrBasicInfo binfo; + uint32_t sortBufSize; // max buffer size for in-memory sort + SArray* pSortInfo; + SSortHandle* pSortHandle; + 
SColMatchInfo matchInfo; + int32_t bufPageSize; + int64_t startTs; // sort start time + uint64_t sortElapsed; // sort elapsed time, time to flush to disk not included. + SLimitInfo limitInfo; +} SSortOperatorInfo; + static SSDataBlock* doSort(SOperatorInfo* pOperator); static int32_t doOpenSortOperator(SOperatorInfo* pOperator); static int32_t getExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len); diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c new file mode 100644 index 0000000000..f6a0c57b66 --- /dev/null +++ b/source/libs/executor/src/sysscanoperator.c @@ -0,0 +1,1774 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "executorimpl.h" +#include "filter.h" +#include "function.h" +#include "functionMgt.h" +#include "os.h" +#include "querynodes.h" +#include "systable.h" +#include "tname.h" +#include "ttime.h" + +#include "tdatablock.h" +#include "tmsg.h" + +#include "query.h" +#include "tcompare.h" +#include "thash.h" +#include "ttypes.h" +#include "vnode.h" + +typedef int (*__optSysFilter)(void* a, void* b, int16_t dtype); +typedef int32_t (*__sys_filte)(void* pMeta, SNode* cond, SArray* result); +typedef int32_t (*__sys_check)(SNode* cond); + +typedef struct SSTabFltArg { + void* pMeta; + void* pVnode; +} SSTabFltArg; + +typedef struct SSysTableIndex { + int8_t init; + SArray* uids; + int32_t lastIdx; +} SSysTableIndex; + +typedef struct SSysTableScanInfo { + SRetrieveMetaTableRsp* pRsp; + SRetrieveTableReq req; + SEpSet epSet; + tsem_t ready; + SReadHandle readHandle; + int32_t accountId; + const char* pUser; + bool sysInfo; + bool showRewrite; + SNode* pCondition; // db_name filter condition, to discard data that are not in current database + SMTbCursor* pCur; // cursor for iterate the local table meta store. + SSysTableIndex* pIdx; // idx for local table meta + SColMatchInfo matchInfo; + SName name; + SSDataBlock* pRes; + int64_t numOfBlocks; // extract basic running information. 
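+  // running totals for rows produced by this scan, updated for both local meta scans and mnode retrievals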
+  SLoadRemoteDataInfo loadInfo;
+} SSysTableScanInfo;
+
+typedef struct {
+  const char* name;
+  __sys_check chkFunc;
+  __sys_filte fltFunc;
+} SSTabFltFuncDef;
+
+typedef struct MergeIndex {
+  int idx;
+  int len;
+} MergeIndex;
+
+static int32_t sysChkFilter__Comm(SNode* pNode);
+static int32_t sysChkFilter__DBName(SNode* pNode);
+static int32_t sysChkFilter__VgroupId(SNode* pNode);
+static int32_t sysChkFilter__TableName(SNode* pNode);
+static int32_t sysChkFilter__CreateTime(SNode* pNode);
+static int32_t sysChkFilter__Ncolumn(SNode* pNode);
+static int32_t sysChkFilter__Ttl(SNode* pNode);
+static int32_t sysChkFilter__STableName(SNode* pNode);
+static int32_t sysChkFilter__Uid(SNode* pNode);
+static int32_t sysChkFilter__Type(SNode* pNode);
+
+static int32_t sysFilte__DbName(void* arg, SNode* pNode, SArray* result);
+static int32_t sysFilte__VgroupId(void* arg, SNode* pNode, SArray* result);
+static int32_t sysFilte__TableName(void* arg, SNode* pNode, SArray* result);
+static int32_t sysFilte__CreateTime(void* arg, SNode* pNode, SArray* result);
+static int32_t sysFilte__Ncolumn(void* arg, SNode* pNode, SArray* result);
+static int32_t sysFilte__Ttl(void* arg, SNode* pNode, SArray* result);
+static int32_t sysFilte__STableName(void* arg, SNode* pNode, SArray* result);
+static int32_t sysFilte__Uid(void* arg, SNode* pNode, SArray* result);
+static int32_t sysFilte__Type(void* arg, SNode* pNode, SArray* result);
+
+const SSTabFltFuncDef filterDict[] = {
+    {.name = "table_name", .chkFunc = sysChkFilter__TableName, .fltFunc = sysFilte__TableName},
+    {.name = "db_name", .chkFunc = sysChkFilter__DBName, .fltFunc = sysFilte__DbName},
+    {.name = "create_time", .chkFunc = sysChkFilter__CreateTime, .fltFunc = sysFilte__CreateTime},
+    {.name = "columns", .chkFunc = sysChkFilter__Ncolumn, .fltFunc = sysFilte__Ncolumn},
+    {.name = "ttl", .chkFunc = sysChkFilter__Ttl, .fltFunc = sysFilte__Ttl},
+    {.name = "stable_name", .chkFunc = sysChkFilter__STableName, .fltFunc = sysFilte__STableName},
+    {.name = "vgroup_id", .chkFunc = sysChkFilter__VgroupId, .fltFunc = sysFilte__VgroupId},
+    {.name = "uid", .chkFunc = sysChkFilter__Uid, .fltFunc = sysFilte__Uid},
+    {.name = "type", .chkFunc = sysChkFilter__Type, .fltFunc = sysFilte__Type}};
+
+#define SYSTAB_FILTER_DICT_SIZE (sizeof(filterDict) / sizeof(filterDict[0]))
+
+static int32_t buildDbTableInfoBlock(bool sysInfo, const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta,
+                                     size_t size, const char* dbName);
+
+static char* SYSTABLE_IDX_COLUMN[] = {"table_name", "db_name", "create_time", "columns",
+                                      "ttl", "stable_name", "vgroup_id", "uid", "type"};
+
+static char* SYSTABLE_SPECIAL_COL[] = {"db_name", "vgroup_id"};
+
+static int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity);
+static SSDataBlock* buildInfoSchemaTableMetaBlock(char* tableName);
+static void destroySysScanOperator(void* param);
+static int32_t loadSysTableCallback(void* param, SDataBuf* pMsg, int32_t code);
+static SSDataBlock* doFilterResult(SSDataBlock* pDataBlock, SFilterInfo* pFilterInfo);
+static __optSysFilter optSysGetFilterFunc(int32_t ctype, bool* reverse);
+
+static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo, SMetaReader* smrSuperTable,
+                                                SMetaReader* smrChildTable, const char* dbname, const char* tableName,
+                                                int32_t* pNumOfRows, const SSDataBlock* dataBlock);
+
+static void relocateAndFilterSysTagsScanResult(SSysTableScanInfo* pInfo, int32_t numOfRows, SSDataBlock* dataBlock,
+                                               SFilterInfo* pFilterInfo);
+
+int32_t 
sysFilte__DbName(void* arg, SNode* pNode, SArray* result) { + void* pVnode = ((SSTabFltArg*)arg)->pVnode; + + const char* db = NULL; + vnodeGetInfo(pVnode, &db, NULL); + + SName sn = {0}; + char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + tNameFromString(&sn, db, T_NAME_ACCT | T_NAME_DB); + + tNameGetDbName(&sn, varDataVal(dbname)); + varDataSetLen(dbname, strlen(varDataVal(dbname))); + + SOperatorNode* pOper = (SOperatorNode*)pNode; + SValueNode* pVal = (SValueNode*)pOper->pRight; + + bool reverse = false; + __optSysFilter func = optSysGetFilterFunc(pOper->opType, &reverse); + if (func == NULL) return -1; + + int ret = func(dbname, pVal->datum.p, TSDB_DATA_TYPE_VARCHAR); + if (ret == 0) return 0; + + return -2; +} + +int32_t sysFilte__VgroupId(void* arg, SNode* pNode, SArray* result) { + void* pVnode = ((SSTabFltArg*)arg)->pVnode; + + int64_t vgId = 0; + vnodeGetInfo(pVnode, NULL, (int32_t*)&vgId); + + SOperatorNode* pOper = (SOperatorNode*)pNode; + SValueNode* pVal = (SValueNode*)pOper->pRight; + + bool reverse = false; + + __optSysFilter func = optSysGetFilterFunc(pOper->opType, &reverse); + if (func == NULL) return -1; + + int ret = func(&vgId, &pVal->datum.i, TSDB_DATA_TYPE_BIGINT); + if (ret == 0) return 0; + + return -1; +} + +int32_t sysFilte__TableName(void* arg, SNode* pNode, SArray* result) { + void* pMeta = ((SSTabFltArg*)arg)->pMeta; + + SOperatorNode* pOper = (SOperatorNode*)pNode; + SValueNode* pVal = (SValueNode*)pOper->pRight; + bool reverse = false; + + __optSysFilter func = optSysGetFilterFunc(pOper->opType, &reverse); + if (func == NULL) return -1; + + SMetaFltParam param = {.suid = 0, + .cid = 0, + .type = TSDB_DATA_TYPE_VARCHAR, + .val = pVal->datum.p, + .reverse = reverse, + .filterFunc = func}; + return -1; +} + +int32_t sysFilte__CreateTime(void* arg, SNode* pNode, SArray* result) { + void* pMeta = ((SSTabFltArg*)arg)->pMeta; + + SOperatorNode* pOper = (SOperatorNode*)pNode; + SValueNode* pVal = (SValueNode*)pOper->pRight; + bool reverse = false; + + __optSysFilter func = optSysGetFilterFunc(pOper->opType, &reverse); + if (func == NULL) return -1; + + SMetaFltParam param = {.suid = 0, + .cid = 0, + .type = TSDB_DATA_TYPE_BIGINT, + .val = &pVal->datum.i, + .reverse = reverse, + .filterFunc = func}; + + int32_t ret = metaFilterCreateTime(pMeta, ¶m, result); + return ret; +} + +int32_t sysFilte__Ncolumn(void* arg, SNode* pNode, SArray* result) { + void* pMeta = ((SSTabFltArg*)arg)->pMeta; + + SOperatorNode* pOper = (SOperatorNode*)pNode; + SValueNode* pVal = (SValueNode*)pOper->pRight; + bool reverse = false; + + __optSysFilter func = optSysGetFilterFunc(pOper->opType, &reverse); + if (func == NULL) return -1; + return -1; +} + +int32_t sysFilte__Ttl(void* arg, SNode* pNode, SArray* result) { + void* pMeta = ((SSTabFltArg*)arg)->pMeta; + + SOperatorNode* pOper = (SOperatorNode*)pNode; + SValueNode* pVal = (SValueNode*)pOper->pRight; + bool reverse = false; + + __optSysFilter func = optSysGetFilterFunc(pOper->opType, &reverse); + if (func == NULL) return -1; + return -1; +} + +int32_t sysFilte__STableName(void* arg, SNode* pNode, SArray* result) { + void* pMeta = ((SSTabFltArg*)arg)->pMeta; + + SOperatorNode* pOper = (SOperatorNode*)pNode; + SValueNode* pVal = (SValueNode*)pOper->pRight; + bool reverse = false; + + __optSysFilter func = optSysGetFilterFunc(pOper->opType, &reverse); + if (func == NULL) return -1; + return -1; +} + +int32_t sysFilte__Uid(void* arg, SNode* pNode, SArray* result) { + void* pMeta = ((SSTabFltArg*)arg)->pMeta; + + SOperatorNode* 
pOper = (SOperatorNode*)pNode; + SValueNode* pVal = (SValueNode*)pOper->pRight; + bool reverse = false; + + __optSysFilter func = optSysGetFilterFunc(pOper->opType, &reverse); + if (func == NULL) return -1; + return -1; +} + +int32_t sysFilte__Type(void* arg, SNode* pNode, SArray* result) { + void* pMeta = ((SSTabFltArg*)arg)->pMeta; + + SOperatorNode* pOper = (SOperatorNode*)pNode; + SValueNode* pVal = (SValueNode*)pOper->pRight; + bool reverse = false; + + __optSysFilter func = optSysGetFilterFunc(pOper->opType, &reverse); + if (func == NULL) return -1; + return -1; +} + +int optSysDoCompare(__compar_fn_t func, int8_t comparType, void* a, void* b) { + int32_t cmp = func(a, b); + switch (comparType) { + case OP_TYPE_LOWER_THAN: + if (cmp < 0) return 0; + break; + case OP_TYPE_LOWER_EQUAL: { + if (cmp <= 0) return 0; + break; + } + case OP_TYPE_GREATER_THAN: { + if (cmp > 0) return 0; + break; + } + case OP_TYPE_GREATER_EQUAL: { + if (cmp >= 0) return 0; + break; + } + case OP_TYPE_EQUAL: { + if (cmp == 0) return 0; + break; + } + default: + return -1; + } + return cmp; +} + +static int optSysFilterFuncImpl__LowerThan(void* a, void* b, int16_t dtype) { + __compar_fn_t func = getComparFunc(dtype, 0); + return optSysDoCompare(func, OP_TYPE_LOWER_THAN, a, b); +} +static int optSysFilterFuncImpl__LowerEqual(void* a, void* b, int16_t dtype) { + __compar_fn_t func = getComparFunc(dtype, 0); + return optSysDoCompare(func, OP_TYPE_LOWER_EQUAL, a, b); +} +static int optSysFilterFuncImpl__GreaterThan(void* a, void* b, int16_t dtype) { + __compar_fn_t func = getComparFunc(dtype, 0); + return optSysDoCompare(func, OP_TYPE_GREATER_THAN, a, b); +} +static int optSysFilterFuncImpl__GreaterEqual(void* a, void* b, int16_t dtype) { + __compar_fn_t func = getComparFunc(dtype, 0); + return optSysDoCompare(func, OP_TYPE_GREATER_EQUAL, a, b); +} +static int optSysFilterFuncImpl__Equal(void* a, void* b, int16_t dtype) { + __compar_fn_t func = getComparFunc(dtype, 0); + return optSysDoCompare(func, OP_TYPE_EQUAL, a, b); +} + +static int optSysFilterFuncImpl__NoEqual(void* a, void* b, int16_t dtype) { + __compar_fn_t func = getComparFunc(dtype, 0); + return optSysDoCompare(func, OP_TYPE_NOT_EQUAL, a, b); +} + +static int32_t optSysTabFilte(void* arg, SNode* cond, SArray* result); +static int32_t optSysTabFilteImpl(void* arg, SNode* cond, SArray* result); +static int32_t optSysCheckOper(SNode* pOpear); +static int32_t optSysMergeRslt(SArray* mRslt, SArray* rslt); + +__optSysFilter optSysGetFilterFunc(int32_t ctype, bool* reverse) { + if (ctype == OP_TYPE_LOWER_EQUAL || ctype == OP_TYPE_LOWER_THAN) { + *reverse = true; + } + if (ctype == OP_TYPE_LOWER_THAN) + return optSysFilterFuncImpl__LowerThan; + else if (ctype == OP_TYPE_LOWER_EQUAL) + return optSysFilterFuncImpl__LowerEqual; + else if (ctype == OP_TYPE_GREATER_THAN) + return optSysFilterFuncImpl__GreaterThan; + else if (ctype == OP_TYPE_GREATER_EQUAL) + return optSysFilterFuncImpl__GreaterEqual; + else if (ctype == OP_TYPE_EQUAL) + return optSysFilterFuncImpl__Equal; + else if (ctype == OP_TYPE_NOT_EQUAL) + return optSysFilterFuncImpl__NoEqual; + return NULL; +} + +static bool sysTableIsOperatorCondOnOneTable(SNode* pCond, char* condTable) { + SOperatorNode* node = (SOperatorNode*)pCond; + if (node->opType == OP_TYPE_EQUAL) { + if (nodeType(node->pLeft) == QUERY_NODE_COLUMN && + strcasecmp(nodesGetNameFromColumnNode(node->pLeft), "table_name") == 0 && + nodeType(node->pRight) == QUERY_NODE_VALUE) { + SValueNode* pValue = (SValueNode*)node->pRight; + if 
(pValue->node.resType.type == TSDB_DATA_TYPE_NCHAR || pValue->node.resType.type == TSDB_DATA_TYPE_VARCHAR || + pValue->node.resType.type == TSDB_DATA_TYPE_BINARY) { + char* value = nodesGetValueFromNode(pValue); + strncpy(condTable, varDataVal(value), TSDB_TABLE_NAME_LEN); + return true; + } + } + } + return false; +} + +static bool sysTableIsCondOnOneTable(SNode* pCond, char* condTable) { + if (pCond == NULL) { + return false; + } + if (nodeType(pCond) == QUERY_NODE_LOGIC_CONDITION) { + SLogicConditionNode* node = (SLogicConditionNode*)pCond; + if (LOGIC_COND_TYPE_AND == node->condType) { + SNode* pChild = NULL; + FOREACH(pChild, node->pParameterList) { + if (QUERY_NODE_OPERATOR == nodeType(pChild) && sysTableIsOperatorCondOnOneTable(pChild, condTable)) { + return true; + } + } + } + } + + if (QUERY_NODE_OPERATOR == nodeType(pCond)) { + return sysTableIsOperatorCondOnOneTable(pCond, condTable); + } + + return false; +} + +static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) { + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SSysTableScanInfo* pInfo = pOperator->info; + if (pOperator->status == OP_EXEC_DONE) { + return NULL; + } + + blockDataCleanup(pInfo->pRes); + int32_t numOfRows = 0; + + SSDataBlock* dataBlock = buildInfoSchemaTableMetaBlock(TSDB_INS_TABLE_TAGS); + blockDataEnsureCapacity(dataBlock, pOperator->resultInfo.capacity); + + const char* db = NULL; + int32_t vgId = 0; + vnodeGetInfo(pInfo->readHandle.vnode, &db, &vgId); + + SName sn = {0}; + char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + tNameFromString(&sn, db, T_NAME_ACCT | T_NAME_DB); + + tNameGetDbName(&sn, varDataVal(dbname)); + varDataSetLen(dbname, strlen(varDataVal(dbname))); + + char condTableName[TSDB_TABLE_NAME_LEN] = {0}; + // optimize when sql like where table_name='tablename' and xxx. + if (pInfo->pCondition && sysTableIsCondOnOneTable(pInfo->pCondition, condTableName)) { + char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(tableName, condTableName); + + SMetaReader smrChildTable = {0}; + metaReaderInit(&smrChildTable, pInfo->readHandle.meta, 0); + int32_t code = metaGetTableEntryByName(&smrChildTable, condTableName); + if (code != TSDB_CODE_SUCCESS) { + // terrno has been set by metaGetTableEntryByName, therefore, return directly + return NULL; + } + + if (smrChildTable.me.type != TSDB_CHILD_TABLE) { + metaReaderClear(&smrChildTable); + blockDataDestroy(dataBlock); + pInfo->loadInfo.totalRows = 0; + return NULL; + } + + SMetaReader smrSuperTable = {0}; + metaReaderInit(&smrSuperTable, pInfo->readHandle.meta, META_READER_NOLOCK); + code = metaGetTableEntryByUid(&smrSuperTable, smrChildTable.me.ctbEntry.suid); + if (code != TSDB_CODE_SUCCESS) { + // terrno has been set by metaGetTableEntryByUid + return NULL; + } + + sysTableUserTagsFillOneTableTags(pInfo, &smrSuperTable, &smrChildTable, dbname, tableName, &numOfRows, dataBlock); + metaReaderClear(&smrSuperTable); + metaReaderClear(&smrChildTable); + if (numOfRows > 0) { + relocateAndFilterSysTagsScanResult(pInfo, numOfRows, dataBlock, pOperator->exprSupp.pFilterInfo); + numOfRows = 0; + } + blockDataDestroy(dataBlock); + pInfo->loadInfo.totalRows += pInfo->pRes->info.rows; + setOperatorCompleted(pOperator); + return (pInfo->pRes->info.rows == 0) ? 
NULL : pInfo->pRes; + } + + int32_t ret = 0; + if (pInfo->pCur == NULL) { + pInfo->pCur = metaOpenTbCursor(pInfo->readHandle.meta); + } + + while ((ret = metaTbCursorNext(pInfo->pCur)) == 0) { + if (pInfo->pCur->mr.me.type != TSDB_CHILD_TABLE) { + continue; + } + + char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(tableName, pInfo->pCur->mr.me.name); + + SMetaReader smrSuperTable = {0}; + metaReaderInit(&smrSuperTable, pInfo->readHandle.meta, 0); + uint64_t suid = pInfo->pCur->mr.me.ctbEntry.suid; + int32_t code = metaGetTableEntryByUid(&smrSuperTable, suid); + if (code != TSDB_CODE_SUCCESS) { + qError("failed to get super table meta, uid:0x%" PRIx64 ", code:%s, %s", suid, tstrerror(terrno), + GET_TASKID(pTaskInfo)); + metaReaderClear(&smrSuperTable); + metaCloseTbCursor(pInfo->pCur); + pInfo->pCur = NULL; + T_LONG_JMP(pTaskInfo->env, terrno); + } + + sysTableUserTagsFillOneTableTags(pInfo, &smrSuperTable, &pInfo->pCur->mr, dbname, tableName, &numOfRows, dataBlock); + + metaReaderClear(&smrSuperTable); + + if (numOfRows >= pOperator->resultInfo.capacity) { + relocateAndFilterSysTagsScanResult(pInfo, numOfRows, dataBlock, pOperator->exprSupp.pFilterInfo); + numOfRows = 0; + + if (pInfo->pRes->info.rows > 0) { + break; + } + } + } + + if (numOfRows > 0) { + relocateAndFilterSysTagsScanResult(pInfo, numOfRows, dataBlock, pOperator->exprSupp.pFilterInfo); + numOfRows = 0; + } + + blockDataDestroy(dataBlock); + if (ret != 0) { + metaCloseTbCursor(pInfo->pCur); + pInfo->pCur = NULL; + setOperatorCompleted(pOperator); + } + + pInfo->loadInfo.totalRows += pInfo->pRes->info.rows; + return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes; +} + +void relocateAndFilterSysTagsScanResult(SSysTableScanInfo* pInfo, int32_t numOfRows, SSDataBlock* dataBlock, + SFilterInfo* pFilterInfo) { + dataBlock->info.rows = numOfRows; + pInfo->pRes->info.rows = numOfRows; + + relocateColumnData(pInfo->pRes, pInfo->matchInfo.pList, dataBlock->pDataBlock, false); + doFilterResult(pInfo->pRes, pFilterInfo); + blockDataCleanup(dataBlock); +} + +int32_t convertTagDataToStr(char* str, int type, void* buf, int32_t bufSize, int32_t* len) { + int32_t n = 0; + + switch (type) { + case TSDB_DATA_TYPE_NULL: + n = sprintf(str, "null"); + break; + + case TSDB_DATA_TYPE_BOOL: + n = sprintf(str, (*(int8_t*)buf) ? 
"true" : "false"); + break; + + case TSDB_DATA_TYPE_TINYINT: + n = sprintf(str, "%d", *(int8_t*)buf); + break; + + case TSDB_DATA_TYPE_SMALLINT: + n = sprintf(str, "%d", *(int16_t*)buf); + break; + + case TSDB_DATA_TYPE_INT: + n = sprintf(str, "%d", *(int32_t*)buf); + break; + + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_TIMESTAMP: + n = sprintf(str, "%" PRId64, *(int64_t*)buf); + break; + + case TSDB_DATA_TYPE_FLOAT: + n = sprintf(str, "%.5f", GET_FLOAT_VAL(buf)); + break; + + case TSDB_DATA_TYPE_DOUBLE: + n = sprintf(str, "%.9f", GET_DOUBLE_VAL(buf)); + break; + + case TSDB_DATA_TYPE_BINARY: + if (bufSize < 0) { + return TSDB_CODE_TSC_INVALID_VALUE; + } + + memcpy(str, buf, bufSize); + n = bufSize; + break; + case TSDB_DATA_TYPE_NCHAR: + if (bufSize < 0) { + return TSDB_CODE_TSC_INVALID_VALUE; + } + + int32_t length = taosUcs4ToMbs((TdUcs4*)buf, bufSize, str); + if (length <= 0) { + return TSDB_CODE_TSC_INVALID_VALUE; + } + n = length; + break; + case TSDB_DATA_TYPE_UTINYINT: + n = sprintf(str, "%u", *(uint8_t*)buf); + break; + + case TSDB_DATA_TYPE_USMALLINT: + n = sprintf(str, "%u", *(uint16_t*)buf); + break; + + case TSDB_DATA_TYPE_UINT: + n = sprintf(str, "%u", *(uint32_t*)buf); + break; + + case TSDB_DATA_TYPE_UBIGINT: + n = sprintf(str, "%" PRIu64, *(uint64_t*)buf); + break; + + default: + return TSDB_CODE_TSC_INVALID_VALUE; + } + + if (len) *len = n; + + return TSDB_CODE_SUCCESS; +} + +static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo, SMetaReader* smrSuperTable, + SMetaReader* smrChildTable, const char* dbname, const char* tableName, + int32_t* pNumOfRows, const SSDataBlock* dataBlock) { + char stableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(stableName, (*smrSuperTable).me.name); + + int32_t numOfRows = *pNumOfRows; + + int32_t numOfTags = (*smrSuperTable).me.stbEntry.schemaTag.nCols; + for (int32_t i = 0; i < numOfTags; ++i) { + SColumnInfoData* pColInfoData = NULL; + + // table name + pColInfoData = taosArrayGet(dataBlock->pDataBlock, 0); + colDataAppend(pColInfoData, numOfRows, tableName, false); + + // database name + pColInfoData = taosArrayGet(dataBlock->pDataBlock, 1); + colDataAppend(pColInfoData, numOfRows, dbname, false); + + // super table name + pColInfoData = taosArrayGet(dataBlock->pDataBlock, 2); + colDataAppend(pColInfoData, numOfRows, stableName, false); + + // tag name + char tagName[TSDB_COL_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(tagName, (*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].name); + pColInfoData = taosArrayGet(dataBlock->pDataBlock, 3); + colDataAppend(pColInfoData, numOfRows, tagName, false); + + // tag type + int8_t tagType = (*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].type; + pColInfoData = taosArrayGet(dataBlock->pDataBlock, 4); + char tagTypeStr[VARSTR_HEADER_SIZE + 32]; + int tagTypeLen = sprintf(varDataVal(tagTypeStr), "%s", tDataTypes[tagType].name); + if (tagType == TSDB_DATA_TYPE_VARCHAR) { + tagTypeLen += sprintf(varDataVal(tagTypeStr) + tagTypeLen, "(%d)", + (int32_t)((*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].bytes - VARSTR_HEADER_SIZE)); + } else if (tagType == TSDB_DATA_TYPE_NCHAR) { + tagTypeLen += sprintf( + varDataVal(tagTypeStr) + tagTypeLen, "(%d)", + (int32_t)(((*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); + } + varDataSetLen(tagTypeStr, tagTypeLen); + colDataAppend(pColInfoData, numOfRows, (char*)tagTypeStr, false); + + STagVal tagVal = {0}; + tagVal.cid = 
(*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].colId; + char* tagData = NULL; + uint32_t tagLen = 0; + + if (tagType == TSDB_DATA_TYPE_JSON) { + tagData = (char*)smrChildTable->me.ctbEntry.pTags; + } else { + bool exist = tTagGet((STag*)smrChildTable->me.ctbEntry.pTags, &tagVal); + if (exist) { + if (IS_VAR_DATA_TYPE(tagType)) { + tagData = (char*)tagVal.pData; + tagLen = tagVal.nData; + } else { + tagData = (char*)&tagVal.i64; + tagLen = tDataTypes[tagType].bytes; + } + } + } + + char* tagVarChar = NULL; + if (tagData != NULL) { + if (tagType == TSDB_DATA_TYPE_JSON) { + char* tagJson = parseTagDatatoJson(tagData); + tagVarChar = taosMemoryMalloc(strlen(tagJson) + VARSTR_HEADER_SIZE); + memcpy(varDataVal(tagVarChar), tagJson, strlen(tagJson)); + varDataSetLen(tagVarChar, strlen(tagJson)); + taosMemoryFree(tagJson); + } else { + int32_t bufSize = IS_VAR_DATA_TYPE(tagType) ? (tagLen + VARSTR_HEADER_SIZE) + : (3 + DBL_MANT_DIG - DBL_MIN_EXP + VARSTR_HEADER_SIZE); + tagVarChar = taosMemoryMalloc(bufSize); + int32_t len = -1; + convertTagDataToStr(varDataVal(tagVarChar), tagType, tagData, tagLen, &len); + varDataSetLen(tagVarChar, len); + } + } + pColInfoData = taosArrayGet(dataBlock->pDataBlock, 5); + colDataAppend(pColInfoData, numOfRows, tagVarChar, + (tagData == NULL) || (tagType == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(tagData))); + taosMemoryFree(tagVarChar); + ++numOfRows; + } + + *pNumOfRows = numOfRows; + + return TSDB_CODE_SUCCESS; +} + +static SSDataBlock* buildInfoSchemaTableMetaBlock(char* tableName) { + size_t size = 0; + const SSysTableMeta* pMeta = NULL; + getInfosDbMeta(&pMeta, &size); + + int32_t index = 0; + for (int32_t i = 0; i < size; ++i) { + if (strcmp(pMeta[i].name, tableName) == 0) { + index = i; + break; + } + } + + SSDataBlock* pBlock = createDataBlock(); + for (int32_t i = 0; i < pMeta[index].colNum; ++i) { + SColumnInfoData colInfoData = + createColumnInfoData(pMeta[index].schema[i].type, pMeta[index].schema[i].bytes, i + 1); + blockDataAppendColInfo(pBlock, &colInfoData); + } + + return pBlock; +} + +int32_t buildDbTableInfoBlock(bool sysInfo, const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size, + const char* dbName) { + char n[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + int32_t numOfRows = p->info.rows; + + for (int32_t i = 0; i < size; ++i) { + const SSysTableMeta* pm = &pSysDbTableMeta[i]; + if (!sysInfo && pm->sysInfo) { + continue; + } + + SColumnInfoData* pColInfoData = taosArrayGet(p->pDataBlock, 0); + + STR_TO_VARSTR(n, pm->name); + colDataAppend(pColInfoData, numOfRows, n, false); + + // database name + STR_TO_VARSTR(n, dbName); + pColInfoData = taosArrayGet(p->pDataBlock, 1); + colDataAppend(pColInfoData, numOfRows, n, false); + + // create time + pColInfoData = taosArrayGet(p->pDataBlock, 2); + colDataAppendNULL(pColInfoData, numOfRows); + + // number of columns + pColInfoData = taosArrayGet(p->pDataBlock, 3); + colDataAppend(pColInfoData, numOfRows, (char*)&pm->colNum, false); + + for (int32_t j = 4; j <= 8; ++j) { + pColInfoData = taosArrayGet(p->pDataBlock, j); + colDataAppendNULL(pColInfoData, numOfRows); + } + + STR_TO_VARSTR(n, "SYSTEM_TABLE"); + + pColInfoData = taosArrayGet(p->pDataBlock, 9); + colDataAppend(pColInfoData, numOfRows, n, false); + + numOfRows += 1; + } + + return numOfRows; +} + +int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity) { + SSDataBlock* p = buildInfoSchemaTableMetaBlock(TSDB_INS_TABLE_TABLES); + blockDataEnsureCapacity(p, capacity); + + size_t size = 0; + const 
SSysTableMeta* pSysDbTableMeta = NULL; + + getInfosDbMeta(&pSysDbTableMeta, &size); + p->info.rows = buildDbTableInfoBlock(pInfo->sysInfo, p, pSysDbTableMeta, size, TSDB_INFORMATION_SCHEMA_DB); + + getPerfDbMeta(&pSysDbTableMeta, &size); + p->info.rows = buildDbTableInfoBlock(pInfo->sysInfo, p, pSysDbTableMeta, size, TSDB_PERFORMANCE_SCHEMA_DB); + + pInfo->pRes->info.rows = p->info.rows; + relocateColumnData(pInfo->pRes, pInfo->matchInfo.pList, p->pDataBlock, false); + blockDataDestroy(p); + + return pInfo->pRes->info.rows; +} + +static SSDataBlock* sysTableBuildUserTablesByUids(SOperatorInfo* pOperator) { + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SSysTableScanInfo* pInfo = pOperator->info; + + SSysTableIndex* pIdx = pInfo->pIdx; + blockDataCleanup(pInfo->pRes); + int32_t numOfRows = 0; + + int ret = 0; + + const char* db = NULL; + int32_t vgId = 0; + vnodeGetInfo(pInfo->readHandle.vnode, &db, &vgId); + + SName sn = {0}; + char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + tNameFromString(&sn, db, T_NAME_ACCT | T_NAME_DB); + + tNameGetDbName(&sn, varDataVal(dbname)); + varDataSetLen(dbname, strlen(varDataVal(dbname))); + + SSDataBlock* p = buildInfoSchemaTableMetaBlock(TSDB_INS_TABLE_TABLES); + blockDataEnsureCapacity(p, pOperator->resultInfo.capacity); + + char n[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; + int32_t i = pIdx->lastIdx; + for (; i < taosArrayGetSize(pIdx->uids); i++) { + tb_uid_t* uid = taosArrayGet(pIdx->uids, i); + + SMetaReader mr = {0}; + metaReaderInit(&mr, pInfo->readHandle.meta, 0); + ret = metaGetTableEntryByUid(&mr, *uid); + if (ret < 0) { + metaReaderClear(&mr); + continue; + } + STR_TO_VARSTR(n, mr.me.name); + + // table name + SColumnInfoData* pColInfoData = taosArrayGet(p->pDataBlock, 0); + colDataAppend(pColInfoData, numOfRows, n, false); + + // database name + pColInfoData = taosArrayGet(p->pDataBlock, 1); + colDataAppend(pColInfoData, numOfRows, dbname, false); + + // vgId + pColInfoData = taosArrayGet(p->pDataBlock, 6); + colDataAppend(pColInfoData, numOfRows, (char*)&vgId, false); + + int32_t tableType = mr.me.type; + if (tableType == TSDB_CHILD_TABLE) { + // create time + int64_t ts = mr.me.ctbEntry.ctime; + pColInfoData = taosArrayGet(p->pDataBlock, 2); + colDataAppend(pColInfoData, numOfRows, (char*)&ts, false); + + SMetaReader mr1 = {0}; + metaReaderInit(&mr1, pInfo->readHandle.meta, META_READER_NOLOCK); + + int64_t suid = mr.me.ctbEntry.suid; + int32_t code = metaGetTableEntryByUid(&mr1, suid); + if (code != TSDB_CODE_SUCCESS) { + qError("failed to get super table meta, cname:%s, suid:0x%" PRIx64 ", code:%s, %s", pInfo->pCur->mr.me.name, + suid, tstrerror(terrno), GET_TASKID(pTaskInfo)); + metaReaderClear(&mr1); + metaReaderClear(&mr); + T_LONG_JMP(pTaskInfo->env, terrno); + } + pColInfoData = taosArrayGet(p->pDataBlock, 3); + colDataAppend(pColInfoData, numOfRows, (char*)&mr1.me.stbEntry.schemaRow.nCols, false); + + // super table name + STR_TO_VARSTR(n, mr1.me.name); + pColInfoData = taosArrayGet(p->pDataBlock, 4); + colDataAppend(pColInfoData, numOfRows, n, false); + metaReaderClear(&mr1); + + // table comment + pColInfoData = taosArrayGet(p->pDataBlock, 8); + if (mr.me.ctbEntry.commentLen > 0) { + char comment[TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(comment, mr.me.ctbEntry.comment); + colDataAppend(pColInfoData, numOfRows, comment, false); + } else if (mr.me.ctbEntry.commentLen == 0) { + char comment[VARSTR_HEADER_SIZE + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(comment, ""); + 
colDataAppend(pColInfoData, numOfRows, comment, false);
+      } else {
+        colDataAppendNULL(pColInfoData, numOfRows);
+      }
+
+      // uid
+      pColInfoData = taosArrayGet(p->pDataBlock, 5);
+      colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.uid, false);
+
+      // ttl
+      pColInfoData = taosArrayGet(p->pDataBlock, 7);
+      colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.ctbEntry.ttlDays, false);
+
+      STR_TO_VARSTR(n, "CHILD_TABLE");
+
+    } else if (tableType == TSDB_NORMAL_TABLE) {
+      // create time; read from the local reader mr, since pCur is not opened in the by-uids path
+      pColInfoData = taosArrayGet(p->pDataBlock, 2);
+      colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.ntbEntry.ctime, false);
+
+      // number of columns
+      pColInfoData = taosArrayGet(p->pDataBlock, 3);
+      colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.ntbEntry.schemaRow.nCols, false);
+
+      // super table name
+      pColInfoData = taosArrayGet(p->pDataBlock, 4);
+      colDataAppendNULL(pColInfoData, numOfRows);
+
+      // table comment
+      pColInfoData = taosArrayGet(p->pDataBlock, 8);
+      if (mr.me.ntbEntry.commentLen > 0) {
+        char comment[TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE] = {0};
+        STR_TO_VARSTR(comment, mr.me.ntbEntry.comment);
+        colDataAppend(pColInfoData, numOfRows, comment, false);
+      } else if (mr.me.ntbEntry.commentLen == 0) {
+        char comment[VARSTR_HEADER_SIZE + VARSTR_HEADER_SIZE] = {0};
+        STR_TO_VARSTR(comment, "");
+        colDataAppend(pColInfoData, numOfRows, comment, false);
+      } else {
+        colDataAppendNULL(pColInfoData, numOfRows);
+      }
+
+      // uid
+      pColInfoData = taosArrayGet(p->pDataBlock, 5);
+      colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.uid, false);
+
+      // ttl
+      pColInfoData = taosArrayGet(p->pDataBlock, 7);
+      colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.ntbEntry.ttlDays, false);
+
+      STR_TO_VARSTR(n, "NORMAL_TABLE");
+      // impl later
+    }
+
+    metaReaderClear(&mr);
+
+    pColInfoData = taosArrayGet(p->pDataBlock, 9);
+    colDataAppend(pColInfoData, numOfRows, n, false);
+
+    if (++numOfRows >= pOperator->resultInfo.capacity) {
+      p->info.rows = numOfRows;
+      pInfo->pRes->info.rows = numOfRows;
+
+      relocateColumnData(pInfo->pRes, pInfo->matchInfo.pList, p->pDataBlock, false);
+      doFilterResult(pInfo->pRes, pOperator->exprSupp.pFilterInfo);
+
+      blockDataCleanup(p);
+      numOfRows = 0;
+
+      if (pInfo->pRes->info.rows > 0) {
+        break;
+      }
+    }
+  }
+
+  if (numOfRows > 0) {
+    p->info.rows = numOfRows;
+    pInfo->pRes->info.rows = numOfRows;
+
+    relocateColumnData(pInfo->pRes, pInfo->matchInfo.pList, p->pDataBlock, false);
+    doFilterResult(pInfo->pRes, pOperator->exprSupp.pFilterInfo);
+
+    blockDataCleanup(p);
+    numOfRows = 0;
+  }
+
+  if (i >= taosArrayGetSize(pIdx->uids)) {
+    setOperatorCompleted(pOperator);
+  } else {
+    pIdx->lastIdx = i + 1;
+  }
+
+  blockDataDestroy(p);
+
+  pInfo->loadInfo.totalRows += pInfo->pRes->info.rows;
+  return (pInfo->pRes->info.rows == 0) ? 
NULL : pInfo->pRes; +} + +static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) { + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + + SSysTableScanInfo* pInfo = pOperator->info; + if (pInfo->pCur == NULL) { + pInfo->pCur = metaOpenTbCursor(pInfo->readHandle.meta); + } + + blockDataCleanup(pInfo->pRes); + int32_t numOfRows = 0; + + const char* db = NULL; + int32_t vgId = 0; + vnodeGetInfo(pInfo->readHandle.vnode, &db, &vgId); + + SName sn = {0}; + char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + tNameFromString(&sn, db, T_NAME_ACCT | T_NAME_DB); + + tNameGetDbName(&sn, varDataVal(dbname)); + varDataSetLen(dbname, strlen(varDataVal(dbname))); + + SSDataBlock* p = buildInfoSchemaTableMetaBlock(TSDB_INS_TABLE_TABLES); + blockDataEnsureCapacity(p, pOperator->resultInfo.capacity); + + char n[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; + + int32_t ret = 0; + while ((ret = metaTbCursorNext(pInfo->pCur)) == 0) { + STR_TO_VARSTR(n, pInfo->pCur->mr.me.name); + + // table name + SColumnInfoData* pColInfoData = taosArrayGet(p->pDataBlock, 0); + colDataAppend(pColInfoData, numOfRows, n, false); + + // database name + pColInfoData = taosArrayGet(p->pDataBlock, 1); + colDataAppend(pColInfoData, numOfRows, dbname, false); + + // vgId + pColInfoData = taosArrayGet(p->pDataBlock, 6); + colDataAppend(pColInfoData, numOfRows, (char*)&vgId, false); + + int32_t tableType = pInfo->pCur->mr.me.type; + if (tableType == TSDB_CHILD_TABLE) { + // create time + int64_t ts = pInfo->pCur->mr.me.ctbEntry.ctime; + pColInfoData = taosArrayGet(p->pDataBlock, 2); + colDataAppend(pColInfoData, numOfRows, (char*)&ts, false); + + SMetaReader mr = {0}; + metaReaderInit(&mr, pInfo->readHandle.meta, META_READER_NOLOCK); + + uint64_t suid = pInfo->pCur->mr.me.ctbEntry.suid; + int32_t code = metaGetTableEntryByUid(&mr, suid); + if (code != TSDB_CODE_SUCCESS) { + qError("failed to get super table meta, cname:%s, suid:0x%" PRIx64 ", code:%s, %s", pInfo->pCur->mr.me.name, + suid, tstrerror(terrno), GET_TASKID(pTaskInfo)); + metaReaderClear(&mr); + metaCloseTbCursor(pInfo->pCur); + pInfo->pCur = NULL; + T_LONG_JMP(pTaskInfo->env, terrno); + } + + // number of columns + pColInfoData = taosArrayGet(p->pDataBlock, 3); + colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.stbEntry.schemaRow.nCols, false); + + // super table name + STR_TO_VARSTR(n, mr.me.name); + pColInfoData = taosArrayGet(p->pDataBlock, 4); + colDataAppend(pColInfoData, numOfRows, n, false); + metaReaderClear(&mr); + + // table comment + pColInfoData = taosArrayGet(p->pDataBlock, 8); + if (pInfo->pCur->mr.me.ctbEntry.commentLen > 0) { + char comment[TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(comment, pInfo->pCur->mr.me.ctbEntry.comment); + colDataAppend(pColInfoData, numOfRows, comment, false); + } else if (pInfo->pCur->mr.me.ctbEntry.commentLen == 0) { + char comment[VARSTR_HEADER_SIZE + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(comment, ""); + colDataAppend(pColInfoData, numOfRows, comment, false); + } else { + colDataAppendNULL(pColInfoData, numOfRows); + } + + // uid + pColInfoData = taosArrayGet(p->pDataBlock, 5); + colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.uid, false); + + // ttl + pColInfoData = taosArrayGet(p->pDataBlock, 7); + colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ctbEntry.ttlDays, false); + + STR_TO_VARSTR(n, "CHILD_TABLE"); + } else if (tableType == TSDB_NORMAL_TABLE) { + // create time + pColInfoData = taosArrayGet(p->pDataBlock, 2); + 
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.ctime, false);
+
+      // number of columns
+      pColInfoData = taosArrayGet(p->pDataBlock, 3);
+      colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.schemaRow.nCols, false);
+
+      // super table name
+      pColInfoData = taosArrayGet(p->pDataBlock, 4);
+      colDataAppendNULL(pColInfoData, numOfRows);
+
+      // table comment
+      pColInfoData = taosArrayGet(p->pDataBlock, 8);
+      if (pInfo->pCur->mr.me.ntbEntry.commentLen > 0) {
+        char comment[TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE] = {0};
+        STR_TO_VARSTR(comment, pInfo->pCur->mr.me.ntbEntry.comment);
+        colDataAppend(pColInfoData, numOfRows, comment, false);
+      } else if (pInfo->pCur->mr.me.ntbEntry.commentLen == 0) {
+        char comment[VARSTR_HEADER_SIZE + VARSTR_HEADER_SIZE] = {0};
+        STR_TO_VARSTR(comment, "");
+        colDataAppend(pColInfoData, numOfRows, comment, false);
+      } else {
+        colDataAppendNULL(pColInfoData, numOfRows);
+      }
+
+      // uid
+      pColInfoData = taosArrayGet(p->pDataBlock, 5);
+      colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.uid, false);
+
+      // ttl
+      pColInfoData = taosArrayGet(p->pDataBlock, 7);
+      colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.ttlDays, false);
+
+      STR_TO_VARSTR(n, "NORMAL_TABLE");
+    }
+
+    pColInfoData = taosArrayGet(p->pDataBlock, 9);
+    colDataAppend(pColInfoData, numOfRows, n, false);
+
+    if (++numOfRows >= pOperator->resultInfo.capacity) {
+      p->info.rows = numOfRows;
+      pInfo->pRes->info.rows = numOfRows;
+
+      relocateColumnData(pInfo->pRes, pInfo->matchInfo.pList, p->pDataBlock, false);
+      doFilterResult(pInfo->pRes, pOperator->exprSupp.pFilterInfo);
+
+      blockDataCleanup(p);
+      numOfRows = 0;
+
+      if (pInfo->pRes->info.rows > 0) {
+        break;
+      }
+    }
+  }
+
+  if (numOfRows > 0) {
+    p->info.rows = numOfRows;
+    pInfo->pRes->info.rows = numOfRows;
+
+    relocateColumnData(pInfo->pRes, pInfo->matchInfo.pList, p->pDataBlock, false);
+    doFilterResult(pInfo->pRes, pOperator->exprSupp.pFilterInfo);
+
+    blockDataCleanup(p);
+    numOfRows = 0;
+  }
+
+  blockDataDestroy(p);
+
+  // todo: temporarily free the cursor here; the true reason why the free is not valid needs to be found
+  if (ret != 0) {
+    metaCloseTbCursor(pInfo->pCur);
+    pInfo->pCur = NULL;
+    setOperatorCompleted(pOperator);
+  }
+
+  pInfo->loadInfo.totalRows += pInfo->pRes->info.rows;
+  return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes;
+}
+
+static SSDataBlock* sysTableScanUserTables(SOperatorInfo* pOperator) {
+  SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+  SSysTableScanInfo* pInfo = pOperator->info;
+
+  SNode* pCondition = pInfo->pCondition;
+  if (pOperator->status == OP_EXEC_DONE) {
+    return NULL;
+  }
+
+  // the retrieve is executed on the mnode, so return tables that belong to the information schema database.
+  if (pInfo->readHandle.mnd != NULL) {
+    buildSysDbTableInfo(pInfo, pOperator->resultInfo.capacity);
+    doFilterResult(pInfo->pRes, pOperator->exprSupp.pFilterInfo);
+    pInfo->loadInfo.totalRows += pInfo->pRes->info.rows;
+
+    setOperatorCompleted(pOperator);
+    return (pInfo->pRes->info.rows == 0) ? 
NULL : pInfo->pRes; + } else { + if (pInfo->showRewrite == false) { + if (pCondition != NULL && pInfo->pIdx == NULL) { + SSTabFltArg arg = {.pMeta = pInfo->readHandle.meta, .pVnode = pInfo->readHandle.vnode}; + + SSysTableIndex* idx = taosMemoryMalloc(sizeof(SSysTableIndex)); + idx->init = 0; + idx->uids = taosArrayInit(128, sizeof(int64_t)); + idx->lastIdx = 0; + + pInfo->pIdx = idx; // set idx arg + + int flt = optSysTabFilte(&arg, pCondition, idx->uids); + if (flt == 0) { + pInfo->pIdx->init = 1; + SSDataBlock* blk = sysTableBuildUserTablesByUids(pOperator); + return blk; + } else if (flt == -2) { + qDebug("%s failed to get sys table info by idx, empty result", GET_TASKID(pTaskInfo)); + return NULL; + } else if (flt == -1) { + // not idx + qDebug("%s failed to get sys table info by idx, scan sys table one by one", GET_TASKID(pTaskInfo)); + } + } else if (pCondition != NULL && (pInfo->pIdx != NULL && pInfo->pIdx->init == 1)) { + SSDataBlock* blk = sysTableBuildUserTablesByUids(pOperator); + return blk; + } + } + + return sysTableBuildUserTables(pOperator); + } + return NULL; +} + +static SSDataBlock* sysTableScanUserSTables(SOperatorInfo* pOperator) { + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SSysTableScanInfo* pInfo = pOperator->info; + if (pOperator->status == OP_EXEC_DONE) { + return NULL; + } + + pInfo->pRes->info.rows = 0; + pOperator->status = OP_EXEC_DONE; + + pInfo->loadInfo.totalRows += pInfo->pRes->info.rows; + return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes; +} + +static int32_t getSysTableDbNameColId(const char* pTable) { + // if (0 == strcmp(TSDB_INS_TABLE_INDEXES, pTable)) { + // return 1; + // } + return TSDB_INS_USER_STABLES_DBNAME_COLID; +} + +static EDealRes getDBNameFromConditionWalker(SNode* pNode, void* pContext) { + int32_t code = TSDB_CODE_SUCCESS; + ENodeType nType = nodeType(pNode); + + switch (nType) { + case QUERY_NODE_OPERATOR: { + SOperatorNode* node = (SOperatorNode*)pNode; + if (OP_TYPE_EQUAL == node->opType) { + *(int32_t*)pContext = 1; + return DEAL_RES_CONTINUE; + } + + *(int32_t*)pContext = 0; + return DEAL_RES_IGNORE_CHILD; + } + case QUERY_NODE_COLUMN: { + if (1 != *(int32_t*)pContext) { + return DEAL_RES_CONTINUE; + } + + SColumnNode* node = (SColumnNode*)pNode; + if (getSysTableDbNameColId(node->tableName) == node->colId) { + *(int32_t*)pContext = 2; + return DEAL_RES_CONTINUE; + } + + *(int32_t*)pContext = 0; + return DEAL_RES_CONTINUE; + } + case QUERY_NODE_VALUE: { + if (2 != *(int32_t*)pContext) { + return DEAL_RES_CONTINUE; + } + + SValueNode* node = (SValueNode*)pNode; + char* dbName = nodesGetValueFromNode(node); + strncpy(pContext, varDataVal(dbName), varDataLen(dbName)); + *((char*)pContext + varDataLen(dbName)) = 0; + return DEAL_RES_END; // stop walk + } + default: + break; + } + return DEAL_RES_CONTINUE; +} + +static void getDBNameFromCondition(SNode* pCondition, const char* dbName) { + if (NULL == pCondition) { + return; + } + nodesWalkExpr(pCondition, getDBNameFromConditionWalker, (char*)dbName); +} + +static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { + // build message and send to mnode to fetch the content of system tables. 
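+  // ins_tables and ins_tags are scanned locally (see sysTableScanUserTables/sysTableScanUserTags);
+  // ins_stables on a system database short-circuits to an empty result; every other system table
+  // is fetched from the mnode (or a dnode for dnode variables) in the retrieve loop below.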
+  SExecTaskInfo*     pTaskInfo = pOperator->pTaskInfo;
+  SSysTableScanInfo* pInfo = pOperator->info;
+  char               dbName[TSDB_DB_NAME_LEN] = {0};
+
+  const char* name = tNameGetTableName(&pInfo->name);
+  if (pInfo->showRewrite) {
+    getDBNameFromCondition(pInfo->pCondition, dbName);
+    sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName);
+  }
+
+  if (strncasecmp(name, TSDB_INS_TABLE_TABLES, TSDB_TABLE_FNAME_LEN) == 0) {
+    return sysTableScanUserTables(pOperator);
+  } else if (strncasecmp(name, TSDB_INS_TABLE_TAGS, TSDB_TABLE_FNAME_LEN) == 0) {
+    return sysTableScanUserTags(pOperator);
+  } else if (strncasecmp(name, TSDB_INS_TABLE_STABLES, TSDB_TABLE_FNAME_LEN) == 0 && pInfo->showRewrite &&
+             IS_SYS_DBNAME(dbName)) {
+    return sysTableScanUserSTables(pOperator);
+  } else {  // load the meta from the mnode of the given epset
+    if (pOperator->status == OP_EXEC_DONE) {
+      return NULL;
+    }
+
+    while (1) {
+      int64_t startTs = taosGetTimestampUs();
+      tstrncpy(pInfo->req.tb, tNameGetTableName(&pInfo->name), tListLen(pInfo->req.tb));
+      tstrncpy(pInfo->req.user, pInfo->pUser, tListLen(pInfo->req.user));
+
+      int32_t contLen = tSerializeSRetrieveTableReq(NULL, 0, &pInfo->req);
+      char*   buf1 = taosMemoryCalloc(1, contLen);
+      tSerializeSRetrieveTableReq(buf1, contLen, &pInfo->req);
+
+      // send the fetch request for the remote task result
+      SMsgSendInfo* pMsgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
+      if (NULL == pMsgSendInfo) {
+        qError("%s prepare message %d failed", GET_TASKID(pTaskInfo), (int32_t)sizeof(SMsgSendInfo));
+        pTaskInfo->code = TSDB_CODE_QRY_OUT_OF_MEMORY;
+        return NULL;
+      }
+
+      int32_t msgType = (strcasecmp(name, TSDB_INS_TABLE_DNODE_VARIABLES) == 0) ? TDMT_DND_SYSTABLE_RETRIEVE
+                                                                                : TDMT_MND_SYSTABLE_RETRIEVE;
+
+      pMsgSendInfo->param = pOperator;
+      pMsgSendInfo->msgInfo.pData = buf1;
+      pMsgSendInfo->msgInfo.len = contLen;
+      pMsgSendInfo->msgType = msgType;
+      pMsgSendInfo->fp = loadSysTableCallback;
+      pMsgSendInfo->requestId = pTaskInfo->id.queryId;
+
+      int64_t transporterId = 0;
+      int32_t code =
+          asyncSendMsgToServer(pInfo->readHandle.pMsgCb->clientRpc, &pInfo->epSet, &transporterId, pMsgSendInfo);
+      tsem_wait(&pInfo->ready);
+
+      if (pTaskInfo->code) {
+        qDebug("%s load meta data from mnode failed, totalRows:%" PRIu64 ", code:%s", GET_TASKID(pTaskInfo),
+               pInfo->loadInfo.totalRows, tstrerror(pTaskInfo->code));
+        return NULL;
+      }
+
+      SRetrieveMetaTableRsp* pRsp = pInfo->pRsp;
+      pInfo->req.showId = pRsp->handle;
+
+      if (pRsp->numOfRows == 0 || pRsp->completed) {
+        pOperator->status = OP_EXEC_DONE;
+        qDebug("%s load meta data from mnode completed, rowsOfSource:%d, totalRows:%" PRIu64, GET_TASKID(pTaskInfo),
+               pRsp->numOfRows, pInfo->loadInfo.totalRows);
+
+        if (pRsp->numOfRows == 0) {
+          taosMemoryFree(pRsp);
+          return NULL;
+        }
+      }
+
+      char* pStart = pRsp->data;
+      extractDataBlockFromFetchRsp(pInfo->pRes, pRsp->data, pInfo->matchInfo.pList, &pStart);
+      updateLoadRemoteInfo(&pInfo->loadInfo, pRsp->numOfRows, pRsp->compLen, startTs, pOperator);
+
+      // todo log the filter info
+      doFilterResult(pInfo->pRes, pOperator->exprSupp.pFilterInfo);
+      taosMemoryFree(pRsp);
+      if (pInfo->pRes->info.rows > 0) {
+        return pInfo->pRes;
+      } else if (pOperator->status == OP_EXEC_DONE) {
+        return NULL;
+      }
+    }
+  }
+}
+
+SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode* pScanPhyNode,
+                                              const char* pUser, SExecTaskInfo* pTaskInfo) {
+  SSysTableScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SSysTableScanInfo));
+  SOperatorInfo*     pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
+  int32_t            code = TSDB_CODE_OUT_OF_MEMORY;  // keep a defined code for the early goto below
+  if
(pInfo == NULL || pOperator == NULL) {
+    goto _error;
+  }
+
+  SScanPhysiNode*     pScanNode = &pScanPhyNode->scan;
+  SDataBlockDescNode* pDescNode = pScanNode->node.pOutputDataBlockDesc;
+
+  int32_t num = 0;
+  code = extractColMatchInfo(pScanNode->pScanCols, pDescNode, &num, COL_MATCH_FROM_COL_ID, &pInfo->matchInfo);
+  if (code != TSDB_CODE_SUCCESS) {
+    goto _error;
+  }
+
+  pInfo->accountId = pScanPhyNode->accountId;
+  pInfo->pUser = taosMemoryStrDup((void*)pUser);
+  pInfo->sysInfo = pScanPhyNode->sysInfo;
+  pInfo->showRewrite = pScanPhyNode->showRewrite;
+  pInfo->pRes = createResDataBlock(pDescNode);
+
+  pInfo->pCondition = pScanNode->node.pConditions;
+  code = filterInitFromNode(pScanNode->node.pConditions, &pOperator->exprSupp.pFilterInfo, 0);
+  if (code != TSDB_CODE_SUCCESS) {
+    goto _error;
+  }
+
+  initResultSizeInfo(&pOperator->resultInfo, 4096);
+  blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
+
+  tNameAssign(&pInfo->name, &pScanNode->tableName);
+  const char* name = tNameGetTableName(&pInfo->name);
+
+  if (strncasecmp(name, TSDB_INS_TABLE_TABLES, TSDB_TABLE_FNAME_LEN) == 0 ||
+      strncasecmp(name, TSDB_INS_TABLE_TAGS, TSDB_TABLE_FNAME_LEN) == 0) {
+    pInfo->readHandle = *(SReadHandle*)readHandle;
+  } else {
+    tsem_init(&pInfo->ready, 0, 0);
+    pInfo->epSet = pScanPhyNode->mgmtEpSet;
+    pInfo->readHandle = *(SReadHandle*)readHandle;
+  }
+
+  setOperatorInfo(pOperator, "SysTableScanOperator", QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN, false, OP_NOT_OPENED,
+                  pInfo, pTaskInfo);
+  pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock);
+  pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doSysTableScan, NULL, destroySysScanOperator, NULL);
+  return pOperator;
+
+  _error:
+  if (pInfo != NULL) {
+    destroySysScanOperator(pInfo);
+  }
+  taosMemoryFreeClear(pOperator);
+  pTaskInfo->code = code;
+  return NULL;
+}
+
+void destroySysScanOperator(void* param) {
+  SSysTableScanInfo* pInfo = (SSysTableScanInfo*)param;
+  tsem_destroy(&pInfo->ready);
+  blockDataDestroy(pInfo->pRes);
+
+  const char* name = tNameGetTableName(&pInfo->name);
+  if (strncasecmp(name, TSDB_INS_TABLE_TABLES, TSDB_TABLE_FNAME_LEN) == 0 ||
+      strncasecmp(name, TSDB_INS_TABLE_TAGS, TSDB_TABLE_FNAME_LEN) == 0 || pInfo->pCur != NULL) {
+    metaCloseTbCursor(pInfo->pCur);
+    pInfo->pCur = NULL;
+  }
+  if (pInfo->pIdx) {
+    taosArrayDestroy(pInfo->pIdx->uids);
+    taosMemoryFree(pInfo->pIdx);
+    pInfo->pIdx = NULL;
+  }
+
+  taosArrayDestroy(pInfo->matchInfo.pList);
+  taosMemoryFreeClear(pInfo->pUser);
+
+  taosMemoryFreeClear(param);
+}
+
+int32_t loadSysTableCallback(void* param, SDataBuf* pMsg, int32_t code) {
+  SOperatorInfo*     operator=(SOperatorInfo*) param;
+  SSysTableScanInfo* pScanResInfo = (SSysTableScanInfo*)operator->info;
+  if (TSDB_CODE_SUCCESS == code) {
+    pScanResInfo->pRsp = pMsg->pData;
+
+    SRetrieveMetaTableRsp* pRsp = pScanResInfo->pRsp;
+    pRsp->numOfRows = htonl(pRsp->numOfRows);
+    pRsp->useconds = htobe64(pRsp->useconds);
+    pRsp->handle = htobe64(pRsp->handle);
+    pRsp->compLen = htonl(pRsp->compLen);
+  } else {
+    operator->pTaskInfo->code = code;
+  }
+
+  tsem_post(&pScanResInfo->ready);
+  return TSDB_CODE_SUCCESS;
+}
+
+SSDataBlock* doFilterResult(SSDataBlock* pDataBlock, SFilterInfo* pFilterInfo) {
+  if (pFilterInfo == NULL) {
+    return pDataBlock->info.rows == 0 ? NULL : pDataBlock;
+  }
+
+  doFilter(pDataBlock, pFilterInfo, NULL);
+  return pDataBlock->info.rows == 0 ?
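+         // a fully filtered block has zero rows; NULL means "no data" to the caller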
NULL : pDataBlock; +} + +static int32_t sysChkFilter__Comm(SNode* pNode) { + // impl + SOperatorNode* pOper = (SOperatorNode*)pNode; + EOperatorType opType = pOper->opType; + if (opType != OP_TYPE_EQUAL && opType != OP_TYPE_LOWER_EQUAL && opType != OP_TYPE_LOWER_THAN && + opType != OP_TYPE_GREATER_EQUAL && opType != OP_TYPE_GREATER_THAN) { + return -1; + } + return 0; +} + +static int32_t sysChkFilter__DBName(SNode* pNode) { + SOperatorNode* pOper = (SOperatorNode*)pNode; + + if (pOper->opType != OP_TYPE_EQUAL && pOper->opType != OP_TYPE_NOT_EQUAL) { + return -1; + } + + SValueNode* pVal = (SValueNode*)pOper->pRight; + if (!IS_STR_DATA_TYPE(pVal->node.resType.type)) { + return -1; + } + + return 0; +} +static int32_t sysChkFilter__VgroupId(SNode* pNode) { + SOperatorNode* pOper = (SOperatorNode*)pNode; + SValueNode* pVal = (SValueNode*)pOper->pRight; + if (!IS_INTEGER_TYPE(pVal->node.resType.type)) { + return -1; + } + return sysChkFilter__Comm(pNode); +} +static int32_t sysChkFilter__TableName(SNode* pNode) { + SOperatorNode* pOper = (SOperatorNode*)pNode; + SValueNode* pVal = (SValueNode*)pOper->pRight; + if (!IS_STR_DATA_TYPE(pVal->node.resType.type)) { + return -1; + } + return sysChkFilter__Comm(pNode); +} +static int32_t sysChkFilter__CreateTime(SNode* pNode) { + SOperatorNode* pOper = (SOperatorNode*)pNode; + SValueNode* pVal = (SValueNode*)pOper->pRight; + + if (!IS_TIMESTAMP_TYPE(pVal->node.resType.type)) { + return -1; + } + return sysChkFilter__Comm(pNode); +} + +static int32_t sysChkFilter__Ncolumn(SNode* pNode) { + SOperatorNode* pOper = (SOperatorNode*)pNode; + SValueNode* pVal = (SValueNode*)pOper->pRight; + + if (!IS_INTEGER_TYPE(pVal->node.resType.type)) { + return -1; + } + return sysChkFilter__Comm(pNode); +} +static int32_t sysChkFilter__Ttl(SNode* pNode) { + SOperatorNode* pOper = (SOperatorNode*)pNode; + SValueNode* pVal = (SValueNode*)pOper->pRight; + + if (!IS_INTEGER_TYPE(pVal->node.resType.type)) { + return -1; + } + return sysChkFilter__Comm(pNode); +} +static int32_t sysChkFilter__STableName(SNode* pNode) { + SOperatorNode* pOper = (SOperatorNode*)pNode; + SValueNode* pVal = (SValueNode*)pOper->pRight; + if (!IS_STR_DATA_TYPE(pVal->node.resType.type)) { + return -1; + } + return sysChkFilter__Comm(pNode); +} +static int32_t sysChkFilter__Uid(SNode* pNode) { + SOperatorNode* pOper = (SOperatorNode*)pNode; + SValueNode* pVal = (SValueNode*)pOper->pRight; + if (!IS_INTEGER_TYPE(pVal->node.resType.type)) { + return -1; + } + return sysChkFilter__Comm(pNode); +} +static int32_t sysChkFilter__Type(SNode* pNode) { + SOperatorNode* pOper = (SOperatorNode*)pNode; + SValueNode* pVal = (SValueNode*)pOper->pRight; + if (!IS_INTEGER_TYPE(pVal->node.resType.type)) { + return -1; + } + return sysChkFilter__Comm(pNode); +} +static int32_t optSysTabFilteImpl(void* arg, SNode* cond, SArray* result) { + if (optSysCheckOper(cond) != 0) return -1; + + SOperatorNode* pNode = (SOperatorNode*)cond; + + int8_t i = 0; + for (; i < SYSTAB_FILTER_DICT_SIZE; i++) { + if (strcmp(filterDict[i].name, ((SColumnNode*)(pNode->pLeft))->colName) == 0) { + break; + } + } + if (i >= SYSTAB_FILTER_DICT_SIZE) return -1; + + if (filterDict[i].chkFunc(cond) != 0) return -1; + + return filterDict[i].fltFunc(arg, cond, result); +} + +static int32_t optSysCheckOper(SNode* pOpear) { + if (nodeType(pOpear) != QUERY_NODE_OPERATOR) return -1; + + SOperatorNode* pOper = (SOperatorNode*)pOpear; + if (pOper->opType < OP_TYPE_GREATER_THAN || pOper->opType > OP_TYPE_NOT_EQUAL) { + return -1; + } + + if 
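+      // index push-down only handles predicates of the "column <op> value" shape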
(nodeType(pOper->pLeft) != QUERY_NODE_COLUMN || nodeType(pOper->pRight) != QUERY_NODE_VALUE) {
+    return -1;
+  }
+  return 0;
+}
+
+static FORCE_INLINE int optSysBinarySearch(SArray* arr, int s, int e, uint64_t k) {
+  uint64_t v;
+  int32_t  m;
+  while (s <= e) {
+    m = s + (e - s) / 2;
+    v = *(uint64_t*)taosArrayGet(arr, m);
+    if (v >= k) {
+      e = m - 1;
+    } else {
+      s = m + 1;
+    }
+  }
+  return s;
+}
+
+void optSysIntersection(SArray* in, SArray* out) {
+  int32_t sz = (int32_t)taosArrayGetSize(in);
+  if (sz <= 0) {
+    return;
+  }
+  MergeIndex* mi = taosMemoryCalloc(sz, sizeof(MergeIndex));
+  for (int i = 0; i < sz; i++) {
+    SArray* t = taosArrayGetP(in, i);
+    mi[i].len = (int32_t)taosArrayGetSize(t);
+    mi[i].idx = 0;
+  }
+
+  SArray* base = taosArrayGetP(in, 0);
+  for (int i = 0; i < taosArrayGetSize(base); i++) {
+    uint64_t tgt = *(uint64_t*)taosArrayGet(base, i);
+    bool     has = true;
+    for (int j = 1; j < taosArrayGetSize(in); j++) {
+      SArray* oth = taosArrayGetP(in, j);
+      int     mid = optSysBinarySearch(oth, mi[j].idx, mi[j].len - 1, tgt);
+      if (mid >= 0 && mid < mi[j].len) {
+        uint64_t val = *(uint64_t*)taosArrayGet(oth, mid);
+        has = (val == tgt);
+        mi[j].idx = mid;
+      } else {
+        has = false;
+      }
+      if (!has) {
+        break;  // the candidate is missing from one list, so it cannot be in the intersection
+      }
+    }
+    if (has) {
+      taosArrayPush(out, &tgt);
+    }
+  }
+  taosMemoryFreeClear(mi);
+}
+
+static int tableUidCompare(const void* a, const void* b) {
+  int64_t u1 = *(int64_t*)a;
+  int64_t u2 = *(int64_t*)b;
+  if (u1 == u2) {
+    return 0;
+  }
+  return u1 < u2 ? -1 : 1;
+}
+
+static int32_t optSysMergeRslt(SArray* mRslt, SArray* rslt) {
+  // TODO: reuse common memory from mRslt
+  for (int i = 0; i < taosArrayGetSize(mRslt); i++) {
+    SArray* arslt = taosArrayGetP(mRslt, i);
+    taosArraySort(arslt, tableUidCompare);
+  }
+  optSysIntersection(mRslt, rslt);
+  return 0;
+}
+
+static int32_t optSysSpecialColumn(SNode* cond) {
+  SOperatorNode* pOper = (SOperatorNode*)cond;
+  SColumnNode*   pCol = (SColumnNode*)pOper->pLeft;
+  for (int i = 0; i < sizeof(SYSTABLE_SPECIAL_COL) / sizeof(SYSTABLE_SPECIAL_COL[0]); i++) {
+    if (0 == strcmp(pCol->colName, SYSTABLE_SPECIAL_COL[i])) {
+      return 1;
+    }
+  }
+  return 0;
+}
+
+static int32_t optSysTabFilte(void* arg, SNode* cond, SArray* result) {
+  int ret = -1;
+  if (nodeType(cond) == QUERY_NODE_OPERATOR) {
+    ret = optSysTabFilteImpl(arg, cond, result);
+    if (ret == 0) {
+      SOperatorNode* pOper = (SOperatorNode*)cond;
+      SColumnNode*   pCol = (SColumnNode*)pOper->pLeft;
+      if (0 == strcmp(pCol->colName, "create_time")) {
+        return 0;
+      }
+      return -1;
+    }
+    return ret;
+  }
+
+  if (nodeType(cond) != QUERY_NODE_LOGIC_CONDITION || ((SLogicConditionNode*)cond)->condType != LOGIC_COND_TYPE_AND) {
+    return ret;
+  }
+
+  SLogicConditionNode* pNode = (SLogicConditionNode*)cond;
+  SNodeList*           pList = (SNodeList*)pNode->pParameterList;
+
+  int32_t len = LIST_LENGTH(pList);
+
+  bool    hasIdx = false;
+  bool    hasRslt = true;
+  SArray* mRslt = taosArrayInit(len, POINTER_BYTES);
+
+  SListCell* cell = pList->pHead;
+  for (int i = 0; i < len; i++) {
+    if (cell == NULL) break;
+
+    SArray* aRslt = taosArrayInit(16, sizeof(int64_t));
+
+    ret = optSysTabFilteImpl(arg, cell->pNode, aRslt);
+    if (ret == 0) {
+      // has index
+      hasIdx = true;
+      if (optSysSpecialColumn(cell->pNode) == 0) {
+        taosArrayPush(mRslt, &aRslt);
+      } else {
+        // db_name/vgroup filters contribute no uid result
+        taosArrayDestroy(aRslt);
+      }
+    } else if (ret == -2) {
+      // the condition can produce no rows on this vnode
+      hasIdx = true;
+      hasRslt = false;
+      taosArrayDestroy(aRslt);
+      break;
+    } else {
+      taosArrayDestroy(aRslt);
+    }
+    cell = cell->pNext;
+  }
+  if (hasRslt && hasIdx) {
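+    // every AND-ed, index-backed condition contributed a uid list; sort each list and
+    // intersect them so that only uids satisfying all of those conditions remain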
optSysMergeRslt(mRslt, result); + } + + for (int i = 0; i < taosArrayGetSize(mRslt); i++) { + SArray* aRslt = taosArrayGetP(mRslt, i); + taosArrayDestroy(aRslt); + } + taosArrayDestroy(mRslt); + if (hasRslt == false) { + return -2; + } + if (hasRslt && hasIdx) { + cell = pList->pHead; + for (int i = 0; i < len; i++) { + if (cell == NULL) break; + SOperatorNode* pOper = (SOperatorNode*)cell->pNode; + SColumnNode* pCol = (SColumnNode*)pOper->pLeft; + if (0 == strcmp(pCol->colName, "create_time")) { + return 0; + } + cell = cell->pNext; + } + return -1; + } + return -1; +} \ No newline at end of file From 50a64b1ee8670a117d723bc588a6d8202e78d8b6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 22 Nov 2022 22:46:06 +0800 Subject: [PATCH 66/69] refactor: do some internal refactor. --- source/libs/executor/src/scanoperator.c | 172 --------------------- source/libs/executor/src/sysscanoperator.c | 170 ++++++++++++++++++++ 2 files changed, 170 insertions(+), 172 deletions(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 24ca3e2eed..0173dd1c6c 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -32,18 +32,9 @@ #include "ttypes.h" #include "vnode.h" -typedef struct SBlockDistInfo { - SSDataBlock* pResBlock; - STsdbReader* pHandle; - SReadHandle readHandle; - uint64_t uid; // table uid -} SBlockDistInfo; - #define SET_REVERSE_SCAN_FLAG(_info) ((_info)->scanFlag = REVERSE_SCAN) #define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC)) - - typedef struct STableMergeScanExecInfo { SFileBlockLoadRecorder blockRecorder; SSortExecInfo sortExecInfo; @@ -946,169 +937,6 @@ SOperatorInfo* createTableSeqScanOperatorInfo(void* pReadHandle, SExecTaskInfo* return pOperator; } -static int32_t doGetTableRowSize(void* pMeta, uint64_t uid, int32_t* rowLen, const char* idstr) { - *rowLen = 0; - - SMetaReader mr = {0}; - metaReaderInit(&mr, pMeta, 0); - int32_t code = metaGetTableEntryByUid(&mr, uid); - if (code != TSDB_CODE_SUCCESS) { - qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", uid, tstrerror(terrno), idstr); - metaReaderClear(&mr); - return terrno; - } - - if (mr.me.type == TSDB_SUPER_TABLE) { - int32_t numOfCols = mr.me.stbEntry.schemaRow.nCols; - for (int32_t i = 0; i < numOfCols; ++i) { - (*rowLen) += mr.me.stbEntry.schemaRow.pSchema[i].bytes; - } - } else if (mr.me.type == TSDB_CHILD_TABLE) { - uint64_t suid = mr.me.ctbEntry.suid; - tDecoderClear(&mr.coder); - code = metaGetTableEntryByUid(&mr, suid); - if (code != TSDB_CODE_SUCCESS) { - qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", suid, tstrerror(terrno), idstr); - metaReaderClear(&mr); - return terrno; - } - - int32_t numOfCols = mr.me.stbEntry.schemaRow.nCols; - - for (int32_t i = 0; i < numOfCols; ++i) { - (*rowLen) += mr.me.stbEntry.schemaRow.pSchema[i].bytes; - } - } else if (mr.me.type == TSDB_NORMAL_TABLE) { - int32_t numOfCols = mr.me.ntbEntry.schemaRow.nCols; - for (int32_t i = 0; i < numOfCols; ++i) { - (*rowLen) += mr.me.ntbEntry.schemaRow.pSchema[i].bytes; - } - } - - metaReaderClear(&mr); - return TSDB_CODE_SUCCESS; -} - -static SSDataBlock* doBlockInfoScan(SOperatorInfo* pOperator) { - if (pOperator->status == OP_EXEC_DONE) { - return NULL; - } - - SBlockDistInfo* pBlockScanInfo = pOperator->info; - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - - STableBlockDistInfo blockDistInfo = {.minRows = INT_MAX, .maxRows = INT_MIN}; - int32_t code = 
doGetTableRowSize(pBlockScanInfo->readHandle.meta, pBlockScanInfo->uid, - (int32_t*)&blockDistInfo.rowSize, GET_TASKID(pTaskInfo)); - if (code != TSDB_CODE_SUCCESS) { - T_LONG_JMP(pTaskInfo->env, code); - } - - tsdbGetFileBlocksDistInfo(pBlockScanInfo->pHandle, &blockDistInfo); - blockDistInfo.numOfInmemRows = (int32_t)tsdbGetNumOfRowsInMemTable(pBlockScanInfo->pHandle); - - SSDataBlock* pBlock = pBlockScanInfo->pResBlock; - - int32_t slotId = pOperator->exprSupp.pExprInfo->base.resSchema.slotId; - SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, slotId); - - int32_t len = tSerializeBlockDistInfo(NULL, 0, &blockDistInfo); - char* p = taosMemoryCalloc(1, len + VARSTR_HEADER_SIZE); - tSerializeBlockDistInfo(varDataVal(p), len, &blockDistInfo); - varDataSetLen(p, len); - - colDataAppend(pColInfo, 0, p, false); - taosMemoryFree(p); - - pBlock->info.rows = 1; - pOperator->status = OP_EXEC_DONE; - return pBlock; -} - -static void destroyBlockDistScanOperatorInfo(void* param) { - SBlockDistInfo* pDistInfo = (SBlockDistInfo*)param; - blockDataDestroy(pDistInfo->pResBlock); - tsdbReaderClose(pDistInfo->pHandle); - taosMemoryFreeClear(param); -} - -static int32_t initTableblockDistQueryCond(uint64_t uid, SQueryTableDataCond* pCond) { - memset(pCond, 0, sizeof(SQueryTableDataCond)); - - pCond->order = TSDB_ORDER_ASC; - pCond->numOfCols = 1; - pCond->colList = taosMemoryCalloc(1, sizeof(SColumnInfo)); - if (pCond->colList == NULL) { - terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; - return terrno; - } - - pCond->colList->colId = 1; - pCond->colList->type = TSDB_DATA_TYPE_TIMESTAMP; - pCond->colList->bytes = sizeof(TSKEY); - - pCond->twindows = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX}; - pCond->suid = uid; - pCond->type = TIMEWINDOW_RANGE_CONTAINED; - pCond->startVersion = -1; - pCond->endVersion = -1; - - return TSDB_CODE_SUCCESS; -} - -SOperatorInfo* createDataBlockInfoScanOperator(SReadHandle* readHandle, SBlockDistScanPhysiNode* pBlockScanNode, - SExecTaskInfo* pTaskInfo) { - SBlockDistInfo* pInfo = taosMemoryCalloc(1, sizeof(SBlockDistInfo)); - SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); - if (pInfo == NULL || pOperator == NULL) { - pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY; - goto _error; - } - - { - SQueryTableDataCond cond = {0}; - - int32_t code = initTableblockDistQueryCond(pBlockScanNode->suid, &cond); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - STableListInfo* pTableListInfo = pTaskInfo->pTableInfoList; - size_t num = tableListGetSize(pTableListInfo); - void* pList = tableListGetInfo(pTableListInfo, 0); - - code = tsdbReaderOpen(readHandle->vnode, &cond, pList, num, &pInfo->pHandle, pTaskInfo->id.str); - cleanupQueryTableDataCond(&cond); - if (code != 0) { - goto _error; - } - } - - pInfo->readHandle = *readHandle; - pInfo->uid = pBlockScanNode->suid; - - pInfo->pResBlock = createResDataBlock(pBlockScanNode->node.pOutputDataBlockDesc); - blockDataEnsureCapacity(pInfo->pResBlock, 1); - - int32_t numOfCols = 0; - SExprInfo* pExprInfo = createExprInfo(pBlockScanNode->pScanPseudoCols, NULL, &numOfCols); - int32_t code = initExprSupp(&pOperator->exprSupp, pExprInfo, numOfCols); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - setOperatorInfo(pOperator, "DataBlockDistScanOperator", QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN, false, - OP_NOT_OPENED, pInfo, pTaskInfo); - pOperator->fpSet = - createOperatorFpSet(operatorDummyOpenFn, doBlockInfoScan, NULL, destroyBlockDistScanOperatorInfo, NULL); - return pOperator; - -_error: - 
taosMemoryFreeClear(pInfo); - taosMemoryFreeClear(pOperator); - return NULL; -} - static FORCE_INLINE void doClearBufferedBlocks(SStreamScanInfo* pInfo) { taosArrayClear(pInfo->pBlockLists); pInfo->validBlockIndex = 0; diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index f6a0c57b66..c5e1f2c214 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -78,6 +78,13 @@ typedef struct MergeIndex { int len; } MergeIndex; +typedef struct SBlockDistInfo { + SSDataBlock* pResBlock; + STsdbReader* pHandle; + SReadHandle readHandle; + uint64_t uid; // table uid +} SBlockDistInfo; + static int32_t sysChkFilter__Comm(SNode* pNode); static int32_t sysChkFilter__DBName(SNode* pNode); static int32_t sysChkFilter__VgroupId(SNode* pNode); @@ -1771,4 +1778,167 @@ static int32_t optSysTabFilte(void* arg, SNode* cond, SArray* result) { return -1; } return -1; +} + +static int32_t doGetTableRowSize(void* pMeta, uint64_t uid, int32_t* rowLen, const char* idstr) { + *rowLen = 0; + + SMetaReader mr = {0}; + metaReaderInit(&mr, pMeta, 0); + int32_t code = metaGetTableEntryByUid(&mr, uid); + if (code != TSDB_CODE_SUCCESS) { + qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", uid, tstrerror(terrno), idstr); + metaReaderClear(&mr); + return terrno; + } + + if (mr.me.type == TSDB_SUPER_TABLE) { + int32_t numOfCols = mr.me.stbEntry.schemaRow.nCols; + for (int32_t i = 0; i < numOfCols; ++i) { + (*rowLen) += mr.me.stbEntry.schemaRow.pSchema[i].bytes; + } + } else if (mr.me.type == TSDB_CHILD_TABLE) { + uint64_t suid = mr.me.ctbEntry.suid; + tDecoderClear(&mr.coder); + code = metaGetTableEntryByUid(&mr, suid); + if (code != TSDB_CODE_SUCCESS) { + qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", suid, tstrerror(terrno), idstr); + metaReaderClear(&mr); + return terrno; + } + + int32_t numOfCols = mr.me.stbEntry.schemaRow.nCols; + + for (int32_t i = 0; i < numOfCols; ++i) { + (*rowLen) += mr.me.stbEntry.schemaRow.pSchema[i].bytes; + } + } else if (mr.me.type == TSDB_NORMAL_TABLE) { + int32_t numOfCols = mr.me.ntbEntry.schemaRow.nCols; + for (int32_t i = 0; i < numOfCols; ++i) { + (*rowLen) += mr.me.ntbEntry.schemaRow.pSchema[i].bytes; + } + } + + metaReaderClear(&mr); + return TSDB_CODE_SUCCESS; +} + +static SSDataBlock* doBlockInfoScan(SOperatorInfo* pOperator) { + if (pOperator->status == OP_EXEC_DONE) { + return NULL; + } + + SBlockDistInfo* pBlockScanInfo = pOperator->info; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + + STableBlockDistInfo blockDistInfo = {.minRows = INT_MAX, .maxRows = INT_MIN}; + int32_t code = doGetTableRowSize(pBlockScanInfo->readHandle.meta, pBlockScanInfo->uid, + (int32_t*)&blockDistInfo.rowSize, GET_TASKID(pTaskInfo)); + if (code != TSDB_CODE_SUCCESS) { + T_LONG_JMP(pTaskInfo->env, code); + } + + tsdbGetFileBlocksDistInfo(pBlockScanInfo->pHandle, &blockDistInfo); + blockDistInfo.numOfInmemRows = (int32_t)tsdbGetNumOfRowsInMemTable(pBlockScanInfo->pHandle); + + SSDataBlock* pBlock = pBlockScanInfo->pResBlock; + + int32_t slotId = pOperator->exprSupp.pExprInfo->base.resSchema.slotId; + SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, slotId); + + int32_t len = tSerializeBlockDistInfo(NULL, 0, &blockDistInfo); + char* p = taosMemoryCalloc(1, len + VARSTR_HEADER_SIZE); + tSerializeBlockDistInfo(varDataVal(p), len, &blockDistInfo); + varDataSetLen(p, len); + + colDataAppend(pColInfo, 0, p, false); + taosMemoryFree(p); + + pBlock->info.rows = 1; + 
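+  // the whole distribution report is serialized into a single varchar cell, so exactly
+  // one row is produced and the operator completes right away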
pOperator->status = OP_EXEC_DONE; + return pBlock; +} + +static void destroyBlockDistScanOperatorInfo(void* param) { + SBlockDistInfo* pDistInfo = (SBlockDistInfo*)param; + blockDataDestroy(pDistInfo->pResBlock); + tsdbReaderClose(pDistInfo->pHandle); + taosMemoryFreeClear(param); +} + +static int32_t initTableblockDistQueryCond(uint64_t uid, SQueryTableDataCond* pCond) { + memset(pCond, 0, sizeof(SQueryTableDataCond)); + + pCond->order = TSDB_ORDER_ASC; + pCond->numOfCols = 1; + pCond->colList = taosMemoryCalloc(1, sizeof(SColumnInfo)); + if (pCond->colList == NULL) { + terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; + return terrno; + } + + pCond->colList->colId = 1; + pCond->colList->type = TSDB_DATA_TYPE_TIMESTAMP; + pCond->colList->bytes = sizeof(TSKEY); + + pCond->twindows = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX}; + pCond->suid = uid; + pCond->type = TIMEWINDOW_RANGE_CONTAINED; + pCond->startVersion = -1; + pCond->endVersion = -1; + + return TSDB_CODE_SUCCESS; +} + +SOperatorInfo* createDataBlockInfoScanOperator(SReadHandle* readHandle, SBlockDistScanPhysiNode* pBlockScanNode, + SExecTaskInfo* pTaskInfo) { + SBlockDistInfo* pInfo = taosMemoryCalloc(1, sizeof(SBlockDistInfo)); + SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); + if (pInfo == NULL || pOperator == NULL) { + pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY; + goto _error; + } + + { + SQueryTableDataCond cond = {0}; + + int32_t code = initTableblockDistQueryCond(pBlockScanNode->suid, &cond); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + + STableListInfo* pTableListInfo = pTaskInfo->pTableInfoList; + size_t num = tableListGetSize(pTableListInfo); + void* pList = tableListGetInfo(pTableListInfo, 0); + + code = tsdbReaderOpen(readHandle->vnode, &cond, pList, num, &pInfo->pHandle, pTaskInfo->id.str); + cleanupQueryTableDataCond(&cond); + if (code != 0) { + goto _error; + } + } + + pInfo->readHandle = *readHandle; + pInfo->uid = pBlockScanNode->suid; + + pInfo->pResBlock = createResDataBlock(pBlockScanNode->node.pOutputDataBlockDesc); + blockDataEnsureCapacity(pInfo->pResBlock, 1); + + int32_t numOfCols = 0; + SExprInfo* pExprInfo = createExprInfo(pBlockScanNode->pScanPseudoCols, NULL, &numOfCols); + int32_t code = initExprSupp(&pOperator->exprSupp, pExprInfo, numOfCols); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + + setOperatorInfo(pOperator, "DataBlockDistScanOperator", QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN, false, + OP_NOT_OPENED, pInfo, pTaskInfo); + pOperator->fpSet = + createOperatorFpSet(operatorDummyOpenFn, doBlockInfoScan, NULL, destroyBlockDistScanOperatorInfo, NULL); + return pOperator; + + _error: + taosMemoryFreeClear(pInfo); + taosMemoryFreeClear(pOperator); + return NULL; } \ No newline at end of file From 937e5d20bf171496649d48f3cf06b93dd9bb1436 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Wed, 23 Nov 2022 09:04:11 +0800 Subject: [PATCH 67/69] fix: insert stable error --- source/libs/parser/src/parInsertSql.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index 9c16bf645f..155fc7f831 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -172,8 +172,8 @@ static int32_t parseDuplicateUsingClause(SInsertParseContext* pCxt, SVnodeModifO } // pStmt->pSql -> field1_name, ...) 
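+// tag bindings may legally omit any column, so the primary-timestamp check below must
+// only fire when data columns (isTags == false) are being bound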
-static int32_t parseBoundColumns(SInsertParseContext* pCxt, const char** pSql, SParsedDataColInfo* pColList, - SSchema* pSchema) { +static int32_t parseBoundColumns(SInsertParseContext* pCxt, const char** pSql, bool isTags, + SParsedDataColInfo* pColList, SSchema* pSchema) { col_id_t nCols = pColList->numOfCols; pColList->numOfBound = 0; @@ -227,7 +227,7 @@ static int32_t parseBoundColumns(SInsertParseContext* pCxt, const char** pSql, S } } - if (pColList->cols[0].valStat == VAL_STAT_NONE) { + if (!isTags && pColList->cols[0].valStat == VAL_STAT_NONE) { return buildInvalidOperationMsg(&pCxt->msg, "primary timestamp column can not be null"); } @@ -529,7 +529,7 @@ static int32_t parseBoundTagsClause(SInsertParseContext* pCxt, SVnodeModifOpStmt } pStmt->pSql += index; - return parseBoundColumns(pCxt, &pStmt->pSql, &pCxt->tags, pTagsSchema); + return parseBoundColumns(pCxt, &pStmt->pSql, true, &pCxt->tags, pTagsSchema); } static int32_t parseTagValue(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, SSchema* pTagSchema, SToken* pToken, @@ -941,11 +941,12 @@ static int32_t parseBoundColumnsClause(SInsertParseContext* pCxt, SVnodeModifOpS return buildSyntaxErrMsg(&pCxt->msg, "keyword VALUES or FILE is expected", token.z); } // pStmt->pSql -> field1_name, ...) - return parseBoundColumns(pCxt, &pStmt->pSql, &pDataBuf->boundColumnInfo, getTableColumnSchema(pStmt->pTableMeta)); + return parseBoundColumns(pCxt, &pStmt->pSql, false, &pDataBuf->boundColumnInfo, + getTableColumnSchema(pStmt->pTableMeta)); } if (NULL != pStmt->pBoundCols) { - return parseBoundColumns(pCxt, &pStmt->pBoundCols, &pDataBuf->boundColumnInfo, + return parseBoundColumns(pCxt, &pStmt->pBoundCols, false, &pDataBuf->boundColumnInfo, getTableColumnSchema(pStmt->pTableMeta)); } From e36566f96dd4a959ff42815cfbacdca4ba3084f4 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 23 Nov 2022 09:09:51 +0800 Subject: [PATCH 68/69] fix: add windows taosdump for 3.0 (#18358) * fix: taostools 05c2030 * fix: taostools 05c2030 * fix: update taos-tools b232ec3 * fix: update taostools commit * fix: update taos-tools 656e8f9 * fix: update taos-tools 7c244b4 * fix: update a61cc65 taos-tools * fix: update taos-tools 32a10ee * fix: update taos-tools 540175c * fix: update taos-tools 579a77b * fix: update taos-tools bb30d7f * fix: update taos-tools 55c217c * fix: update taos-tools d0a9f4e * fix: updata taostools 8ae6f8b * fix: update taos-tools f32ec94 * fix: update taos-tools 2cb1d69 * fix: update taos-tools d981cee * fix: cmake file format * fix: taos-tools e718155 * fix: update taos-tools 904c558 * test: build tools on windows * fix: update taos-tools fd458f0 * fix: update taos-tools 4b268d2 * fix: update taos-tools c390746 * fix: update taos-tools c6d53d4 * fix: update taos-tools d422e0a * fix: taos-tools efa2a5f --- Jenkinsfile2 | 2 +- cmake/taostools_CMakeLists.txt.in | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 33c3ef55c9..de39f4fcec 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -303,7 +303,7 @@ def pre_test_build_win() { set CL=/MP8 echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> cmake" time /t - cmake .. -G "NMake Makefiles JOM" -DBUILD_TEST=true || exit 7 + cmake .. 
-G "NMake Makefiles JOM" -DBUILD_TEST=true -DBUILD_TOOLS=true || exit 7 echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> jom -j 6" time /t jom -j 6 || exit 8 diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index b996ffcd17..d1c448b094 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG e00ebd9 + GIT_TAG efa2a5f SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From 1a12fa92d486d49c74700547f48f9ba912290234 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 23 Nov 2022 09:36:26 +0800 Subject: [PATCH 69/69] docs: move topic drop to early phase in python tmq example (#18350) * docs: update examples/python/tmq_example.py with connector repo * Update tmq_example.py * docs: remove tb variable after get_table_name removed * docs: move topic drop to early phase Co-authored-by: Liu Jicong --- docs/examples/python/tmq_example.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/examples/python/tmq_example.py b/docs/examples/python/tmq_example.py index 836beb2417..a4625ca11a 100644 --- a/docs/examples/python/tmq_example.py +++ b/docs/examples/python/tmq_example.py @@ -4,6 +4,7 @@ from taos.tmq import * conn = taos.connect() print("init") +conn.execute("drop topic if exists topic_ctb_column") conn.execute("drop database if exists py_tmq") conn.execute("create database if not exists py_tmq vgroups 2") conn.select_db("py_tmq") @@ -15,7 +16,6 @@ conn.execute("create table if not exists tb2 using stb1 tags(2)") conn.execute("create table if not exists tb3 using stb1 tags(3)") print("create topic") -conn.execute("drop topic if exists topic_ctb_column") conn.execute( "create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1" )