From e1502677e544b36c56e6f564d1295eca4945b516 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Sun, 5 Jul 2020 00:09:02 +0800 Subject: [PATCH 01/38] jdbc test --- .../java/com/taosdata/jdbc/utils/TDNode.java | 233 ++++++++++++++++++ .../java/com/taosdata/jdbc/utils/TDNodes.java | 120 +++++++++ .../com/taosdata/jdbc/utils/TDSimClient.java | 91 +++++++ .../test/java/com/taosdata/jdbc/BaseTest.java | 34 ++- 4 files changed, 470 insertions(+), 8 deletions(-) create mode 100644 src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java create mode 100644 src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java create mode 100644 src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDSimClient.java diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java new file mode 100644 index 0000000000..d4ea6a5934 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java @@ -0,0 +1,233 @@ +package com.taosdata.jdbc.utils; + +import java.io.File; +import java.util.concurrent.TimeUnit; + +public class TDNode { + + private int index; + private int running; + private int deployed; + private boolean testCluster; + private int valgrind; + private String path; + private String cfgDir; + private String dataDir; + private String logDir; + private String cfgPath; + + public TDNode(int index) { + this.index = index; + running = 0; + deployed = 0; + testCluster = false; + valgrind = 0; + } + + public void setPath(String path) { + this.path = path; + } + + public void setValgrind(int valgrind) { + this.valgrind = valgrind; + } + + public void setTestCluster(boolean testCluster) { + this.testCluster = testCluster; + } + + public void start() { + String selfPath = System.getProperty("user.dir"); + String binPath = ""; + String projDir = selfPath + "../../../"; + + File dir = new File(projDir); + + File[] fileList = dir.listFiles(); + if(fileList == null || fileList.length == 0) { + System.out.println("The project path doens't exist"); + return; + } + + for(File file : fileList) { + if(file.getName().equals("taosd") && !file.getAbsolutePath().contains("packing")) { + binPath = file.getAbsolutePath(); + break; + } + } + + if(binPath.equals("")) { + System.out.println("taosd not found"); + return; + } else { + System.out.println("taosd found in " + binPath); + } + + if(this.deployed == 0) { + System.out.println("dnode" + index + "is not deployed"); + return; + } + + String cmd = ""; + if(this.valgrind == 0) { + cmd = "nohup " + binPath + " -c " + this.cfgDir + " > /dev/null 2>&1 & "; + } else { + String valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"; + cmd = "nohup " + valgrindCmdline + " " + binPath + " -c " + this.cfgDir + " 2>&1 & "; + } + + try{ + if(Runtime.getRuntime().exec(cmd).waitFor() != 0) { + return; + } + } catch (Exception e) { + e.printStackTrace(); + } + + this.running = 1; + } + + public void stop() { + String toBeKilled = ""; + if (this.valgrind == 0) { + toBeKilled = "taosd"; + } else { + toBeKilled = "valgrind.bin"; + } + + if (this.running != 0) { + String psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print " + toBeKilled + "}'"; + try { + Process ps = Runtime.getRuntime().exec(psCmd); + ps.waitFor(); + long pid = ps.pid(); + + String killCmd = "kill -9 " + pid; + Runtime.getRuntime().exec(killCmd).waitFor(); + + for(int port = 6030; port < 
6041; port ++) { + String fuserCmd = "fuser -k -n tcp " + port; + Runtime.getRuntime().exec(fuserCmd).waitFor(); + } + + if (this.valgrind == 1) { + TimeUnit.SECONDS.sleep(2); + } + } catch (Exception e) { + e.printStackTrace(); + } + + this.running = 0; + System.out.println("dnode:" + this.index + "is stopped by kill -9"); + } + } + + public void startIP() { + try{ + String cmd = "sudo ifconfig lo:" + index + "192.168.0." + index + " up"; + Runtime.getRuntime().exec(cmd).waitFor(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + + public void stopIP() { + try{ + String cmd = "sudo ifconfig lo:" + index + "192.168.0." + index + " down"; + Runtime.getRuntime().exec(cmd).waitFor(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + public void setCfgConfig(String option, String value) { + try{ + String cmd = "echo " + option + " " + value + " >> " + this.cfgPath; + Runtime.getRuntime().exec(cmd).waitFor(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + public String getDnodeRootDir() { + String dnodeRootDir = this.path + "/sim/psim/dnode" + this.index; + return dnodeRootDir; + } + + public String getDnodesRootDir() { + String dnodesRootDir = this.path + "/sim/psim" + this.index; + return dnodesRootDir; + } + + public void deploy() { + this.logDir = this.path + "/sim/dnode" + this.index + "/log"; + this.dataDir = this.path + "/sim/dnode" + this.index + "/data"; + this.cfgDir = this.path + "/sim/dnode" + this.index + "/cfg"; + this.cfgPath = this.path + "/sim/dnode" + this.index + "/cfg/taos.cfg"; + + try { + String cmd = "rm -rf " + this.logDir; + Runtime.getRuntime().exec(cmd).waitFor(); + + cmd = "rm -rf " + this.cfgDir; + Runtime.getRuntime().exec(cmd).waitFor(); + + cmd = "rm -rf " + this.dataDir; + Runtime.getRuntime().exec(cmd).waitFor(); + + cmd = "mkdir -p " + this.logDir; + Runtime.getRuntime().exec(cmd).waitFor(); + + cmd = "mkdir -p " + this.cfgDir; + Runtime.getRuntime().exec(cmd).waitFor(); + + cmd = "mkdir -p " + this.dataDir; + Runtime.getRuntime().exec(cmd).waitFor(); + + cmd = "touch " + this.cfgPath; + Runtime.getRuntime().exec(cmd).waitFor(); + } catch (Exception e) { + e.printStackTrace(); + } + + if(this.testCluster) { + startIP(); + setCfgConfig("masterIp", "192.168.0.1"); + setCfgConfig("secondIp", "192.168.0.2"); + setCfgConfig("publicIp", "192.168.0." + this.index); + setCfgConfig("internalIp", "192.168.0." + this.index); + setCfgConfig("privateIp", "192.168.0." 
+ this.index); + } + setCfgConfig("dataDir", this.dataDir); + setCfgConfig("logDir", this.logDir); + setCfgConfig("numOfLogLines", "100000000"); + setCfgConfig("mnodeEqualVnodeNum", "0"); + setCfgConfig("walLevel", "1"); + setCfgConfig("statusInterval", "1"); + setCfgConfig("numOfTotalVnodes", "64"); + setCfgConfig("numOfMnodes", "3"); + setCfgConfig("numOfThreadsPerCore", "2.0"); + setCfgConfig("monitor", "0"); + setCfgConfig("maxVnodeConnections", "30000"); + setCfgConfig("maxMgmtConnections", "30000"); + setCfgConfig("maxMeterConnections", "30000"); + setCfgConfig("maxShellConns", "30000"); + setCfgConfig("locale", "en_US.UTF-8"); + setCfgConfig("charset", "UTF-8"); + setCfgConfig("asyncLog", "0"); + setCfgConfig("anyIp", "0"); + setCfgConfig("dDebugFlag", "135"); + setCfgConfig("mDebugFlag", "135"); + setCfgConfig("sdbDebugFlag", "135"); + setCfgConfig("rpcDebugFlag", "135"); + setCfgConfig("tmrDebugFlag", "131"); + setCfgConfig("cDebugFlag", "135"); + setCfgConfig("httpDebugFlag", "135"); + setCfgConfig("monitorDebugFlag", "135"); + setCfgConfig("udebugFlag", "135"); + setCfgConfig("jnidebugFlag", "135"); + setCfgConfig("qdebugFlag", "135"); + this.deployed = 1; + } +} \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java new file mode 100644 index 0000000000..f6dd42be58 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java @@ -0,0 +1,120 @@ +package com.taosdata.jdbc.utils; + +import java.io.File; +import java.util.*; + + +public class TDNodes { + private ArrayList tdNodes; + private boolean simDeployed; + private boolean testCluster; + private int valgrind; + private String path; + + public TDNodes () { + tdNodes = new ArrayList<>(); + for(int i = 1; i < 11; i ++) { + tdNodes.add(new TDNode(i)); + } + this.simDeployed = false; + path = ""; + } + + public TDNodes(String path) { + try { + String psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" ; + Process ps = Runtime.getRuntime().exec(psCmd); + ps.wait(); + String killCmd = "kill -9 " + ps.pid(); + Runtime.getRuntime().exec(killCmd).waitFor(); + + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"; + ps = Runtime.getRuntime().exec(psCmd); + ps.wait(); + killCmd = "kill -9 " + ps.pid(); + Runtime.getRuntime().exec(killCmd).waitFor(); + + String binPath = System.getProperty("user.dir"); + binPath += "/../../../debug"; + System.out.println("binPath: " + binPath); + + File file = new File(path); + binPath = file.getCanonicalPath(); + System.out.println("binPath real path: " + binPath); + + if (path.isEmpty()) { + file = new File(path + "/../../"); + path = file.getCanonicalPath(); + } + + for(int i = 0; i < tdNodes.size(); i++) { + tdNodes.get(i).setPath(path); + } + } catch (Exception e) { + e.printStackTrace(); + } + } + + public void setTestCluster(boolean testCluster) { + this.testCluster = testCluster; + } + + public void setValgrid(int valgrind) { + this.valgrind = valgrind; + } + + public void setPath(String path) { + this.path = path; + } + + public void check(int index) { + if(index < 1 || index > 10) { + System.out.println("index: " + index + " should on a scale of [1, 10]"); + return; + } + } + + public void deploy(int index) { + System.out.println("======Start deploying tsim====="); + TDSimClient sim = new TDSimClient(); + + sim.setPath(path); + System.out.println("====== " + path + "====="); + 
sim.setTestCluster(this.testCluster); + if(this.simDeployed == false ) { + sim.deploy(); + this.simDeployed = true; + } + + check(index); + tdNodes.get(index - 1).setTestCluster(this.testCluster); + tdNodes.get(index - 1).setValgrind(valgrind); + tdNodes.get(index - 1).deploy(); + } + + public void cfg(int index, String option, String value) { + check(index); + tdNodes.get(index - 1).setCfgConfig(option, value); + } + + public void start(int index) { + check(index); + tdNodes.get(index - 1).start(); + } + + public void stop(int index) { + check(index); + tdNodes.get(index - 1).stop(); + } + + public void startIP(int index) { + check(index); + tdNodes.get(index - 1).startIP(); + } + + public void stopIP(int index) { + check(index); + tdNodes.get(index - 1).stopIP(); + } + +} \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDSimClient.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDSimClient.java new file mode 100644 index 0000000000..1f3e813010 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDSimClient.java @@ -0,0 +1,91 @@ +package com.taosdata.jdbc.utils; + +import java.io.BufferedReader; +import java.io.InputStreamReader; + +public class TDSimClient { + + private boolean testCluster; + private String path; + private String cfgDir; + private String logDir; + private String cfgPath; + + public TDSimClient() { + testCluster = false; + } + + public void setTestCluster(boolean testCluster) { + this.testCluster = testCluster; + } + + public void setPath(String path) { + this.path = path; + } + + public void setCfgConfig(String option, String value) { + String cmd = "echo " + option + " " + value + " >> " + this.cfgPath; + + try { + Process ps = Runtime.getRuntime().exec(cmd); + + BufferedReader br = new BufferedReader(new InputStreamReader(ps.getInputStream())); + while(br.readLine() != null) { + System.out.println(br.readLine()); + } + + ps.waitFor(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + public void deploy() { + this.logDir = this.path + "/sim/psim/log"; + System.out.println("======logDir: " + logDir + "====="); + this.cfgDir = this.path + "/sim/psim/cfg"; + System.out.println("======cfgDir: " + cfgDir + "====="); + this.cfgPath = this.path + "/sim/psim/cfg/taos.cfg"; + System.out.println("======cfgPath: " + cfgPath + "====="); + + try { + String cmd = "rm -rf " + this.logDir; + Runtime.getRuntime().exec(cmd).waitFor(); + + cmd = "rm -rf " + this.cfgDir; + Runtime.getRuntime().exec(cmd).waitFor(); + + cmd = "mkdir -p " + this.logDir; + Runtime.getRuntime().exec(cmd).waitFor(); + + cmd = "mkdir -p " + this.cfgDir; + Runtime.getRuntime().exec(cmd).waitFor(); + + cmd = "touch " + this.cfgPath; + Runtime.getRuntime().exec(cmd).waitFor(); + } catch (Exception e) { + e.printStackTrace(); + } + + if(this.testCluster) { + setCfgConfig("masterIp", "192.168.0.1"); + setCfgConfig("secondIp", "192.168.0.2"); + } + setCfgConfig("logDir", this.logDir); + setCfgConfig("numOfLogLines", "100000000"); + setCfgConfig("numOfThreadsPerCore", "2.0"); + setCfgConfig("locale", "en_US.UTF-8"); + setCfgConfig("charset", "UTF-8"); + setCfgConfig("asyncLog", "0"); + setCfgConfig("anyIp", "0"); + setCfgConfig("sdbDebugFlag", "135"); + setCfgConfig("rpcDebugFlag", "135"); + setCfgConfig("tmrDebugFlag", "131"); + setCfgConfig("cDebugFlag", "135"); + setCfgConfig("udebugFlag", "135"); + setCfgConfig("jnidebugFlag", "135"); + setCfgConfig("qdebugFlag", "135"); + } + + +} \ No newline at end of file diff 
--git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java index fd9ab49c49..a547127d7e 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java @@ -1,25 +1,43 @@ package com.taosdata.jdbc; import java.io.BufferedReader; +import java.io.File; import java.io.InputStreamReader; +import com.taosdata.jdbc.utils.TDNodes; + import org.junit.BeforeClass; public class BaseTest { + + private static boolean testCluster = false; + private static String deployPath = System.getProperty("user.dir"); + private static int valgrind = 0; @BeforeClass public static void setupEnv() { try{ - String path = System.getProperty("user.dir"); - String bashPath = path + "/buildTDengine.sh"; + // String path = System.getProperty("user.dir"); + // String bashPath = path + "/buildTDengine.sh"; - Process ps = Runtime.getRuntime().exec(bashPath); - ps.waitFor(); + // Process ps = Runtime.getRuntime().exec(bashPath); + // ps.waitFor(); - BufferedReader br = new BufferedReader(new InputStreamReader(ps.getInputStream())); - while(br.readLine() != null) { - System.out.println(br.readLine()); - } + // BufferedReader br = new BufferedReader(new InputStreamReader(ps.getInputStream())); + // while(br.readLine() != null) { + // System.out.println(br.readLine()); + // } + + File file = new File(deployPath + "/../../../"); + String rootPath = file.getCanonicalPath(); + + TDNodes tdNodes = new TDNodes(); + tdNodes.setPath(rootPath); + tdNodes.setTestCluster(testCluster); + tdNodes.setValgrid(valgrind); + + tdNodes.deploy(1); + tdNodes.start(1); } catch (Exception e) { e.printStackTrace(); } From 9f233e8cf785de56d82f913f0862311b20badd5d Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Tue, 7 Jul 2020 14:00:24 +0800 Subject: [PATCH 02/38] start taosd before jdbc tests --- .../java/com/taosdata/jdbc/utils/TDNode.java | 62 ++++++++++++++----- .../java/com/taosdata/jdbc/utils/TDNodes.java | 7 ++- .../com/taosdata/jdbc/utils/TDSimClient.java | 26 ++++---- .../test/java/com/taosdata/jdbc/BaseTest.java | 18 +++--- 4 files changed, 69 insertions(+), 44 deletions(-) diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java index d4ea6a5934..4db4d8f7c1 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java @@ -1,6 +1,7 @@ package com.taosdata.jdbc.utils; import java.io.File; +import java.util.*; import java.util.concurrent.TimeUnit; public class TDNode { @@ -36,24 +37,51 @@ public class TDNode { this.testCluster = testCluster; } + + public void searchTaosd(File dir, ArrayList taosdPath) { + File[] fileList = dir.listFiles(); + + if(fileList != null && fileList.length != 0) { + for(File file : fileList) { + if(file.isFile()) { + if(file.getName().equals("taosd")) { + taosdPath.add(file.getAbsolutePath()); + } + } else { + searchTaosd(file, taosdPath); + } + } + } + + return; + } + public void start() { String selfPath = System.getProperty("user.dir"); String binPath = ""; - String projDir = selfPath + "../../../"; + String projDir = selfPath + "../../../../"; - File dir = new File(projDir); - - File[] fileList = dir.listFiles(); - if(fileList == null || fileList.length == 0) { - System.out.println("The project path doens't exist"); - return; - } - - for(File file : fileList) { - 
if(file.getName().equals("taosd") && !file.getAbsolutePath().contains("packing")) { - binPath = file.getAbsolutePath(); - break; + try { + ArrayList taosdPath = new ArrayList<>(); + + File dir = new File(projDir); + String realProjDir = dir.getCanonicalPath(); + dir = new File(realProjDir); + System.out.println("project Dir: " + projDir); + searchTaosd(dir, taosdPath); + + if(taosdPath.size() == 0) { + System.out.println("The project path doens't exist"); + return; + } else { + for(String p : taosdPath) { + if(!p.contains("packing")) { + binPath = p; + } + } } + } catch (Exception e) { + e.printStackTrace(); } if(binPath.equals("")) { @@ -70,16 +98,16 @@ public class TDNode { String cmd = ""; if(this.valgrind == 0) { - cmd = "nohup " + binPath + " -c " + this.cfgDir + " > /dev/null 2>&1 & "; + cmd = "nohup " + binPath + " > /dev/null 2>&1 & "; + System.out.println("start taosd cmd: " + cmd); } else { String valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"; cmd = "nohup " + valgrindCmdline + " " + binPath + " -c " + this.cfgDir + " 2>&1 & "; } try{ - if(Runtime.getRuntime().exec(cmd).waitFor() != 0) { - return; - } + Runtime.getRuntime().exec(cmd); + TimeUnit.SECONDS.sleep(5); } catch (Exception e) { e.printStackTrace(); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java index f6dd42be58..bce985bcc0 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java @@ -42,7 +42,7 @@ public class TDNodes { binPath = file.getCanonicalPath(); System.out.println("binPath real path: " + binPath); - if (path.isEmpty()) { + if (!path.isEmpty()) { file = new File(path + "/../../"); path = file.getCanonicalPath(); } @@ -79,7 +79,7 @@ public class TDNodes { TDSimClient sim = new TDSimClient(); sim.setPath(path); - System.out.println("====== " + path + "====="); + System.out.println("======path: " + path + "====="); sim.setTestCluster(this.testCluster); if(this.simDeployed == false ) { sim.deploy(); @@ -88,7 +88,8 @@ public class TDNodes { check(index); tdNodes.get(index - 1).setTestCluster(this.testCluster); - tdNodes.get(index - 1).setValgrind(valgrind); + tdNodes.get(index - 1).setValgrind(valgrind); + tdNodes.get(index - 1).setPath(System.getProperty("user.dir")); tdNodes.get(index - 1).deploy(); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDSimClient.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDSimClient.java index 1f3e813010..fec824f7dd 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDSimClient.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDSimClient.java @@ -1,7 +1,5 @@ package com.taosdata.jdbc.utils; -import java.io.BufferedReader; -import java.io.InputStreamReader; public class TDSimClient { @@ -25,16 +23,11 @@ public class TDSimClient { public void setCfgConfig(String option, String value) { String cmd = "echo " + option + " " + value + " >> " + this.cfgPath; + System.out.println("set cfg cmd " + cmd); try { - Process ps = Runtime.getRuntime().exec(cmd); - - BufferedReader br = new BufferedReader(new InputStreamReader(ps.getInputStream())); - while(br.readLine() != null) { - System.out.println(br.readLine()); - } - - ps.waitFor(); + Process ps = Runtime.getRuntime().exec(cmd); + 
System.out.println("cfg command result: " + ps.waitFor()); } catch (Exception e) { e.printStackTrace(); } @@ -50,19 +43,26 @@ public class TDSimClient { try { String cmd = "rm -rf " + this.logDir; - Runtime.getRuntime().exec(cmd).waitFor(); + System.out.println("cmd: = " + cmd); + Process ps = Runtime.getRuntime().exec(cmd); + System.out.println("return value " + ps.waitFor()); + System.out.println(Runtime.getRuntime().exec(cmd).waitFor()); + cmd = "rm -rf " + this.cfgDir; Runtime.getRuntime().exec(cmd).waitFor(); + System.out.println(cmd + " result: " +Runtime.getRuntime().exec(cmd).waitFor()); cmd = "mkdir -p " + this.logDir; Runtime.getRuntime().exec(cmd).waitFor(); + System.out.println(cmd + " result: " +Runtime.getRuntime().exec(cmd).waitFor()); cmd = "mkdir -p " + this.cfgDir; - Runtime.getRuntime().exec(cmd).waitFor(); + System.out.println(cmd + " result: " +Runtime.getRuntime().exec(cmd).waitFor()); cmd = "touch " + this.cfgPath; - Runtime.getRuntime().exec(cmd).waitFor(); + System.out.println(cmd + " result: " +Runtime.getRuntime().exec(cmd).waitFor()); + } catch (Exception e) { e.printStackTrace(); } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java index a547127d7e..17ea54dd75 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java @@ -6,6 +6,7 @@ import java.io.InputStreamReader; import com.taosdata.jdbc.utils.TDNodes; +import org.junit.AfterClass; import org.junit.BeforeClass; public class BaseTest { @@ -17,17 +18,6 @@ public class BaseTest { @BeforeClass public static void setupEnv() { try{ - // String path = System.getProperty("user.dir"); - // String bashPath = path + "/buildTDengine.sh"; - - // Process ps = Runtime.getRuntime().exec(bashPath); - // ps.waitFor(); - - // BufferedReader br = new BufferedReader(new InputStreamReader(ps.getInputStream())); - // while(br.readLine() != null) { - // System.out.println(br.readLine()); - // } - File file = new File(deployPath + "/../../../"); String rootPath = file.getCanonicalPath(); @@ -38,8 +28,14 @@ public class BaseTest { tdNodes.deploy(1); tdNodes.start(1); + } catch (Exception e) { e.printStackTrace(); } } + + @AfterClass + public static void clearUpEnv() { + + } } \ No newline at end of file From 8c92b6043fc8b74b75aaefbab0a11923a8b0d5c8 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Tue, 7 Jul 2020 14:24:05 +0800 Subject: [PATCH 03/38] TD-867: Remove unsupported python test cases --- tests/pytest/query/filterCombo.py | 3 +-- tests/pytest/query/queryNormal.py | 21 ++++++++------------- tests/pytest/query/select_last_crash.py | 6 +----- 3 files changed, 10 insertions(+), 20 deletions(-) diff --git a/tests/pytest/query/filterCombo.py b/tests/pytest/query/filterCombo.py index f72b913c92..e769addb52 100644 --- a/tests/pytest/query/filterCombo.py +++ b/tests/pytest/query/filterCombo.py @@ -52,8 +52,7 @@ class TDTestCase: # illegal condition tdSql.error( - "select * from db.st where ts != '2020-05-13 10:00:00.002' OR tagtype < 2") - tdSql.error("select * from db.st where tagtype <> 1 OR tagtype < 2") + "select * from db.st where ts != '2020-05-13 10:00:00.002' OR tagtype < 2") def stop(self): tdSql.close() diff --git a/tests/pytest/query/queryNormal.py b/tests/pytest/query/queryNormal.py index 814c627d89..712a56d2d7 100644 --- a/tests/pytest/query/queryNormal.py +++ b/tests/pytest/query/queryNormal.py @@ -36,18 +36,17 @@ class TDTestCase: "insert 
into tb2 using stb1 tags(2,'tb2', '่กจ2') values ('2020-04-18 15:00:02.000', 3, 2.1), ('2020-04-18 15:00:03.000', 4, 2.2)") # inner join --- bug - tdSql.query("select * from tb1 a, tb2 b where a.ts = b.ts") - tdSql.checkRows(1) + tdSql.error("select * from tb1 a, tb2 b where a.ts = b.ts") # join 3 tables -- bug exists - tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id") + tdSql.error("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id") # query show stable tdSql.query("show stables") tdSql.checkRows(1) # query show tables - tdSql.query("show table") + tdSql.query("show tables") tdSql.checkRows(2) # query count @@ -71,16 +70,13 @@ class TDTestCase: tdSql.checkRows(2) # query first ... as - tdSql.query("select first(*) as begin from stb1") - tdSql.checkData(0, 1, 1) + tdSql.error("select first(*) as begin from stb1") # query last ... as - tdSql.query("select last(*) as end from stb1") - tdSql.checkData(0, 1, 4) + tdSql.error("select last(*) as end from stb1") # query last_row ... as - tdSql.query("select last_row(*) as end from stb1") - tdSql.checkData(0, 1, 4) + tdSql.error("select last_row(*) as end from stb1") # query group .. by tdSql.query("select sum(c1), t2 from stb1 group by t2") @@ -95,8 +91,7 @@ class TDTestCase: tdSql.checkRows(1) # query ... alias for table ---- bug - tdSql.query("select t.ts from tb1 t") - tdSql.checkRows(2) + tdSql.error("select t.ts from tb1 t") # query ... tbname tdSql.query("select tbname from stb1") @@ -104,7 +99,7 @@ class TDTestCase: # query ... tbname count ---- bug tdSql.query("select count(tbname) from stb1") - tdSql.checkRows(2) + tdSql.checkData(0, 0, 2) # query ... select database ---- bug tdSql.query("SELECT database()") diff --git a/tests/pytest/query/select_last_crash.py b/tests/pytest/query/select_last_crash.py index 9aeb122f82..e49002716e 100644 --- a/tests/pytest/query/select_last_crash.py +++ b/tests/pytest/query/select_last_crash.py @@ -40,11 +40,7 @@ class TDTestCase: tdSql.query("select last(*) from st") tdSql.checkRows(1) - - print( - "======= Verify filter for %s type finished =========" % - curType) - + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) From f9c9684b38f44ba03c4c3ee85426743ffdc20fca Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 8 Jul 2020 10:42:48 +0800 Subject: [PATCH 04/38] add crash_gen to CI [TD-862] --- .travis.yml | 5 + tests/pytest/crash_gen.py | 1447 ++++++++++++++++++++++--------------- tests/pytest/crash_gen.sh | 14 +- 3 files changed, 877 insertions(+), 589 deletions(-) diff --git a/.travis.yml b/.travis.yml index 2a0aa6372b..7f8d0fdb67 100644 --- a/.travis.yml +++ b/.travis.yml @@ -61,6 +61,11 @@ matrix: cd ${TRAVIS_BUILD_DIR}/tests ./test-all.sh smoke || travis_terminate $? + sleep 1 + + cd ${TRAVIS_BUILD_DIR}/tests/pytest + ./crash_gen.sh -p -t 5 -s 50|| travis_terminate $? 
+ sleep 1 cd ${TRAVIS_BUILD_DIR}/tests/pytest ./valgrind-test.sh 2>&1 > mem-error-out.log diff --git a/tests/pytest/crash_gen.py b/tests/pytest/crash_gen.py index 49c428b7f1..c8b34fe7f3 100755 --- a/tests/pytest/crash_gen.py +++ b/tests/pytest/crash_gen.py @@ -1,4 +1,4 @@ -#-----!/usr/bin/python3.7 +# -----!/usr/bin/python3.7 ################################################################### # Copyright (c) 2016 by TAOS Technologies, Inc. # All rights reserved. @@ -11,7 +11,31 @@ ################################################################### # -*- coding: utf-8 -*- -from __future__ import annotations # For type hinting before definition, ref: https://stackoverflow.com/questions/33533148/how-do-i-specify-that-the-return-type-of-a-method-is-the-same-as-the-class-itsel +# For type hinting before definition, ref: +# https://stackoverflow.com/questions/33533148/how-do-i-specify-that-the-return-type-of-a-method-is-the-same-as-the-class-itsel +from __future__ import annotations +import taos +import crash_gen +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.log import * +from queue import Queue, Empty +from typing import IO +from typing import Set +from typing import Dict +from typing import List +from requests.auth import HTTPBasicAuth +import textwrap +import datetime +import logging +import time +import random +import threading +import requests +import copy +import argparse +import getopt import sys import os @@ -22,71 +46,48 @@ import traceback if sys.version_info[0] < 3: raise Exception("Must be using Python 3") -import getopt -import argparse -import copy -import requests -import threading -import random -import time -import logging -import datetime -import textwrap -import requests -from requests.auth import HTTPBasicAuth - -from typing import List -from typing import Dict -from typing import Set -from typing import IO -from queue import Queue, Empty - -from util.log import * -from util.dnodes import * -from util.cases import * -from util.sql import * - -import crash_gen -import taos - -# Global variables, tried to keep a small number. +# Global variables, tried to keep a small number. # Command-line/Environment Configurations, will set a bit later # ConfigNameSpace = argparse.Namespace -gConfig = argparse.Namespace() # Dummy value, will be replaced later +gConfig = argparse.Namespace() # Dummy value, will be replaced later logger = None -def runThread(wt: WorkerThread): + +def runThread(wt: WorkerThread): wt.run() + class CrashGenError(Exception): def __init__(self, msg=None, errno=None): - self.msg = msg + self.msg = msg self.errno = errno - + def __str__(self): return self.msg + class WorkerThread: - def __init__(self, pool: ThreadPool, tid, - tc: ThreadCoordinator, - # te: TaskExecutor, - ): # note: main thread context! - # self._curStep = -1 + def __init__(self, pool: ThreadPool, tid, + tc: ThreadCoordinator, + # te: TaskExecutor, + ): # note: main thread context! 
+ # self._curStep = -1 self._pool = pool - self._tid = tid - self._tc = tc # type: ThreadCoordinator + self._tid = tid + self._tc = tc # type: ThreadCoordinator # self.threadIdent = threading.get_ident() self._thread = threading.Thread(target=runThread, args=(self,)) self._stepGate = threading.Event() # Let us have a DB connection of our own - if ( gConfig.per_thread_db_connection ): # type: ignore + if (gConfig.per_thread_db_connection): # type: ignore # print("connector_type = {}".format(gConfig.connector_type)) - self._dbConn = DbConn.createNative() if (gConfig.connector_type == 'native') else DbConn.createRest() + self._dbConn = DbConn.createNative() if ( + gConfig.connector_type == 'native') else DbConn.createRest() - self._dbInUse = False # if "use db" was executed already + self._dbInUse = False # if "use db" was executed already def logDebug(self, msg): logger.debug(" TRD[{}] {}".format(self._tid, msg)) @@ -98,137 +99,153 @@ class WorkerThread: return self._dbInUse def useDb(self): - if ( not self._dbInUse ): + if (not self._dbInUse): self.execSql("use db") self._dbInUse = True def getTaskExecutor(self): - return self._tc.getTaskExecutor() + return self._tc.getTaskExecutor() def start(self): self._thread.start() # AFTER the thread is recorded - def run(self): + def run(self): # initialization after thread starts, in the thread context # self.isSleeping = False logger.info("Starting to run thread: {}".format(self._tid)) - if ( gConfig.per_thread_db_connection ): # type: ignore + if (gConfig.per_thread_db_connection): # type: ignore logger.debug("Worker thread openning database connection") self._dbConn.open() - self._doTaskLoop() - + self._doTaskLoop() + # clean up - if ( gConfig.per_thread_db_connection ): # type: ignore + if (gConfig.per_thread_db_connection): # type: ignore self._dbConn.close() - def _doTaskLoop(self) : + def _doTaskLoop(self): # while self._curStep < self._pool.maxSteps: # tc = ThreadCoordinator(None) - while True: - tc = self._tc # Thread Coordinator, the overall master + while True: + tc = self._tc # Thread Coordinator, the overall master tc.crossStepBarrier() # shared barrier first, INCLUDING the last one - logger.debug("[TRD] Worker thread [{}] exited barrier...".format(self._tid)) + logger.debug( + "[TRD] Worker thread [{}] exited barrier...".format( + self._tid)) self.crossStepGate() # then per-thread gate, after being tapped - logger.debug("[TRD] Worker thread [{}] exited step gate...".format(self._tid)) + logger.debug( + "[TRD] Worker thread [{}] exited step gate...".format( + self._tid)) if not self._tc.isRunning(): - logger.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...") + logger.debug( + "[TRD] Thread Coordinator not running any more, worker thread now stopping...") break # Fetch a task from the Thread Coordinator - logger.debug("[TRD] Worker thread [{}] about to fetch task".format(self._tid)) + logger.debug( + "[TRD] Worker thread [{}] about to fetch task".format( + self._tid)) task = tc.fetchTask() # Execute such a task - logger.debug("[TRD] Worker thread [{}] about to execute task: {}".format(self._tid, task.__class__.__name__)) + logger.debug( + "[TRD] Worker thread [{}] about to execute task: {}".format( + self._tid, task.__class__.__name__)) task.execute(self) tc.saveExecutedTask(task) - logger.debug("[TRD] Worker thread [{}] finished executing task".format(self._tid)) + logger.debug( + "[TRD] Worker thread [{}] finished executing task".format( + self._tid)) - self._dbInUse = False # there may be changes 
between steps - - def verifyThreadSelf(self): # ensure we are called by this own thread - if ( threading.get_ident() != self._thread.ident ): + self._dbInUse = False # there may be changes between steps + + def verifyThreadSelf(self): # ensure we are called by this own thread + if (threading.get_ident() != self._thread.ident): raise RuntimeError("Unexpectly called from other threads") - def verifyThreadMain(self): # ensure we are called by the main thread - if ( threading.get_ident() != threading.main_thread().ident ): + def verifyThreadMain(self): # ensure we are called by the main thread + if (threading.get_ident() != threading.main_thread().ident): raise RuntimeError("Unexpectly called from other threads") def verifyThreadAlive(self): - if ( not self._thread.is_alive() ): + if (not self._thread.is_alive()): raise RuntimeError("Unexpected dead thread") # A gate is different from a barrier in that a thread needs to be "tapped" def crossStepGate(self): self.verifyThreadAlive() - self.verifyThreadSelf() # only allowed by ourselves - + self.verifyThreadSelf() # only allowed by ourselves + # Wait again at the "gate", waiting to be "tapped" - logger.debug("[TRD] Worker thread {} about to cross the step gate".format(self._tid)) - self._stepGate.wait() + logger.debug( + "[TRD] Worker thread {} about to cross the step gate".format( + self._tid)) + self._stepGate.wait() self._stepGate.clear() - + # self._curStep += 1 # off to a new step... - def tapStepGate(self): # give it a tap, release the thread waiting there + def tapStepGate(self): # give it a tap, release the thread waiting there self.verifyThreadAlive() - self.verifyThreadMain() # only allowed for main thread - - logger.debug("[TRD] Tapping worker thread {}".format(self._tid)) - self._stepGate.set() # wake up! - time.sleep(0) # let the released thread run a bit + self.verifyThreadMain() # only allowed for main thread - def execSql(self, sql): # TODO: expose DbConn directly - if ( gConfig.per_thread_db_connection ): - return self._dbConn.execute(sql) + logger.debug("[TRD] Tapping worker thread {}".format(self._tid)) + self._stepGate.set() # wake up! 
+ time.sleep(0) # let the released thread run a bit + + def execSql(self, sql): # TODO: expose DbConn directly + if (gConfig.per_thread_db_connection): + return self._dbConn.execute(sql) else: return self._tc.getDbManager().getDbConn().execute(sql) - def querySql(self, sql): # TODO: expose DbConn directly - if ( gConfig.per_thread_db_connection ): - return self._dbConn.query(sql) + def querySql(self, sql): # TODO: expose DbConn directly + if (gConfig.per_thread_db_connection): + return self._dbConn.query(sql) else: return self._tc.getDbManager().getDbConn().query(sql) def getQueryResult(self): - if ( gConfig.per_thread_db_connection ): - return self._dbConn.getQueryResult() + if (gConfig.per_thread_db_connection): + return self._dbConn.getQueryResult() else: return self._tc.getDbManager().getDbConn().getQueryResult() def getDbConn(self): - if ( gConfig.per_thread_db_connection ): - return self._dbConn + if (gConfig.per_thread_db_connection): + return self._dbConn else: return self._tc.getDbManager().getDbConn() # def querySql(self, sql): # not "execute", since we are out side the DB context # if ( gConfig.per_thread_db_connection ): - # return self._dbConn.query(sql) + # return self._dbConn.query(sql) # else: # return self._tc.getDbState().getDbConn().query(sql) # The coordinator of all worker threads, mostly running in main thread + + class ThreadCoordinator: def __init__(self, pool: ThreadPool, dbManager): - self._curStep = -1 # first step is 0 + self._curStep = -1 # first step is 0 self._pool = pool # self._wd = wd - self._te = None # prepare for every new step + self._te = None # prepare for every new step self._dbManager = dbManager - self._executedTasks: List[Task] = [] # in a given step - self._lock = threading.RLock() # sync access for a few things + self._executedTasks: List[Task] = [] # in a given step + self._lock = threading.RLock() # sync access for a few things - self._stepBarrier = threading.Barrier(self._pool.numThreads + 1) # one barrier for all threads + self._stepBarrier = threading.Barrier( + self._pool.numThreads + 1) # one barrier for all threads self._execStats = ExecutionStats() self._runStatus = MainExec.STATUS_RUNNING def getTaskExecutor(self): return self._te - def getDbManager(self) -> DbManager : + def getDbManager(self) -> DbManager: return self._dbManager def crossStepBarrier(self): @@ -238,89 +255,103 @@ class ThreadCoordinator: self._runStatus = MainExec.STATUS_STOPPING self._execStats.registerFailure("User Interruption") - def run(self): + def run(self): self._pool.createAndStartThreads(self) # Coordinate all threads step by step - self._curStep = -1 # not started yet - maxSteps = gConfig.max_steps # type: ignore - self._execStats.startExec() # start the stop watch + self._curStep = -1 # not started yet + maxSteps = gConfig.max_steps # type: ignore + self._execStats.startExec() # start the stop watch transitionFailed = False hasAbortedTask = False - while(self._curStep < maxSteps-1 and - (not transitionFailed) and - (self._runStatus==MainExec.STATUS_RUNNING) and - (not hasAbortedTask)): # maxStep==10, last curStep should be 9 + while(self._curStep < maxSteps - 1 and + (not transitionFailed) and + (self._runStatus == MainExec.STATUS_RUNNING) and + (not hasAbortedTask)): # maxStep==10, last curStep should be 9 - if not gConfig.debug: - print(".", end="", flush=True) # print this only if we are not in debug mode + if not gConfig.debug: + # print this only if we are not in debug mode + print(".", end="", flush=True) logger.debug("[TRD] Main thread going to 
sleep") # Now main thread (that's us) is ready to enter a step - self.crossStepBarrier() # let other threads go past the pool barrier, but wait at the thread gate - self._stepBarrier.reset() # Other worker threads should now be at the "gate" + # let other threads go past the pool barrier, but wait at the + # thread gate + self.crossStepBarrier() + self._stepBarrier.reset() # Other worker threads should now be at the "gate" # At this point, all threads should be pass the overall "barrier" and before the per-thread "gate" - # We use this period to do house keeping work, when all worker threads are QUIET. + # We use this period to do house keeping work, when all worker + # threads are QUIET. hasAbortedTask = False - for task in self._executedTasks : - if task.isAborted() : + for task in self._executedTasks: + if task.isAborted(): print("Task aborted: {}".format(task)) hasAbortedTask = True break - if hasAbortedTask : # do transition only if tasks are error free + if hasAbortedTask: # do transition only if tasks are error free self._execStats.registerFailure("Aborted Task Encountered") - else: + else: try: sm = self._dbManager.getStateMachine() logger.debug("[STT] starting transitions") - sm.transition(self._executedTasks) # at end of step, transiton the DB state + # at end of step, transiton the DB state + sm.transition(self._executedTasks) logger.debug("[STT] transition ended") - # Due to limitation (or maybe not) of the Python library, we cannot share connections across threads - if sm.hasDatabase() : + # Due to limitation (or maybe not) of the Python library, + # we cannot share connections across threads + if sm.hasDatabase(): for t in self._pool.threadList: logger.debug("[DB] use db for all worker threads") t.useDb() - # t.execSql("use db") # main thread executing "use db" on behalf of every worker thread + # t.execSql("use db") # main thread executing "use + # db" on behalf of every worker thread except taos.error.ProgrammingError as err: - if ( err.msg == 'network unavailable' ): # broken DB connection + if (err.msg == 'network unavailable'): # broken DB connection logger.info("DB connection broken, execution failed") traceback.print_stack() transitionFailed = True - self._te = None # Not running any more + self._te = None # Not running any more self._execStats.registerFailure("Broken DB Connection") - # continue # don't do that, need to tap all threads at end, and maybe signal them to stop + # continue # don't do that, need to tap all threads at + # end, and maybe signal them to stop else: - raise + raise # finally: # pass - - self.resetExecutedTasks() # clear the tasks after we are done + + self.resetExecutedTasks() # clear the tasks after we are done # Get ready for next step logger.debug("<-- Step {} finished".format(self._curStep)) - self._curStep += 1 # we are about to get into next step. TODO: race condition here! - logger.debug("\r\n\n--> Step {} starts with main thread waking up".format(self._curStep)) # Now not all threads had time to go to sleep + self._curStep += 1 # we are about to get into next step. TODO: race condition here! 
+ # Now not all threads had time to go to sleep + logger.debug( + "\r\n\n--> Step {} starts with main thread waking up".format(self._curStep)) # A new TE for the new step - if not transitionFailed: # only if not failed + if not transitionFailed: # only if not failed self._te = TaskExecutor(self._curStep) - logger.debug("[TRD] Main thread waking up at step {}, tapping worker threads".format(self._curStep)) # Now not all threads had time to go to sleep - self.tapAllThreads() # Worker threads will wake up at this point, and each execute it's own task + logger.debug( + "[TRD] Main thread waking up at step {}, tapping worker threads".format( + self._curStep)) # Now not all threads had time to go to sleep + # Worker threads will wake up at this point, and each execute it's + # own task + self.tapAllThreads() logger.debug("Main thread ready to finish up...") - if not transitionFailed: # only in regular situations - self.crossStepBarrier() # Cross it one last time, after all threads finish + if not transitionFailed: # only in regular situations + self.crossStepBarrier() # Cross it one last time, after all threads finish self._stepBarrier.reset() logger.debug("Main thread in exclusive zone...") - self._te = None # No more executor, time to end + self._te = None # No more executor, time to end logger.debug("Main thread tapping all threads one last time...") - self.tapAllThreads() # Let the threads run one last time + self.tapAllThreads() # Let the threads run one last time logger.debug("Main thread joining all threads") - self._pool.joinAll() # Get all threads to finish + self._pool.joinAll() # Get all threads to finish logger.info("\nAll worker threads finished") self._execStats.endExec() @@ -333,24 +364,27 @@ class ThreadCoordinator: def getExecStats(self): return self._execStats - def tapAllThreads(self): # in a deterministic manner + def tapAllThreads(self): # in a deterministic manner wakeSeq = [] - for i in range(self._pool.numThreads): # generate a random sequence - if Dice.throw(2) == 1 : + for i in range(self._pool.numThreads): # generate a random sequence + if Dice.throw(2) == 1: wakeSeq.append(i) else: wakeSeq.insert(0, i) - logger.debug("[TRD] Main thread waking up worker threads: {}".format(str(wakeSeq))) + logger.debug( + "[TRD] Main thread waking up worker threads: {}".format( + str(wakeSeq))) # TODO: set dice seed to a deterministic value for i in wakeSeq: - self._pool.threadList[i].tapStepGate() # TODO: maybe a bit too deep?! - time.sleep(0) # yield + # TODO: maybe a bit too deep?! + self._pool.threadList[i].tapStepGate() + time.sleep(0) # yield def isRunning(self): - return self._te != None + return self._te is not None - def fetchTask(self) -> Task : - if ( not self.isRunning() ): # no task + def fetchTask(self) -> Task: + if (not self.isRunning()): # no task raise RuntimeError("Cannot fetch task when not running") # return self._wd.pickTask() # Alternatively, let's ask the DbState for the appropriate task @@ -361,31 +395,36 @@ class ThreadCoordinator: # logger.debug(" (dice:{}/{}) ".format(i, nTasks)) # # return copy.copy(tasks[i]) # Needs a fresh copy, to save execution results, etc. # return tasks[i].clone() # TODO: still necessary? 
- taskType = self.getDbManager().getStateMachine().pickTaskType() # pick a task type for current state - return taskType(self.getDbManager(), self._execStats) # create a task from it + # pick a task type for current state + taskType = self.getDbManager().getStateMachine().pickTaskType() + return taskType( + self.getDbManager(), + self._execStats) # create a task from it def resetExecutedTasks(self): - self._executedTasks = [] # should be under single thread + self._executedTasks = [] # should be under single thread def saveExecutedTask(self, task): with self._lock: self._executedTasks.append(task) # We define a class to run a number of threads in locking steps. + + class ThreadPool: def __init__(self, numThreads, maxSteps): self.numThreads = numThreads self.maxSteps = maxSteps # Internal class variables self.curStep = 0 - self.threadList = [] # type: List[WorkerThread] - + self.threadList = [] # type: List[WorkerThread] + # starting to run all the threads, in locking steps def createAndStartThreads(self, tc: ThreadCoordinator): - for tid in range(0, self.numThreads): # Create the threads - workerThread = WorkerThread(self, tid, tc) + for tid in range(0, self.numThreads): # Create the threads + workerThread = WorkerThread(self, tid, tc) self.threadList.append(workerThread) - workerThread.start() # start, but should block immediately before step 0 + workerThread.start() # start, but should block immediately before step 0 def joinAll(self): for workerThread in self.threadList: @@ -394,21 +433,24 @@ class ThreadPool: # A queue of continguous POSITIVE integers, used by DbManager to generate continuous numbers # for new table names + + class LinearQueue(): def __init__(self): self.firstIndex = 1 # 1st ever element self.lastIndex = 0 - self._lock = threading.RLock() # our functions may call each other - self.inUse = set() # the indexes that are in use right now + self._lock = threading.RLock() # our functions may call each other + self.inUse = set() # the indexes that are in use right now def toText(self): - return "[{}..{}], in use: {}".format(self.firstIndex, self.lastIndex, self.inUse) + return "[{}..{}], in use: {}".format( + self.firstIndex, self.lastIndex, self.inUse) # Push (add new element, largest) to the tail, and mark it in use - def push(self): + def push(self): with self._lock: - # if ( self.isEmpty() ): - # self.lastIndex = self.firstIndex + # if ( self.isEmpty() ): + # self.lastIndex = self.firstIndex # return self.firstIndex # Otherwise we have something self.lastIndex += 1 @@ -418,12 +460,12 @@ class LinearQueue(): def pop(self): with self._lock: - if ( self.isEmpty() ): - # raise RuntimeError("Cannot pop an empty queue") - return False # TODO: None? - + if (self.isEmpty()): + # raise RuntimeError("Cannot pop an empty queue") + return False # TODO: None? + index = self.firstIndex - if ( index in self.inUse ): + if (index in self.inUse): return False self.firstIndex += 1 @@ -441,33 +483,35 @@ class LinearQueue(): def allocate(self, i): with self._lock: # logger.debug("LQ allocating item {}".format(i)) - if ( i in self.inUse ): - raise RuntimeError("Cannot re-use same index in queue: {}".format(i)) + if (i in self.inUse): + raise RuntimeError( + "Cannot re-use same index in queue: {}".format(i)) self.inUse.add(i) def release(self, i): with self._lock: # logger.debug("LQ releasing item {}".format(i)) - self.inUse.remove(i) # KeyError possible, TODO: why? + self.inUse.remove(i) # KeyError possible, TODO: why? 
def size(self): return self.lastIndex + 1 - self.firstIndex def pickAndAllocate(self): - if ( self.isEmpty() ): + if (self.isEmpty()): return None with self._lock: - cnt = 0 # counting the interations + cnt = 0 # counting the interations while True: cnt += 1 - if ( cnt > self.size()*10 ): # 10x iteration already + if (cnt > self.size() * 10): # 10x iteration already # raise RuntimeError("Failed to allocate LinearQueue element") return None - ret = Dice.throwRange(self.firstIndex, self.lastIndex+1) - if ( not ret in self.inUse ): + ret = Dice.throwRange(self.firstIndex, self.lastIndex + 1) + if (ret not in self.inUse): self.allocate(ret) return ret + class DbConn: TYPE_NATIVE = "native-c" TYPE_REST = "rest-api" @@ -480,7 +524,8 @@ class DbConn: elif connType == cls.TYPE_REST: return DbConnRest() else: - raise RuntimeError("Unexpected connection type: {}".format(connType)) + raise RuntimeError( + "Unexpected connection type: {}".format(connType)) @classmethod def createNative(cls): @@ -495,18 +540,21 @@ class DbConn: self._type = self.TYPE_INVALID def open(self): - if ( self.isOpen ): + if (self.isOpen): raise RuntimeError("Cannot re-open an existing DB connection") # below implemented by child classes self.openByType() - logger.debug("[DB] data connection opened, type = {}".format(self._type)) + logger.debug( + "[DB] data connection opened, type = {}".format( + self._type)) self.isOpen = True - def resetDb(self): # reset the whole database, etc. - if ( not self.isOpen ): - raise RuntimeError("Cannot reset database until connection is open") + def resetDb(self): # reset the whole database, etc. + if (not self.isOpen): + raise RuntimeError( + "Cannot reset database until connection is open") # self._tdSql.prepare() # Recreate database, etc. self.execute('drop database if exists db') @@ -515,83 +563,99 @@ class DbConn: # self._cursor.execute('use db') # tdSql.execute('show databases') - def queryScalar(self, sql) -> int : + def queryScalar(self, sql) -> int: return self._queryAny(sql) - def queryString(self, sql) -> str : + def queryString(self, sql) -> str: return self._queryAny(sql) - def _queryAny(self, sql) : # actual query result as an int - if ( not self.isOpen ): - raise RuntimeError("Cannot query database until connection is open") + def _queryAny(self, sql): # actual query result as an int + if (not self.isOpen): + raise RuntimeError( + "Cannot query database until connection is open") nRows = self.query(sql) - if nRows != 1 : - raise RuntimeError("Unexpected result for query: {}, rows = {}".format(sql, nRows)) + if nRows != 1: + raise RuntimeError( + "Unexpected result for query: {}, rows = {}".format( + sql, nRows)) if self.getResultRows() != 1 or self.getResultCols() != 1: - raise RuntimeError("Unexpected result set for query: {}".format(sql)) + raise RuntimeError( + "Unexpected result set for query: {}".format(sql)) return self.getQueryResult()[0][0] def execute(self, sql): raise RuntimeError("Unexpected execution, should be overriden") + def openByType(self): raise RuntimeError("Unexpected execution, should be overriden") + def getQueryResult(self): raise RuntimeError("Unexpected execution, should be overriden") + def getResultRows(self): raise RuntimeError("Unexpected execution, should be overriden") + def getResultCols(self): raise RuntimeError("Unexpected execution, should be overriden") # Sample: curl -u root:taosdata -d "show databases" localhost:6020/rest/sql + + class DbConnRest(DbConn): def __init__(self): super().__init__() self._type = self.TYPE_REST - self._url = 
"http://localhost:6020/rest/sql" # fixed for now + self._url = "http://localhost:6020/rest/sql" # fixed for now self._result = None - def openByType(self): # Open connection - pass # do nothing, always open - + def openByType(self): # Open connection + pass # do nothing, always open + def close(self): - if ( not self.isOpen ): - raise RuntimeError("Cannot clean up database until connection is open") + if (not self.isOpen): + raise RuntimeError( + "Cannot clean up database until connection is open") # Do nothing for REST logger.debug("[DB] REST Database connection closed") self.isOpen = False def _doSql(self, sql): - r = requests.post(self._url, - data = sql, - auth = HTTPBasicAuth('root', 'taosdata')) + r = requests.post(self._url, + data=sql, + auth=HTTPBasicAuth('root', 'taosdata')) rj = r.json() # Sanity check for the "Json Result" - if (not 'status' in rj): + if ('status' not in rj): raise RuntimeError("No status in REST response") - if rj['status'] == 'error': # clearly reported error - if (not 'code' in rj): # error without code - raise RuntimeError("REST error return without code") - errno = rj['code'] # May need to massage this in the future + if rj['status'] == 'error': # clearly reported error + if ('code' not in rj): # error without code + raise RuntimeError("REST error return without code") + errno = rj['code'] # May need to massage this in the future # print("Raising programming error with REST return: {}".format(rj)) - raise taos.error.ProgrammingError(rj['desc'], errno) # todo: check existance of 'desc' + raise taos.error.ProgrammingError( + rj['desc'], errno) # todo: check existance of 'desc' - if rj['status'] != 'succ': # better be this - raise RuntimeError("Unexpected REST return status: {}".format(rj['status'])) + if rj['status'] != 'succ': # better be this + raise RuntimeError( + "Unexpected REST return status: {}".format( + rj['status'])) nRows = rj['rows'] if ('rows' in rj) else 0 - self._result = rj + self._result = rj return nRows - def execute(self, sql): - if ( not self.isOpen ): - raise RuntimeError("Cannot execute database commands until connection is open") + def execute(self, sql): + if (not self.isOpen): + raise RuntimeError( + "Cannot execute database commands until connection is open") logger.debug("[SQL-REST] Executing SQL: {}".format(sql)) nRows = self._doSql(sql) - logger.debug("[SQL-REST] Execution Result, nRows = {}, SQL = {}".format(nRows, sql)) + logger.debug( + "[SQL-REST] Execution Result, nRows = {}, SQL = {}".format(nRows, sql)) return nRows - def query(self, sql) : # return rows affected + def query(self, sql): # return rows affected return self.execute(sql) def getQueryResult(self): @@ -605,48 +669,59 @@ class DbConnRest(DbConn): def getResultCols(self): print(self._result) raise RuntimeError("TBD") - + + class DbConnNative(DbConn): def __init__(self): super().__init__() self._type = self.TYPE_REST - self._conn = None + self._conn = None self._cursor = None - - def openByType(self): # Open connection - cfgPath = "../../build/test/cfg" - self._conn = taos.connect(host="127.0.0.1", config=cfgPath) # TODO: make configurable + + def openByType(self): # Open connection + cfgPath = "../../build/test/cfg" + self._conn = taos.connect( + host="127.0.0.1", + config=cfgPath) # TODO: make configurable self._cursor = self._conn.cursor() # Get the connection/cursor ready self._cursor.execute('reset query cache') - # self._cursor.execute('use db') # do this at the beginning of every step + # self._cursor.execute('use db') # do this at the beginning of every + 
# step # Open connection self._tdSql = TDSql() self._tdSql.init(self._cursor) - + def close(self): - if ( not self.isOpen ): - raise RuntimeError("Cannot clean up database until connection is open") + if (not self.isOpen): + raise RuntimeError( + "Cannot clean up database until connection is open") self._tdSql.close() logger.debug("[DB] Database connection closed") self.isOpen = False - def execute(self, sql): - if ( not self.isOpen ): - raise RuntimeError("Cannot execute database commands until connection is open") + def execute(self, sql): + if (not self.isOpen): + raise RuntimeError( + "Cannot execute database commands until connection is open") logger.debug("[SQL] Executing SQL: {}".format(sql)) nRows = self._tdSql.execute(sql) - logger.debug("[SQL] Execution Result, nRows = {}, SQL = {}".format(nRows, sql)) + logger.debug( + "[SQL] Execution Result, nRows = {}, SQL = {}".format( + nRows, sql)) return nRows - def query(self, sql) : # return rows affected - if ( not self.isOpen ): - raise RuntimeError("Cannot query database until connection is open") + def query(self, sql): # return rows affected + if (not self.isOpen): + raise RuntimeError( + "Cannot query database until connection is open") logger.debug("[SQL] Executing SQL: {}".format(sql)) nRows = self._tdSql.query(sql) - logger.debug("[SQL] Query Result, nRows = {}, SQL = {}".format(nRows, sql)) + logger.debug( + "[SQL] Query Result, nRows = {}, SQL = {}".format( + nRows, sql)) return nRows # results are in: return self._tdSql.queryResult @@ -659,13 +734,13 @@ class DbConnNative(DbConn): def getResultCols(self): return self._tdSql.queryCols - + class AnyState: - STATE_INVALID = -1 - STATE_EMPTY = 0 # nothing there, no even a DB - STATE_DB_ONLY = 1 # we have a DB, but nothing else + STATE_INVALID = -1 + STATE_EMPTY = 0 # nothing there, no even a DB + STATE_DB_ONLY = 1 # we have a DB, but nothing else STATE_TABLE_ONLY = 2 # we have a table, but totally empty - STATE_HAS_DATA = 3 # we have some data in the table + STATE_HAS_DATA = 3 # we have some data in the table _stateNames = ["Invalid", "Empty", "DB_Only", "Table_Only", "Has_Data"] STATE_VAL_IDX = 0 @@ -680,7 +755,8 @@ class AnyState: self._info = self.getInfo() def __str__(self): - return self._stateNames[self._info[self.STATE_VAL_IDX] + 1] # -1 hack to accomodate the STATE_INVALID case + # -1 hack to accomodate the STATE_INVALID case + return self._stateNames[self._info[self.STATE_VAL_IDX] + 1] def getInfo(self): raise RuntimeError("Must be overriden by child classes") @@ -691,7 +767,9 @@ class AnyState: elif isinstance(other, AnyState): return self.getValIndex() == other.getValIndex() else: - raise RuntimeError("Unexpected comparison, type = {}".format(type(other))) + raise RuntimeError( + "Unexpected comparison, type = {}".format( + type(other))) def verifyTasksToState(self, tasks, newState): raise RuntimeError("Must be overriden by child classes") @@ -701,55 +779,65 @@ class AnyState: def getValue(self): return self._info[self.STATE_VAL_IDX] + def canCreateDb(self): return self._info[self.CAN_CREATE_DB] + def canDropDb(self): return self._info[self.CAN_DROP_DB] + def canCreateFixedSuperTable(self): return self._info[self.CAN_CREATE_FIXED_SUPER_TABLE] + def canDropFixedSuperTable(self): return self._info[self.CAN_DROP_FIXED_SUPER_TABLE] + def canAddData(self): return self._info[self.CAN_ADD_DATA] + def canReadData(self): return self._info[self.CAN_READ_DATA] def assertAtMostOneSuccess(self, tasks, cls): sCnt = 0 - for task in tasks : + for task in tasks: if not 
isinstance(task, cls): continue if task.isSuccess(): # task.logDebug("Task success found") sCnt += 1 - if ( sCnt >= 2 ): - raise RuntimeError("Unexpected more than 1 success with task: {}".format(cls)) + if (sCnt >= 2): + raise RuntimeError( + "Unexpected more than 1 success with task: {}".format(cls)) def assertIfExistThenSuccess(self, tasks, cls): sCnt = 0 exists = False - for task in tasks : + for task in tasks: if not isinstance(task, cls): continue - exists = True # we have a valid instance + exists = True # we have a valid instance if task.isSuccess(): sCnt += 1 - if ( exists and sCnt <= 0 ): - raise RuntimeError("Unexpected zero success for task: {}".format(cls)) + if (exists and sCnt <= 0): + raise RuntimeError( + "Unexpected zero success for task: {}".format(cls)) def assertNoTask(self, tasks, cls): - for task in tasks : + for task in tasks: if isinstance(task, cls): - raise CrashGenError("This task: {}, is not expected to be present, given the success/failure of others".format(cls.__name__)) + raise CrashGenError( + "This task: {}, is not expected to be present, given the success/failure of others".format(cls.__name__)) def assertNoSuccess(self, tasks, cls): - for task in tasks : + for task in tasks: if isinstance(task, cls): if task.isSuccess(): - raise RuntimeError("Unexpected successful task: {}".format(cls)) + raise RuntimeError( + "Unexpected successful task: {}".format(cls)) def hasSuccess(self, tasks, cls): - for task in tasks : + for task in tasks: if not isinstance(task, cls): continue if task.isSuccess(): @@ -757,35 +845,40 @@ class AnyState: return False def hasTask(self, tasks, cls): - for task in tasks : + for task in tasks: if isinstance(task, cls): return True return False + class StateInvalid(AnyState): def getInfo(self): return [ self.STATE_INVALID, - False, False, # can create/drop Db - False, False, # can create/drop fixed table - False, False, # can insert/read data with fixed table + False, False, # can create/drop Db + False, False, # can create/drop fixed table + False, False, # can insert/read data with fixed table ] # def verifyTasksToState(self, tasks, newState): + class StateEmpty(AnyState): def getInfo(self): return [ self.STATE_EMPTY, - True, False, # can create/drop Db - False, False, # can create/drop fixed table - False, False, # can insert/read data with fixed table + True, False, # can create/drop Db + False, False, # can create/drop fixed table + False, False, # can insert/read data with fixed table ] - def verifyTasksToState(self, tasks, newState): - if ( self.hasSuccess(tasks, TaskCreateDb) ): # at EMPTY, if there's succes in creating DB - if ( not self.hasTask(tasks, TaskDropDb) ) : # and no drop_db tasks - self.assertAtMostOneSuccess(tasks, TaskCreateDb) # we must have at most one. TODO: compare numbers + def verifyTasksToState(self, tasks, newState): + if (self.hasSuccess(tasks, TaskCreateDb) + ): # at EMPTY, if there's succes in creating DB + if (not self.hasTask(tasks, TaskDropDb)): # and no drop_db tasks + # we must have at most one. 
TODO: compare numbers + self.assertAtMostOneSuccess(tasks, TaskCreateDb) + class StateDbOnly(AnyState): def getInfo(self): @@ -797,32 +890,34 @@ class StateDbOnly(AnyState): ] def verifyTasksToState(self, tasks, newState): - if ( not self.hasTask(tasks, TaskCreateDb) ): - self.assertAtMostOneSuccess(tasks, TaskDropDb) # only if we don't create any more + if (not self.hasTask(tasks, TaskCreateDb)): + # only if we don't create any more + self.assertAtMostOneSuccess(tasks, TaskDropDb) self.assertIfExistThenSuccess(tasks, TaskDropDb) # self.assertAtMostOneSuccess(tasks, CreateFixedTableTask) # not true in massively parrallel cases # Nothing to be said about adding data task # if ( self.hasSuccess(tasks, DropDbTask) ): # dropped the DB - # self.assertHasTask(tasks, DropDbTask) # implied by hasSuccess - # self.assertAtMostOneSuccess(tasks, DropDbTask) - # self._state = self.STATE_EMPTY + # self.assertHasTask(tasks, DropDbTask) # implied by hasSuccess + # self.assertAtMostOneSuccess(tasks, DropDbTask) + # self._state = self.STATE_EMPTY # if ( self.hasSuccess(tasks, TaskCreateSuperTable) ): # did not drop db, create table success # # self.assertHasTask(tasks, CreateFixedTableTask) # tried to create table - # if ( not self.hasTask(tasks, TaskDropSuperTable) ): + # if ( not self.hasTask(tasks, TaskDropSuperTable) ): # self.assertAtMostOneSuccess(tasks, TaskCreateSuperTable) # at most 1 attempt is successful, if we don't drop anything - # self.assertNoTask(tasks, DropDbTask) # should have have tried - # if ( not self.hasSuccess(tasks, AddFixedDataTask) ): # just created table, no data yet - # # can't say there's add-data attempts, since they may all fail - # self._state = self.STATE_TABLE_ONLY - # else: - # self._state = self.STATE_HAS_DATA + # self.assertNoTask(tasks, DropDbTask) # should have have tried + # if ( not self.hasSuccess(tasks, AddFixedDataTask) ): # just created table, no data yet + # # can't say there's add-data attempts, since they may all fail + # self._state = self.STATE_TABLE_ONLY + # else: + # self._state = self.STATE_HAS_DATA # What about AddFixedData? # elif ( self.hasSuccess(tasks, AddFixedDataTask) ): # self._state = self.STATE_HAS_DATA # else: # no success in dropping db tasks, no success in create fixed table? read data should also fail - # # raise RuntimeError("Unexpected no-success scenario") # We might just landed all failure tasks, + # # raise RuntimeError("Unexpected no-success scenario") # We might just landed all failure tasks, # self._state = self.STATE_DB_ONLY # no change + class StateSuperTableOnly(AnyState): def getInfo(self): return [ @@ -833,9 +928,11 @@ class StateSuperTableOnly(AnyState): ] def verifyTasksToState(self, tasks, newState): - if ( self.hasSuccess(tasks, TaskDropSuperTable) ): # we are able to drop the table + if (self.hasSuccess(tasks, TaskDropSuperTable) + ): # we are able to drop the table #self.assertAtMostOneSuccess(tasks, TaskDropSuperTable) - self.hasSuccess(tasks, TaskCreateSuperTable) # we must have had recreted it + # we must have had recreted it + self.hasSuccess(tasks, TaskCreateSuperTable) # self._state = self.STATE_DB_ONLY # elif ( self.hasSuccess(tasks, AddFixedDataTask) ): # no success dropping the table, but added data @@ -849,6 +946,7 @@ class StateSuperTableOnly(AnyState): # raise RuntimeError("Unexpected no-success scenarios") # TODO: need to revamp!! 
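# Aside (a sketch separate from the patch, using shortened hypothetical
# names): each concrete state packs its identity and permissions into the
# flat list returned by getInfo(); AnyState caches that vector in __init__
# and the can*() accessors simply index into it. A minimal self-contained
# illustration of the same pattern:

STATE_VAL_IDX, CAN_CREATE_DB, CAN_DROP_DB = 0, 1, 2

class MiniState:
    def __init__(self):
        self._info = self.getInfo()  # cached capability vector

    def getInfo(self):  # each concrete state supplies its own vector
        raise NotImplementedError

    def canCreateDb(self):
        return self._info[CAN_CREATE_DB]

    def canDropDb(self):
        return self._info[CAN_DROP_DB]

class MiniEmpty(MiniState):
    def getInfo(self):
        # state value, may create a DB, nothing to drop yet
        return [0, True, False]

assert MiniEmpty().canCreateDb() and not MiniEmpty().canDropDb()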
+ class StateHasData(AnyState): def getInfo(self): return [ @@ -859,13 +957,15 @@ class StateHasData(AnyState): ] def verifyTasksToState(self, tasks, newState): - if ( newState.equals(AnyState.STATE_EMPTY) ): + if (newState.equals(AnyState.STATE_EMPTY)): self.hasSuccess(tasks, TaskDropDb) - if ( not self.hasTask(tasks, TaskCreateDb) ) : - self.assertAtMostOneSuccess(tasks, TaskDropDb) # TODO: dicy - elif ( newState.equals(AnyState.STATE_DB_ONLY) ): # in DB only - if ( not self.hasTask(tasks, TaskCreateDb)): # without a create_db task - self.assertNoTask(tasks, TaskDropDb) # we must have drop_db task + if (not self.hasTask(tasks, TaskCreateDb)): + self.assertAtMostOneSuccess(tasks, TaskDropDb) # TODO: dicy + elif (newState.equals(AnyState.STATE_DB_ONLY)): # in DB only + if (not self.hasTask(tasks, TaskCreateDb) + ): # without a create_db task + # we must have drop_db task + self.assertNoTask(tasks, TaskDropDb) self.hasSuccess(tasks, TaskDropSuperTable) # self.assertAtMostOneSuccess(tasks, DropFixedSuperTableTask) # TODO: dicy # elif ( newState.equals(AnyState.STATE_TABLE_ONLY) ): # data deleted @@ -873,19 +973,26 @@ class StateHasData(AnyState): # self.assertNoTask(tasks, TaskDropSuperTable) # self.assertNoTask(tasks, TaskAddData) # self.hasSuccess(tasks, DeleteDataTasks) - else: # should be STATE_HAS_DATA - if (not self.hasTask(tasks, TaskCreateDb) ): # only if we didn't create one - self.assertNoTask(tasks, TaskDropDb) # we shouldn't have dropped it - if (not self.hasTask(tasks, TaskCreateSuperTable)) : # if we didn't create the table - self.assertNoTask(tasks, TaskDropSuperTable) # we should not have a task that drops it + else: # should be STATE_HAS_DATA + if (not self.hasTask(tasks, TaskCreateDb) + ): # only if we didn't create one + # we shouldn't have dropped it + self.assertNoTask(tasks, TaskDropDb) + if (not self.hasTask(tasks, TaskCreateSuperTable) + ): # if we didn't create the table + # we should not have a task that drops it + self.assertNoTask(tasks, TaskDropSuperTable) # self.assertIfExistThenSuccess(tasks, ReadFixedDataTask) + class StateMechine: def __init__(self, dbConn): self._dbConn = dbConn - self._curState = self._findCurrentState() # starting state - self._stateWeights = [1,3,5,15] # transitition target probabilities, indexed with value of STATE_EMPTY, STATE_DB_ONLY, etc. - + self._curState = self._findCurrentState() # starting state + # transitition target probabilities, indexed with value of STATE_EMPTY, + # STATE_DB_ONLY, etc. + self._stateWeights = [1, 3, 5, 15] + def getCurrentState(self): return self._curState @@ -893,142 +1000,178 @@ class StateMechine: return self._curState.canDropDb() # ha, can drop DB means it has one # May be slow, use cautionsly... 
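# The _stateWeights above bias transitions toward the data-bearing states
# (STATE_HAS_DATA gets weight 15 versus 1 for STATE_EMPTY); they are consumed
# by the cumulative-weight pick implemented further down in
# _weighted_choice_sub(). A self-contained sketch of that selection scheme,
# stdlib only:

import random

def weighted_pick(weights):
    # walk down the cumulative distribution; an index is chosen with
    # probability proportional to its weight
    rnd = random.random() * sum(weights)
    for i, w in enumerate(weights):
        rnd -= w
        if rnd < 0:
            return i

# with weights [1, 3, 5, 15], index 3 should win roughly 15/24 of the time
counts = [0, 0, 0, 0]
for _ in range(10000):
    counts[weighted_pick([1, 3, 5, 15])] += 1
print(counts)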
- def getTaskTypes(self): # those that can run (directly/indirectly) from the current state + def getTaskTypes(self): # those that can run (directly/indirectly) from the current state def typesToStrings(types): ss = [] for t in types: ss.append(t.__name__) return ss - allTaskClasses = StateTransitionTask.__subclasses__() # all state transition tasks + allTaskClasses = StateTransitionTask.__subclasses__() # all state transition tasks firstTaskTypes = [] for tc in allTaskClasses: - # t = tc(self) # create task object + # t = tc(self) # create task object if tc.canBeginFrom(self._curState): firstTaskTypes.append(tc) - # now we have all the tasks that can begin directly from the current state, let's figure out the INDIRECT ones - taskTypes = firstTaskTypes.copy() # have to have these - for task1 in firstTaskTypes: # each task type gathered so far - endState = task1.getEndState() # figure the end state - if endState == None: # does not change end state - continue # no use, do nothing - for tc in allTaskClasses: # what task can further begin from there? + # now we have all the tasks that can begin directly from the current + # state, let's figure out the INDIRECT ones + taskTypes = firstTaskTypes.copy() # have to have these + for task1 in firstTaskTypes: # each task type gathered so far + endState = task1.getEndState() # figure the end state + if endState is None: # does not change end state + continue # no use, do nothing + for tc in allTaskClasses: # what task can further begin from there? if tc.canBeginFrom(endState) and (tc not in firstTaskTypes): - taskTypes.append(tc) # gather it + taskTypes.append(tc) # gather it if len(taskTypes) <= 0: - raise RuntimeError("No suitable task types found for state: {}".format(self._curState)) - logger.debug("[OPS] Tasks found for state {}: {}".format(self._curState, typesToStrings(taskTypes))) + raise RuntimeError( + "No suitable task types found for state: {}".format( + self._curState)) + logger.debug( + "[OPS] Tasks found for state {}: {}".format( + self._curState, + typesToStrings(taskTypes))) return taskTypes def _findCurrentState(self): dbc = self._dbConn - ts = time.time() # we use this to debug how fast/slow it is to do the various queries to find the current DB state - if dbc.query("show databases") == 0 : # no database?! + ts = time.time() # we use this to debug how fast/slow it is to do the various queries to find the current DB state + if dbc.query("show databases") == 0: # no database?! 
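            # (zero rows from "show databases" means this connection sees no
            # databases at all, so the generator is starting from EMPTY)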
# logger.debug("Found EMPTY state") - logger.debug("[STT] empty database found, between {} and {}".format(ts, time.time())) + logger.debug( + "[STT] empty database found, between {} and {}".format( + ts, time.time())) return StateEmpty() - dbc.execute("use db") # did not do this when openning connection, and this is NOT the worker thread, which does this on their own - if dbc.query("show tables") == 0 : # no tables + # did not do this when openning connection, and this is NOT the worker + # thread, which does this on their own + dbc.execute("use db") + if dbc.query("show tables") == 0: # no tables # logger.debug("Found DB ONLY state") - logger.debug("[STT] DB_ONLY found, between {} and {}".format(ts, time.time())) + logger.debug( + "[STT] DB_ONLY found, between {} and {}".format( + ts, time.time())) return StateDbOnly() - if dbc.query("SELECT * FROM db.{}".format(DbManager.getFixedSuperTableName()) ) == 0 : # no regular tables + if dbc.query("SELECT * FROM db.{}".format(DbManager.getFixedSuperTableName()) + ) == 0: # no regular tables # logger.debug("Found TABLE_ONLY state") - logger.debug("[STT] SUPER_TABLE_ONLY found, between {} and {}".format(ts, time.time())) + logger.debug( + "[STT] SUPER_TABLE_ONLY found, between {} and {}".format( + ts, time.time())) return StateSuperTableOnly() - else: # has actual tables + else: # has actual tables # logger.debug("Found HAS_DATA state") - logger.debug("[STT] HAS_DATA found, between {} and {}".format(ts, time.time())) + logger.debug( + "[STT] HAS_DATA found, between {} and {}".format( + ts, time.time())) return StateHasData() def transition(self, tasks): - if ( len(tasks) == 0 ): # before 1st step, or otherwise empty + if (len(tasks) == 0): # before 1st step, or otherwise empty logger.debug("[STT] Starting State: {}".format(self._curState)) - return # do nothing + return # do nothing - self._dbConn.execute("show dnodes") # this should show up in the server log, separating steps + # this should show up in the server log, separating steps + self._dbConn.execute("show dnodes") # Generic Checks, first based on the start state if self._curState.canCreateDb(): self._curState.assertIfExistThenSuccess(tasks, TaskCreateDb) - # self.assertAtMostOneSuccess(tasks, CreateDbTask) # not really, in case of multiple creation and drops + # self.assertAtMostOneSuccess(tasks, CreateDbTask) # not really, in + # case of multiple creation and drops if self._curState.canDropDb(): self._curState.assertIfExistThenSuccess(tasks, TaskDropDb) - # self.assertAtMostOneSuccess(tasks, DropDbTask) # not really in case of drop-create-drop + # self.assertAtMostOneSuccess(tasks, DropDbTask) # not really in + # case of drop-create-drop # if self._state.canCreateFixedTable(): # self.assertIfExistThenSuccess(tasks, CreateFixedTableTask) # Not true, DB may be dropped - # self.assertAtMostOneSuccess(tasks, CreateFixedTableTask) # not really, in case of create-drop-create + # self.assertAtMostOneSuccess(tasks, CreateFixedTableTask) # not + # really, in case of create-drop-create # if self._state.canDropFixedTable(): # self.assertIfExistThenSuccess(tasks, DropFixedTableTask) # Not True, the whole DB may be dropped - # self.assertAtMostOneSuccess(tasks, DropFixedTableTask) # not really in case of drop-create-drop + # self.assertAtMostOneSuccess(tasks, DropFixedTableTask) # not + # really in case of drop-create-drop # if self._state.canAddData(): - # self.assertIfExistThenSuccess(tasks, AddFixedDataTask) # not true actually + # self.assertIfExistThenSuccess(tasks, AddFixedDataTask) # not true 
+ # actually # if self._state.canReadData(): # Nothing for sure newState = self._findCurrentState() logger.debug("[STT] New DB state determined: {}".format(newState)) - self._curState.verifyTasksToState(tasks, newState) # can old state move to new state through the tasks? + # can old state move to new state through the tasks? + self._curState.verifyTasksToState(tasks, newState) self._curState = newState def pickTaskType(self): - taskTypes = self.getTaskTypes() # all the task types we can choose from at curent state + # all the task types we can choose from at curent state + taskTypes = self.getTaskTypes() weights = [] for tt in taskTypes: endState = tt.getEndState() - if endState != None : - weights.append(self._stateWeights[endState.getValIndex()]) # TODO: change to a method + if endState is not None: + # TODO: change to a method + weights.append(self._stateWeights[endState.getValIndex()]) else: - weights.append(10) # read data task, default to 10: TODO: change to a constant + # read data task, default to 10: TODO: change to a constant + weights.append(10) i = self._weighted_choice_sub(weights) - # logger.debug(" (weighted random:{}/{}) ".format(i, len(taskTypes))) + # logger.debug(" (weighted random:{}/{}) ".format(i, len(taskTypes))) return taskTypes[i] - def _weighted_choice_sub(self, weights): # ref: https://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python/ - rnd = random.random() * sum(weights) # TODO: use our dice to ensure it being determinstic? + # ref: + # https://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python/ + def _weighted_choice_sub(self, weights): + # TODO: use our dice to ensure it being determinstic? + rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: return i # Manager of the Database Data/Connection -class DbManager(): - def __init__(self, resetDb = True): + + +class DbManager(): + def __init__(self, resetDb=True): self.tableNumQueue = LinearQueue() - self._lastTick = self.setupLastTick() # datetime.datetime(2019, 1, 1) # initial date time tick - self._lastInt = 0 # next one is initial integer + # datetime.datetime(2019, 1, 1) # initial date time tick + self._lastTick = self.setupLastTick() + self._lastInt = 0 # next one is initial integer self._lock = threading.RLock() - + # self.openDbServerConnection() - self._dbConn = DbConn.createNative() if (gConfig.connector_type=='native') else DbConn.createRest() + self._dbConn = DbConn.createNative() if ( + gConfig.connector_type == 'native') else DbConn.createRest() try: - self._dbConn.open() # may throw taos.error.ProgrammingError: disconnected + self._dbConn.open() # may throw taos.error.ProgrammingError: disconnected except taos.error.ProgrammingError as err: # print("Error type: {}, msg: {}, value: {}".format(type(err), err.msg, err)) - if ( err.msg == 'client disconnected' ): # cannot open DB connection - print("Cannot establish DB connection, please re-run script without parameter, and follow the instructions.") + if (err.msg == 'client disconnected'): # cannot open DB connection + print( + "Cannot establish DB connection, please re-run script without parameter, and follow the instructions.") sys.exit(2) else: - raise - except: + raise + except BaseException: print("[=] Unexpected exception") - raise + raise - if resetDb : - self._dbConn.resetDb() # drop and recreate DB + if resetDb: + self._dbConn.resetDb() # drop and recreate DB + + # Do this after dbConn is in proper shape + self._stateMachine = StateMechine(self._dbConn) - 
self._stateMachine = StateMechine(self._dbConn) # Do this after dbConn is in proper shape - def getDbConn(self): return self._dbConn - def getStateMachine(self) -> StateMechine : + def getStateMachine(self) -> StateMechine: return self._stateMachine # def getState(self): @@ -1043,15 +1186,18 @@ class DbManager(): def setupLastTick(self): t1 = datetime.datetime(2020, 6, 1) t2 = datetime.datetime.now() - elSec = int(t2.timestamp() - t1.timestamp()) # maybe a very large number, takes 69 years to exceed Python int range - elSec2 = ( elSec % (8 * 12 * 30 * 24 * 60 * 60 / 500 ) ) * 500 # a number representing seconds within 10 years + # maybe a very large number, takes 69 years to exceed Python int range + elSec = int(t2.timestamp() - t1.timestamp()) + elSec2 = (elSec % (8 * 12 * 30 * 24 * 60 * 60 / 500)) * \ + 500 # a number representing seconds within 10 years # print("elSec = {}".format(elSec)) - t3 = datetime.datetime(2012, 1, 1) # default "keep" is 10 years - t4 = datetime.datetime.fromtimestamp( t3.timestamp() + elSec2) # see explanation above + t3 = datetime.datetime(2012, 1, 1) # default "keep" is 10 years + t4 = datetime.datetime.fromtimestamp( + t3.timestamp() + elSec2) # see explanation above logger.info("Setting up TICKS to start from: {}".format(t4)) return t4 - def pickAndAllocateTable(self): # pick any table, and "use" it + def pickAndAllocateTable(self): # pick any table, and "use" it return self.tableNumQueue.pickAndAllocate() def addTable(self): @@ -1063,15 +1209,16 @@ class DbManager(): def getFixedSuperTableName(cls): return "fs_table" - def releaseTable(self, i): # return the table back, so others can use it + def releaseTable(self, i): # return the table back, so others can use it self.tableNumQueue.release(i) def getNextTick(self): - with self._lock: # prevent duplicate tick - if Dice.throw(10) == 0 : # 1 in 10 chance + with self._lock: # prevent duplicate tick + if Dice.throw(10) == 0: # 1 in 10 chance return self._lastTick + datetime.timedelta(0, -100) - else: # regular - self._lastTick += datetime.timedelta(0, 1) # add one second to it + else: # regular + # add one second to it + self._lastTick += datetime.timedelta(0, 1) return self._lastTick def getNextInt(self): @@ -1080,29 +1227,31 @@ class DbManager(): return self._lastInt def getNextBinary(self): - return "Beijing_Shanghai_Los_Angeles_New_York_San_Francisco_Chicago_Beijing_Shanghai_Los_Angeles_New_York_San_Francisco_Chicago_{}".format(self.getNextInt()) + return "Beijing_Shanghai_Los_Angeles_New_York_San_Francisco_Chicago_Beijing_Shanghai_Los_Angeles_New_York_San_Francisco_Chicago_{}".format( + self.getNextInt()) def getNextFloat(self): return 0.9 + self.getNextInt() - + def getTableNameToDelete(self): - tblNum = self.tableNumQueue.pop() # TODO: race condition! - if ( not tblNum ): # maybe false + tblNum = self.tableNumQueue.pop() # TODO: race condition! 
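        # pop() hands back a falsy value when the queue is empty; the check
        # below converts that into an explicit False for the caller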
+ if (not tblNum): # maybe false return False - + return "table_{}".format(tblNum) def cleanUp(self): - self._dbConn.close() + self._dbConn.close() + class TaskExecutor(): class BoundedList: - def __init__(self, size = 10): + def __init__(self, size=10): self._size = size self._list = [] - def add(self, n: int) : - if not self._list: # empty + def add(self, n: int): + if not self._list: # empty self._list.append(n) return # now we should insert @@ -1110,22 +1259,22 @@ class TaskExecutor(): insPos = 0 for i in range(nItems): insPos = i - if n <= self._list[i] : # smaller than this item, time to insert - break # found the insertion point - insPos += 1 # insert to the right + if n <= self._list[i]: # smaller than this item, time to insert + break # found the insertion point + insPos += 1 # insert to the right - if insPos == 0 : # except for the 1st item, # TODO: elimiate first item as gating item - return # do nothing + if insPos == 0: # except for the 1st item, # TODO: elimiate first item as gating item + return # do nothing # print("Inserting at postion {}, value: {}".format(insPos, n)) - self._list.insert(insPos, n) # insert - + self._list.insert(insPos, n) # insert + newLen = len(self._list) - if newLen <= self._size : - return # do nothing - elif newLen == (self._size + 1) : - del self._list[0] # remove the first item - else : + if newLen <= self._size: + return # do nothing + elif newLen == (self._size + 1): + del self._list[0] # remove the first item + else: raise RuntimeError("Corrupt Bounded List") def __str__(self): @@ -1143,7 +1292,7 @@ class TaskExecutor(): def getCurStep(self): return self._curStep - def execute(self, task: Task, wt: WorkerThread): # execute a task on a thread + def execute(self, task: Task, wt: WorkerThread): # execute a task on a thread task.execute(wt) def recordDataMark(self, n: int): @@ -1156,128 +1305,147 @@ class TaskExecutor(): # def logDebug(self, msg): # logger.debug(" T[{}.x]: ".format(self._curStep) + msg) + class Task(): taskSn = 100 @classmethod def allocTaskNum(cls): - Task.taskSn += 1 # IMPORTANT: cannot use cls.taskSn, since each sub class will have a copy + Task.taskSn += 1 # IMPORTANT: cannot use cls.taskSn, since each sub class will have a copy # logger.debug("Allocating taskSN: {}".format(Task.taskSn)) return Task.taskSn - def __init__(self, dbManager: DbManager, execStats: ExecutionStats): + def __init__(self, dbManager: DbManager, execStats: ExecutionStats): self._dbManager = dbManager - self._workerThread = None + self._workerThread = None self._err = None self._aborted = False self._curStep = None - self._numRows = None # Number of rows affected + self._numRows = None # Number of rows affected - # Assign an incremental task serial number + # Assign an incremental task serial number self._taskNum = self.allocTaskNum() # logger.debug("Creating new task {}...".format(self._taskNum)) self._execStats = execStats - self._lastSql = "" # last SQL executed/attempted + self._lastSql = "" # last SQL executed/attempted def isSuccess(self): - return self._err == None + return self._err is None def isAborted(self): return self._aborted - def clone(self): # TODO: why do we need this again? + def clone(self): # TODO: why do we need this again? 
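        # build a fresh instance of the same concrete Task subclass, reusing
        # the shared DbManager and ExecutionStats collaborators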
newTask = self.__class__(self._dbManager, self._execStats) return newTask def logDebug(self, msg): - self._workerThread.logDebug("Step[{}.{}] {}".format(self._curStep, self._taskNum, msg)) + self._workerThread.logDebug( + "Step[{}.{}] {}".format( + self._curStep, self._taskNum, msg)) def logInfo(self, msg): - self._workerThread.logInfo("Step[{}.{}] {}".format(self._curStep, self._taskNum, msg)) + self._workerThread.logInfo( + "Step[{}.{}] {}".format( + self._curStep, self._taskNum, msg)) def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - raise RuntimeError("To be implemeted by child classes, class name: {}".format(self.__class__.__name__)) + raise RuntimeError( + "To be implemeted by child classes, class name: {}".format( + self.__class__.__name__)) def execute(self, wt: WorkerThread): wt.verifyThreadSelf() - self._workerThread = wt # type: ignore + self._workerThread = wt # type: ignore te = wt.getTaskExecutor() self._curStep = te.getCurStep() - self.logDebug("[-] executing task {}...".format(self.__class__.__name__)) + self.logDebug( + "[-] executing task {}...".format(self.__class__.__name__)) self._err = None - self._execStats.beginTaskType(self.__class__.__name__) # mark beginning + self._execStats.beginTaskType( + self.__class__.__name__) # mark beginning try: - self._executeInternal(te, wt) # TODO: no return value? + self._executeInternal(te, wt) # TODO: no return value? except taos.error.ProgrammingError as err: - errno2 = err.errno if (err.errno > 0) else 0x80000000 + err.errno # correct error scheme - if ( errno2 in [0x200, 0x360, 0x362, 0x36A, 0x36B, 0x36D, 0x381, 0x380, 0x383, 0x503, 0x600, - 1000 # REST catch-all error - ]) : # allowed errors - self.logDebug("[=] Acceptable Taos library exception: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, self._lastSql)) + errno2 = err.errno if ( + err.errno > 0) else 0x80000000 + err.errno # correct error scheme + if (errno2 in [0x200, 0x360, 0x362, 0x36A, 0x36B, 0x36D, 0x381, 0x380, 0x383, 0x503, 0x600, + 1000 # REST catch-all error + ]): # allowed errors + self.logDebug( + "[=] Acceptable Taos library exception: errno=0x{:X}, msg: {}, SQL: {}".format( + errno2, err, self._lastSql)) print("_", end="", flush=True) - self._err = err + self._err = err else: - errMsg = "[=] Unexpected Taos library exception: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, self._lastSql) + errMsg = "[=] Unexpected Taos library exception: errno=0x{:X}, msg: {}, SQL: {}".format( + errno2, err, self._lastSql) self.logDebug(errMsg) - if gConfig.debug : - raise # so that we see full stack - else: # non-debug - print("\n\n----------------------------\nProgram ABORTED Due to Unexpected TAOS Error: \n\n{}\n".format(errMsg) + + if gConfig.debug: + raise # so that we see full stack + else: # non-debug + print( + "\n\n----------------------------\nProgram ABORTED Due to Unexpected TAOS Error: \n\n{}\n".format(errMsg) + "----------------------------\n") # sys.exit(-1) self._err = err self._aborted = True - except Exception as e : + except Exception as e: self.logInfo("Non-TAOS exception encountered") - self._err = e + self._err = e self._aborted = True traceback.print_exc() - except : - self.logDebug("[=] Unexpected exception, SQL: {}".format(self._lastSql)) + except BaseException: + self.logDebug( + "[=] Unexpected exception, SQL: {}".format( + self._lastSql)) raise self._execStats.endTaskType(self.__class__.__name__, self.isSuccess()) - - self.logDebug("[X] task execution completed, {}, status: {}".format(self.__class__.__name__, "Success" if 
self.isSuccess() else "Failure")) - self._execStats.incExecCount(self.__class__.__name__, self.isSuccess()) # TODO: merge with above. + + self.logDebug("[X] task execution completed, {}, status: {}".format( + self.__class__.__name__, "Success" if self.isSuccess() else "Failure")) + # TODO: merge with above. + self._execStats.incExecCount(self.__class__.__name__, self.isSuccess()) def execSql(self, sql): self._lastSql = sql return self._dbManager.execute(sql) - def execWtSql(self, wt: WorkerThread, sql): # execute an SQL on the worker thread + def execWtSql(self, wt: WorkerThread, sql): # execute an SQL on the worker thread self._lastSql = sql return wt.execSql(sql) - def queryWtSql(self, wt: WorkerThread, sql): # execute an SQL on the worker thread + def queryWtSql(self, wt: WorkerThread, sql): # execute an SQL on the worker thread self._lastSql = sql return wt.querySql(sql) - def getQueryResult(self, wt: WorkerThread): # execute an SQL on the worker thread + def getQueryResult(self, wt: WorkerThread): # execute an SQL on the worker thread return wt.getQueryResult() - class ExecutionStats: def __init__(self): - self._execTimes: Dict[str, [int, int]] = {} # total/success times for a task + # total/success times for a task + self._execTimes: Dict[str, [int, int]] = {} self._tasksInProgress = 0 self._lock = threading.Lock() self._firstTaskStartTime = None self._execStartTime = None - self._elapsedTime = 0.0 # total elapsed time - self._accRunTime = 0.0 # accumulated run time + self._elapsedTime = 0.0 # total elapsed time + self._accRunTime = 0.0 # accumulated run time self._failed = False self._failureReason = None def __str__(self): - return "[ExecStats: _failed={}, _failureReason={}".format(self._failed, self._failureReason) + return "[ExecStats: _failed={}, _failureReason={}".format( + self._failed, self._failureReason) def isFailed(self): - return self._failed == True + return self._failed def startExec(self): self._execStartTime = time.time() @@ -1285,24 +1453,24 @@ class ExecutionStats: def endExec(self): self._elapsedTime = time.time() - self._execStartTime - def incExecCount(self, klassName, isSuccess): # TODO: add a lock here + def incExecCount(self, klassName, isSuccess): # TODO: add a lock here if klassName not in self._execTimes: self._execTimes[klassName] = [0, 0] - t = self._execTimes[klassName] # tuple for the data - t[0] += 1 # index 0 has the "total" execution times + t = self._execTimes[klassName] # tuple for the data + t[0] += 1 # index 0 has the "total" execution times if isSuccess: - t[1] += 1 # index 1 has the "success" execution times + t[1] += 1 # index 1 has the "success" execution times def beginTaskType(self, klassName): with self._lock: - if self._tasksInProgress == 0 : # starting a new round - self._firstTaskStartTime = time.time() # I am now the first task + if self._tasksInProgress == 0: # starting a new round + self._firstTaskStartTime = time.time() # I am now the first task self._tasksInProgress += 1 def endTaskType(self, klassName, isSuccess): with self._lock: self._tasksInProgress -= 1 - if self._tasksInProgress == 0 : # all tasks have stopped + if self._tasksInProgress == 0: # all tasks have stopped self._accRunTime += (time.time() - self._firstTaskStartTime) self._firstTaskStartTime = None @@ -1311,23 +1479,36 @@ class ExecutionStats: self._failureReason = reason def printStats(self): - logger.info("----------------------------------------------------------------------") - logger.info("| Crash_Gen test {}, with the following stats:". 
- format("FAILED (reason: {})".format(self._failureReason) if self._failed else "SUCCEEDED")) + logger.info( + "----------------------------------------------------------------------") + logger.info( + "| Crash_Gen test {}, with the following stats:". format( + "FAILED (reason: {})".format( + self._failureReason) if self._failed else "SUCCEEDED")) logger.info("| Task Execution Times (success/total):") execTimesAny = 0 - for k, n in self._execTimes.items(): + for k, n in self._execTimes.items(): execTimesAny += n[0] - logger.info("| {0:<24}: {1}/{2}".format(k,n[1],n[0])) - - logger.info("| Total Tasks Executed (success or not): {} ".format(execTimesAny)) - logger.info("| Total Tasks In Progress at End: {}".format(self._tasksInProgress)) - logger.info("| Total Task Busy Time (elapsed time when any task is in progress): {:.3f} seconds".format(self._accRunTime)) - logger.info("| Average Per-Task Execution Time: {:.3f} seconds".format(self._accRunTime/execTimesAny)) - logger.info("| Total Elapsed Time (from wall clock): {:.3f} seconds".format(self._elapsedTime)) - logger.info("| Top numbers written: {}".format(TaskExecutor.getBoundedList())) - logger.info("----------------------------------------------------------------------") - + logger.info("| {0:<24}: {1}/{2}".format(k, n[1], n[0])) + + logger.info( + "| Total Tasks Executed (success or not): {} ".format(execTimesAny)) + logger.info( + "| Total Tasks In Progress at End: {}".format( + self._tasksInProgress)) + logger.info( + "| Total Task Busy Time (elapsed time when any task is in progress): {:.3f} seconds".format( + self._accRunTime)) + logger.info( + "| Average Per-Task Execution Time: {:.3f} seconds".format(self._accRunTime / execTimesAny)) + logger.info( + "| Total Elapsed Time (from wall clock): {:.3f} seconds".format( + self._elapsedTime)) + logger.info( + "| Top numbers written: {}".format( + TaskExecutor.getBoundedList())) + logger.info( + "----------------------------------------------------------------------") class StateTransitionTask(Task): @@ -1337,12 +1518,12 @@ class StateTransitionTask(Task): SMALL_NUMBER_OF_RECORDS = 3 @classmethod - def getInfo(cls): # each sub class should supply their own information + def getInfo(cls): # each sub class should supply their own information raise RuntimeError("Overriding method expected") - _endState = None + _endState = None @classmethod - def getEndState(cls): # TODO: optimize by calling it fewer times + def getEndState(cls): # TODO: optimize by calling it fewer times raise RuntimeError("Overriding method expected") # @classmethod @@ -1364,18 +1545,20 @@ class StateTransitionTask(Task): def execute(self, wt: WorkerThread): super().execute(wt) - + + class TaskCreateDb(StateTransitionTask): @classmethod def getEndState(cls): - return StateDbOnly() + return StateDbOnly() @classmethod def canBeginFrom(cls, state: AnyState): return state.canCreateDb() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - self.execWtSql(wt, "create database db") + self.execWtSql(wt, "create database db") + class TaskDropDb(StateTransitionTask): @classmethod @@ -1390,6 +1573,7 @@ class TaskDropDb(StateTransitionTask): self.execWtSql(wt, "drop database db") logger.debug("[OPS] database dropped at {}".format(time.time())) + class TaskCreateSuperTable(StateTransitionTask): @classmethod def getEndState(cls): @@ -1400,115 +1584,135 @@ class TaskCreateSuperTable(StateTransitionTask): return state.canCreateFixedSuperTable() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - if not 
wt.dbInUse(): # no DB yet, to the best of our knowledge + if not wt.dbInUse(): # no DB yet, to the best of our knowledge logger.debug("Skipping task, no DB yet") return - tblName = self._dbManager.getFixedSuperTableName() + tblName = self._dbManager.getFixedSuperTableName() # wt.execSql("use db") # should always be in place - self.execWtSql(wt, "create table db.{} (ts timestamp, speed int) tags (b binary(200), f float) ".format(tblName)) - # No need to create the regular tables, INSERT will do that automatically + self.execWtSql( + wt, + "create table db.{} (ts timestamp, speed int) tags (b binary(200), f float) ".format(tblName)) + # No need to create the regular tables, INSERT will do that + # automatically class TaskReadData(StateTransitionTask): @classmethod def getEndState(cls): - return None # meaning doesn't affect state + return None # meaning doesn't affect state @classmethod def canBeginFrom(cls, state: AnyState): return state.canReadData() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - sTbName = self._dbManager.getFixedSuperTableName() - self.queryWtSql(wt, "select TBNAME from db.{}".format(sTbName)) # TODO: analyze result set later + sTbName = self._dbManager.getFixedSuperTableName() + self.queryWtSql(wt, "select TBNAME from db.{}".format( + sTbName)) # TODO: analyze result set later - if random.randrange(5) == 0 : # 1 in 5 chance, simulate a broken connection. TODO: break connection in all situations + if random.randrange( + 5) == 0: # 1 in 5 chance, simulate a broken connection. TODO: break connection in all situations wt.getDbConn().close() wt.getDbConn().open() else: - rTables = self.getQueryResult(wt) # wt.getDbConn().getQueryResult() + # wt.getDbConn().getQueryResult() + rTables = self.getQueryResult(wt) # print("rTables[0] = {}, type = {}".format(rTables[0], type(rTables[0]))) - for rTbName in rTables : # regular tables + for rTbName in rTables: # regular tables self.execWtSql(wt, "select * from db.{}".format(rTbName[0])) # tdSql.query(" cars where tbname in ('carzero', 'carone')") + class TaskDropSuperTable(StateTransitionTask): @classmethod def getEndState(cls): - return StateDbOnly() + return StateDbOnly() @classmethod def canBeginFrom(cls, state: AnyState): return state.canDropFixedSuperTable() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - # 1/2 chance, we'll drop the regular tables one by one, in a randomized sequence - if Dice.throw(2) == 0 : - tblSeq = list(range(2 + (self.LARGE_NUMBER_OF_TABLES if gConfig.larger_data else self.SMALL_NUMBER_OF_TABLES))) - random.shuffle(tblSeq) - tickOutput = False # if we have spitted out a "d" character for "drop regular table" + # 1/2 chance, we'll drop the regular tables one by one, in a randomized + # sequence + if Dice.throw(2) == 0: + tblSeq = list(range( + 2 + (self.LARGE_NUMBER_OF_TABLES if gConfig.larger_data else self.SMALL_NUMBER_OF_TABLES))) + random.shuffle(tblSeq) + tickOutput = False # if we have spitted out a "d" character for "drop regular table" isSuccess = True - for i in tblSeq: - regTableName = self.getRegTableName(i); # "db.reg_table_{}".format(i) + for i in tblSeq: + regTableName = self.getRegTableName( + i) # "db.reg_table_{}".format(i) try: - self.execWtSql(wt, "drop table {}".format(regTableName)) # nRows always 0, like MySQL - except taos.error.ProgrammingError as err: - errno2 = err.errno if (err.errno > 0) else 0x80000000 + err.errno # correcting for strange error number scheme - if ( errno2 in [0x362]) : # mnode invalid table name + self.execWtSql(wt, "drop 
table {}".format( + regTableName)) # nRows always 0, like MySQL + except taos.error.ProgrammingError as err: + # correcting for strange error number scheme + errno2 = err.errno if ( + err.errno > 0) else 0x80000000 + err.errno + if (errno2 in [0x362]): # mnode invalid table name isSuccess = False - logger.debug("[DB] Acceptable error when dropping a table") - continue # try to delete next regular table + logger.debug( + "[DB] Acceptable error when dropping a table") + continue # try to delete next regular table if (not tickOutput): - tickOutput = True # Print only one time - if isSuccess : + tickOutput = True # Print only one time + if isSuccess: print("d", end="", flush=True) else: - print("f", end="", flush=True) + print("f", end="", flush=True) # Drop the super table itself - tblName = self._dbManager.getFixedSuperTableName() + tblName = self._dbManager.getFixedSuperTableName() self.execWtSql(wt, "drop table db.{}".format(tblName)) + class TaskAlterTags(StateTransitionTask): @classmethod def getEndState(cls): - return None # meaning doesn't affect state + return None # meaning doesn't affect state @classmethod def canBeginFrom(cls, state: AnyState): - return state.canDropFixedSuperTable() # if we can drop it, we can alter tags + return state.canDropFixedSuperTable() # if we can drop it, we can alter tags def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - tblName = self._dbManager.getFixedSuperTableName() + tblName = self._dbManager.getFixedSuperTableName() dice = Dice.throw(4) - if dice == 0 : + if dice == 0: sql = "alter table db.{} add tag extraTag int".format(tblName) - elif dice == 1 : + elif dice == 1: sql = "alter table db.{} drop tag extraTag".format(tblName) - elif dice == 2 : + elif dice == 2: sql = "alter table db.{} drop tag newTag".format(tblName) - else: # dice == 3 - sql = "alter table db.{} change tag extraTag newTag".format(tblName) + else: # dice == 3 + sql = "alter table db.{} change tag extraTag newTag".format( + tblName) self.execWtSql(wt, sql) -class TaskAddData(StateTransitionTask): - activeTable : Set[int] = set() # Track which table is being actively worked on - # We use these two files to record operations to DB, useful for power-off tests +class TaskAddData(StateTransitionTask): + # Track which table is being actively worked on + activeTable: Set[int] = set() + + # We use these two files to record operations to DB, useful for power-off + # tests fAddLogReady = None fAddLogDone = None @classmethod def prepToRecordOps(cls): - if gConfig.record_ops : - if ( cls.fAddLogReady == None ): - logger.info("Recording in a file operations to be performed...") + if gConfig.record_ops: + if (cls.fAddLogReady is None): + logger.info( + "Recording in a file operations to be performed...") cls.fAddLogReady = open("add_log_ready.txt", "w") - if ( cls.fAddLogDone == None ): + if (cls.fAddLogDone is None): logger.info("Recording in a file operations completed...") cls.fAddLogDone = open("add_log_done.txt", "w") @@ -1519,71 +1723,84 @@ class TaskAddData(StateTransitionTask): @classmethod def canBeginFrom(cls, state: AnyState): return state.canAddData() - + def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): ds = self._dbManager - # wt.execSql("use db") # TODO: seems to be an INSERT bug to require this - tblSeq = list(range(self.LARGE_NUMBER_OF_TABLES if gConfig.larger_data else self.SMALL_NUMBER_OF_TABLES)) - random.shuffle(tblSeq) - for i in tblSeq: - if ( i in self.activeTable ): # wow already active - # logger.info("Concurrent data insertion into 
table: {}".format(i)) - # print("ct({})".format(i), end="", flush=True) # Concurrent insertion into table + # wt.execSql("use db") # TODO: seems to be an INSERT bug to require + # this + tblSeq = list( + range( + self.LARGE_NUMBER_OF_TABLES if gConfig.larger_data else self.SMALL_NUMBER_OF_TABLES)) + random.shuffle(tblSeq) + for i in tblSeq: + if (i in self.activeTable): # wow already active + # logger.info("Concurrent data insertion into table: {}".format(i)) + # print("ct({})".format(i), end="", flush=True) # Concurrent + # insertion into table print("x", end="", flush=True) else: - self.activeTable.add(i) # marking it active - # No need to shuffle data sequence, unless later we decide to do non-increment insertion - regTableName = self.getRegTableName(i); # "db.reg_table_{}".format(i) - for j in range(self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS) : # number of records per table - nextInt = ds.getNextInt() + self.activeTable.add(i) # marking it active + # No need to shuffle data sequence, unless later we decide to do + # non-increment insertion + regTableName = self.getRegTableName( + i) # "db.reg_table_{}".format(i) + for j in range( + self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS): # number of records per table + nextInt = ds.getNextInt() if gConfig.record_ops: self.prepToRecordOps() - self.fAddLogReady.write("Ready to write {} to {}\n".format(nextInt, regTableName)) + self.fAddLogReady.write( + "Ready to write {} to {}\n".format( + nextInt, regTableName)) self.fAddLogReady.flush() os.fsync(self.fAddLogReady) sql = "insert into {} using {} tags ('{}', {}) values ('{}', {});".format( - regTableName, - ds.getFixedSuperTableName(), + regTableName, + ds.getFixedSuperTableName(), ds.getNextBinary(), ds.getNextFloat(), ds.getNextTick(), nextInt) - self.execWtSql(wt, sql) - # Successfully wrote the data into the DB, let's record it somehow + self.execWtSql(wt, sql) + # Successfully wrote the data into the DB, let's record it + # somehow te.recordDataMark(nextInt) if gConfig.record_ops: - self.fAddLogDone.write("Wrote {} to {}\n".format(nextInt, regTableName)) + self.fAddLogDone.write( + "Wrote {} to {}\n".format( + nextInt, regTableName)) self.fAddLogDone.flush() os.fsync(self.fAddLogDone) - self.activeTable.discard(i) # not raising an error, unlike remove + self.activeTable.discard(i) # not raising an error, unlike remove # Deterministic random number generator class Dice(): - seeded = False # static, uninitialized + seeded = False # static, uninitialized @classmethod - def seed(cls, s): # static + def seed(cls, s): # static if (cls.seeded): - raise RuntimeError("Cannot seed the random generator more than once") + raise RuntimeError( + "Cannot seed the random generator more than once") cls.verifyRNG() random.seed(s) cls.seeded = True # TODO: protect against multi-threading @classmethod - def verifyRNG(cls): # Verify that the RNG is determinstic + def verifyRNG(cls): # Verify that the RNG is determinstic random.seed(0) x1 = random.randrange(0, 1000) x2 = random.randrange(0, 1000) x3 = random.randrange(0, 1000) - if ( x1 != 864 or x2!=394 or x3!=776 ): + if (x1 != 864 or x2 != 394 or x3 != 776): raise RuntimeError("System RNG is not deterministic") @classmethod - def throw(cls, stop): # get 0 to stop-1 + def throw(cls, stop): # get 0 to stop-1 return cls.throwRange(0, stop) @classmethod - def throwRange(cls, start, stop): # up to stop-1 - if ( not cls.seeded ): + def throwRange(cls, start, stop): # up to stop-1 + 
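        # the guard below rejects unseeded throws: every roll must come from
        # the seed-once generator whose determinism verifyRNG() checks above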
if (not cls.seeded): raise RuntimeError("Cannot throw dice before seeding it") return random.randrange(start, stop) @@ -1599,7 +1816,7 @@ class Dice(): # ] # def throwDice(self): -# max = len(self.tasks) - 1 +# max = len(self.tasks) - 1 # dRes = random.randint(0, max) # # logger.debug("Threw the dice in range [{},{}], and got: {}".format(0,max,dRes)) # return dRes @@ -1614,8 +1831,8 @@ class Dice(): class LoggingFilter(logging.Filter): def filter(self, record: logging.LogRecord): - if ( record.levelno >= logging.INFO ) : - return True # info or above always log + if (record.levelno >= logging.INFO): + return True # info or above always log # Commenting out below to adjust... @@ -1623,13 +1840,15 @@ class LoggingFilter(logging.Filter): # return False return True -class MyLoggingAdapter(logging.LoggerAdapter): + +class MyLoggingAdapter(logging.LoggerAdapter): def process(self, msg, kwargs): return "[{}]{}".format(threading.get_ident() % 10000, msg), kwargs # return '[%s] %s' % (self.extra['connid'], msg), kwargs -class SvcManager: - + +class SvcManager: + def __init__(self): print("Starting service manager") signal.signal(signal.SIGTERM, self.sigIntHandler) @@ -1641,17 +1860,18 @@ class SvcManager: def svcOutputReader(self, out: IO, queue): # print("This is the svcOutput Reader...") - for line in out : # iter(out.readline, b''): + for line in out: # iter(out.readline, b''): # print("Finished reading a line: {}".format(line)) - queue.put(line.rstrip()) # get rid of new lines - print("No more output from incoming IO") # meaning sub process must have died + queue.put(line.rstrip()) # get rid of new lines + # meaning sub process must have died + print("No more output from incoming IO") out.close() def sigIntHandler(self, signalNumber, frame): - if self.status != MainExec.STATUS_RUNNING : + if self.status != MainExec.STATUS_RUNNING: print("Ignoring repeated SIGINT...") - return # do nothing if it's already not running - self.status = MainExec.STATUS_STOPPING # immediately set our status + return # do nothing if it's already not running + self.status = MainExec.STATUS_STOPPING # immediately set our status print("Terminating program...") self.subProcess.send_signal(signal.SIGINT) @@ -1659,33 +1879,40 @@ class SvcManager: self.joinIoThread() def joinIoThread(self): - if self.ioThread : + if self.ioThread: self.ioThread.join() - self.ioThread = None + self.ioThread = None def run(self): ON_POSIX = 'posix' in sys.builtin_module_names svcCmd = ['../../build/build/bin/taosd', '-c', '../../build/test/cfg'] # svcCmd = ['vmstat', '1'] - self.subProcess = subprocess.Popen(svcCmd, stdout=subprocess.PIPE, bufsize=1, close_fds=ON_POSIX, text=True) + self.subProcess = subprocess.Popen( + svcCmd, + stdout=subprocess.PIPE, + bufsize=1, + close_fds=ON_POSIX, + text=True) q = Queue() - self.ioThread = threading.Thread(target=self.svcOutputReader, args=(self.subProcess.stdout, q)) - self.ioThread.daemon = True # thread dies with the program + self.ioThread = threading.Thread( + target=self.svcOutputReader, args=( + self.subProcess.stdout, q)) + self.ioThread.daemon = True # thread dies with the program self.ioThread.start() - # proc = subprocess.Popen(['echo', '"to stdout"'], + # proc = subprocess.Popen(['echo', '"to stdout"'], # stdout=subprocess.PIPE, # ) # stdout_value = proc.communicate()[0] # print('\tstdout: {}'.format(repr(stdout_value))) - while True : - try: - line = q.get_nowait() # getting output at fast speed + while True: + try: + line = q.get_nowait() # getting output at fast speed except Empty: # 
print('no output yet') - time.sleep(2.3) # wait only if there's no output - else: # got line + time.sleep(2.3) # wait only if there's no output + else: # got line print(line) # print("----end of iteration----") if self.shouldStop: @@ -1693,10 +1920,11 @@ class SvcManager: break print("end of loop") - + self.joinIoThread() print("Finished") + class ClientManager: def __init__(self): print("Starting service manager") @@ -1707,41 +1935,42 @@ class ClientManager: self.tc = None def sigIntHandler(self, signalNumber, frame): - if self.status != MainExec.STATUS_RUNNING : + if self.status != MainExec.STATUS_RUNNING: print("Ignoring repeated SIGINT...") - return # do nothing if it's already not running - self.status = MainExec.STATUS_STOPPING # immediately set our status + return # do nothing if it's already not running + self.status = MainExec.STATUS_STOPPING # immediately set our status print("Terminating program...") self.tc.requestToStop() - def _printLastNumbers(self): # to verify data durability + def _printLastNumbers(self): # to verify data durability dbManager = DbManager(resetDb=False) dbc = dbManager.getDbConn() - if dbc.query("show databases") == 0 : # no databae + if dbc.query("show databases") == 0: # no databae return - if dbc.query("show tables") == 0 : # no tables + if dbc.query("show tables") == 0: # no tables return dbc.execute("use db") - sTbName = dbManager.getFixedSuperTableName() + sTbName = dbManager.getFixedSuperTableName() # get all regular tables - dbc.query("select TBNAME from db.{}".format(sTbName)) # TODO: analyze result set later + # TODO: analyze result set later + dbc.query("select TBNAME from db.{}".format(sTbName)) rTables = dbc.getQueryResult() bList = TaskExecutor.BoundedList() - for rTbName in rTables : # regular tables + for rTbName in rTables: # regular tables dbc.query("select speed from db.{}".format(rTbName[0])) numbers = dbc.getQueryResult() - for row in numbers : + for row in numbers: # print("<{}>".format(n), end="", flush=True) bList.add(row[0]) print("Top numbers in DB right now: {}".format(bList)) print("TDengine client execution is about to start in 2 seconds...") time.sleep(2.0) - dbManager = None # release? + dbManager = None # release? def prepare(self): self._printLastNumbers() @@ -1749,21 +1978,22 @@ class ClientManager: def run(self): self._printLastNumbers() - dbManager = DbManager() # Regular function - Dice.seed(0) # initial seeding of dice + dbManager = DbManager() # Regular function + Dice.seed(0) # initial seeding of dice thPool = ThreadPool(gConfig.num_threads, gConfig.max_steps) self.tc = ThreadCoordinator(thPool, dbManager) - + self.tc.run() # print("exec stats: {}".format(self.tc.getExecStats())) # print("TC failed = {}".format(self.tc.isFailed())) self.conclude() # print("TC failed (2) = {}".format(self.tc.isFailed())) - return 1 if self.tc.isFailed() else 0 # Linux return code: ref https://shapeshed.com/unix-exit-codes/ + # Linux return code: ref https://shapeshed.com/unix-exit-codes/ + return 1 if self.tc.isFailed() else 0 def conclude(self): self.tc.printStats() - self.tc.getDbManager().cleanUp() + self.tc.getDbManager().cleanUp() class MainExec: @@ -1782,13 +2012,13 @@ class MainExec: svcManager.run() @classmethod - def runTemp(cls): # for debugging purposes + def runTemp(cls): # for debugging purposes # # Hack to exercise reading from disk, imcreasing coverage. 
TODO: fix # dbc = dbState.getDbConn() - # sTbName = dbState.getFixedSuperTableName() + # sTbName = dbState.getFixedSuperTableName() # dbc.execute("create database if not exists db") # if not dbState.getState().equals(StateEmpty()): - # dbc.execute("use db") + # dbc.execute("use db") # rTables = None # try: # the super table may not exist @@ -1800,7 +2030,7 @@ class MainExec: # logger.info("Result: {}".format(rTables)) # except taos.error.ProgrammingError as err: # logger.info("Initial Super table OPS error: {}".format(err)) - + # # sys.exit() # if ( not rTables == None): # # print("rTables[0] = {}, type = {}".format(rTables[0], type(rTables[0]))) @@ -1809,24 +2039,26 @@ class MainExec: # ds = dbState # logger.info("Inserting into table: {}".format(rTbName[0])) # sql = "insert into db.{} values ('{}', {});".format( - # rTbName[0], + # rTbName[0], # ds.getNextTick(), ds.getNextInt()) # dbc.execute(sql) - # for rTbName in rTables : # regular tables + # for rTbName in rTables : # regular tables # dbc.query("select * from db.{}".format(rTbName[0])) # TODO: check success failure - # logger.info("Initial READING operation is successful") + # logger.info("Initial READING operation is successful") # except taos.error.ProgrammingError as err: - # logger.info("Initial WRITE/READ error: {}".format(err)) - + # logger.info("Initial WRITE/READ error: {}".format(err)) + # Sandbox testing code # dbc = dbState.getDbConn() # while True: - # rows = dbc.query("show databases") + # rows = dbc.query("show databases") # print("Rows: {}, time={}".format(rows, time.time())) - return + return + def main(): - # Super cool Python argument library: https://docs.python.org/3/library/argparse.html + # Super cool Python argument library: + # https://docs.python.org/3/library/argparse.html parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent('''\ @@ -1837,22 +2069,52 @@ def main(): ''')) - parser.add_argument('-c', '--connector-type', action='store', default='native', type=str, - help='Connector type to use: native, rest, or mixed (default: 10)') - parser.add_argument('-d', '--debug', action='store_true', - help='Turn on DEBUG mode for more logging (default: false)') - parser.add_argument('-e', '--run-tdengine', action='store_true', - help='Run TDengine service in foreground (default: false)') - parser.add_argument('-l', '--larger-data', action='store_true', - help='Write larger amount of data during write operations (default: false)') - parser.add_argument('-p', '--per-thread-db-connection', action='store_true', - help='Use a single shared db connection (default: false)') - parser.add_argument('-r', '--record-ops', action='store_true', - help='Use a pair of always-fsynced fils to record operations performing + performed, for power-off tests (default: false)') - parser.add_argument('-s', '--max-steps', action='store', default=1000, type=int, - help='Maximum number of steps to run (default: 100)') - parser.add_argument('-t', '--num-threads', action='store', default=5, type=int, - help='Number of threads to run (default: 10)') + parser.add_argument( + '-c', + '--connector-type', + action='store', + default='native', + type=str, + help='Connector type to use: native, rest, or mixed (default: 10)') + parser.add_argument( + '-d', + '--debug', + action='store_true', + help='Turn on DEBUG mode for more logging (default: false)') + parser.add_argument( + '-e', + '--run-tdengine', + action='store_true', + help='Run TDengine service in foreground (default: false)') + 
parser.add_argument( + '-l', + '--larger-data', + action='store_true', + help='Write larger amount of data during write operations (default: false)') + parser.add_argument( + '-p', + '--per-thread-db-connection', + action='store_true', + help='Use a single shared db connection (default: false)') + parser.add_argument( + '-r', + '--record-ops', + action='store_true', + help='Use a pair of always-fsynced fils to record operations performing + performed, for power-off tests (default: false)') + parser.add_argument( + '-s', + '--max-steps', + action='store', + default=1000, + type=int, + help='Maximum number of steps to run (default: 100)') + parser.add_argument( + '-t', + '--num-threads', + action='store', + default=5, + type=int, + help='Number of threads to run (default: 10)') global gConfig gConfig = parser.parse_args() @@ -1860,31 +2122,40 @@ def main(): # if len(sys.argv) == 1: # parser.print_help() # sys.exit() - + # Logging Stuff global logger - _logger = logging.getLogger('CrashGen') # real logger - _logger.addFilter(LoggingFilter()) + _logger = logging.getLogger('CrashGen') # real logger + _logger.addFilter(LoggingFilter()) ch = logging.StreamHandler() _logger.addHandler(ch) - logger = MyLoggingAdapter(_logger, []) # Logging adapter, to be used as a logger + # Logging adapter, to be used as a logger + logger = MyLoggingAdapter(_logger, []) - if ( gConfig.debug ): - logger.setLevel(logging.DEBUG) # default seems to be INFO + if (gConfig.debug): + logger.setLevel(logging.DEBUG) # default seems to be INFO else: logger.setLevel(logging.INFO) - + # Run server or client - if gConfig.run_tdengine : # run server + if gConfig.run_tdengine: # run server MainExec.runService() - else : + else: return MainExec.runClient() - # logger.info("Crash_Gen execution finished") + if __name__ == "__main__": + tdDnodes.init("") + tdDnodes.setTestCluster(False) + tdDnodes.setValgrind(False) + tdDnodes.stopAll() + tdDnodes.deploy(1) + tdDnodes.start(1) + tdLog.sleep(5) + exitCode = main() # print("Exiting with code: {}".format(exitCode)) sys.exit(exitCode) diff --git a/tests/pytest/crash_gen.sh b/tests/pytest/crash_gen.sh index de80361aa3..3c04cd6198 100755 --- a/tests/pytest/crash_gen.sh +++ b/tests/pytest/crash_gen.sh @@ -31,11 +31,23 @@ then exit -1 fi +CURR_DIR=`pwd` +IN_TDINTERNAL="community" +if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then + TAOS_DIR=$CURR_DIR/../../.. +else + TAOS_DIR=$CURR_DIR/../.. +fi +TAOSD_DIR=`find $TAOS_DIR -name "taosd"|grep bin|head -n1` + +LIB_DIR=`echo $TAOSD_DIR|rev|cut -d '/' -f 3,4,5,6|rev`/lib +echo $LIB_DIR + # First we need to set up a path for Python to find our own TAOS modules, so that "import" can work. export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3 # Then let us set up the library path so that our compiled SO file can be loaded by Python -export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/../../build/build/lib +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR # Now we are all let, and let's see if we can find a crash. 
python3 ./crash_gen.py $@

From f33b508d86fa2ec01e32fc6faca0301a255806f4 Mon Sep 17 00:00:00 2001
From: Shuduo Sang
Date: Wed, 8 Jul 2020 11:03:02 +0800
Subject: [PATCH 05/38] change bionic to focal to use python 3.8

---
 .travis.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.travis.yml b/.travis.yml
index 7f8d0fdb67..001f2967dc 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -13,7 +13,7 @@ branches:
 matrix:
   - os: linux
-    dist: bionic
+    dist: focal
     language: c

 git:

From 998df217d5845766b08c3f0ca5e278d2048298f8 Mon Sep 17 00:00:00 2001
From: Shuduo Sang
Date: Wed, 8 Jul 2020 11:10:28 +0800
Subject: [PATCH 06/38] remove python2 packages.

---
 .travis.yml | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 001f2967dc..2d21401289 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -28,8 +28,6 @@ matrix:
           - build-essential
           - cmake
           - net-tools
-          - python-pip
-          - python-setuptools
          - python3-pip
          - python3-setuptools
          - valgrind
@@ -54,8 +52,6 @@ matrix:
        cd ${TRAVIS_BUILD_DIR}/debug
        make install > /dev/null || travis_terminate $?

-        pip install numpy
-        pip install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python2/
        pip3 install numpy
        pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/

From c63f85505de2c3b6513d63c108aaf2114d01ba7b Mon Sep 17 00:00:00 2001
From: Shuduo Sang
Date: Wed, 8 Jul 2020 16:31:47 +0800
Subject: [PATCH 07/38] refactor pathfinding in crash_gen.py

---
 tests/pytest/crash_gen.py | 32 +++++++++++++++++++++++++++---------
 1 file changed, 23 insertions(+), 9 deletions(-)

diff --git a/tests/pytest/crash_gen.py b/tests/pytest/crash_gen.py
index c8b34fe7f3..6eac55f564 100755
--- a/tests/pytest/crash_gen.py
+++ b/tests/pytest/crash_gen.py
@@ -678,8 +678,25 @@ class DbConnNative(DbConn):
         self._conn = None
         self._cursor = None

+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:root.find("build")]
+                    break
+        return buildPath
+
     def openByType(self): # Open connection
-        cfgPath = "../../build/test/cfg"
+#        cfgPath = "../../build/test/cfg"
+        cfgPath = self.getBuildPath() + "/test/cfg"
+        print("CBD: cfgPath=%s" % cfgPath)
         self._conn = taos.connect(
             host="127.0.0.1",
             config=cfgPath)  # TODO: make configurable
@@ -1885,6 +1902,11 @@ class SvcManager:
     def run(self):
         ON_POSIX = 'posix' in sys.builtin_module_names

+        taosdPath = self.getBuildPath() + "/build/bin/taosd"
+        cfgPath = self.getBuildPath() + "/test/cfg"
+
+        print ("CBD: taosdPath:%s cfgPath:%s" % (taosdPath, cfgPath))
+
         svcCmd = ['../../build/build/bin/taosd', '-c', '../../build/test/cfg']
         # svcCmd = ['vmstat', '1']
         self.subProcess = subprocess.Popen(
@@ -2148,14 +2170,6 @@ def main():

 if __name__ == "__main__":
-    tdDnodes.init("")
-    tdDnodes.setTestCluster(False)
-    tdDnodes.setValgrind(False)
-    tdDnodes.stopAll()
-    tdDnodes.deploy(1)
-    tdDnodes.start(1)
-    tdLog.sleep(5)
-
     exitCode = main()
     # print("Exiting with code: {}".format(exitCode))
     sys.exit(exitCode)

From 4efd4762684209ce324e66f673edfd59c88ca62a Mon Sep 17 00:00:00 2001
From: Shuduo Sang
Date: Wed, 8 Jul 2020 18:28:17 +0800
Subject: [PATCH 08/38] fix autopep8 format.
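The pathfinding patch above replaces the hard-coded '../../build' paths with a walk of the source tree: locate the taosd binary, skip any copy under the packaging directory, and cut the path off at 'build'. A standalone sketch of that strategy with a guard for the not-found case (the function name is illustrative, not part of the patch):

import os

def find_build_path(self_path):
    # The project root is everything before 'community' (TD internal tree)
    # or, failing that, everything before 'tests' (the public tree).
    if "community" in self_path:
        proj_path = self_path[:self_path.find("community")]
    else:
        proj_path = self_path[:self_path.find("tests")]

    build_path = ""  # empty string means taosd was not found anywhere
    for root, dirs, files in os.walk(proj_path):
        if "taosd" in files:
            root_real_path = os.path.dirname(os.path.realpath(root))
            if "packaging" not in root_real_path:  # ignore packaged copies
                build_path = root[:root.find("build")]
                break
    return build_path

Note that getBuildPath() in the patch returns an unbound buildPath if the walk never finds taosd; initializing it up front, as in this sketch, avoids the resulting UnboundLocalError.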
---
 tests/pytest/crash_gen.py | 264 ++++++++++++++++++++++++----------------
 1 file changed, 160 insertions(+), 104 deletions(-)

diff --git a/tests/pytest/crash_gen.py b/tests/pytest/crash_gen.py
index 173b6c448c..b66e8d48b6 100755
--- a/tests/pytest/crash_gen.py
+++ b/tests/pytest/crash_gen.py
@@ -889,7 +889,7 @@ class StateEmpty(AnyState):
     def verifyTasksToState(self, tasks, newState):
         if (self.hasSuccess(tasks, TaskCreateDb)
-                ):  # at EMPTY, if there's success in creating DB
+                ):  # at EMPTY, if there's success in creating DB
             if (not self.hasTask(tasks, TaskDropDb)):  # and no drop_db tasks
                 # we must have at most one. TODO: compare numbers
                 self.assertAtMostOneSuccess(tasks, TaskCreateDb)
@@ -944,7 +944,7 @@ class StateSuperTableOnly(AnyState):
     def verifyTasksToState(self, tasks, newState):
         if (self.hasSuccess(tasks, TaskDropSuperTable)
-                ):  # we are able to drop the table
+                ):  # we are able to drop the table
             #self.assertAtMostOneSuccess(tasks, TaskDropSuperTable)
             # we must have recreated it
             self.hasSuccess(tasks, TaskCreateSuperTable)
@@ -978,7 +978,7 @@ class StateHasData(AnyState):
             self.assertAtMostOneSuccess(tasks, TaskDropDb)  # TODO: dicey
         elif (newState.equals(AnyState.STATE_DB_ONLY)):  # in DB only
             if (not self.hasTask(tasks, TaskCreateDb)
-                ):  # without a create_db task
+                ):  # without a create_db task
                 # we must have drop_db task
                 self.assertNoTask(tasks, TaskDropDb)
             self.hasSuccess(tasks, TaskDropSuperTable)
@@ -990,11 +990,11 @@
             # self.hasSuccess(tasks, DeleteDataTasks)
         else:  # should be STATE_HAS_DATA
             if (not self.hasTask(tasks, TaskCreateDb)
-                ):  # only if we didn't create one
+                ):  # only if we didn't create one
                 # we shouldn't have dropped it
                 self.assertNoTask(tasks, TaskDropDb)
             if (not self.hasTask(tasks, TaskCreateSuperTable)
-                ):  # if we didn't create the table
+                ):  # if we didn't create the table
                 # we should not have a task that drops it
                 self.assertNoTask(tasks, TaskDropSuperTable)
         # self.assertIfExistThenSuccess(tasks, ReadFixedDataTask)
@@ -1385,15 +1385,18 @@ class Task():
         try:
             self._executeInternal(te, wt)  # TODO: no return value?
except taos.error.ProgrammingError as err: - errno2 = err.errno if (err.errno > 0) else 0x80000000 + err.errno # correct error scheme - if ( errno2 in [ - 0x05, # TSDB_CODE_RPC_NOT_READY - 0x200, 0x360, 0x362, 0x36A, 0x36B, 0x36D, 0x381, 0x380, 0x383, 0x503, - 0x510, # vnode not in ready state + errno2 = err.errno if ( + err.errno > 0) else 0x80000000 + err.errno # correct error scheme + if (errno2 in [ + 0x05, # TSDB_CODE_RPC_NOT_READY + 0x200, 0x360, 0x362, 0x36A, 0x36B, 0x36D, 0x381, 0x380, 0x383, 0x503, + 0x510, # vnode not in ready state 0x600, - 1000 # REST catch-all error - ]) : # allowed errors - self.logDebug("[=] Acceptable Taos library exception: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, self._lastSql)) + 1000 # REST catch-all error + ]): # allowed errors + self.logDebug( + "[=] Acceptable Taos library exception: errno=0x{:X}, msg: {}, SQL: {}".format( + errno2, err, self._lastSql)) print("_", end="", flush=True) self._err = err else: @@ -1862,7 +1865,8 @@ class MyLoggingAdapter(logging.LoggerAdapter): return "[{}]{}".format(threading.get_ident() % 10000, msg), kwargs # return '[%s] %s' % (self.extra['connid'], msg), kwargs -class SvcManager: + +class SvcManager: MAX_QUEUE_SIZE = 10000 def __init__(self): @@ -1873,35 +1877,39 @@ class SvcManager: self.ioThread = None self.subProcess = None self.shouldStop = False - # self.status = MainExec.STATUS_RUNNING # set inside _startTaosService() + # self.status = MainExec.STATUS_RUNNING # set inside + # _startTaosService() def svcOutputReader(self, out: IO, queue): - # Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python + # Important Reference: + # https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python print("This is the svcOutput Reader...") - # for line in out : + # for line in out : for line in iter(out.readline, b''): # print("Finished reading a line: {}".format(line)) # print("Adding item to queue...") line = line.decode("utf-8").rstrip() - queue.put(line) # This might block, and then causing "out" buffer to block - print("_i", end="", flush=True) + # This might block, and then causing "out" buffer to block + queue.put(line) + print("_i", end="", flush=True) # Trim the queue if necessary oneTenthQSize = self.MAX_QUEUE_SIZE // 10 - if (queue.qsize() >= (self.MAX_QUEUE_SIZE - oneTenthQSize) ) : # 90% full? + if (queue.qsize() >= (self.MAX_QUEUE_SIZE - oneTenthQSize)): # 90% full? 
print("Triming IPC queue by: {}".format(oneTenthQSize)) - for i in range(0, oneTenthQSize) : + for i in range(0, oneTenthQSize): try: queue.get_nowait() except Empty: - break # break out of for loop, no more trimming + break # break out of for loop, no more trimming - if self.shouldStop : + if self.shouldStop: print("Stopping to read output from sub process") break # queue.put(line) - print("\nNo more output (most likely) from IO thread managing TDengine service") # meaning sub process must have died + # meaning sub process must have died + print("\nNo more output (most likely) from IO thread managing TDengine service") out.close() def _doMenu(self): @@ -1912,30 +1920,32 @@ class SvcManager: print("2: Terminate") print("3: Restart") # Remember to update the if range below - # print("Enter Choice: ", end="", flush=True) + # print("Enter Choice: ", end="", flush=True) while choice == "": choice = input("Enter Choice: ") if choice != "": - break # done with reading repeated input - if choice in ["1", "2", "3"]: - break # we are done with whole method + break # done with reading repeated input + if choice in ["1", "2", "3"]: + break # we are done with whole method print("Invalid choice, please try again.") - choice = "" # reset + choice = "" # reset return choice - def sigUsrHandler(self, signalNumber, frame) : + def sigUsrHandler(self, signalNumber, frame): print("Interrupting main thread execution upon SIGUSR1") - if self.status != MainExec.STATUS_RUNNING : + if self.status != MainExec.STATUS_RUNNING: print("Ignoring repeated SIG...") - return # do nothing if it's already not running + return # do nothing if it's already not running self.status = MainExec.STATUS_STOPPING choice = self._doMenu() - if choice == "1" : - self.sigHandlerResume() # TODO: can the sub-process be blocked due to us not reading from queue? - elif choice == "2" : + if choice == "1": + # TODO: can the sub-process be blocked due to us not reading from + # queue? 
+ self.sigHandlerResume() + elif choice == "2": self.stopTaosService() - elif choice == "3" : + elif choice == "3": self.stopTaosService() self.startTaosService() else: @@ -1943,59 +1953,62 @@ class SvcManager: def sigIntHandler(self, signalNumber, frame): print("Sig INT Handler starting...") - if self.status != MainExec.STATUS_RUNNING : + if self.status != MainExec.STATUS_RUNNING: print("Ignoring repeated SIG_INT...") return - self.status = MainExec.STATUS_STOPPING # immediately set our status - self.stopTaosService() - print("INT signal handler returning...") + self.status = MainExec.STATUS_STOPPING # immediately set our status + self.stopTaosService() + print("INT signal handler returning...") - def sigHandlerResume(self) : + def sigHandlerResume(self): print("Resuming TDengine service manager thread (main thread)...\n\n") self.status = MainExec.STATUS_RUNNING def joinIoThread(self): if self.ioThread: self.ioThread.join() - self.ioThread = None - else : + self.ioThread = None + else: print("Joining empty thread, doing nothing") - + TD_READY_MSG = "TDengine is initialized successfully" + def _procIpcBatch(self): - # Process all the output generated by the underlying sub process, managed by IO thread - while True : - try: - line = self.ipcQueue.get_nowait() # getting output at fast speed - print("_o", end="", flush=True) - if self.status == MainExec.STATUS_STARTING : # we are starting, let's see if we have started - if line.find(self.TD_READY_MSG) != -1 : # found + # Process all the output generated by the underlying sub process, + # managed by IO thread + while True: + try: + line = self.ipcQueue.get_nowait() # getting output at fast speed + print("_o", end="", flush=True) + if self.status == MainExec.STATUS_STARTING: # we are starting, let's see if we have started + if line.find(self.TD_READY_MSG) != -1: # found self.status = MainExec.STATUS_RUNNING - + except Empty: # time.sleep(2.3) # wait only if there's no output # no more output - return # we are done with THIS BATCH - else: # got line + return # we are done with THIS BATCH + else: # got line print(line) def _procIpcAll(self): - while True : + while True: print("<", end="", flush=True) - self._procIpcBatch() # process one batch + self._procIpcBatch() # process one batch # check if the ioThread is still running if (not self.ioThread) or (not self.ioThread.is_alive()): - print("IO Thread (with subprocess) has ended, main thread now exiting...") + print( + "IO Thread (with subprocess) has ended, main thread now exiting...") self.stopTaosService() - self._procIpcBatch() # one more batch - return # TODO: maybe one last batch? + self._procIpcBatch() # one more batch + return # TODO: maybe one last batch? 
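# _procIpcBatch() above empties whatever the IO thread has queued, without
# ever blocking the main thread, and during startup watches each line for
# the TD_READY_MSG banner. The same drain-and-scan loop in isolation (the
# status dict is illustrative; the class itself tracks MainExec.STATUS_*):
#
#     from queue import Empty
#
#     READY_MSG = "TDengine is initialized successfully"
#
#     def drain_batch(ipc_queue, status):
#         while True:
#             try:
#                 line = ipc_queue.get_nowait()   # non-blocking fetch
#             except Empty:
#                 return                          # done with this batch
#             if status["starting"] and line.find(READY_MSG) != -1:
#                 status["starting"] = False      # service is now up
#             print(line)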
# Maybe handler says we should exit now
            if self.shouldStop:
                print("Main thread ending all IPC processing with IOThread/SubProcess")
-                self._procIpcBatch() # one more batch
+                self._procIpcBatch()  # one more batch
                return

            print(">", end="", flush=True)
@@ -2024,50 +2037,58 @@ class SvcManager:

        svcCmd = [taosdPath, '-c', cfgPath]
        # svcCmd = ['vmstat', '1']
-        if self.subProcess : # already there
+        if self.subProcess:  # already there
            raise RuntimeError("Corrupt process state")

        self.subProcess = subprocess.Popen(
-            svcCmd,
-            stdout=subprocess.PIPE,
+            svcCmd,
+            stdout=subprocess.PIPE,
            # bufsize=1, # not supported in binary mode
-            close_fds=ON_POSIX) # had text=True, which interfered with reading EOF
+            close_fds=ON_POSIX)  # had text=True, which interfered with reading EOF

        self.ipcQueue = Queue()
-        if self.ioThread :
+        if self.ioThread:
            raise RuntimeError("Corrupt thread state")
-        self.ioThread = threading.Thread(target=self.svcOutputReader, args=(self.subProcess.stdout, self.ipcQueue))
-        self.ioThread.daemon = True # thread dies with the program
+        self.ioThread = threading.Thread(
+            target=self.svcOutputReader, args=(
+                self.subProcess.stdout, self.ipcQueue))
+        self.ioThread.daemon = True  # thread dies with the program
        self.ioThread.start()

-        self.shouldStop = False # don't let the main loop stop
+        self.shouldStop = False  # don't let the main loop stop
        self.status = MainExec.STATUS_STARTING

        # wait for service to start
-        for i in range(0, 10) :
+        for i in range(0, 10):
            time.sleep(1.0)
-            self._procIpcBatch() # pump messages
+            self._procIpcBatch()  # pump messages
            print("_zz_", end="", flush=True)
-            if self.status == MainExec.STATUS_RUNNING :
+            if self.status == MainExec.STATUS_RUNNING:
                print("TDengine service READY to process requests")
-                return # now we've started
-        raise RuntimeError("TDengine service did not start successfully") # TODO: handle this better?
+                return  # now we've started
+        # TODO: handle this better?
+ raise RuntimeError("TDengine service did not start successfully") def stopTaosService(self): # can be called from both main thread or signal handler print("Terminating TDengine service running as the sub process...") - # Linux will send Control-C generated SIGINT to the TDengine process already, ref: https://unix.stackexchange.com/questions/176235/fork-and-how-signals-are-delivered-to-processes - if not self.subProcess : + # Linux will send Control-C generated SIGINT to the TDengine process + # already, ref: + # https://unix.stackexchange.com/questions/176235/fork-and-how-signals-are-delivered-to-processes + if not self.subProcess: print("Process already stopped") return retCode = self.subProcess.poll() - if retCode : # valid return code, process ended + if retCode: # valid return code, process ended self.subProcess = None - else: # process still alive, let's interrupt it - print("Sub process still running, sending SIG_INT and waiting for it to stop...") - self.subProcess.send_signal(signal.SIGINT) # sub process should end, then IPC queue should end, causing IO thread to end - try : + else: # process still alive, let's interrupt it + print( + "Sub process still running, sending SIG_INT and waiting for it to stop...") + # sub process should end, then IPC queue should end, causing IO + # thread to end + self.subProcess.send_signal(signal.SIGINT) + try: self.subProcess.wait(10) except subprocess.TimeoutExpired as err: print("Time out waiting for TDengine service process to exit") @@ -2076,15 +2097,17 @@ class SvcManager: self.subProcess = None if self.subProcess and (not self.subProcess.poll()): - print("Sub process is still running... pid = {}".format(self.subProcess.pid)) - + print( + "Sub process is still running... pid = {}".format( + self.subProcess.pid)) + self.shouldStop = True self.joinIoThread() def run(self): self.startTaosService() - # proc = subprocess.Popen(['echo', '"to stdout"'], + # proc = subprocess.Popen(['echo', '"to stdout"'], # stdout=subprocess.PIPE, # ) # stdout_value = proc.communicate()[0] @@ -2093,7 +2116,7 @@ class SvcManager: self._procIpcAll() print("End of loop reading from IPC queue") - self.joinIoThread() # should have started already + self.joinIoThread() # should have started already print("SvcManager Run Finished") @@ -2148,7 +2171,7 @@ class ClientManager: self._printLastNumbers() def run(self): - if gConfig.auto_start_service : + if gConfig.auto_start_service: svcMgr = SvcManager() svcMgr.startTaosService() @@ -2163,7 +2186,7 @@ class ClientManager: # print("exec stats: {}".format(self.tc.getExecStats())) # print("TC failed = {}".format(self.tc.isFailed())) self.conclude() - if gConfig.auto_start_service : + if gConfig.auto_start_service: svcMgr.stopTaosService() # print("TC failed (2) = {}".format(self.tc.isFailed())) # Linux return code: ref https://shapeshed.com/unix-exit-codes/ @@ -2248,24 +2271,57 @@ def main(): ''')) - parser.add_argument('-a', '--auto-start-service', action='store_true', - help='Automatically start/stop the TDengine service (default: false)') - parser.add_argument('-c', '--connector-type', action='store', default='native', type=str, - help='Connector type to use: native, rest, or mixed (default: 10)') - parser.add_argument('-d', '--debug', action='store_true', - help='Turn on DEBUG mode for more logging (default: false)') - parser.add_argument('-e', '--run-tdengine', action='store_true', - help='Run TDengine service in foreground (default: false)') - parser.add_argument('-l', '--larger-data', action='store_true', - help='Write 
larger amount of data during write operations (default: false)')
-    parser.add_argument('-p', '--per-thread-db-connection', action='store_true',
-                        help='Use a single shared db connection (default: false)')
-    parser.add_argument('-r', '--record-ops', action='store_true',
-                        help='Use a pair of always-fsynced files to record operations performing + performed, for power-off tests (default: false)')
-    parser.add_argument('-s', '--max-steps', action='store', default=1000, type=int,
-                        help='Maximum number of steps to run (default: 100)')
-    parser.add_argument('-t', '--num-threads', action='store', default=5, type=int,
-                        help='Number of threads to run (default: 10)')
+    parser.add_argument(
+        '-a',
+        '--auto-start-service',
+        action='store_true',
+        help='Automatically start/stop the TDengine service (default: false)')
+    parser.add_argument(
+        '-c',
+        '--connector-type',
+        action='store',
+        default='native',
+        type=str,
+        help='Connector type to use: native, rest, or mixed (default: 10)')
+    parser.add_argument(
+        '-d',
+        '--debug',
+        action='store_true',
+        help='Turn on DEBUG mode for more logging (default: false)')
+    parser.add_argument(
+        '-e',
+        '--run-tdengine',
+        action='store_true',
+        help='Run TDengine service in foreground (default: false)')
+    parser.add_argument(
+        '-l',
+        '--larger-data',
+        action='store_true',
+        help='Write larger amount of data during write operations (default: false)')
+    parser.add_argument(
+        '-p',
+        '--per-thread-db-connection',
+        action='store_true',
+        help='Use a single shared db connection (default: false)')
+    parser.add_argument(
+        '-r',
+        '--record-ops',
+        action='store_true',
+        help='Use a pair of always-fsynced files to record operations performing + performed, for power-off tests (default: false)')
+    parser.add_argument(
+        '-s',
+        '--max-steps',
+        action='store',
+        default=1000,
+        type=int,
+        help='Maximum number of steps to run (default: 100)')
+    parser.add_argument(
+        '-t',
+        '--num-threads',
+        action='store',
+        default=5,
+        type=int,
+        help='Number of threads to run (default: 10)')

     global gConfig
     gConfig = parser.parse_args()

From 0866751fd8270d152487a2413586e347ea2d9ead Mon Sep 17 00:00:00 2001
From: Shuduo Sang
Date: Wed, 8 Jul 2020 18:31:18 +0800
Subject: [PATCH 09/38] kill taosd before test crash_gen.

---
 .travis.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.travis.yml b/.travis.yml
index 2d21401289..a77ba74470 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -60,6 +60,9 @@
        sleep 1

        cd ${TRAVIS_BUILD_DIR}/tests/pytest
+        pkill -TERM -x taosd
+        fuser -k -n tcp 6030
+        sleep 1
        ./crash_gen.sh -p -t 5 -s 50|| travis_terminate $?
        sleep 1

From eb92c22251216d0b08bc4a5b24cccba09392a9fc Mon Sep 17 00:00:00 2001
From: Shuduo Sang
Date: Wed, 8 Jul 2020 18:46:06 +0800
Subject: [PATCH 10/38] launch taosd by crash_gen.

---
 .travis.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.travis.yml b/.travis.yml
index a77ba74470..430cb46552 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -63,7 +63,7 @@
        pkill -TERM -x taosd
        fuser -k -n tcp 6030
        sleep 1
-        ./crash_gen.sh -p -t 5 -s 50|| travis_terminate $?
+        ./crash_gen.sh -a -p -t 5 -s 50|| travis_terminate $?
        sleep 1

        cd ${TRAVIS_BUILD_DIR}/tests/pytest

From 5aade323bd95e159c020b407fbb066e03541a4cc Mon Sep 17 00:00:00 2001
From: Shuduo Sang
Date: Wed, 8 Jul 2020 19:04:53 +0800
Subject: [PATCH 11/38] reduce thread num and step num.
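The .travis.yml tweaks above first kill any stray taosd and free port 6030 before the crash generator runs, then hand service startup to crash_gen itself via the new -a flag. The same pre-test cleanup can be scripted, e.g. (a sketch assuming pkill and fuser are on the PATH; the wider port sweep mirrors what TDNode.stop() does):

import subprocess
import time

def kill_stray_taosd():
    # Ask politely first; fuser -k then frees anything still bound.
    subprocess.call(["pkill", "-TERM", "-x", "taosd"])
    for port in range(6030, 6041):
        subprocess.call(["fuser", "-k", "-n", "tcp", str(port)])
    time.sleep(1)  # let the kernel release the sockets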
---
 .travis.yml               | 2 +-
 tests/pytest/crash_gen.sh | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 430cb46552..71cfb46667 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -63,7 +63,7 @@
        pkill -TERM -x taosd
        fuser -k -n tcp 6030
        sleep 1
-        ./crash_gen.sh -a -p -t 5 -s 50|| travis_terminate $?
+        ./crash_gen.sh -a -p -t 4 -s 25|| travis_terminate $?
        sleep 1

        cd ${TRAVIS_BUILD_DIR}/tests/pytest

diff --git a/tests/pytest/crash_gen.sh b/tests/pytest/crash_gen.sh
index 3c04cd6198..f6be6aae49 100755
--- a/tests/pytest/crash_gen.sh
+++ b/tests/pytest/crash_gen.sh
@@ -41,7 +41,6 @@ fi
 TAOSD_DIR=`find $TAOS_DIR -name "taosd"|grep bin|head -n1`

 LIB_DIR=`echo $TAOSD_DIR|rev|cut -d '/' -f 3,4,5,6|rev`/lib
-echo $LIB_DIR

 # First we need to set up a path for Python to find our own TAOS modules, so that "import" can work.
 export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3

From 493ad1cc474cfee62c0a198fdd29d135c61b3357 Mon Sep 17 00:00:00 2001
From: Ping Xiao
Date: Thu, 9 Jul 2020 16:45:40 +0800
Subject: [PATCH 12/38] TD-754: build taosd before jdbc tests

---
 .../java/com/taosdata/jdbc/utils/TDNode.java  | 37 ++++----
 .../java/com/taosdata/jdbc/utils/TDNodes.java | 53 ++++-------
 .../com/taosdata/jdbc/utils/TDSimClient.java  | 91 -------------------
 .../test/java/com/taosdata/jdbc/BaseTest.java | 17 ++--
 4 files changed, 47 insertions(+), 151 deletions(-)
 delete mode 100644 src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDSimClient.java

diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java
index 4db4d8f7c1..12d026c0b3 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java
@@ -37,23 +37,22 @@ public class TDNode {
        this.testCluster = testCluster;
    }
-
    public void searchTaosd(File dir, ArrayList<String> taosdPath) {
        File[] fileList = dir.listFiles();
-        if(fileList != null && fileList.length != 0) {
-            for(File file : fileList) {
-                if(file.isFile()) {
-                    if(file.getName().equals("taosd")) {
-                        taosdPath.add(file.getAbsolutePath());
-                    }
-                } else {
-                    searchTaosd(file, taosdPath);
-                }
-            }
+        if(fileList == null || fileList.length == 0) {
+            return;
        }
-        return;
+        for(File file : fileList) {
+            if(file.isFile()) {
+                if(file.getName().equals("taosd")) {
+                    taosdPath.add(file.getAbsolutePath());
+                }
+            } else {
+                searchTaosd(file, taosdPath);
+            }
+        }
    }

    public void start() {
@@ -98,10 +97,10 @@ public class TDNode {
        String cmd = "";
        if(this.valgrind == 0) {
-            cmd = "nohup " + binPath + " > /dev/null 2>&1 & ";
+            cmd = "nohup " + binPath + " -c " + cfgDir + " > /dev/null 2>&1 & ";
            System.out.println("start taosd cmd: " + cmd);
        } else {
-            String valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes";
+            String valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes";
            cmd = "nohup " + valgrindCmdline + " " + binPath + " -c " + this.cfgDir + " 2>&1 & ";
        }
@@ -152,7 +151,7 @@ public class TDNode {
    public void startIP() {
        try{
-            String cmd = "sudo ifconfig lo:" + index + "192.168.0." + index + " up";
+            String cmd = "sudo ifconfig lo:" + index + "192.168.0." + index + " up";
            Runtime.getRuntime().exec(cmd).waitFor();
        } catch (Exception e) {
            e.printStackTrace();
@@ -162,7 +161,7 @@ public class TDNode {
    public void stopIP() {
        try{
-            String cmd = "sudo ifconfig lo:" + index + "192.168.0." + index + " down";
+            String cmd = "sudo ifconfig lo:" + index + "192.168.0." + index + " down";
            Runtime.getRuntime().exec(cmd).waitFor();
        } catch (Exception e) {
            e.printStackTrace();
@@ -172,7 +171,9 @@ public class TDNode {
    public void setCfgConfig(String option, String value) {
        try{
            String cmd = "echo " + option + " " + value + " >> " + this.cfgPath;
-            Runtime.getRuntime().exec(cmd).waitFor();
+            String[] cmdLine = {"sh", "-c", cmd};
+            Process ps = Runtime.getRuntime().exec(cmdLine);
+            ps.waitFor();
        } catch (Exception e) {
            e.printStackTrace();
        }
@@ -229,7 +230,7 @@ public class TDNode {
        }
        setCfgConfig("dataDir", this.dataDir);
        setCfgConfig("logDir", this.logDir);
-        setCfgConfig("numOfLogLines", "100000000");
+        setCfgConfig("numOfLogLines", "100000000");
        setCfgConfig("mnodeEqualVnodeNum", "0");
        setCfgConfig("walLevel", "1");
        setCfgConfig("statusInterval", "1");

diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java
index bce985bcc0..2aa33a1840 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java
@@ -3,34 +3,29 @@ package com.taosdata.jdbc.utils;
 import java.io.File;
 import java.util.*;

-
 public class TDNodes {
-    private ArrayList<TDNode> tdNodes;
-    private boolean simDeployed;
+    private ArrayList<TDNode> tdNodes;
     private boolean testCluster;
-    private int valgrind;
-    private String path;
+    private int valgrind;

     public TDNodes () {
        tdNodes = new ArrayList<>();
        for(int i = 1; i < 11; i ++) {
            tdNodes.add(new TDNode(i));
-        }
-        this.simDeployed = false;
-        path = "";
+        }
    }

-    public TDNodes(String path) {
+    public void setPath(String path) {
        try {
            String psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" ;
            Process ps = Runtime.getRuntime().exec(psCmd);
-            ps.wait();
+            ps.waitFor();
            String killCmd = "kill -9 " + ps.pid();
            Runtime.getRuntime().exec(killCmd).waitFor();

            psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'";
            ps = Runtime.getRuntime().exec(psCmd);
-            ps.wait();
+            ps.waitFor();
            killCmd = "kill -9 " + ps.pid();
            Runtime.getRuntime().exec(killCmd).waitFor();

@@ -41,11 +36,11 @@ public class TDNodes {
            File file = new File(path);
            binPath = file.getCanonicalPath();
            System.out.println("binPath real path: " + binPath);
-
-            if (!path.isEmpty()) {
+
+            if(path.isEmpty()){
                file = new File(path + "/../../");
                path = file.getCanonicalPath();
-            }
+            }

            for(int i = 0; i < tdNodes.size(); i++) {
                tdNodes.get(i).setPath(path);
@@ -63,10 +58,6 @@ public class TDNodes {
        this.valgrind = valgrind;
    }

-    public void setPath(String path) {
-        this.path = path;
-    }
-
    public void check(int index) {
        if(index < 1 || index > 10) {
            System.out.println("index: " + index + " should be on a scale of [1, 10]");
@@ -75,22 +66,18 @@ public class TDNodes {
    }

    public void deploy(int index) {
-        System.out.println("======Start deploying tsim=====");
-        TDSimClient sim = new TDSimClient();
-
-        sim.setPath(path);
-        System.out.println("======path: " + path + "=====");
-        sim.setTestCluster(this.testCluster);
-        if(this.simDeployed == false ) {
-            sim.deploy();
-            this.simDeployed = true;
+        try {
+            File file = new File(System.getProperty("user.dir") + "/../../../");
+            String projectRealPath =
file.getCanonicalPath(); + check(index); + tdNodes.get(index - 1).setTestCluster(this.testCluster); + tdNodes.get(index - 1).setValgrind(valgrind); + tdNodes.get(index - 1).setPath(projectRealPath); + tdNodes.get(index - 1).deploy(); + } catch (Exception e) { + e.printStackTrace(); + System.out.println("deploy Test Exception"); } - - check(index); - tdNodes.get(index - 1).setTestCluster(this.testCluster); - tdNodes.get(index - 1).setValgrind(valgrind); - tdNodes.get(index - 1).setPath(System.getProperty("user.dir")); - tdNodes.get(index - 1).deploy(); } public void cfg(int index, String option, String value) { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDSimClient.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDSimClient.java deleted file mode 100644 index fec824f7dd..0000000000 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDSimClient.java +++ /dev/null @@ -1,91 +0,0 @@ -package com.taosdata.jdbc.utils; - - -public class TDSimClient { - - private boolean testCluster; - private String path; - private String cfgDir; - private String logDir; - private String cfgPath; - - public TDSimClient() { - testCluster = false; - } - - public void setTestCluster(boolean testCluster) { - this.testCluster = testCluster; - } - - public void setPath(String path) { - this.path = path; - } - - public void setCfgConfig(String option, String value) { - String cmd = "echo " + option + " " + value + " >> " + this.cfgPath; - System.out.println("set cfg cmd " + cmd); - - try { - Process ps = Runtime.getRuntime().exec(cmd); - System.out.println("cfg command result: " + ps.waitFor()); - } catch (Exception e) { - e.printStackTrace(); - } - } - - public void deploy() { - this.logDir = this.path + "/sim/psim/log"; - System.out.println("======logDir: " + logDir + "====="); - this.cfgDir = this.path + "/sim/psim/cfg"; - System.out.println("======cfgDir: " + cfgDir + "====="); - this.cfgPath = this.path + "/sim/psim/cfg/taos.cfg"; - System.out.println("======cfgPath: " + cfgPath + "====="); - - try { - String cmd = "rm -rf " + this.logDir; - System.out.println("cmd: = " + cmd); - Process ps = Runtime.getRuntime().exec(cmd); - System.out.println("return value " + ps.waitFor()); - System.out.println(Runtime.getRuntime().exec(cmd).waitFor()); - - - cmd = "rm -rf " + this.cfgDir; - Runtime.getRuntime().exec(cmd).waitFor(); - System.out.println(cmd + " result: " +Runtime.getRuntime().exec(cmd).waitFor()); - - cmd = "mkdir -p " + this.logDir; - Runtime.getRuntime().exec(cmd).waitFor(); - System.out.println(cmd + " result: " +Runtime.getRuntime().exec(cmd).waitFor()); - - cmd = "mkdir -p " + this.cfgDir; - System.out.println(cmd + " result: " +Runtime.getRuntime().exec(cmd).waitFor()); - - cmd = "touch " + this.cfgPath; - System.out.println(cmd + " result: " +Runtime.getRuntime().exec(cmd).waitFor()); - - } catch (Exception e) { - e.printStackTrace(); - } - - if(this.testCluster) { - setCfgConfig("masterIp", "192.168.0.1"); - setCfgConfig("secondIp", "192.168.0.2"); - } - setCfgConfig("logDir", this.logDir); - setCfgConfig("numOfLogLines", "100000000"); - setCfgConfig("numOfThreadsPerCore", "2.0"); - setCfgConfig("locale", "en_US.UTF-8"); - setCfgConfig("charset", "UTF-8"); - setCfgConfig("asyncLog", "0"); - setCfgConfig("anyIp", "0"); - setCfgConfig("sdbDebugFlag", "135"); - setCfgConfig("rpcDebugFlag", "135"); - setCfgConfig("tmrDebugFlag", "131"); - setCfgConfig("cDebugFlag", "135"); - setCfgConfig("udebugFlag", "135"); - setCfgConfig("jnidebugFlag", "135"); - 
setCfgConfig("qdebugFlag", "135"); - } - - -} \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java index 17ea54dd75..8156818c1b 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BaseTest.java @@ -1,9 +1,6 @@ package com.taosdata.jdbc; -import java.io.BufferedReader; import java.io.File; -import java.io.InputStreamReader; - import com.taosdata.jdbc.utils.TDNodes; import org.junit.AfterClass; @@ -13,15 +10,16 @@ public class BaseTest { private static boolean testCluster = false; private static String deployPath = System.getProperty("user.dir"); - private static int valgrind = 0; + private static int valgrind = 0; + private static TDNodes tdNodes = new TDNodes(); + @BeforeClass - public static void setupEnv() { + public static void setUpEvn() { try{ File file = new File(deployPath + "/../../../"); String rootPath = file.getCanonicalPath(); - - TDNodes tdNodes = new TDNodes(); + tdNodes.setPath(rootPath); tdNodes.setTestCluster(testCluster); tdNodes.setValgrid(valgrind); @@ -31,11 +29,12 @@ public class BaseTest { } catch (Exception e) { e.printStackTrace(); + System.out.println("Base Test Exception"); } } @AfterClass - public static void clearUpEnv() { - + public static void cleanUpEnv() { + tdNodes.stop(1); } } \ No newline at end of file From 22dbff7c7f7a5322d9aa209e9ad7c4baf6b236f3 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 9 Jul 2020 10:45:32 +0000 Subject: [PATCH 13/38] add csv test --- tests/pytest/import_merge/importCSV.py | 94 ++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 tests/pytest/import_merge/importCSV.py diff --git a/tests/pytest/import_merge/importCSV.py b/tests/pytest/import_merge/importCSV.py new file mode 100644 index 0000000000..b4441949a1 --- /dev/null +++ b/tests/pytest/import_merge/importCSV.py @@ -0,0 +1,94 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import csv +import random +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.csvfile = "/tmp/file.csv" + self.rows = 10000 + self.ntables = 1 + self.startTime = 1520000010000 + def genRandomStr(self, maxLen): + H = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789' + salt = '' + if maxLen <= 1: + maxLen = 2 + l = random.randint(1,maxLen) + for i in range(l): + salt += random.choice(H) + return salt + def createCSVFile(self): + f = open(self.csvfile,'w',encoding='utf-8') + csv_writer = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC) + for i in range(self.rows): + csv_writer.writerow([self.startTime + i, + self.genRandomStr(5), + self.genRandomStr(6), + self.genRandomStr(7), + self.genRandomStr(8), + self.genRandomStr(9), + self.genRandomStr(10), + self.genRandomStr(11), + self.genRandomStr(12), + self.genRandomStr(13), + self.genRandomStr(14)]) + f.close() + def destroyCSVFile(self): + os.remove(self.csvfile) + def run(self): + self.createCSVFile() + + tdDnodes.stop(1) + tdDnodes.deploy(1) + tdDnodes.start(1) + + tdSql.execute('reset query cache') + tdSql.execute('drop database if exists db') + tdSql.execute('create database db') + tdSql.execute('use db') + tdSql.execute('''create table tbx (ts TIMESTAMP, + collect_area NCHAR(5), + device_id BINARY(6), + imsi BINARY(7), + imei BINARY(8), + mdn BINARY(9), + net_type BINARY(10), + mno NCHAR(11), + province NCHAR(12), + city NCHAR(13), + alarm BINARY(14))''') + + tdSql.execute("import into tbx file \'%s\'"%(self.csvfile)) + tdSql.query('select * from tbx') + tdSql.checkRows(self.rows) + + def stop(self): + self.destroyCSVFile() + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + tdDnodes.stop(1) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 9fa5a0030626ee0d16ccbb52b5556d004e5042cd Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 9 Jul 2020 19:57:05 +0000 Subject: [PATCH 14/38] add csv to regressiontest --- tests/pytest/fulltest.sh | 2 +- tests/pytest/regressiontest.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 83f94f727a..7a0f2cb825 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -121,7 +121,7 @@ python3 ./test.py -f import_merge/importTORestart.py python3 ./test.py -f import_merge/importTPORestart.py python3 ./test.py -f import_merge/importTRestart.py python3 ./test.py -f import_merge/importInsertThenImport.py - +python3 ./test.py -f import_merge/importCSV.py # user python3 ./test.py -f user/user_create.py python3 ./test.py -f user/pass_len.py diff --git a/tests/pytest/regressiontest.sh b/tests/pytest/regressiontest.sh index d3a8deaf47..46098f4040 100755 --- a/tests/pytest/regressiontest.sh +++ b/tests/pytest/regressiontest.sh @@ -121,7 +121,7 @@ python3 ./test.py -f import_merge/importTORestart.py python3 ./test.py -f import_merge/importTPORestart.py python3 ./test.py -f import_merge/importTRestart.py python3 ./test.py -f 
import_merge/importInsertThenImport.py - +python3 ./test.py -f import_merge/importCSV.py # user python3 ./test.py -f user/user_create.py python3 ./test.py -f user/pass_len.py From 6d0a6a6fda8d4c50411c0b168a50cf32202ff6c7 Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Fri, 10 Jul 2020 09:50:51 +0800 Subject: [PATCH 15/38] fix td-789: memory leak --- src/query/src/qExecutor.c | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 29e1dc5751..53a32a2356 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -4475,6 +4475,8 @@ static void sequentialTableProcess(SQInfo *pQInfo) { } pRuntimeEnv->pQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &gp, pQInfo); + taosArrayDestroy(g1); + taosArrayDestroy(tx); SArray* s = tsdbGetQueriedTableList(pRuntimeEnv->pQueryHandle); assert(taosArrayGetSize(s) >= 1); @@ -5857,6 +5859,18 @@ _error: return code; } +static void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) { + if (pFilter == NULL) { + return; + } + for (int32_t i = 0; i < numOfFilters; i++) { + if (pFilter[i].filterstr) { + free((void*)(pFilter[i].pz)); + } + } + free(pFilter); +} + static void freeQInfo(SQInfo *pQInfo) { if (!isValidQInfo(pQInfo)) { return; @@ -5925,7 +5939,15 @@ static void freeQInfo(SQInfo *pQInfo) { tfree(pQuery->tagColList); tfree(pQuery->pFilterInfo); - tfree(pQuery->colList); + + if (pQuery->colList != NULL) { + for (int32_t i = 0; i < pQuery->numOfCols; i++) { + SColumnInfo* column = pQuery->colList + i; + freeColumnFilterInfo(column->filters, column->numOfFilters); + } + tfree(pQuery->colList); + } + tfree(pQuery->sdata); tfree(pQuery); @@ -6122,6 +6144,11 @@ _over: free(pExprMsg); taosArrayDestroy(pTableIdList); + for (int32_t i = 0; i < pQueryMsg->numOfCols; i++) { + SColumnInfo* column = pQueryMsg->colList + i; + freeColumnFilterInfo(column->filters, column->numOfFilters); + } + //pQInfo already freed in initQInfo, but *pQInfo may not pointer to null; if (code != TSDB_CODE_SUCCESS) { *pQInfo = NULL; From 1ae96261bba636c2eb4f59f6335c8fb4235286b7 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Fri, 10 Jul 2020 10:06:52 +0800 Subject: [PATCH 16/38] turn on output for gcc4.8 --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 2a0aa6372b..7aacfd3e58 100644 --- a/.travis.yml +++ b/.travis.yml @@ -160,7 +160,7 @@ matrix: script: - cmake .. > /dev/null - - make > /dev/null + - make - os: linux dist: bionic From c0e352aa5cf2db2647ce11e4bcbadd58a9860d14 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 10 Jul 2020 10:54:51 +0800 Subject: [PATCH 17/38] replace read/write with tread/twrite in wal. 
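The patch below routes WAL file I/O through the tread()/twrite() wrappers so that the random-failure build (see TAOS_RANDOM_FILE_FAIL further down) also covers the write-ahead log. The restore loop it touches follows the usual WAL shape: read a fixed-size head, then a variable-length body, and stop replaying at the first truncation. A hedged Python sketch of that shape (the two-field head layout is illustrative, not the real SWalHead):

import struct

WAL_HEAD = struct.Struct("<II")  # illustrative layout: (checksum, body length)

def replay_wal(path, apply_fn):
    with open(path, "rb") as f:
        while True:
            head = f.read(WAL_HEAD.size)
            if len(head) == 0:
                break  # clean end of log
            if len(head) < WAL_HEAD.size:
                break  # truncated head: stop, keep what was already applied
            _checksum, body_len = WAL_HEAD.unpack(head)
            body = f.read(body_len)
            if len(body) != body_len:
                break  # truncated body: skip the record and stop
            apply_fn(body)  # hand the record to the write path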
--- src/wal/src/walMain.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/wal/src/walMain.c b/src/wal/src/walMain.c index e079653ab3..94a0fdc956 100644 --- a/src/wal/src/walMain.c +++ b/src/wal/src/walMain.c @@ -28,6 +28,7 @@ #include "taoserror.h" #include "twal.h" #include "tqueue.h" +#include "tfile.h" #define walPrefix "wal" @@ -180,7 +181,7 @@ int walWrite(void *handle, SWalHead *pHead) { taosCalcChecksumAppend(0, (uint8_t *)pHead, sizeof(SWalHead)); int contLen = pHead->len + sizeof(SWalHead); - if(write(pWal->fd, pHead, contLen) != contLen) { + if(twrite(pWal->fd, pHead, contLen) != contLen) { wError("wal:%s, failed to write(%s)", pWal->name, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); } else { @@ -325,7 +326,7 @@ static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) { wDebug("wal:%s, start to restore", name); while (1) { - int ret = read(fd, pHead, sizeof(SWalHead)); + int ret = tread(fd, pHead, sizeof(SWalHead)); if ( ret == 0) break; if (ret != sizeof(SWalHead)) { @@ -340,7 +341,7 @@ static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) { break; } - ret = read(fd, pHead->cont, pHead->len); + ret = tread(fd, pHead->cont, pHead->len); if ( ret != pHead->len) { wWarn("wal:%s, failed to read body, skip, len:%d ret:%d", name, pHead->len, ret); terrno = TAOS_SYSTEM_ERROR(errno); From f98804b1da6c9504b4fabbbbb6213c7f4f54a9f8 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 10 Jul 2020 10:56:13 +0800 Subject: [PATCH 18/38] [TD-815] crash while drop vgroup --- src/mnode/src/mnodeVgroup.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c index cddb9eaf8b..966d4b0dd8 100644 --- a/src/mnode/src/mnodeVgroup.c +++ b/src/mnode/src/mnodeVgroup.c @@ -372,7 +372,6 @@ static int32_t mnodeCreateVgroupCb(SMnodeMsg *pMsg, int32_t code) { pVgroup->vnodeGid[i].dnodeId); } - mnodeIncVgroupRef(pVgroup); pMsg->expected = pVgroup->numOfVnodes; mnodeSendCreateVgroupMsg(pVgroup, pMsg); @@ -393,6 +392,9 @@ int32_t mnodeCreateVgroup(SMnodeMsg *pMsg, SDbObj *pDb) { return TSDB_CODE_MND_NO_ENOUGH_DNODES; } + pMsg->pVgroup = pVgroup; + mnodeIncVgroupRef(pVgroup); + SSdbOper oper = { .type = SDB_OPER_GLOBAL, .table = tsVgroupSdb, @@ -402,8 +404,6 @@ int32_t mnodeCreateVgroup(SMnodeMsg *pMsg, SDbObj *pDb) { .cb = mnodeCreateVgroupCb }; - pMsg->pVgroup = pVgroup; - int32_t code = sdbInsertRow(&oper); if (code != TSDB_CODE_SUCCESS) { pMsg->pVgroup = NULL; @@ -814,19 +814,20 @@ static int32_t mnodeProcessVnodeCfgMsg(SMnodeMsg *pMsg) { mDebug("dnode:%s, vgId:%d, invalid dnode", taosIpStr(pCfg->dnodeId), pCfg->vgId); return TSDB_CODE_MND_VGROUP_NOT_EXIST; } - mnodeDecDnodeRef(pDnode); SVgObj *pVgroup = mnodeGetVgroup(pCfg->vgId); if (pVgroup == NULL) { mDebug("dnode:%s, vgId:%d, no vgroup info", taosIpStr(pCfg->dnodeId), pCfg->vgId); + mnodeDecDnodeRef(pDnode); return TSDB_CODE_MND_VGROUP_NOT_EXIST; } - mnodeDecVgroupRef(pVgroup); mDebug("vgId:%d, send create vnode msg to dnode %s for vnode cfg msg", pVgroup->vgId, pDnode->dnodeEp); SRpcIpSet ipSet = mnodeGetIpSetFromIp(pDnode->dnodeEp); mnodeSendCreateVnodeMsg(pVgroup, &ipSet, NULL); + mnodeDecDnodeRef(pDnode); + mnodeDecVgroupRef(pVgroup); return TSDB_CODE_SUCCESS; } From e98dbee81d2bcb7b430e5912b865534b9c9bd3dd Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 10 Jul 2020 13:47:03 +0800 Subject: [PATCH 19/38] add --random-file-fail-factor argument to cover more scenarios. 
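The patch below turns the previously hard-coded failure rate into a runtime knob: each wrapped file operation fails with EIO with probability 1/factor, and a factor of 0 disables injection entirely. The same scheme rendered in Python (the raise stands in for the C wrappers returning -1 with errno set to EIO):

import errno
import os
import random

random_file_fail_factor = 20  # roughly one failure per 20 calls; 0 disables

def tread(fd, count):
    if random_file_fail_factor > 0 and \
            random.randrange(random_file_fail_factor) == 0:
        raise OSError(errno.EIO, os.strerror(errno.EIO))  # injected failure
    return os.read(fd, count)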
--- src/dnode/src/dnodeSystem.c | 13 + src/util/inc/tfile.h | 1 + src/util/src/tfile.c | 37 +- tests/pytest/util/dnodes-random-fail.py | 500 ++++++++++++++++++++++++ tests/script/sh/exec-random-fail.sh | 113 ++++++ 5 files changed, 651 insertions(+), 13 deletions(-) create mode 100644 tests/pytest/util/dnodes-random-fail.py create mode 100755 tests/script/sh/exec-random-fail.sh diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c index 01f0cf25c0..971bd0a110 100644 --- a/src/dnode/src/dnodeSystem.c +++ b/src/dnode/src/dnodeSystem.c @@ -20,6 +20,7 @@ #include "tglobal.h" #include "dnodeInt.h" #include "dnodeMain.h" +#include "tfile.h" static void signal_handler(int32_t signum, siginfo_t *sigInfo, void *context); static sem_t exitSem; @@ -67,6 +68,18 @@ int32_t main(int32_t argc, char *argv[]) { taosSetAllocMode(TAOS_ALLOC_MODE_DETECT_LEAK, NULL, true); } } +#endif +#ifdef TAOS_RANDOM_FILE_FAIL + else if (strcmp(argv[i], "--random-file-fail-factor") == 0) { + if ( (i+1) < argc ) { + int factor = atoi(argv[i+1]); + printf("The factor of random failure is %d\n", factor); + taosSetRandomFileFailFactor(factor); + } else { + printf("Please specify a number for random failure factor!"); + exit(EXIT_FAILURE); + } + } #endif } diff --git a/src/util/inc/tfile.h b/src/util/inc/tfile.h index 5bddc76266..04e500743c 100644 --- a/src/util/inc/tfile.h +++ b/src/util/inc/tfile.h @@ -18,6 +18,7 @@ #ifdef TAOS_RANDOM_FILE_FAIL +void taosSetRandomFileFailFactor(int factor); ssize_t taos_tread(int fd, void *buf, size_t count); ssize_t taos_twrite(int fd, void *buf, size_t count); off_t taos_lseek(int fd, off_t offset, int whence); diff --git a/src/util/src/tfile.c b/src/util/src/tfile.c index eb7a2d5a66..92eeaef126 100644 --- a/src/util/src/tfile.c +++ b/src/util/src/tfile.c @@ -26,40 +26,51 @@ #include "os.h" -#define RANDOM_FILE_FAIL_FACTOR 5 +#ifdef TAOS_RANDOM_FILE_FAIL + +static int random_file_fail_factor = 20; + +void taosSetRandomFileFailFactor(int factor) +{ + random_file_fail_factor = factor; +} +#endif ssize_t taos_tread(int fd, void *buf, size_t count) { #ifdef TAOS_RANDOM_FILE_FAIL - if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) { - errno = EIO; - return -1; + if (random_file_fail_factor > 0) { + if (rand() % random_file_fail_factor == 0) { + errno = EIO; + return -1; + } } #endif - return tread(fd, buf, count); } ssize_t taos_twrite(int fd, void *buf, size_t count) { #ifdef TAOS_RANDOM_FILE_FAIL - if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) { - errno = EIO; - return -1; + if (random_file_fail_factor > 0) { + if (rand() % random_file_fail_factor == 0) { + errno = EIO; + return -1; + } } #endif - return twrite(fd, buf, count); } off_t taos_lseek(int fd, off_t offset, int whence) { #ifdef TAOS_RANDOM_FILE_FAIL - if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) { - errno = EIO; - return -1; + if (random_file_fail_factor > 0) { + if (rand() % random_file_fail_factor == 0) { + errno = EIO; + return -1; + } } #endif - return lseek(fd, offset, whence); } diff --git a/tests/pytest/util/dnodes-random-fail.py b/tests/pytest/util/dnodes-random-fail.py new file mode 100644 index 0000000000..db3a5fea93 --- /dev/null +++ b/tests/pytest/util/dnodes-random-fail.py @@ -0,0 +1,500 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import os.path +import subprocess +from util.log import * + + +class TDSimClient: + def __init__(self): + self.testCluster = False + + self.cfgDict = { + "numOfLogLines": "100000000", + "numOfThreadsPerCore": "2.0", + "locale": "en_US.UTF-8", + "charset": "UTF-8", + "asyncLog": "0", + "anyIp": "0", + "sdbDebugFlag": "135", + "rpcDebugFlag": "135", + "tmrDebugFlag": "131", + "cDebugFlag": "135", + "udebugFlag": "135", + "jnidebugFlag": "135", + "qdebugFlag": "135", + } + + def init(self, path): + self.__init__() + self.path = path + + def getLogDir(self): + self.logDir = "%s/sim/psim/log" % (self.path) + return self.logDir + + def getCfgDir(self): + self.cfgDir = "%s/sim/psim/cfg" % (self.path) + return self.cfgDir + + def setTestCluster(self, value): + self.testCluster = value + + def addExtraCfg(self, option, value): + self.cfgDict.update({option: value}) + + def cfg(self, option, value): + cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def deploy(self): + self.logDir = "%s/sim/psim/log" % (self.path) + self.cfgDir = "%s/sim/psim/cfg" % (self.path) + self.cfgPath = "%s/sim/psim/cfg/taos.cfg" % (self.path) + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "touch " + self.cfgPath + if os.system(cmd) != 0: + tdLog.exit(cmd) + + if self.testCluster: + self.cfg("masterIp", "192.168.0.1") + self.cfg("secondIp", "192.168.0.2") + self.cfg("logDir", self.logDir) + + for key, value in self.cfgDict.items(): + self.cfg(key, value) + + tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath)) + + +class TDDnode: + def __init__(self, index): + self.index = index + self.running = 0 + self.deployed = 0 + self.testCluster = False + self.valgrind = 0 + + def init(self, path): + self.path = path + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def getDataSize(self): + totalSize = 0 + + if (self.deployed == 1): + for dirpath, dirnames, filenames in os.walk(self.dataDir): + for f in filenames: + fp = os.path.join(dirpath, f) + + if not os.path.islink(fp): + totalSize = totalSize + os.path.getsize(fp) + + return totalSize + + def deploy(self): + self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index) + self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index) + self.cfgDir = "%s/sim/dnode%d/cfg" % (self.path, self.index) + self.cfgPath = "%s/sim/dnode%d/cfg/taos.cfg" % ( + self.path, self.index) + + cmd = "rm -rf " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = 
"touch " + self.cfgPath + if os.system(cmd) != 0: + tdLog.exit(cmd) + + if self.testCluster: + self.startIP() + + if self.testCluster: + self.cfg("masterIp", "192.168.0.1") + self.cfg("secondIp", "192.168.0.2") + self.cfg("publicIp", "192.168.0.%d" % (self.index)) + self.cfg("internalIp", "192.168.0.%d" % (self.index)) + self.cfg("privateIp", "192.168.0.%d" % (self.index)) + self.cfg("dataDir", self.dataDir) + self.cfg("logDir", self.logDir) + self.cfg("numOfLogLines", "100000000") + self.cfg("mnodeEqualVnodeNum", "0") + self.cfg("walLevel", "1") + self.cfg("statusInterval", "1") + self.cfg("numOfTotalVnodes", "64") + self.cfg("numOfMnodes", "3") + self.cfg("numOfThreadsPerCore", "2.0") + self.cfg("monitor", "0") + self.cfg("maxVnodeConnections", "30000") + self.cfg("maxMgmtConnections", "30000") + self.cfg("maxMeterConnections", "30000") + self.cfg("maxShellConns", "30000") + self.cfg("locale", "en_US.UTF-8") + self.cfg("charset", "UTF-8") + self.cfg("asyncLog", "0") + self.cfg("anyIp", "0") + self.cfg("dDebugFlag", "135") + self.cfg("mDebugFlag", "135") + self.cfg("sdbDebugFlag", "135") + self.cfg("rpcDebugFlag", "135") + self.cfg("tmrDebugFlag", "131") + self.cfg("cDebugFlag", "135") + self.cfg("httpDebugFlag", "135") + self.cfg("monitorDebugFlag", "135") + self.cfg("udebugFlag", "135") + self.cfg("jnidebugFlag", "135") + self.cfg("qdebugFlag", "135") + self.deployed = 1 + tdLog.debug( + "dnode:%d is deployed and configured by %s" % + (self.index, self.cfgPath)) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def start(self): + buildPath = self.getBuildPath() + + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + + binPath = buildPath + "/build/bin/taosd" + + if self.deployed == 0: + tdLog.exit("dnode:%d is not deployed" % (self.index)) + + if self.valgrind == 0: + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + binPath, self.cfgDir) + else: + valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes" + + cmd = "nohup %s %s -c %s --random-file-fail-factor 5 2>&1 & " % ( + valgrindCmdline, binPath, self.cfgDir) + + print(cmd) + + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) + + tdLog.debug("wait 5 seconds for the dnode:%d to start." 
% (self.index)) + time.sleep(5) + + def stop(self): + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + while(processID): + killCmd = "kill -INT %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) + + def forcestop(self): + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + while(processID): + killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index)) + + def startIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def stopIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % ( + self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def cfg(self, option, value): + cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def getDnodeRootDir(self, index): + dnodeRootDir = "%s/sim/psim/dnode%d" % (self.path, index) + return dnodeRootDir + + def getDnodesRootDir(self): + dnodesRootDir = "%s/sim/psim" % (self.path) + return dnodesRootDir + + +class TDDnodes: + def __init__(self): + self.dnodes = [] + self.dnodes.append(TDDnode(1)) + self.dnodes.append(TDDnode(2)) + self.dnodes.append(TDDnode(3)) + self.dnodes.append(TDDnode(4)) + self.dnodes.append(TDDnode(5)) + self.dnodes.append(TDDnode(6)) + self.dnodes.append(TDDnode(7)) + self.dnodes.append(TDDnode(8)) + self.dnodes.append(TDDnode(9)) + self.dnodes.append(TDDnode(10)) + self.simDeployed = False + + def init(self, path): + psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + binPath = os.path.dirname(os.path.realpath(__file__)) + binPath = binPath + "/../../../debug/" + tdLog.debug("binPath %s" % (binPath)) + binPath = os.path.realpath(binPath) + tdLog.debug("binPath real path %s" % (binPath)) + + # cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath) + # tdLog.debug(cmd) + # os.system(cmd) + + # cmd = "sudo cp 
%s/build/bin/taos /usr/local/bin/taos/" % (binPath) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + # tdLog.debug("execute %s" % (cmd)) + + # cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + # tdLog.debug("execute %s" % (cmd)) + + if path == "": + # self.path = os.path.expanduser('~') + self.path = os.path.abspath(binPath + "../../") + else: + self.path = os.path.realpath(path) + + for i in range(len(self.dnodes)): + self.dnodes[i].init(self.path) + + self.sim = TDSimClient() + self.sim.init(self.path) + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def deploy(self, index): + self.sim.setTestCluster(self.testCluster) + + if (self.simDeployed == False): + self.sim.deploy() + self.simDeployed = True + + self.check(index) + self.dnodes[index - 1].setTestCluster(self.testCluster) + self.dnodes[index - 1].setValgrind(self.valgrind) + self.dnodes[index - 1].deploy() + + def cfg(self, index, option, value): + self.check(index) + self.dnodes[index - 1].cfg(option, value) + + def start(self, index): + self.check(index) + self.dnodes[index - 1].start() + + def stop(self, index): + self.check(index) + self.dnodes[index - 1].stop() + + def getDataSize(self, index): + self.check(index) + return self.dnodes[index - 1].getDataSize() + + def forcestop(self, index): + self.check(index) + self.dnodes[index - 1].forcestop() + + def startIP(self, index): + self.check(index) + + if self.testCluster: + self.dnodes[index - 1].startIP() + + def stopIP(self, index): + self.check(index) + + if self.dnodes[index - 1].testCluster: + self.dnodes[index - 1].stopIP() + + def check(self, index): + if index < 1 or index > 10: + tdLog.exit("index:%d should on a scale of [1, 10]" % (index)) + + def stopAll(self): + tdLog.info("stop all dnodes") + for i in range(len(self.dnodes)): + self.dnodes[i].stop() + + psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + if processID: + cmd = "sudo systemctl stop taosd" + os.system(cmd) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + + def getDnodesRootDir(self): + dnodesRootDir = "%s/sim" % (self.path) + return dnodesRootDir + + def getSimCfgPath(self): + return self.sim.getCfgDir() + + def getSimLogPath(self): + return self.sim.getLogDir() + + def addSimExtraCfg(self, option, value): + self.sim.addExtraCfg(option, value) + + +tdDnodes = TDDnodes() diff --git a/tests/script/sh/exec-random-fail.sh b/tests/script/sh/exec-random-fail.sh new file mode 100755 index 0000000000..7ba301617c --- /dev/null +++ b/tests/script/sh/exec-random-fail.sh @@ -0,0 +1,113 @@ +#!/bin/bash + +# if [ $# != 4 || $# != 5 ]; then + # echo "argument list 
need input : " + # echo " -n nodeName" + # echo " -s start/stop" + # echo " -c clear" + # exit 1 +# fi + +NODE_NAME= +EXEC_OPTON= +CLEAR_OPTION="false" +while getopts "n:s:u:x:ct" arg +do + case $arg in + n) + NODE_NAME=$OPTARG + ;; + s) + EXEC_OPTON=$OPTARG + ;; + c) + CLEAR_OPTION="clear" + ;; + t) + SHELL_OPTION="true" + ;; + u) + USERS=$OPTARG + ;; + x) + SIGNAL=$OPTARG + ;; + ?) + echo "unkown argument" + ;; + esac +done + +SCRIPT_DIR=`dirname $0` +cd $SCRIPT_DIR/../ +SCRIPT_DIR=`pwd` + +IN_TDINTERNAL="community" +if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then + cd ../../.. +else + cd ../../ +fi + +TAOS_DIR=`pwd` +TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` + +if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3` +else + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2` +fi + +BUILD_DIR=$TAOS_DIR/$BIN_DIR/build + +SIM_DIR=$TAOS_DIR/sim +NODE_DIR=$SIM_DIR/$NODE_NAME +EXE_DIR=$BUILD_DIR/bin +CFG_DIR=$NODE_DIR/cfg +LOG_DIR=$NODE_DIR/log +DATA_DIR=$NODE_DIR/data +MGMT_DIR=$NODE_DIR/data/mgmt +TSDB_DIR=$NODE_DIR/data/tsdb + +TAOS_CFG=$NODE_DIR/cfg/taos.cfg + +echo ------------ $EXEC_OPTON $NODE_NAME + +TAOS_FLAG=$SIM_DIR/tsim/flag +if [ -f "$TAOS_FLAG" ]; then + EXE_DIR=/usr/local/bin/taos +fi + +if [ "$CLEAR_OPTION" = "clear" ]; then + echo rm -rf $MGMT_DIR $TSDB_DIR + rm -rf $TSDB_DIR + rm -rf $MGMT_DIR +fi + +if [ "$EXEC_OPTON" = "start" ]; then + echo "ExcuteCmd:" $EXE_DIR/taosd -c $CFG_DIR + + if [ "$SHELL_OPTION" = "true" ]; then + nohup valgrind --log-file=${LOG_DIR}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 & + else + nohup $EXE_DIR/taosd -c $CFG_DIR --random-file-fail-factor 5 > /dev/null 2>&1 & + fi + +else + #relative path + RCFG_DIR=sim/$NODE_NAME/cfg + PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + if [ "$SIGNAL" = "SIGINT" ]; then + echo try to kill by signal SIGINT + kill -SIGINT $PID + else + echo try to kill by signal SIGKILL + kill -9 $PID + fi + sleep 1 + PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'` + done +fi + From 59bf910719bdf7a34684f5eaaa1eded03fd16331 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 10 Jul 2020 13:49:34 +0800 Subject: [PATCH 20/38] change some log format --- src/common/src/tglobal.c | 2 +- src/plugins/http/inc/httpInt.h | 2 +- src/plugins/http/src/httpContext.c | 33 +++++++++++++++--------------- src/plugins/http/src/httpHandle.c | 6 +++--- src/plugins/http/src/httpServer.c | 8 ++++---- tests/script/jenkins/basic.txt | 3 --- tests/script/sh/deploy.sh | 1 - 7 files changed, 26 insertions(+), 29 deletions(-) diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 684fb71af9..fae771e855 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -129,7 +129,7 @@ int32_t tsMnodeEqualVnodeNum = 4; int32_t tsEnableHttpModule = 1; int32_t tsRestRowLimit = 10240; uint16_t tsHttpPort = 6020; // only tcp, range tcp[6020] -int32_t tsHttpCacheSessions = 100; +int32_t tsHttpCacheSessions = 1000; int32_t tsHttpSessionExpire = 36000; int32_t tsHttpMaxThreads = 2; int32_t tsHttpEnableCompress = 0; diff --git a/src/plugins/http/inc/httpInt.h b/src/plugins/http/inc/httpInt.h index 5d94e8456e..8ca1c2ff11 100644 --- a/src/plugins/http/inc/httpInt.h +++ b/src/plugins/http/inc/httpInt.h @@ -206,7 +206,7 @@ 
typedef struct HttpThread { pthread_mutex_t threadMutex; bool stop; int pollFd; - int numOfFds; + int numOfContexts; int threadId; char label[HTTP_LABEL_SIZE]; bool (*processData)(HttpContext *pContext); diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index cdaee53c38..98fba9cb3b 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -44,7 +44,7 @@ static void httpDestroyContext(void *data) { HttpThread *pThread = pContext->pThread; httpRemoveContextFromEpoll(pContext); httpReleaseSession(pContext); - atomic_sub_fetch_32(&pThread->numOfFds, 1); + atomic_sub_fetch_32(&pThread->numOfContexts, 1); pContext->pThread = 0; pContext->state = HTTP_CONTEXT_STATE_CLOSED; @@ -171,38 +171,39 @@ bool httpInitContext(HttpContext *pContext) { void httpCloseContextByApp(HttpContext *pContext) { pContext->parsed = false; - bool keepAlive = true; + if (pContext->httpVersion == HTTP_VERSION_10 && pContext->httpKeepAlive != HTTP_KEEPALIVE_ENABLE) { keepAlive = false; } else if (pContext->httpVersion != HTTP_VERSION_10 && pContext->httpKeepAlive == HTTP_KEEPALIVE_DISABLE) { keepAlive = false; - } else {} + } else { + } if (keepAlive) { if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_HANDLING, HTTP_CONTEXT_STATE_READY)) { - httpDebug("context:%p, fd:%d, ip:%s, last state:handling, keepAlive:true, reuse connect", - pContext, pContext->fd, pContext->ipstr); + httpDebug("context:%p, fd:%d, ip:%s, last state:handling, keepAlive:true, reuse context", pContext, pContext->fd, + pContext->ipstr); } else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_DROPPING, HTTP_CONTEXT_STATE_CLOSED)) { httpRemoveContextFromEpoll(pContext); - httpDebug("context:%p, fd:%d, ip:%s, last state:dropping, keepAlive:true, close connect", - pContext, pContext->fd, pContext->ipstr); + httpDebug("context:%p, fd:%d, ip:%s, last state:dropping, keepAlive:true, close connect", pContext, pContext->fd, + pContext->ipstr); } else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_READY)) { - httpDebug("context:%p, fd:%d, ip:%s, last state:ready, keepAlive:true, reuse connect", - pContext, pContext->fd, pContext->ipstr); + httpDebug("context:%p, fd:%d, ip:%s, last state:ready, keepAlive:true, reuse context", pContext, pContext->fd, + pContext->ipstr); } else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_CLOSED, HTTP_CONTEXT_STATE_CLOSED)) { httpRemoveContextFromEpoll(pContext); - httpDebug("context:%p, fd:%d, ip:%s, last state:ready, keepAlive:true, close connect", - pContext, pContext->fd, pContext->ipstr); + httpDebug("context:%p, fd:%d, ip:%s, last state:ready, keepAlive:true, close connect", pContext, pContext->fd, + pContext->ipstr); } else { httpRemoveContextFromEpoll(pContext); - httpError("context:%p, fd:%d, ip:%s, last state:%s:%d, keepAlive:true, close connect", - pContext, pContext->fd, pContext->ipstr, httpContextStateStr(pContext->state), pContext->state); + httpError("context:%p, fd:%d, ip:%s, last state:%s:%d, keepAlive:true, close connect", pContext, pContext->fd, + pContext->ipstr, httpContextStateStr(pContext->state), pContext->state); } } else { httpRemoveContextFromEpoll(pContext); - httpDebug("context:%p, fd:%d, ip:%s, last state:%s:%d, keepAlive:false, close connect", - pContext, pContext->fd, pContext->ipstr, httpContextStateStr(pContext->state), pContext->state); + httpDebug("context:%p, fd:%d, ip:%s, last state:%s:%d, keepAlive:false, close context", pContext, pContext->fd, + pContext->ipstr, 
httpContextStateStr(pContext->state), pContext->state); } httpReleaseContext(pContext); @@ -214,7 +215,7 @@ void httpCloseContextByServer(HttpContext *pContext) { } else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_DROPPING, HTTP_CONTEXT_STATE_DROPPING)) { httpDebug("context:%p, fd:%d, ip:%s, epoll already finished, wait app finished", pContext, pContext->fd, pContext->ipstr); } else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_CLOSED)) { - httpDebug("context:%p, fd:%d, ip:%s, epoll finished, close context", pContext, pContext->fd, pContext->ipstr); + httpDebug("context:%p, fd:%d, ip:%s, epoll finished, close connect", pContext, pContext->fd, pContext->ipstr); } else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_CLOSED, HTTP_CONTEXT_STATE_CLOSED)) { httpDebug("context:%p, fd:%d, ip:%s, epoll finished, will be closed soon", pContext, pContext->fd, pContext->ipstr); } else { diff --git a/src/plugins/http/src/httpHandle.c b/src/plugins/http/src/httpHandle.c index a89ea7d8f1..2c94f61950 100644 --- a/src/plugins/http/src/httpHandle.c +++ b/src/plugins/http/src/httpHandle.c @@ -313,9 +313,9 @@ bool httpParseRequest(HttpContext* pContext) { return true; } - httpTraceL("context:%p, fd:%d, ip:%s, thread:%s, numOfFds:%d, read size:%d, raw data:\n%s", pContext, pContext->fd, - pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfFds, pContext->parser.bufsize, - pContext->parser.buffer); + httpTraceL("context:%p, fd:%d, ip:%s, thread:%s, numOfContexts:%d, read size:%d, raw data:\n%s", pContext, + pContext->fd, pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfContexts, + pContext->parser.bufsize, pContext->parser.buffer); if (!httpGetHttpMethod(pContext)) { return false; diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c index d7d7da6668..dbe299cef7 100644 --- a/src/plugins/http/src/httpServer.c +++ b/src/plugins/http/src/httpServer.c @@ -293,7 +293,7 @@ static void *httpAcceptHttpConnection(void *arg) { totalFds = 1; for (int i = 0; i < pServer->numOfThreads; ++i) { - totalFds += pServer->pThreads[i].numOfFds; + totalFds += pServer->pThreads[i].numOfContexts; } if (totalFds > tsHttpCacheSessions * 100) { @@ -332,9 +332,9 @@ static void *httpAcceptHttpConnection(void *arg) { } // notify the data process, add into the FdObj list - atomic_add_fetch_32(&pThread->numOfFds, 1); - httpDebug("context:%p, fd:%d, ip:%s, thread:%s numOfFds:%d totalFds:%d, accept a new connection", pContext, connFd, - pContext->ipstr, pThread->label, pThread->numOfFds, totalFds); + atomic_add_fetch_32(&pThread->numOfContexts, 1); + httpDebug("context:%p, fd:%d, ip:%s, thread:%s numOfContexts:%d totalFds:%d, accept a new connection", pContext, + connFd, pContext->ipstr, pThread->label, pThread->numOfContexts, totalFds); // pick up next thread for next connection threadId++; diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index c86b17c4fc..bead4bd095 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -117,8 +117,6 @@ cd ../../../debug; make ./test.sh -f general/parser/import_commit3.sim ./test.sh -f general/parser/insert_tb.sim ./test.sh -f general/parser/first_last.sim -# dyh is processing this script -#./test.sh -f general/parser/import_file.sim ./test.sh -f general/parser/lastrow.sim ./test.sh -f general/parser/nchar.sim ./test.sh -f general/parser/null_char.sim @@ -145,7 +143,6 @@ cd ../../../debug; make ./test.sh -f general/parser/groupby.sim 
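The keep-alive branches in the httpContext.c hunk above compress four context states into a single boolean and are easy to misread in diff form. A minimal standalone sketch of that decision, assuming the HTTP_VERSION_10, HTTP_KEEPALIVE_ENABLE and HTTP_KEEPALIVE_DISABLE constants declared in httpInt.h:

#include <stdbool.h>
#include <stdint.h>

// Mirrors the keepAlive computation in httpCloseContextByApp: HTTP/1.0
// keeps the connection only when keep-alive is explicitly enabled, while
// HTTP/1.1 and later keep it unless keep-alive is explicitly disabled.
static bool shouldKeepAlive(int32_t httpVersion, int32_t httpKeepAlive) {
  if (httpVersion == HTTP_VERSION_10) {
    return httpKeepAlive == HTTP_KEEPALIVE_ENABLE;
  }
  return httpKeepAlive != HTTP_KEEPALIVE_DISABLE;
}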
./test.sh -f general/parser/set_tag_vals.sim #./test.sh -f general/parser/sliding.sim -./test.sh -f general/parser/tags_dynamically_specifiy.sim ./test.sh -f general/parser/tags_filter.sim ./test.sh -f general/parser/slimit_alter_tags.sim ./test.sh -f general/parser/join.sim diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh index 9cd5b8e15f..37be89f8d6 100755 --- a/tests/script/sh/deploy.sh +++ b/tests/script/sh/deploy.sh @@ -125,7 +125,6 @@ echo "mqttDebugFlag 131" >> $TAOS_CFG echo "qdebugFlag 135" >> $TAOS_CFG echo "rpcDebugFlag 135" >> $TAOS_CFG echo "tmrDebugFlag 131" >> $TAOS_CFG -echo "cDebugFlag 135" >> $TAOS_CFG echo "udebugFlag 135" >> $TAOS_CFG echo "sdebugFlag 135" >> $TAOS_CFG echo "wdebugFlag 135" >> $TAOS_CFG From f66ed5b690ef465e996dc38202846ec47b30cd5d Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 10 Jul 2020 15:05:32 +0800 Subject: [PATCH 21/38] [TD-888] change timer init sequence --- src/dnode/inc/dnodeMgmt.h | 2 ++ src/dnode/src/dnodeMain.c | 1 + src/dnode/src/dnodeMgmt.c | 15 +++++++++++---- src/mnode/src/mnodeMain.c | 2 +- 4 files changed, 15 insertions(+), 5 deletions(-) diff --git a/src/dnode/inc/dnodeMgmt.h b/src/dnode/inc/dnodeMgmt.h index 826f4ff1c1..092c06d84b 100644 --- a/src/dnode/inc/dnodeMgmt.h +++ b/src/dnode/inc/dnodeMgmt.h @@ -22,6 +22,8 @@ extern "C" { int32_t dnodeInitMgmt(); void dnodeCleanupMgmt(); +int32_t dnodeInitMgmtTimer(); +void dnodeCleanupMgmtTimer(); void dnodeDispatchToMgmtQueue(SRpcMsg *rpcMsg); void* dnodeGetVnode(int32_t vgId); diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 8d1ae0a50e..987a189959 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -57,6 +57,7 @@ static const SDnodeComponent tsDnodeComponents[] = { {"server", dnodeInitServer, dnodeCleanupServer}, {"mgmt", dnodeInitMgmt, dnodeCleanupMgmt}, {"modules", dnodeInitModules, dnodeCleanupModules}, + {"mgmt-tmr",dnodeInitMgmtTimer, dnodeCleanupMgmtTimer}, {"shell", dnodeInitShell, dnodeCleanupShell} }; diff --git a/src/dnode/src/dnodeMgmt.c b/src/dnode/src/dnodeMgmt.c index 4f489d2af2..b1c93d7195 100644 --- a/src/dnode/src/dnodeMgmt.c +++ b/src/dnode/src/dnodeMgmt.c @@ -147,6 +147,12 @@ int32_t dnodeInitMgmt() { return -1; } + dInfo("dnode mgmt is initialized"); + + return TSDB_CODE_SUCCESS; +} + +int32_t dnodeInitMgmtTimer() { tsDnodeTmr = taosTmrInit(100, 200, 60000, "DND-DM"); if (tsDnodeTmr == NULL) { dError("failed to init dnode timer"); @@ -155,13 +161,11 @@ int32_t dnodeInitMgmt() { } taosTmrReset(dnodeSendStatusMsg, 500, NULL, tsDnodeTmr, &tsStatusTimer); - - dInfo("dnode mgmt is initialized"); - + dInfo("dnode mgmt timer is initialized"); return TSDB_CODE_SUCCESS; } -void dnodeCleanupMgmt() { +void dnodeCleanupMgmtTimer() { if (tsStatusTimer != NULL) { taosTmrStopA(&tsStatusTimer); tsStatusTimer = NULL; @@ -171,7 +175,10 @@ void dnodeCleanupMgmt() { taosTmrCleanUp(tsDnodeTmr); tsDnodeTmr = NULL; } +} +void dnodeCleanupMgmt() { + dnodeCleanupMgmtTimer(); dnodeCloseVnodes(); if (tsMgmtQset) taosQsetThreadResume(tsMgmtQset); diff --git a/src/mnode/src/mnodeMain.c b/src/mnode/src/mnodeMain.c index db7c35fe2d..042e356442 100644 --- a/src/mnode/src/mnodeMain.c +++ b/src/mnode/src/mnodeMain.c @@ -41,7 +41,7 @@ typedef struct { void (*cleanup)(); } SMnodeComponent; -void *tsMnodeTmr; +void *tsMnodeTmr = NULL; static bool tsMgmtIsRunning = false; static const SMnodeComponent tsMnodeComponents[] = { From f2a61abac62431b945fd0a328cd96faf85263a0d Mon Sep 17 00:00:00 2001 From: Hui Li Date: Fri, 10 Jul 2020 
15:21:55 +0800 Subject: [PATCH 22/38] [TD-889] --- src/mnode/src/mnodeDb.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c index c13cd7c95c..8c74c9413d 100644 --- a/src/mnode/src/mnodeDb.c +++ b/src/mnode/src/mnodeDb.c @@ -179,9 +179,14 @@ void mnodeDecDbRef(SDbObj *pDb) { SDbObj *mnodeGetDbByTableId(char *tableId) { char db[TSDB_TABLE_ID_LEN], *pos; - + + // tableId format should be : acct.db.table pos = strstr(tableId, TS_PATH_DELIMITER); + assert(NULL != pos); + pos = strstr(pos + 1, TS_PATH_DELIMITER); + assert(NULL != pos); + memset(db, 0, sizeof(db)); strncpy(db, tableId, pos - tableId); From c4cac5c69a9c91869b47f1fb0eb592ce7daf930b Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Fri, 10 Jul 2020 15:49:34 +0800 Subject: [PATCH 23/38] fix build failure --- .../java/com/taosdata/jdbc/utils/TDNode.java | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java index 7145673c71..48290d3e62 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java @@ -77,7 +77,7 @@ public class TDNode { e.printStackTrace(); } - if(binPath.equals("")) { + if(binPath.isEmpty()) { System.out.println("taosd not found"); return; } else { @@ -106,14 +106,10 @@ public class TDNode { String toBeKilled = "taosd"; if (this.running != 0) { - String psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print " + toBeKilled + "}'"; - try { - Process ps = Runtime.getRuntime().exec(psCmd); - ps.waitFor(); - long pid = ps.pid(); - - String killCmd = "kill -9 " + pid; - Runtime.getRuntime().exec(killCmd).waitFor(); + String killCmd = "pkill -kill -x " + toBeKilled; + String[] killCmds = {"sh", "-c", killCmd}; + try { + Runtime.getRuntime().exec(killCmds).waitFor(); for(int port = 6030; port < 6041; port ++) { String fuserCmd = "fuser -k -n tcp " + port; @@ -124,7 +120,7 @@ public class TDNode { } this.running = 0; - System.out.println("dnode:" + this.index + "is stopped by kill -9"); + System.out.println("dnode:" + this.index + " is stopped by pkill"); } } @@ -137,7 +133,6 @@ public class TDNode { } } - public void stopIP() { try{ String cmd = "sudo ifconfig lo:" + index + "192.168.0." 
+ index + " down"; From f1b6c0027b37bde455f77f3baae9aecae8d6a9b3 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 10 Jul 2020 15:59:05 +0800 Subject: [PATCH 24/38] [td-225] opt query perf --- cmake/platform.inc | 2 +- src/client/inc/tscUtil.h | 4 +- src/client/inc/tsclient.h | 39 ++++++++- src/client/src/tscFunctionImpl.c | 12 ++- src/client/src/tscSql.c | 2 +- src/client/src/tscSubquery.c | 32 ++++---- src/client/src/tscUtil.c | 78 +++++++++--------- src/common/src/ttypes.c | 50 ++++++------ src/inc/taosdef.h | 27 ++++++- src/query/src/qExecutor.c | 133 +++++++++++++------------------ src/tsdb/src/tsdbRead.c | 103 ++++++++++++------------ src/util/inc/tarray.h | 2 +- 12 files changed, 266 insertions(+), 218 deletions(-) diff --git a/cmake/platform.inc b/cmake/platform.inc index 2e0e2d6af0..7bad9bc4f3 100755 --- a/cmake/platform.inc +++ b/cmake/platform.inc @@ -109,7 +109,7 @@ IF (TD_LINUX_64) IF (${CMAKE_CXX_COMPILER_ID} MATCHES "Clang") SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ELSE () - SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -malign-double -g3 -gdwarf-2 -malign-stringops -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -pg -fPIC -malign-double -g3 -gdwarf-2 -malign-stringops -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () ELSE () SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 4992692109..4af929bf41 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -160,7 +160,9 @@ void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo); int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index); void tscFieldInfoClear(SFieldInfo* pFieldInfo); -int32_t tscNumOfFields(SQueryInfo* pQueryInfo); + +static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQueryInfo->fieldsInfo.numOfOutput; } + int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2); void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index c8754e5beb..b44ea3b325 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -411,7 +411,44 @@ char *tscGetErrorMsgPayload(SSqlCmd *pCmd); int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql); int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo); -void tscGetResultColumnChr(SSqlRes *pRes, SFieldInfo* pFieldInfo, int32_t column); +//void tscGetResultColumnChr(SSqlRes *pRes, SFieldInfo* pFieldInfo, int32_t column); + +static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) { + SFieldSupInfo* pInfo = TARRAY_GET_ELEM(pFieldInfo->pSupportInfo, columnIndex); + assert(pInfo->pSqlExpr != NULL); + + int32_t type = pInfo->pSqlExpr->resType; + int32_t bytes = pInfo->pSqlExpr->resBytes; + + char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row; + + if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) { + int32_t realLen = varDataLen(pData); + assert(realLen <= bytes - VARSTR_HEADER_SIZE); + + if (isNull(pData, type)) { + pRes->tsrow[columnIndex] = NULL; + } else { + pRes->tsrow[columnIndex] = ((tstr*)pData)->data; + } + + if (realLen < 
pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) { // todo refactor + *(pData + realLen + VARSTR_HEADER_SIZE) = 0; + } + + pRes->length[columnIndex] = realLen; + } else { + assert(bytes == tDataTypeDesc[type].nSize); + + if (isNull(pData, type)) { + pRes->tsrow[columnIndex] = NULL; + } else { + pRes->tsrow[columnIndex] = pData; + } + + pRes->length[columnIndex] = bytes; + } +} extern void * tscCacheHandle; extern void * tscTmr; diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index 909338aa4a..bc37c16187 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -2952,10 +2952,14 @@ static void tag_project_function(SQLFunctionCtx *pCtx) { INC_INIT_VAL(pCtx, pCtx->size); assert(pCtx->inputBytes == pCtx->outputBytes); - - for (int32_t i = 0; i < pCtx->size; ++i) { - tVariantDump(&pCtx->tag, pCtx->aOutputBuf, pCtx->outputType, true); - + + tVariantDump(&pCtx->tag, pCtx->aOutputBuf, pCtx->outputType, true); + char* data = pCtx->aOutputBuf; + pCtx->aOutputBuf += pCtx->outputBytes; + + // directly copy from the first one + for (int32_t i = 1; i < pCtx->size; ++i) { + memmove(pCtx->aOutputBuf, data, pCtx->outputBytes); pCtx->aOutputBuf += pCtx->outputBytes; } } diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index f9f93b3f89..6d75aef01f 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -403,7 +403,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { taos_fetch_rows_a(res, waitForRetrieveRsp, pSql->pTscObj); sem_wait(&pSql->rspSem); } - + return doSetResultRowData(pSql, true); } diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index b5b659de0c..044a2aba9d 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2108,9 +2108,9 @@ static char *getArithemicInputSrc(void *param, const char *name, int32_t colId) void **doSetResultRowData(SSqlObj *pSql, bool finalResult) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; - + assert(pRes->row >= 0 && pRes->row <= pRes->numOfRows); - + if(pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) { if (pRes->completed) { tfree(pRes->tsrow); @@ -2118,29 +2118,31 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) { return pRes->tsrow; } - + if (pRes->row >= pRes->numOfRows) { // all the results has returned to invoker tfree(pRes->tsrow); return pRes->tsrow; } - + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - + size_t size = tscNumOfFields(pQueryInfo); for (int i = 0; i < size; ++i) { - SFieldSupInfo* pSup = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, i); + SFieldSupInfo* pSup = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.pSupportInfo, i); if (pSup->pSqlExpr != NULL) { tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i); } - + // primary key column cannot be null in interval query, no need to check if (i == 0 && pQueryInfo->intervalTime > 0) { continue; } - - TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); - transferNcharData(pSql, i, pField); - + + TAOS_FIELD *pField = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.pFields, i); + if (pRes->tsrow[i] != NULL && pField->type == TSDB_DATA_TYPE_NCHAR) { + transferNcharData(pSql, i, pField); + } + // calculate the result from several other columns if (pSup->pArithExprInfo != NULL) { if (pRes->pArithSup == NULL) { @@ -2150,10 +2152,10 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) { sas->numOfCols = tscSqlExprNumOfExprs(pQueryInfo); sas->exprList = pQueryInfo->exprList; sas->data = calloc(sas->numOfCols, 
POINTER_BYTES); - + pRes->pArithSup = sas; } - + if (pRes->buffer[i] == NULL) { TAOS_FIELD* field = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); pRes->buffer[i] = malloc(field->bytes); @@ -2163,13 +2165,13 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) { SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k); pRes->pArithSup->data[k] = (pRes->data + pRes->numOfRows* pExpr->offset) + pRes->row*pExpr->resBytes; } - + tExprTreeCalcTraverse(pRes->pArithSup->pArithExpr->pExpr, 1, pRes->buffer[i], pRes->pArithSup, TSDB_ORDER_ASC, getArithemicInputSrc); pRes->tsrow[i] = pRes->buffer[i]; } } - + pRes->row++; // index increase one-step return pRes->tsrow; } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 54c9cdea65..dea41d2932 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -794,7 +794,7 @@ SFieldSupInfo* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) { } SFieldSupInfo* tscFieldInfoGetSupp(SFieldInfo* pFieldInfo, int32_t index) { - return taosArrayGet(pFieldInfo->pSupportInfo, index); + return TARRAY_GET_ELEM(pFieldInfo->pSupportInfo, index); } SFieldSupInfo* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIELD* field) { @@ -858,11 +858,9 @@ void tscFieldInfoCopy(SFieldInfo* dst, const SFieldInfo* src) { } TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index) { - return taosArrayGet(pFieldInfo->pFields, index); + return TARRAY_GET_ELEM(pFieldInfo->pFields, index); } -int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQueryInfo->fieldsInfo.numOfOutput; } - int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) { SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, index); assert(pInfo != NULL); @@ -2086,42 +2084,42 @@ void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)()) { } } -void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) { - SFieldSupInfo* pInfo = taosArrayGet(pFieldInfo->pSupportInfo, columnIndex); - assert(pInfo->pSqlExpr != NULL); - - int32_t type = pInfo->pSqlExpr->resType; - int32_t bytes = pInfo->pSqlExpr->resBytes; - - char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row; - - if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) { - int32_t realLen = varDataLen(pData); - assert(realLen <= bytes - VARSTR_HEADER_SIZE); - - if (isNull(pData, type)) { - pRes->tsrow[columnIndex] = NULL; - } else { - pRes->tsrow[columnIndex] = ((tstr*)pData)->data; - } - - if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) { // todo refactor - *(pData + realLen + VARSTR_HEADER_SIZE) = 0; - } - - pRes->length[columnIndex] = realLen; - } else { - assert(bytes == tDataTypeDesc[type].nSize); - - if (isNull(pData, type)) { - pRes->tsrow[columnIndex] = NULL; - } else { - pRes->tsrow[columnIndex] = pData; - } - - pRes->length[columnIndex] = bytes; - } -} +//void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) { +// SFieldSupInfo* pInfo = TARRAY_GET_ELEM(pFieldInfo->pSupportInfo, columnIndex); +// assert(pInfo->pSqlExpr != NULL); +// +// int32_t type = pInfo->pSqlExpr->resType; +// int32_t bytes = pInfo->pSqlExpr->resBytes; +// +// char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row; +// +// if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) { +// int32_t realLen = varDataLen(pData); +// assert(realLen <= bytes - VARSTR_HEADER_SIZE); +// +// if (isNull(pData, type)) { +// 
pRes->tsrow[columnIndex] = NULL; +// } else { +// pRes->tsrow[columnIndex] = ((tstr*)pData)->data; +// } +// +// if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) { // todo refactor +// *(pData + realLen + VARSTR_HEADER_SIZE) = 0; +// } +// +// pRes->length[columnIndex] = realLen; +// } else { +// assert(bytes == tDataTypeDesc[type].nSize); +// +// if (isNull(pData, type)) { +// pRes->tsrow[columnIndex] = NULL; +// } else { +// pRes->tsrow[columnIndex] = pData; +// } +// +// pRes->length[columnIndex] = bytes; +// } +//} void* malloc_throw(size_t size) { void* p = malloc(size); diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index eff25e8c93..44b1d9fc38 100644 --- a/src/common/src/ttypes.c +++ b/src/common/src/ttypes.c @@ -367,31 +367,31 @@ bool isValidDataType(int32_t type) { return type >= TSDB_DATA_TYPE_NULL && type <= TSDB_DATA_TYPE_NCHAR; } -bool isNull(const char *val, int32_t type) { - switch (type) { - case TSDB_DATA_TYPE_BOOL: - return *(uint8_t *)val == TSDB_DATA_BOOL_NULL; - case TSDB_DATA_TYPE_TINYINT: - return *(uint8_t *)val == TSDB_DATA_TINYINT_NULL; - case TSDB_DATA_TYPE_SMALLINT: - return *(uint16_t *)val == TSDB_DATA_SMALLINT_NULL; - case TSDB_DATA_TYPE_INT: - return *(uint32_t *)val == TSDB_DATA_INT_NULL; - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_TIMESTAMP: - return *(uint64_t *)val == TSDB_DATA_BIGINT_NULL; - case TSDB_DATA_TYPE_FLOAT: - return *(uint32_t *)val == TSDB_DATA_FLOAT_NULL; - case TSDB_DATA_TYPE_DOUBLE: - return *(uint64_t *)val == TSDB_DATA_DOUBLE_NULL; - case TSDB_DATA_TYPE_NCHAR: - return *(uint32_t*) varDataVal(val) == TSDB_DATA_NCHAR_NULL; - case TSDB_DATA_TYPE_BINARY: - return *(uint8_t *) varDataVal(val) == TSDB_DATA_BINARY_NULL; - default: - return false; - }; -} +//bool isNull(const char *val, int32_t type) { +// switch (type) { +// case TSDB_DATA_TYPE_BOOL: +// return *(uint8_t *)val == TSDB_DATA_BOOL_NULL; +// case TSDB_DATA_TYPE_TINYINT: +// return *(uint8_t *)val == TSDB_DATA_TINYINT_NULL; +// case TSDB_DATA_TYPE_SMALLINT: +// return *(uint16_t *)val == TSDB_DATA_SMALLINT_NULL; +// case TSDB_DATA_TYPE_INT: +// return *(uint32_t *)val == TSDB_DATA_INT_NULL; +// case TSDB_DATA_TYPE_BIGINT: +// case TSDB_DATA_TYPE_TIMESTAMP: +// return *(uint64_t *)val == TSDB_DATA_BIGINT_NULL; +// case TSDB_DATA_TYPE_FLOAT: +// return *(uint32_t *)val == TSDB_DATA_FLOAT_NULL; +// case TSDB_DATA_TYPE_DOUBLE: +// return *(uint64_t *)val == TSDB_DATA_DOUBLE_NULL; +// case TSDB_DATA_TYPE_NCHAR: +// return *(uint32_t*) varDataVal(val) == TSDB_DATA_NCHAR_NULL; +// case TSDB_DATA_TYPE_BINARY: +// return *(uint8_t *) varDataVal(val) == TSDB_DATA_BINARY_NULL; +// default: +// return false; +// }; +//} void setVardataNull(char* val, int32_t type) { if (type == TSDB_DATA_TYPE_BINARY) { diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index e4ee058cef..ac5b92b4ab 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -160,7 +160,32 @@ extern tDataTypeDescriptor tDataTypeDesc[11]; #define POINTER_BYTES sizeof(void *) // 8 by default assert(sizeof(ptrdiff_t) == sizseof(void*) bool isValidDataType(int32_t type); -bool isNull(const char *val, int32_t type); +//bool isNull(const char *val, int32_t type); +static inline __attribute__((always_inline)) bool isNull(const char *val, int32_t type) { + switch (type) { + case TSDB_DATA_TYPE_BOOL: + return *(uint8_t *)val == TSDB_DATA_BOOL_NULL; + case TSDB_DATA_TYPE_TINYINT: + return *(uint8_t *)val == TSDB_DATA_TINYINT_NULL; + case TSDB_DATA_TYPE_SMALLINT: + return *(uint16_t *)val == 
TSDB_DATA_SMALLINT_NULL; + case TSDB_DATA_TYPE_INT: + return *(uint32_t *)val == TSDB_DATA_INT_NULL; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_TIMESTAMP: + return *(uint64_t *)val == TSDB_DATA_BIGINT_NULL; + case TSDB_DATA_TYPE_FLOAT: + return *(uint32_t *)val == TSDB_DATA_FLOAT_NULL; + case TSDB_DATA_TYPE_DOUBLE: + return *(uint64_t *)val == TSDB_DATA_DOUBLE_NULL; + case TSDB_DATA_TYPE_NCHAR: + return *(uint32_t*) varDataVal(val) == TSDB_DATA_NCHAR_NULL; + case TSDB_DATA_TYPE_BINARY: + return *(uint8_t *) varDataVal(val) == TSDB_DATA_BINARY_NULL; + default: + return false; + }; +} void setVardataNull(char* val, int32_t type); void setNull(char *val, int32_t type, int32_t bytes); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index dfe489e1e8..f556785824 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -44,7 +44,7 @@ #define SET_MASTER_SCAN_FLAG(runtime) ((runtime)->scanFlag = MASTER_SCAN) #define SET_REVERSE_SCAN_FLAG(runtime) ((runtime)->scanFlag = REVERSE_SCAN) -#define GET_QINFO_ADDR(x) ((void *)((char *)(x)-offsetof(SQInfo, runtimeEnv))) +#define GET_QINFO_ADDR(x) ((SQInfo *)((char *)(x)-offsetof(SQInfo, runtimeEnv))) #define GET_COL_DATA_POS(query, index, step) ((query)->pos + (index) * (step)) #define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC)) @@ -351,27 +351,6 @@ static bool hasTagValOutput(SQuery* pQuery) { return false; } -static SDataStatis *getStatisInfo(SQuery *pQuery, SDataStatis *pStatis, int32_t numOfCols, int32_t index) { - // for a tag column, no corresponding field info - SColIndex *pColIndex = &pQuery->pSelectExpr[index].base.colInfo; - if (TSDB_COL_IS_TAG(pColIndex->flag)) { - return NULL; - } - - /* - * Choose the right column field info by field id, since the file block may be out of date, - * which means the newest table schema is not equalled to the schema of this block. 
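Moving isNull from ttypes.c into taosdef.h as an always_inline function is the core of this optimization: the predicate runs once per row per column in the query hot path, and in a header the compiler can expand it to a single comparison at every call site. The shape of the pattern, reduced to a self-contained toy (the sentinel and names below are illustrative, not TDengine's):

#include <stdbool.h>
#include <stdint.h>

#define TOY_INT_NULL INT32_MIN  // assumed null sentinel, for this sketch only

static inline __attribute__((always_inline)) bool toyIsNull(int32_t v) {
  return v == TOY_INT_NULL;  // expands to one compare at each call site
}

// Out of line, this loop pays a function call per element; inlined, the
// compiler keeps v in a register and is free to vectorize the scan.
static int32_t toyCountNonNull(const int32_t *col, int32_t rows) {
  int32_t n = 0;
  for (int32_t i = 0; i < rows; ++i) {
    if (!toyIsNull(col[i])) n++;
  }
  return n;
}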
- * TODO: speedup by using bsearch - */ - for (int32_t i = 0; i < numOfCols; ++i) { - if (pColIndex->colId == pStatis[i].colId) { - return &pStatis[i]; - } - } - - return NULL; -} - /** * @param pQuery * @param col @@ -380,19 +359,14 @@ static SDataStatis *getStatisInfo(SQuery *pQuery, SDataStatis *pStatis, int32_t * @param pColStatis * @return */ -static bool hasNullValue(SQuery *pQuery, int32_t col, int32_t numOfCols, SDataStatis *pStatis, SDataStatis **pColStatis) { - SColIndex *pColIndex = &pQuery->pSelectExpr[col].base.colInfo; - if (TSDB_COL_IS_TAG(pColIndex->flag)) { - return false; - } - - // query on primary timestamp column, not null value at all - if (pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { +static bool hasNullValue(SColIndex* pColIndex, SDataStatis *pStatis, SDataStatis **pColStatis) { + if (TSDB_COL_IS_TAG(pColIndex->flag) || pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { return false; } if (pStatis != NULL) { - *pColStatis = getStatisInfo(pQuery, pStatis, numOfCols, col); + *pColStatis = &pStatis[pColIndex->colIndex]; + assert((*pColStatis)->colId == pColIndex->colId); } else { *pColStatis = NULL; } @@ -842,8 +816,8 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas if (pDataBlock == NULL) { return NULL; } - char *dataBlock = NULL; + char *dataBlock = NULL; SQuery *pQuery = pRuntimeEnv->pQuery; SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; @@ -887,10 +861,14 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas } else { // other type of query function SColIndex *pCol = &pQuery->pSelectExpr[col].base.colInfo; - if (TSDB_COL_IS_TAG(pCol->flag) || pDataBlock == NULL) { + if (TSDB_COL_IS_TAG(pCol->flag)) { dataBlock = NULL; } else { - dataBlock = getDataBlockImpl(pDataBlock, pCol->colId); + SColIndex* pColIndex = &pQuery->pSelectExpr[col].base.colInfo; + SColumnInfoData *p = taosArrayGet(pDataBlock, pColIndex->colIndex); + assert(p->info.colId == pColIndex->colId); + + dataBlock = p->pData; } } @@ -1365,7 +1343,7 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY int32_t colId = pQuery->pSelectExpr[colIndex].base.colInfo.colId; SDataStatis *tpField = NULL; - pCtx->hasNull = hasNullValue(pQuery, colIndex, pBlockInfo->numOfCols, pStatis, &tpField); + pCtx->hasNull = hasNullValue(&pQuery->pSelectExpr[colIndex].base.colInfo, pStatis, &tpField); pCtx->aInputElemBuf = inputData; if (tpField != NULL) { @@ -1622,19 +1600,18 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { pRuntimeEnv->pTSBuf = tsBufDestory(pRuntimeEnv->pTSBuf); } -static bool isQueryKilled(SQInfo *pQInfo) { - return (pQInfo->code == TSDB_CODE_TSC_QUERY_CANCELLED); -} +#define IS_QUERY_KILLED(_q) ((_q)->code == TSDB_CODE_TSC_QUERY_CANCELLED) static void setQueryKilled(SQInfo *pQInfo) { pQInfo->code = TSDB_CODE_TSC_QUERY_CANCELLED; } -static bool isFixedOutputQuery(SQuery *pQuery) { - if (pQuery->intervalTime != 0) { +static bool isFixedOutputQuery(SQueryRuntimeEnv* pRuntimeEnv) { + SQuery* pQuery = pRuntimeEnv->pQuery; + if (QUERY_IS_INTERVAL_QUERY(pQuery)) { return false; } // Note:top/bottom query is fixed output query - if (isTopBottomQuery(pQuery) || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { + if (pRuntimeEnv->topBotQuery || pRuntimeEnv->groupbyNormalCol) { return true; } @@ -2054,7 +2031,7 @@ SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle, // check if this data block is required to load for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { SSqlFuncMsg* 
pSqlFunc = &pQuery->pSelectExpr[i].base; - + int32_t functionId = pSqlFunc->functionId; int32_t colId = pSqlFunc->colInfo.colId; r |= aAggs[functionId].dataReqFunc(&pRuntimeEnv->pCtx[i], pQuery->window.skey, pQuery->window.ekey, colId); @@ -2066,8 +2043,7 @@ SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle, } if (r == BLK_DATA_NO_NEEDED) { - qDebug("QInfo:%p data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_QINFO_ADDR(pRuntimeEnv), - pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); + qDebug("QInfo:%p data block discard, rows:%d", GET_QINFO_ADDR(pRuntimeEnv), pBlockInfo->rows); pRuntimeEnv->summary.discardBlocks += 1; } else if (r == BLK_DATA_STATIS_NEEDED) { if (tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis) != TSDB_CODE_SUCCESS) { @@ -2199,7 +2175,7 @@ static void ensureOutputBufferSimple(SQueryRuntimeEnv* pRuntimeEnv, int32_t capa static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pBlockInfo) { // in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block SQuery* pQuery = pRuntimeEnv->pQuery; - if (!QUERY_IS_INTERVAL_QUERY(pQuery) && !pRuntimeEnv->groupbyNormalCol && !isFixedOutputQuery(pQuery)) { + if (!QUERY_IS_INTERVAL_QUERY(pQuery) && !pRuntimeEnv->groupbyNormalCol && !isFixedOutputQuery(pRuntimeEnv)) { SResultRec *pRec = &pQuery->rec; if (pQuery->rec.capacity - pQuery->rec.rows < pBlockInfo->rows) { @@ -2249,7 +2225,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; while (tsdbNextDataBlock(pQueryHandle)) { summary->totalBlocks += 1; - if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) { + if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) { return 0; } @@ -3304,7 +3280,7 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) { cond.twindow.skey, cond.twindow.ekey); // check if query is killed or not - if (isQueryKilled(pQInfo)) { + if (IS_QUERY_KILLED(pQInfo)) { return; } } @@ -3695,23 +3671,24 @@ void copyFromWindowResToSData(SQInfo *pQInfo, SWindowResInfo *pResultInfo) { assert(pQuery->rec.rows <= pQuery->rec.capacity); } -static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *pTableQueryInfo) { +static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; // update the number of result for each, only update the number of rows for the corresponding window result. 
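The removal of getStatisInfo and the direct taosArrayGet in getDataBlock follow one pattern: the query plan records each column's position (colIndex) once, so the per-block hot path can index directly and merely assert that the column id still matches, instead of re-searching every column by id on each block. In isolation, with toy types rather than the real SColIndex/SDataStatis structs:

#include <assert.h>
#include <stdint.h>

typedef struct { int16_t colId; const char *pData; } ToyCol;

// Direct O(1) lookup by recorded position; the assert trips if the block's
// schema drifted from the plan, a case the old linear search tolerated.
static const char *toyGetColData(const ToyCol *cols, int32_t colIndex, int16_t colId) {
  assert(cols[colIndex].colId == colId);
  return cols[colIndex].pData;
}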
- if (!QUERY_IS_INTERVAL_QUERY(pQuery)) { + if (QUERY_IS_INTERVAL_QUERY(pQuery)) { + return; + } - for (int32_t i = 0; i < pRuntimeEnv->windowResInfo.size; ++i) { - SWindowResult *pResult = &pRuntimeEnv->windowResInfo.pResult[i]; + for (int32_t i = 0; i < pRuntimeEnv->windowResInfo.size; ++i) { + SWindowResult *pResult = &pRuntimeEnv->windowResInfo.pResult[i]; - for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - int32_t functionId = pRuntimeEnv->pCtx[j].functionId; - if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ) { - continue; - } - - pResult->numOfRows = MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes); + for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { + int32_t functionId = pRuntimeEnv->pCtx[j].functionId; + if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ) { + continue; } + + pResult->numOfRows = MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes); } } } @@ -3729,8 +3706,6 @@ void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo * } else { blockwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, searchFn, pDataBlock); } - - updateWindowResNumOfRes(pRuntimeEnv, pTableQueryInfo); } bool queryHasRemainResults(SQueryRuntimeEnv* pRuntimeEnv) { @@ -3950,7 +3925,7 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; while (tsdbNextDataBlock(pQueryHandle)) { - if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) { + if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) { return; } @@ -4099,7 +4074,7 @@ static void setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) { return; } - if (isSTableQuery && (!QUERY_IS_INTERVAL_QUERY(pQuery)) && (!isFixedOutputQuery(pQuery))) { + if (isSTableQuery && (!QUERY_IS_INTERVAL_QUERY(pQuery)) && (!isFixedOutputQuery(pRuntimeEnv))) { return; } @@ -4115,7 +4090,7 @@ static void setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) { && (cond.order == TSDB_ORDER_ASC) && (!QUERY_IS_INTERVAL_QUERY(pQuery)) && (!isGroupbyNormalCol(pQuery->pGroupbyExpr)) - && (!isFixedOutputQuery(pQuery)) + && (!isFixedOutputQuery(pRuntimeEnv)) ) { SArray* pa = GET_TABLEGROUP(pQInfo, 0); STableQueryInfo* pCheckInfo = taosArrayGetP(pa, 0); @@ -4267,7 +4242,7 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { while (tsdbNextDataBlock(pQueryHandle)) { summary->totalBlocks += 1; - if (isQueryKilled(pQInfo)) { + if (IS_QUERY_KILLED(pQInfo)) { break; } @@ -4304,6 +4279,8 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { pQInfo, blockInfo.uid, blockInfo.tid, blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, pQuery->current->lastKey); } + updateWindowResNumOfRes(pRuntimeEnv); + int64_t et = taosGetTimestampMs(); return et - st; } @@ -4316,7 +4293,9 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) { SArray *group = GET_TABLEGROUP(pQInfo, 0); STableQueryInfo* pCheckInfo = taosArrayGetP(group, index); - setTagVal(pRuntimeEnv, pCheckInfo->pTable, pQInfo->tsdb); + if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTSBuf != NULL) { + setTagVal(pRuntimeEnv, pCheckInfo->pTable, pQInfo->tsdb); + } STableId* id = TSDB_TABLEID(pCheckInfo->pTable); qDebug("QInfo:%p query on (%d): uid:%" PRIu64 ", tid:%d, qrange:%" PRId64 "-%" PRId64, pQInfo, index, @@ -4547,7 +4526,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { 1 == taosArrayGetSize(pQInfo->tableqinfoGroupInfo.pGroupList)); while (pQInfo->tableIndex < 
pQInfo->tableqinfoGroupInfo.numOfTables) { - if (isQueryKilled(pQInfo)) { + if (IS_QUERY_KILLED(pQInfo)) { return; } @@ -4742,7 +4721,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { qDebug("QInfo:%p master scan completed, elapsed time: %" PRId64 "ms, reverse scan start", pQInfo, el); // query error occurred or query is killed, abort current execution - if (pQInfo->code != TSDB_CODE_SUCCESS || isQueryKilled(pQInfo)) { + if (pQInfo->code != TSDB_CODE_SUCCESS || IS_QUERY_KILLED(pQInfo)) { qDebug("QInfo:%p query killed or error occurred, code:%s, abort", pQInfo, tstrerror(pQInfo->code)); return; } @@ -4764,7 +4743,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { setQueryStatus(pQuery, QUERY_COMPLETED); - if (pQInfo->code != TSDB_CODE_SUCCESS || isQueryKilled(pQInfo)) { + if (pQInfo->code != TSDB_CODE_SUCCESS || IS_QUERY_KILLED(pQInfo)) { qDebug("QInfo:%p query killed or error occurred, code:%s, abort", pQInfo, tstrerror(pQInfo->code)); return; } @@ -4804,7 +4783,7 @@ static void tableFixedOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) scanOneTableDataBlocks(pRuntimeEnv, pTableInfo->lastKey); finalizeQueryResult(pRuntimeEnv); - if (isQueryKilled(pQInfo)) { + if (IS_QUERY_KILLED(pQInfo)) { return; } @@ -4837,7 +4816,7 @@ static void tableMultiOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) scanOneTableDataBlocks(pRuntimeEnv, pQuery->current->lastKey); finalizeQueryResult(pRuntimeEnv); - if (isQueryKilled(pQInfo)) { + if (IS_QUERY_KILLED(pQInfo)) { return; } @@ -4885,7 +4864,7 @@ static void tableIntervalProcessImpl(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) while (1) { scanOneTableDataBlocks(pRuntimeEnv, start); - if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) { + if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) { return; } @@ -5022,7 +5001,7 @@ static void tableQueryImpl(SQInfo *pQInfo) { // group by normal column, sliding window query, interval query are handled by interval query processor if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) { // interval (down sampling operation) tableIntervalProcess(pQInfo, item); - } else if (isFixedOutputQuery(pQuery)) { + } else if (isFixedOutputQuery(pRuntimeEnv)) { tableFixedOutputProcess(pQInfo, item); } else { // diff/add/multiply/subtract/division assert(pQuery->checkBuffer == 1); @@ -5042,7 +5021,7 @@ static void stableQueryImpl(SQInfo *pQInfo) { int64_t st = taosGetTimestampUs(); if (QUERY_IS_INTERVAL_QUERY(pQuery) || - (isFixedOutputQuery(pQuery) && (!isPointInterpoQuery(pQuery)) && !pRuntimeEnv->groupbyNormalCol && + (isFixedOutputQuery(pRuntimeEnv) && (!isPointInterpoQuery(pQuery)) && !pRuntimeEnv->groupbyNormalCol && !isFirstLastRowQuery(pQuery))) { multiTableQueryProcess(pQInfo); } else { @@ -6173,7 +6152,7 @@ void qTableQuery(qinfo_t qinfo) { return; } - if (isQueryKilled(pQInfo)) { + if (IS_QUERY_KILLED(pQInfo)) { qDebug("QInfo:%p it is already killed, abort", pQInfo); sem_post(&pQInfo->dataReady); @@ -6214,7 +6193,7 @@ void qTableQuery(qinfo_t qinfo) { } SQuery* pQuery = pRuntimeEnv->pQuery; - if (isQueryKilled(pQInfo)) { + if (IS_QUERY_KILLED(pQInfo)) { qDebug("QInfo:%p query is killed", pQInfo); } else if (pQuery->rec.rows == 0) { qDebug("QInfo:%p over, %zu tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQuery->rec.total); @@ -6235,7 +6214,7 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo) { } SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - if (isQueryKilled(pQInfo)) { + if (IS_QUERY_KILLED(pQInfo)) { qDebug("QInfo:%p query is 
killed, code:%d", pQInfo, pQInfo->code); return pQInfo->code; } @@ -6310,7 +6289,7 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co code = pQInfo->code; } - if (isQueryKilled(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER)) { + if (IS_QUERY_KILLED(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER)) { (*pRsp)->completed = 1; // notify no more result to client } diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index a4e0151f89..c055a27c39 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -110,9 +110,10 @@ typedef struct STsdbQueryHandle { SFileGroupIter fileIter; SRWHelper rhelper; STableBlockInfo* pDataBlockInfo; + int32_t allocSize; // allocated data block size SMemTable* mem; // mem-table SMemTable* imem; // imem-table, acquired from snapshot - + SArray* defaultLoadColumn;// default load column SDataBlockLoadInfo dataBlockLoadInfo; /* record current block load information */ SLoadCompBlockInfo compBlockLoadInfo; /* record current compblock information in SQuery */ } STsdbQueryHandle; @@ -136,6 +137,34 @@ static void tsdbInitCompBlockLoadInfo(SLoadCompBlockInfo* pCompBlockLoadInfo) { pCompBlockLoadInfo->fileId = -1; } +static SArray* getColumnIdList(STsdbQueryHandle* pQueryHandle) { + size_t numOfCols = QH_GET_NUM_OF_COLS(pQueryHandle); + assert(numOfCols <= TSDB_MAX_COLUMNS); + + SArray* pIdList = taosArrayInit(numOfCols, sizeof(int16_t)); + for (int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i); + taosArrayPush(pIdList, &pCol->info.colId); + } + + return pIdList; +} + +static SArray* getDefaultLoadColumns(STsdbQueryHandle* pQueryHandle, bool loadTS) { + SArray* pLocalIdList = getColumnIdList(pQueryHandle); + + // check if the primary time stamp column needs to load + int16_t colId = *(int16_t*)taosArrayGet(pLocalIdList, 0); + + // the primary timestamp column does not be included in the the specified load column list, add it + if (loadTS && colId != 0) { + int16_t columnId = 0; + taosArrayInsert(pLocalIdList, 0, &columnId); + } + + return pLocalIdList; +} + TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, void* qinfo) { STsdbQueryHandle* pQueryHandle = calloc(1, sizeof(STsdbQueryHandle)); pQueryHandle->order = pCond->order; @@ -148,7 +177,8 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab pQueryHandle->activeIndex = 0; // current active table index pQueryHandle->qinfo = qinfo; pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock; - + pQueryHandle->allocSize = 0; + tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb); tsdbTakeMemSnapshot(pQueryHandle->pTsdb, &pQueryHandle->mem, &pQueryHandle->imem); @@ -195,7 +225,9 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab taosArrayPush(pQueryHandle->pTableCheckInfo, &info); } } - + + pQueryHandle->defaultLoadColumn = getDefaultLoadColumns(pQueryHandle, true); + tsdbDebug("%p total numOfTable:%zu in query", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo)); tsdbInitDataBlockLoadInfo(&pQueryHandle->dataBlockLoadInfo); @@ -546,33 +578,7 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo .tid = (_checkInfo)->tableId.tid, \ .uid = (_checkInfo)->tableId.uid}) -static SArray* getColumnIdList(STsdbQueryHandle* pQueryHandle) { - size_t numOfCols = QH_GET_NUM_OF_COLS(pQueryHandle); - assert(numOfCols <= TSDB_MAX_COLUMNS); - - 
SArray* pIdList = taosArrayInit(numOfCols, sizeof(int16_t)); - for (int32_t i = 0; i < numOfCols; ++i) { - SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i); - taosArrayPush(pIdList, &pCol->info.colId); - } - - return pIdList; -} -static SArray* getDefaultLoadColumns(STsdbQueryHandle* pQueryHandle, bool loadTS) { - SArray* pLocalIdList = getColumnIdList(pQueryHandle); - - // check if the primary time stamp column needs to load - int16_t colId = *(int16_t*)taosArrayGet(pLocalIdList, 0); - - // the primary timestamp column does not be included in the the specified load column list, add it - if (loadTS && colId != 0) { - int16_t columnId = 0; - taosArrayInsert(pLocalIdList, 0, &columnId); - } - - return pLocalIdList; -} static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo) { STsdbRepo *pRepo = pQueryHandle->pTsdb; @@ -584,8 +590,6 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo data->uid = pCheckInfo->pTableObj->tableId.uid; bool blockLoaded = false; - SArray* sa = getDefaultLoadColumns(pQueryHandle, true); - int64_t st = taosGetTimestampUs(); if (pCheckInfo->pDataCols == NULL) { @@ -613,7 +617,6 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo assert(pCols->numOfRows != 0 && pCols->numOfRows <= pBlock->numOfRows); pBlock->numOfRows = pCols->numOfRows; - taosArrayDestroy(sa); tfree(data); int64_t et = taosGetTimestampUs() - st; @@ -656,12 +659,8 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* return; } - SArray* sa = getDefaultLoadColumns(pQueryHandle, true); - doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo); - doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, sa); - taosArrayDestroy(sa); - + doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, pQueryHandle->defaultLoadColumn); } else { /* * no data in cache, only load data from file @@ -681,14 +680,12 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* } static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo) { - SArray* sa = getDefaultLoadColumns(pQueryHandle, true); SQueryFilePos* cur = &pQueryHandle->cur; if (ASCENDING_TRAVERSE(pQueryHandle->order)) { // query ended in current block if (pQueryHandle->window.ekey < pBlock->keyLast || pCheckInfo->lastKey > pBlock->keyFirst) { if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo)) { - taosArrayDestroy(sa); return false; } @@ -702,7 +699,7 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock cur->pos = 0; } - doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, sa); + doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, pQueryHandle->defaultLoadColumn); } else { // the whole block is loaded in to buffer handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo); } @@ -719,13 +716,12 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock cur->pos = pBlock->numOfRows - 1; } - doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, sa); + doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, pQueryHandle->defaultLoadColumn); } else { handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo); } } - taosArrayDestroy(sa); return pQueryHandle->realNumOfRows > 0; } @@ -1250,13 +1246,19 @@ static int32_t dataBlockOrderCompar(const void* pLeft, const void* pRight, void* } static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numOfBlocks, int32_t* 
numOfAllocBlocks) { - char* tmp = realloc(pQueryHandle->pDataBlockInfo, sizeof(STableBlockInfo) * numOfBlocks); - if (tmp == NULL) { - return TSDB_CODE_TDB_OUT_OF_MEMORY; + size_t size = sizeof(STableBlockInfo) * numOfBlocks; + + if (pQueryHandle->allocSize < size) { + pQueryHandle->allocSize = size; + char* tmp = realloc(pQueryHandle->pDataBlockInfo, pQueryHandle->allocSize); + if (tmp == NULL) { + return TSDB_CODE_TDB_OUT_OF_MEMORY; + } + + pQueryHandle->pDataBlockInfo = (STableBlockInfo*) tmp; } - pQueryHandle->pDataBlockInfo = (STableBlockInfo*) tmp; - memset(pQueryHandle->pDataBlockInfo, 0, sizeof(STableBlockInfo) * numOfBlocks); + memset(pQueryHandle->pDataBlockInfo, 0, size); *numOfAllocBlocks = numOfBlocks; int32_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo); @@ -1492,9 +1494,8 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { return false; } - SArray* sa = getDefaultLoadColumns(pQueryHandle, true); /*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo(pHandle, &blockInfo); - /*SArray *pDataBlock = */tsdbRetrieveDataBlock(pHandle, sa); + /*SArray *pDataBlock = */tsdbRetrieveDataBlock(pHandle, pQueryHandle->defaultLoadColumn); if (pQueryHandle->cur.win.ekey == pQueryHandle->window.skey) { // data already retrieve, discard other data rows and return @@ -1508,7 +1509,6 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { pQueryHandle->window = pQueryHandle->cur.win; pQueryHandle->cur.rows = 1; pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL; - taosArrayDestroy(sa); return true; } else { STsdbQueryHandle* pSecQueryHandle = calloc(1, sizeof(STsdbQueryHandle)); @@ -1565,7 +1565,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { assert(ret); /*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo((void*) pSecQueryHandle, &blockInfo); - /*SArray *pDataBlock = */tsdbRetrieveDataBlock((void*) pSecQueryHandle, sa); + /*SArray *pDataBlock = */tsdbRetrieveDataBlock((void*) pSecQueryHandle, pSecQueryHandle->defaultLoadColumn); for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i); @@ -2333,6 +2333,7 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) { } taosArrayDestroy(pQueryHandle->pColumns); + taosArrayDestroy(pQueryHandle->defaultLoadColumn); tfree(pQueryHandle->pDataBlockInfo); tfree(pQueryHandle->statis); diff --git a/src/util/inc/tarray.h b/src/util/inc/tarray.h index 4d44e82b1b..c05e2757d6 100644 --- a/src/util/inc/tarray.h +++ b/src/util/inc/tarray.h @@ -23,7 +23,7 @@ extern "C" { #include "os.h" #define TARRAY_MIN_SIZE 8 -#define TARRAY_GET_ELEM(array, index) ((array)->pData + (index) * (array)->elemSize) +#define TARRAY_GET_ELEM(array, index) ((void*)((array)->pData + (index) * (array)->elemSize)) typedef struct SArray { size_t size; From 385f45a1eb72d631d87fb8e6461223b0f0579ccc Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 10 Jul 2020 16:00:33 +0800 Subject: [PATCH 25/38] [td-225] update cmake files --- cmake/platform.inc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/platform.inc b/cmake/platform.inc index 7bad9bc4f3..2e0e2d6af0 100755 --- a/cmake/platform.inc +++ b/cmake/platform.inc @@ -109,7 +109,7 @@ IF (TD_LINUX_64) IF (${CMAKE_CXX_COMPILER_ID} MATCHES "Clang") SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ELSE () - SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -pg -fPIC -malign-double -g3 -gdwarf-2 -malign-stringops -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + 
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -malign-double -g3 -gdwarf-2 -malign-stringops -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () ELSE () SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") From 56d4bbf9cd7c0fd7ec557f8d2cbbcca747437339 Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Fri, 10 Jul 2020 16:43:54 +0800 Subject: [PATCH 26/38] fix memory leak --- src/client/src/tscLocalMerge.c | 7 +++++++ src/client/src/tscStream.c | 3 +++ 2 files changed, 10 insertions(+) diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 13523818d1..80fc82d90b 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -274,6 +274,10 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd pReducer->numOfBuffer = idx; SCompareParam *param = malloc(sizeof(SCompareParam)); + if (param == NULL) { + tfree(pReducer); + return; + } param->pLocalData = pReducer->pLocalDataSrc; param->pDesc = pReducer->pDesc; param->num = pReducer->pLocalDataSrc[0]->pMemBuffer->numOfElemsPerPage; @@ -284,6 +288,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator); if (pReducer->pLoserTree == NULL || pRes->code != 0) { + tfree(param); tfree(pReducer); return; } @@ -332,6 +337,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd tfree(pReducer->pResultBuf); tfree(pReducer->pFinalRes); tfree(pReducer->prevRowOfInput); + tfree(pReducer->pLoserTree); + tfree(param); tfree(pReducer); pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; return; diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index f214e76cc7..6cc27a4cfe 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -255,6 +255,9 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf // release the metric/meter meta information reference, so data in cache can be updated taosCacheRelease(tscCacheHandle, (void**)&(pTableMetaInfo->pTableMeta), false); + tscFreeSqlResult(pSql); + tfree(pSql->pSubs); + pSql->numOfSubs = 0; tfree(pTableMetaInfo->vgroupList); tscSetNextLaunchTimer(pStream, pSql); } From 3b5c64d431c67e854da39049051faae562e6339f Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 10 Jul 2020 17:52:53 +0800 Subject: [PATCH 27/38] [TD-825] failed to determine if there is a result set --- src/plugins/http/src/httpSql.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c index c43d928d1b..7a515d124e 100644 --- a/src/plugins/http/src/httpSql.c +++ b/src/plugins/http/src/httpSql.c @@ -233,10 +233,11 @@ void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int num } } -void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int code) { +void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int unUsedCode) { HttpContext *pContext = (HttpContext *)param; if (pContext == NULL) return; + int32_t code = taos_errno(result); HttpEncodeMethod *encode = pContext->encodeMethod; if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { @@ -260,8 +261,8 @@ void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int code) { return; } - int num_fields = taos_field_count(result); - if (num_fields == 0) { + bool isUpdate = tscIsUpdateQuery(result); + if (isUpdate) { // not select 
or show commands int affectRows = taos_affected_rows(result); From 0997261d6404a1cfa4588bd41fdab5d8bb0eccd8 Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Fri, 10 Jul 2020 18:41:08 +0800 Subject: [PATCH 28/38] fix another two memory leaks --- src/client/src/tscSubquery.c | 5 ++--- src/client/src/tscUtil.c | 1 - 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index b5b659de0c..d3f298c2b2 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -1447,9 +1447,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { static void tscFreeSubSqlObj(SRetrieveSupport *trsupport, SSqlObj *pSql) { tscDebug("%p start to free subquery result", pSql); - if (pSql->res.code == TSDB_CODE_SUCCESS) { - taos_free_result(pSql); - } + taos_free_result(pSql); tfree(trsupport->localBuffer); @@ -1780,6 +1778,7 @@ static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsu pSql->pSubs[trsupport->subqueryIndex] = pNew; } + printf("------------alloc:%p\n", pNew); return pNew; } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 54c9cdea65..a58423bbaa 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1822,7 +1822,6 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void STableMeta* pPrevTableMeta = taosCacheTransfer(tscCacheHandle, (void**)&pPrevInfo->pTableMeta); SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList; - pPrevInfo->vgroupList = NULL; pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList); } From 424baa43f3a767273ddca11922b183dda4553776 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 10 Jul 2020 19:13:53 +0800 Subject: [PATCH 29/38] add compress test cases to regression and full test list. 
--- tests/pytest/fulltest.sh | 4 ++++ tests/pytest/regressiontest.sh | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 7a0f2cb825..5ee33c421e 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -156,3 +156,7 @@ python3 ./test.py -f alter/alter_table_crash.py # client python3 ./test.py -f client/client.py + +# Misc +python3 testCompress.py +python3 testNoCompress.py diff --git a/tests/pytest/regressiontest.sh b/tests/pytest/regressiontest.sh index 46098f4040..ccc6635ced 100755 --- a/tests/pytest/regressiontest.sh +++ b/tests/pytest/regressiontest.sh @@ -150,3 +150,7 @@ python3 ./test.py -f alter/alter_table_crash.py # client python3 ./test.py -f client/client.py + +# Misc +python3 testCompress.py +python3 testNoCompress.py From f5cdd5a84580d381c6fe4aae146d6ee0b2d8fe0e Mon Sep 17 00:00:00 2001 From: Hui Li Date: Fri, 10 Jul 2020 19:24:09 +0800 Subject: [PATCH 30/38] [add log] --- src/tsdb/src/tsdbMemTable.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index af86de5aa8..675e44f458 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -119,7 +119,8 @@ int tsdbInsertRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) { int tsdbRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) { if (pMemTable == NULL) return 0; - T_REF_INC(pMemTable); + int ref = T_REF_INC(pMemTable); + tsdbDebug("vgId:%d ref memtable %p ref %d", REPO_ID(pRepo), pMemTable, ref); return 0; } @@ -127,7 +128,9 @@ int tsdbRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) { int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) { if (pMemTable == NULL) return 0; - if (T_REF_DEC(pMemTable) == 0) { + int ref = T_REF_DEC(pMemTable); + tsdbDebug("vgId:%d unref memtable %p ref %d", REPO_ID(pRepo), pMemTable, ref); + if (ref == 0) { STsdbCfg * pCfg = &pRepo->config; STsdbBufPool *pBufPool = pRepo->pPool; @@ -167,6 +170,7 @@ int tsdbTakeMemSnapshot(STsdbRepo *pRepo, SMemTable **pMem, SMemTable **pIMem) { tsdbRefMemTable(pRepo, *pIMem); if (tsdbUnlockRepo(pRepo) < 0) return -1; + tsdbDebug("vgId:%d take memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), *pMem, *pIMem); return 0; } From fc2c479d08b19c060329da1d02695fbff23538b0 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 11 Jul 2020 11:15:22 +0800 Subject: [PATCH 31/38] fix table/del_stable.py for dnodes change. 
[TD-899] --- tests/pytest/table/del_stable.py | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/tests/pytest/table/del_stable.py b/tests/pytest/table/del_stable.py index e458b6679f..1b078ac519 100644 --- a/tests/pytest/table/del_stable.py +++ b/tests/pytest/table/del_stable.py @@ -37,17 +37,8 @@ class TDTestCase: except Exception as e: tdLog.exit(e) - try: - tdSql.execute("select * from db.st") - except Exception as e: - if e.args[0] != 'mnode invalid table name': - tdLog.exit(e) - - try: - tdSql.execute("select * from db.tb") - except Exception as e: - if e.args[0] != 'mnode invalid table name': - tdLog.exit(e) + tdSql.error("select * from db.st") + tdSql.error("select * from db.tb") def stop(self): tdSql.close() From 81c7f6e7dc6cf93568e3bde7748e0abf2829d9fe Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Sat, 11 Jul 2020 12:17:15 +0800 Subject: [PATCH 32/38] fix a typo --- .../jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java index 48290d3e62..273bc6920c 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java @@ -52,7 +52,7 @@ public class TDNode { public void start() { String selfPath = System.getProperty("user.dir"); String binPath = ""; - String projDir = selfPath + "/../../../../"; + String projDir = selfPath + "/../../../"; try { ArrayList taosdPath = new ArrayList<>(); @@ -68,7 +68,7 @@ public class TDNode { return; } else { for(String p : taosdPath) { - if(!p.contains("packing")) { + if(!p.contains("packaging")) { binPath = p; } } From 44b91cac2ae35fa9cd1040a92a1efbd4aef488e1 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 11 Jul 2020 14:24:48 +0800 Subject: [PATCH 33/38] [td-225]fix bugs in qmgmt management. 
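The core of the fix is in tcache.c: the per-node extendFactor bookkeeping is replaced by an absolute expireTime, and the release path now extends the lifespan while the reference is still held, then decrements, so that exactly one thread (the one that observes the count reach zero) reclaims the node. A minimal sketch of that release path, with simplified types and a hypothetical removeFromTrash() standing in for the real trashcan helpers:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct SNode {
      volatile int ref;        /* reference count */
      int64_t      expireTime; /* absolute deadline in ms */
      int64_t      lifespan;   /* duration in ms */
      bool         inTrashCan;
    } SNode;

    extern void removeFromTrash(SNode *p); /* hypothetical helper */

    void releaseNode(SNode *p, int64_t nowMs, bool extendLifespan) {
      if (extendLifespan) {
        p->expireTime = nowMs + p->lifespan; /* extend BEFORE the decrement */
      }
      bool inTrash = p->inTrashCan; /* read while the reference is still held */

      /* after this decrement another thread may free p at any moment */
      int ref = __atomic_sub_fetch(&p->ref, 1, __ATOMIC_SEQ_CST);

      if (ref == 0 && inTrash) {
        /* a trashcan node can never be re-acquired, so only this
           thread can observe zero here: safe to reclaim */
        removeFromTrash(p);
      }
    }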
--- src/inc/query.h | 16 +++--- src/util/inc/tcache.h | 21 ++++---- src/util/src/tcache.c | 100 ++++++++++++++++++++++---------------- src/vnode/src/vnodeMain.c | 2 +- src/vnode/src/vnodeRead.c | 16 +++--- 5 files changed, 88 insertions(+), 67 deletions(-) diff --git a/src/inc/query.h b/src/inc/query.h index c648270b21..d201b649f9 100644 --- a/src/inc/query.h +++ b/src/inc/query.h @@ -20,7 +20,6 @@ extern "C" { #endif typedef void* qinfo_t; -typedef void (*_qinfo_free_fn_t)(void*); /** * create the qinfo object according to QueryTableMsg @@ -29,13 +28,8 @@ typedef void (*_qinfo_free_fn_t)(void*); * @param qinfo * @return */ -int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryTableMsg, void* param, _qinfo_free_fn_t fn, qinfo_t* qinfo); +int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryTableMsg, void* param, qinfo_t* qinfo); -/** - * Destroy QInfo object - * @param qinfo qhandle - */ -void qDestroyQueryInfo(qinfo_t qinfo); /** * the main query execution function, including query on both table and multitables, @@ -84,8 +78,14 @@ bool qHasMoreResultsToRetrieve(qinfo_t qinfo); */ int32_t qKillQuery(qinfo_t qinfo); +/** + * destroy query info structure + * @param qHandle + */ +void qDestroyQueryInfo(qinfo_t qHandle); + void* qOpenQueryMgmt(int32_t vgId); -void qSetQueryMgmtClosed(void* pExecutor); +void qQueryMgmtNotifyClosed(void* pExecutor); void qCleanupQueryMgmt(void* pExecutor); void** qRegisterQInfo(void* pMgmt, uint64_t qInfo); void** qAcquireQInfo(void* pMgmt, uint64_t key); diff --git a/src/util/inc/tcache.h b/src/util/inc/tcache.h index 2982b8dc70..3da604d152 100644 --- a/src/util/inc/tcache.h +++ b/src/util/inc/tcache.h @@ -33,17 +33,20 @@ typedef struct SCacheStatis { int64_t refreshCount; } SCacheStatis; +struct STrashElem; + typedef struct SCacheDataNode { - uint64_t addedTime; // the added time when this element is added or updated into cache - uint64_t lifespan; // expiredTime expiredTime when this element should be remove from cache - uint64_t signature; - uint32_t size; // allocated size for current SCacheDataNode + uint64_t addedTime; // the added time when this element is added or updated into cache + uint64_t lifespan; // life duration when this element should be remove from cache + uint64_t expireTime; // expire time + uint64_t signature; + struct STrashElem *pTNodeHeader; // point to trash node head + uint16_t keySize: 15; // max key size: 32kb + bool inTrashCan: 1;// denote if it is in trash or not + uint32_t size; // allocated size for current SCacheDataNode T_REF_DECLARE() - uint16_t keySize: 15; // max key size: 32kb - bool inTrashCan: 1;// denote if it is in trash or not - int32_t extendFactor; // number of life span extend - char *key; - char data[]; + char *key; + char data[]; } SCacheDataNode; typedef struct STrashElem { diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 688e49a40b..d3c622633d 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -116,11 +116,13 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo return; } - int32_t size = pNode->size; taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize); + pCacheObj->totalSize -= pNode->size; uDebug("cache:%s, key:%p, %p is destroyed from cache, totalNum:%d totalSize:%" PRId64 "bytes size:%dbytes", - pCacheObj->name, pNode->key, pNode->data, (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, size); + pCacheObj->name, pNode->key, pNode->data, 
(int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, + pNode->size); + if (pCacheObj->freeFp) pCacheObj->freeFp(pNode->data); free(pNode); } @@ -285,7 +287,7 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v uDebug("cache:%s, key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", totalNum:%d totalSize:%" PRId64 "bytes size:%" PRId64 "bytes", - pCacheObj->name, key, pNode->data, pNode->addedTime, (pNode->lifespan * pNode->extendFactor + pNode->addedTime), + pCacheObj->name, key, pNode->data, pNode->addedTime, pNode->expireTime, (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, dataSize); } else { uError("cache:%s, key:%p, failed to added into cache, out of memory", pCacheObj->name, key); @@ -312,16 +314,6 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen int32_t ref = 0; if (ptNode != NULL) { ref = T_REF_INC(*ptNode); - - // if the remained life span is less then the (*ptNode)->lifeSpan, add up one lifespan - if (pCacheObj->extendLifespan) { - int64_t now = taosGetTimestampMs(); - - if ((now - (*ptNode)->addedTime) < (*ptNode)->lifespan * (*ptNode)->extendFactor) { - (*ptNode)->extendFactor += 1; - uDebug("key:%p extend life time to %"PRId64, key, (*ptNode)->lifespan * (*ptNode)->extendFactor + (*ptNode)->addedTime); - } - } } __cache_unlock(pCacheObj); @@ -347,8 +339,7 @@ void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, void *key, size_t ke SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen); if (ptNode != NULL) { T_REF_INC(*ptNode); - (*ptNode)->extendFactor += 1; -// (*ptNode)->lifespan = expireTime; + (*ptNode)->expireTime = taosGetTimestampMs() + (*ptNode)->lifespan; } __cache_unlock(pCacheObj); @@ -380,17 +371,6 @@ void *taosCacheAcquireByData(SCacheObj *pCacheObj, void *data) { int32_t ref = T_REF_INC(ptNode); uDebug("cache:%s, data: %p acquired by data in cache, refcnt:%d", pCacheObj->name, ptNode->data, ref); - // if the remained life span is less then the (*ptNode)->lifeSpan, add up one lifespan - if (pCacheObj->extendLifespan) { - int64_t now = taosGetTimestampMs(); - - if ((now - ptNode->addedTime) < ptNode->lifespan * ptNode->extendFactor) { - ptNode->extendFactor += 1; - uDebug("cache:%s, %p extend life time to %" PRId64, pCacheObj->name, ptNode->data, - ptNode->lifespan * ptNode->extendFactor + ptNode->addedTime); - } - } - // the data if referenced by at least one object, so the reference count must be greater than the value of 2. assert(ref >= 2); return data; @@ -431,22 +411,58 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { } *data = NULL; - int16_t ref = T_REF_DEC(pNode); - uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, ref); - if (_remove && (!pNode->inTrashCan)) { - __cache_wr_lock(pCacheObj); + // note: extend lifespan before dec ref count + if (pCacheObj->extendLifespan) { + atomic_store_64(&pNode->expireTime, pNode->lifespan + taosGetTimestampMs()); + uDebug("cache:%s data:%p extend life time to %"PRId64 " before release", pCacheObj->name, pNode->data, pNode->expireTime); + } - if (T_REF_VAL_GET(pNode) == 0) { - // remove directly, if not referenced by other users - taosCacheReleaseNode(pCacheObj, pNode); - } else { - // pNode may be released immediately by other thread after the reference count of pNode is set to 0, - // So we need to lock it in the first place. 
- taosCacheMoveToTrash(pCacheObj, pNode); + bool inTrashCan = pNode->inTrashCan; + uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, T_REF_VAL_GET(pNode) - 1); + + // NOTE: once refcount is decrease, pNode may be free by other thread immediately. + int32_t ref = T_REF_DEC(pNode); + + if (inTrashCan) { + // Remove it if the ref count is 0. + // The ref count does not need to load and check again after lock acquired, since ref count can not be increased when + // the node is in trashcan. + if (ref == 0) { + __cache_wr_lock(pCacheObj); + assert(pNode->pTNodeHeader->pData == pNode); + taosRemoveFromTrashCan(pCacheObj, pNode->pTNodeHeader); + __cache_unlock(pCacheObj); } - __cache_unlock(pCacheObj); + } else { + assert(pNode->pTNodeHeader == NULL); + + if (_remove) { // not in trash can, but need to remove it + __cache_wr_lock(pCacheObj); + + /* + * If not referenced by other users. Otherwise move this node to trashcan wait for all users + * releasing this resources. + * + * NOTE: previous ref is 0, and current ref is still 0, remove it. If previous is not 0, there is another thread + * that tries to do the same thing. + */ + if (ref == 0) { + if (T_REF_VAL_GET(pNode) == 0) { + taosCacheReleaseNode(pCacheObj, pNode); + } else { + taosCacheMoveToTrash(pCacheObj, pNode); + } + } + + __cache_unlock(pCacheObj); +// } else { // extend its life time +// if (pCacheObj->extendLifespan) { +// atomic_store_64(&pNode->expireTime, pNode->lifespan + taosGetTimestampMs()); +// uDebug("cache:%s data:%p extend life time to %"PRId64 " after release", pCacheObj->name, pNode->data, pNode->expireTime); +// } + } } } @@ -486,7 +502,7 @@ void taosCacheCleanup(SCacheObj *pCacheObj) { SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *pData, size_t size, uint64_t duration) { - size_t totalSize = size + sizeof(SCacheDataNode) + keyLen + 1; + size_t totalSize = size + sizeof(SCacheDataNode) + keyLen; SCacheDataNode *pNewNode = calloc(1, totalSize); if (pNewNode == NULL) { @@ -503,7 +519,7 @@ SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char * pNewNode->addedTime = (uint64_t)taosGetTimestampMs(); pNewNode->lifespan = duration; - pNewNode->extendFactor = 1; + pNewNode->expireTime = pNewNode->addedTime + pNewNode->lifespan; pNewNode->signature = (uint64_t)pNewNode; pNewNode->size = (uint32_t)totalSize; @@ -512,6 +528,7 @@ SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char * void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) { if (pNode->inTrashCan) { /* node is already in trash */ + assert(pNode->pTNodeHeader != NULL && pNode->pTNodeHeader->pData == pNode); return; } @@ -527,6 +544,7 @@ void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) { pCacheObj->pTrash = pElem; pNode->inTrashCan = true; + pNode->pTNodeHeader = pElem; pCacheObj->numOfElemsInTrash++; uDebug("key:%p, %p move to trash, numOfElem in trash:%d", pNode->key, pNode->data, pCacheObj->numOfElemsInTrash); @@ -629,7 +647,7 @@ static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t __cache_wr_lock(pCacheObj); while (taosHashIterNext(pIter)) { SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter); - if ((pNode->addedTime + pNode->lifespan * pNode->extendFactor) <= time && T_REF_VAL_GET(pNode) <= 0) { + if (pNode->expireTime < time && T_REF_VAL_GET(pNode) <= 0) { taosCacheReleaseNode(pCacheObj, pNode); continue; } diff --git a/src/vnode/src/vnodeMain.c 
b/src/vnode/src/vnodeMain.c index 18c9ebf2e1..d5221bae10 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -508,7 +508,7 @@ static void vnodeCleanUp(SVnodeObj *pVnode) { vTrace("vgId:%d, vnode will cleanup, refCount:%d", pVnode->vgId, pVnode->refCount); // release local resources only after cutting off outside connections - qSetQueryMgmtClosed(pVnode->qMgmt); + qQueryMgmtNotifyClosed(pVnode->qMgmt); vnodeRelease(pVnode); } diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index f054ae3904..ff58e219b0 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -82,6 +82,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { vWarn("QInfo:%p invalid qhandle, no matched query handle, conn:%p", (void*) killQueryMsg->qhandle, pReadMsg->rpcMsg.handle); } else { assert(*qhandle == (void*) killQueryMsg->qhandle); + qKillQuery(*qhandle); qReleaseQInfo(pVnode->qMgmt, (void**) &qhandle, true); } @@ -93,7 +94,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { if (contLen != 0) { qinfo_t pQInfo = NULL; - code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, pVnode, NULL, &pQInfo); + code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, pVnode, &pQInfo); SQueryTableRsp *pRsp = (SQueryTableRsp *) rpcMallocCont(sizeof(SQueryTableRsp)); pRsp->code = code; @@ -108,9 +109,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { handle = qRegisterQInfo(pVnode->qMgmt, (uint64_t) pQInfo); if (handle == NULL) { // failed to register qhandle pRsp->code = TSDB_CODE_QRY_INVALID_QHANDLE; - - qKillQuery(pQInfo); - qKillQuery(pQInfo); + qDestroyQueryInfo(pQInfo); // destroy it directly } else { assert(*handle == pQInfo); pRsp->qhandle = htobe64((uint64_t) pQInfo); @@ -120,10 +119,6 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { if (handle != NULL && vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) { vError("vgId:%d, QInfo:%p, query discarded since link is broken, %p", pVnode->vgId, *handle, pReadMsg->rpcMsg.handle); pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL; - - // NOTE: there two refcount, needs to kill twice - // query has not been put into qhandle pool, kill it directly. 
- qKillQuery(*handle); qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); return pRsp->code; } @@ -134,6 +129,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { dnodePutItemIntoReadQueue(pVnode, *handle); qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); } + vDebug("vgId:%d, QInfo:%p, dnode query msg disposed", vgId, pQInfo); } else { assert(pCont != NULL); @@ -183,6 +179,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { if (pRetrieve->free == 1) { vDebug("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, *handle); + qKillQuery(*handle); qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); @@ -209,6 +206,9 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { dnodePutItemIntoReadQueue(pVnode, *handle); pRet->qhandle = *handle; freeHandle = false; + } else { + qKillQuery(*handle); + freeHandle = true; } } } From 8fdfc306734d2bd2b52954c73a7d1724861a2c9f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 11 Jul 2020 14:25:15 +0800 Subject: [PATCH 34/38] [td-225]enable 'us' fill --- tests/script/general/parser/fill_us.sim | 48 ++++++++++++------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/tests/script/general/parser/fill_us.sim b/tests/script/general/parser/fill_us.sim index a66629c90b..b597d378a2 100644 --- a/tests/script/general/parser/fill_us.sim +++ b/tests/script/general/parser/fill_us.sim @@ -652,25 +652,25 @@ endi if $data01 != 1 then return -1 endi -if $data11 != null then +if $data11 != NULL then return -1 endi if $data21 != 1 then return -1 endi -if $data31 != null then +if $data31 != NULL then return -1 endi if $data41 != 1 then return -1 endi -if $data51 != null then +if $data51 != NULL then return -1 endi if $data61 != 1 then return -1 endi -if $data71 != null then +if $data71 != NULL then return -1 endi if $data81 != 1 then @@ -689,25 +689,25 @@ endi if $data01 != 0.000000000 then return -1 endi -if $data11 != null then +if $data11 != NULL then return -1 endi if $data21 != 1.000000000 then return -1 endi -if $data31 != null then +if $data31 != NULL then return -1 endi if $data41 != 2.000000000 then return -1 endi -if $data51 != null then +if $data51 != NULL then return -1 endi if $data61 != 3.000000000 then return -1 endi -if $data71 != null then +if $data71 != NULL then return -1 endi if $data81 != 4.000000000 then @@ -722,25 +722,25 @@ endi if $data01 != 0 then return -1 endi -if $data11 != null then +if $data11 != NULL then return -1 endi if $data21 != 1 then return -1 endi -if $data31 != null then +if $data31 != NULL then return -1 endi if $data41 != 2 then return -1 endi -if $data51 != null then +if $data51 != NULL then return -1 endi if $data61 != 3 then return -1 endi -if $data71 != null then +if $data71 != NULL then return -1 endi if $data81 != 4 then @@ -755,25 +755,25 @@ endi if $data01 != 0 then return -1 endi -if $data11 != null then +if $data11 != NULL then return -1 endi if $data21 != 1 then return -1 endi -if $data31 != null then +if $data31 != NULL then return -1 endi if $data41 != 2 then return -1 endi -if $data51 != null then +if $data51 != NULL then return -1 endi if $data61 != 3 then return -1 endi -if $data71 != null then +if $data71 != NULL then return -1 endi if $data81 != 4 then @@ -788,25 +788,25 @@ endi if $data01 != 0 then return -1 endi -if $data11 != null then +if $data11 != NULL then return -1 endi if $data21 != 1 
then return -1 endi -if $data31 != null then +if $data31 != NULL then return -1 endi if $data41 != 2 then return -1 endi -if $data51 != null then +if $data51 != NULL then return -1 endi if $data61 != 3 then return -1 endi -if $data71 != null then +if $data71 != NULL then return -1 endi if $data81 != 4 then @@ -821,25 +821,25 @@ endi if $data01 != 0 then return -1 endi -if $data11 != null then +if $data11 != NULL then return -1 endi if $data21 != 1 then return -1 endi -if $data31 != null then +if $data31 != NULL then return -1 endi if $data41 != 2 then return -1 endi -if $data51 != null then +if $data51 != NULL then return -1 endi if $data61 != 3 then return -1 endi -if $data71 != null then +if $data71 != NULL then return -1 endi if $data81 != 4 then From 3662e7c39a9fba4d88de2b853e97a3cd5189d15a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 11 Jul 2020 14:26:37 +0800 Subject: [PATCH 35/38] [td-225] refactor codes. --- src/query/inc/qExecutor.h | 3 --- src/query/inc/tsqlfunction.h | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 6cd6edd6db..1faad6b141 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -192,7 +192,6 @@ typedef struct SQInfo { int32_t offset; // offset in group result set of subgroup, todo refactor SArray* arrTableIdInfo; - T_REF_DECLARE() /* * the query is executed position on which meter of the whole list. * when the index reaches the last one of the list, it means the query is completed. @@ -201,8 +200,6 @@ typedef struct SQInfo { */ int32_t tableIndex; int32_t numOfGroupResultPages; - _qinfo_free_fn_t freeFn; //todo remove it - void* pBuf; // allocated buffer for STableQueryInfo, sizeof(STableQueryInfo)*numOfTables; } SQInfo; diff --git a/src/query/inc/tsqlfunction.h b/src/query/inc/tsqlfunction.h index 63b7abb379..0c69bc02d3 100644 --- a/src/query/inc/tsqlfunction.h +++ b/src/query/inc/tsqlfunction.h @@ -187,7 +187,7 @@ typedef struct SQLFunctionCtx { } SQLFunctionCtx; typedef struct SQLAggFuncElem { - char aName[TSDB_FUNCTIONS_NAME_MAX_LENGTH]; + char aName[TSDB_FUNCTIONS_NAME_MAX_LENGTH]; uint8_t nAggIdx; // index of function in aAggs int8_t stableFuncId; // transfer function for super table query From 4a7d25c8a7fc7da4cb0af0c2e541893dcfc040ac Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 11 Jul 2020 14:27:30 +0800 Subject: [PATCH 36/38] [td-225] refactor codes. 
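With the embedded reference count (T_REF_DECLARE) and the freeFn callback removed from SQInfo, the qinfoPool cache introduced in the previous patch owns the handle's lifetime. Pieced together from the vnodeRead changes, the intended flow looks roughly like this (an illustrative fragment, not a complete function; error handling elided):

    void **handle = qRegisterQInfo(pVnode->qMgmt, (uint64_t)pQInfo);
    if (handle == NULL) {
      qDestroyQueryInfo(pQInfo);  /* never entered the pool: destroy directly */
    } else {
      dnodePutItemIntoReadQueue(pVnode, *handle);            /* execute */
      qReleaseQInfo(pVnode->qMgmt, (void **)&handle, false); /* keep alive for retrieve */
    }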
--- src/client/src/tscFunctionImpl.c | 2 +- src/client/src/tscSQLParser.c | 1 - src/client/src/tscSubquery.c | 6 +- src/client/src/tscUtil.c | 2 +- src/query/src/qExecutor.c | 108 +++++++++++++------------------ src/query/src/qtokenizer.c | 9 +-- src/query/src/qtsbuf.c | 18 +++--- 7 files changed, 63 insertions(+), 83 deletions(-) diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index bc37c16187..06e5ff73bf 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -3945,7 +3945,7 @@ static void ts_comp_finalize(SQLFunctionCtx *pCtx) { tsBufFlush(pTSbuf); strcpy(pCtx->aOutputBuf, pTSbuf->path); - tsBufDestory(pTSbuf); + tsBufDestroy(pTSbuf); doFinalizer(pCtx); } diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 65e2c976e0..44d10ec2c4 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -175,7 +175,6 @@ static int32_t handlePassword(SSqlCmd* pCmd, SSQLToken* pPwd) { return TSDB_CODE_SUCCESS; } -// todo handle memory leak in error handle function int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { if (pInfo == NULL || pSql == NULL || pSql->signature != pSql) { return TSDB_CODE_TSC_APP_ERROR; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 044a2aba9d..6c3580bad4 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -152,8 +152,8 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ tsBufFlush(output1); tsBufFlush(output2); - tsBufDestory(pSupporter1->pTSBuf); - tsBufDestory(pSupporter2->pTSBuf); + tsBufDestroy(pSupporter1->pTSBuf); + tsBufDestroy(pSupporter2->pTSBuf); tscDebug("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " for secondary query after ts blocks " "intersecting, skey:%" PRId64 ", ekey:%" PRId64, pSql, numOfInput1, numOfInput2, output1->numOfTotal, @@ -762,7 +762,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); tsBufMerge(pSupporter->pTSBuf, pBuf, pTableMetaInfo->vgroupIndex); - tsBufDestory(pBuf); + tsBufDestroy(pBuf); } // continue to retrieve ts-comp data from vnode diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index dea41d2932..0ea95a9979 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1544,7 +1544,7 @@ static void freeQueryInfoImpl(SQueryInfo* pQueryInfo) { pQueryInfo->groupbyExpr.columnInfo = NULL; } - pQueryInfo->tsBuf = tsBufDestory(pQueryInfo->tsBuf); + pQueryInfo->tsBuf = tsBufDestroy(pQueryInfo->tsBuf); tfree(pQueryInfo->fillVal); } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index f556785824..75fe4e14e1 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -120,6 +120,7 @@ static UNUSED_FUNC void* u_calloc(size_t num, size_t __size) { #define GET_TABLEGROUP(q, _index) ((SArray*) taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index))) static void setQueryStatus(SQuery *pQuery, int8_t status); +static void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv); #define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->intervalTime > 0) @@ -838,6 +839,7 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas sas->data = calloc(pQuery->numOfCols, POINTER_BYTES); if (sas->data == NULL) { + finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } @@ -900,6 +902,7 @@ 
static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutput, sizeof(SArithmeticSupport)); if (sasArray == NULL) { + finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } @@ -1146,6 +1149,7 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutput, sizeof(SArithmeticSupport)); if (sasArray == NULL) { + finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } @@ -1597,7 +1601,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { tsdbCleanupQueryHandle(pRuntimeEnv->pQueryHandle); tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle); - pRuntimeEnv->pTSBuf = tsBufDestory(pRuntimeEnv->pTSBuf); + pRuntimeEnv->pTSBuf = tsBufDestroy(pRuntimeEnv->pTSBuf); } #define IS_QUERY_KILLED(_q) ((_q)->code == TSDB_CODE_TSC_QUERY_CANCELLED) @@ -2225,8 +2229,10 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; while (tsdbNextDataBlock(pQueryHandle)) { summary->totalBlocks += 1; + if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) { - return 0; + finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query + longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo); @@ -3281,7 +3287,8 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) { // check if query is killed or not if (IS_QUERY_KILLED(pQInfo)) { - return; + finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query + longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } } @@ -3926,7 +3933,8 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; while (tsdbNextDataBlock(pQueryHandle)) { if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) { - return; + finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query + longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo); @@ -4133,7 +4141,10 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQuery *pQuery = pQInfo->runtimeEnv.pQuery; + pQuery->precision = tsdbGetCfg(tsdb)->precision; + pRuntimeEnv->topBotQuery = isTopBottomQuery(pQuery); + pRuntimeEnv->hasTagResults = hasTagValOutput(pQuery); setScanLimitationByResultBuffer(pQuery); changeExecuteScanOrder(pQInfo, false); @@ -4211,10 +4222,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo pQuery->fillType, pColInfo); } - // todo refactor - pRuntimeEnv->topBotQuery = isTopBottomQuery(pQuery); - pRuntimeEnv->hasTagResults = hasTagValOutput(pQuery); - setQueryStatus(pQuery, QUERY_NOT_COMPLETED); return TSDB_CODE_SUCCESS; } @@ -4243,7 +4250,8 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { while (tsdbNextDataBlock(pQueryHandle)) { summary->totalBlocks += 1; if (IS_QUERY_KILLED(pQInfo)) { - break; + finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query + longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo); @@ -4527,7 +4535,8 @@ static void sequentialTableProcess(SQInfo *pQInfo) { while 
(pQInfo->tableIndex < pQInfo->tableqinfoGroupInfo.numOfTables) { if (IS_QUERY_KILLED(pQInfo)) { - return; + finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query + longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } pQuery->current = taosArrayGetP(group, pQInfo->tableIndex); @@ -4723,7 +4732,8 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { // query error occurred or query is killed, abort current execution if (pQInfo->code != TSDB_CODE_SUCCESS || IS_QUERY_KILLED(pQInfo)) { qDebug("QInfo:%p query killed or error occurred, code:%s, abort", pQInfo, tstrerror(pQInfo->code)); - return; + finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query + longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } // close all time window results @@ -4745,7 +4755,8 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { if (pQInfo->code != TSDB_CODE_SUCCESS || IS_QUERY_KILLED(pQInfo)) { qDebug("QInfo:%p query killed or error occurred, code:%s, abort", pQInfo, tstrerror(pQInfo->code)); - return; + finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query + longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } if (QUERY_IS_INTERVAL_QUERY(pQuery) || isSumAvgRateQuery(pQuery)) { @@ -4784,7 +4795,8 @@ static void tableFixedOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) finalizeQueryResult(pRuntimeEnv); if (IS_QUERY_KILLED(pQInfo)) { - return; + finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query + longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } // since the numOfRows must be identical for all sql functions that are allowed to be executed simutaneously. @@ -4816,10 +4828,6 @@ static void tableMultiOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) scanOneTableDataBlocks(pRuntimeEnv, pQuery->current->lastKey); finalizeQueryResult(pRuntimeEnv); - if (IS_QUERY_KILLED(pQInfo)) { - return; - } - pQuery->rec.rows = getNumOfResult(pRuntimeEnv); if (pQuery->limit.offset > 0 && pQuery->numOfFilterCols > 0 && pQuery->rec.rows > 0) { skipResults(pRuntimeEnv); @@ -4864,10 +4872,6 @@ static void tableIntervalProcessImpl(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) while (1) { scanOneTableDataBlocks(pRuntimeEnv, start); - if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) { - return; - } - assert(!Q_STATUS_EQUAL(pQuery->status, QUERY_NOT_COMPLETED)); finalizeQueryResult(pRuntimeEnv); @@ -5788,7 +5792,7 @@ static bool isValidQInfo(void *param) { return (sig == (uint64_t)pQInfo); } -static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *pQInfo, bool isSTable, void* param, _qinfo_free_fn_t fn) { +static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *pQInfo, bool isSTable, void* param) { int32_t code = TSDB_CODE_SUCCESS; SQuery *pQuery = pQInfo->runtimeEnv.pQuery; @@ -5813,7 +5817,6 @@ static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQ } pQInfo->param = param; - pQInfo->freeFn = fn; if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) { qDebug("QInfo:%p no table qualified for tag filter, abort query", pQInfo); @@ -5995,8 +5998,7 @@ typedef struct SQueryMgmt { pthread_mutex_t lock; } SQueryMgmt; -int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, void* param, _qinfo_free_fn_t fn, - qinfo_t* pQInfo) { +int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, void* param, qinfo_t* pQInfo) { assert(pQueryMsg != NULL && tsdb != NULL); int32_t code = 
TSDB_CODE_SUCCESS; @@ -6086,7 +6088,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, vo goto _over; } - code = initQInfo(pQueryMsg, tsdb, vgId, *pQInfo, isSTableQuery, param, fn); + code = initQInfo(pQueryMsg, tsdb, vgId, *pQInfo, isSTableQuery, param); _over: free(tagCond); @@ -6105,43 +6107,25 @@ _over: if (code != TSDB_CODE_SUCCESS) { *pQInfo = NULL; } else { - SQInfo* pq = (SQInfo*) (*pQInfo); +// SQInfo* pq = (SQInfo*) (*pQInfo); - T_REF_INC(pq); - T_REF_INC(pq); +// T_REF_INC(pq); +// T_REF_INC(pq); } // if failed to add ref for all meters in this query, abort current query return code; } -static void doDestoryQueryInfo(SQInfo* pQInfo) { - assert(pQInfo != NULL); - qDebug("QInfo:%p query completed", pQInfo); - queryCostStatis(pQInfo); // print the query cost summary - freeQInfo(pQInfo); -} - void qDestroyQueryInfo(qinfo_t qHandle) { SQInfo* pQInfo = (SQInfo*) qHandle; if (!isValidQInfo(pQInfo)) { return; } - int32_t ref = T_REF_DEC(pQInfo); - qDebug("QInfo:%p dec refCount, value:%d", pQInfo, ref); - - if (ref == 0) { - _qinfo_free_fn_t freeFp = pQInfo->freeFn; - void* param = pQInfo->param; - - doDestoryQueryInfo(pQInfo); - if (freeFp != NULL) { - assert(param != NULL); - freeFp(param); - } - - } + qDebug("QInfo:%p query completed", pQInfo); + queryCostStatis(pQInfo); // print the query cost summary + freeQInfo(pQInfo); } void qTableQuery(qinfo_t qinfo) { @@ -6154,29 +6138,22 @@ void qTableQuery(qinfo_t qinfo) { if (IS_QUERY_KILLED(pQInfo)) { qDebug("QInfo:%p it is already killed, abort", pQInfo); - sem_post(&pQInfo->dataReady); - qDestroyQueryInfo(pQInfo); return; } if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) { qDebug("QInfo:%p no table exists for query, abort", pQInfo); - sem_post(&pQInfo->dataReady); - qDestroyQueryInfo(pQInfo); return; } - int32_t ret = setjmp(pQInfo->runtimeEnv.env); - // error occurs, record the error code and return to client + int32_t ret = setjmp(pQInfo->runtimeEnv.env); if (ret != TSDB_CODE_SUCCESS) { pQInfo->code = ret; - qDebug("QInfo:%p query abort due to error occurs, code:%s", pQInfo, tstrerror(pQInfo->code)); + qDebug("QInfo:%p query abort due to error/cancel occurs, code:%s", pQInfo, tstrerror(pQInfo->code)); sem_post(&pQInfo->dataReady); - qDestroyQueryInfo(pQInfo); - return; } @@ -6203,7 +6180,6 @@ void qTableQuery(qinfo_t qinfo) { } sem_post(&pQInfo->dataReady); - qDestroyQueryInfo(pQInfo); } int32_t qRetrieveQueryResultInfo(qinfo_t qinfo) { @@ -6247,7 +6223,7 @@ bool qHasMoreResultsToRetrieve(qinfo_t qinfo) { } if (ret) { - T_REF_INC(pQInfo); +// T_REF_INC(pQInfo); qDebug("QInfo:%p has more results waits for client retrieve", pQInfo); } @@ -6304,7 +6280,6 @@ int32_t qKillQuery(qinfo_t qinfo) { } setQueryKilled(pQInfo); - qDestroyQueryInfo(pQInfo); return TSDB_CODE_SUCCESS; } @@ -6449,6 +6424,7 @@ void freeqinfoFn(void *qhandle) { } qKillQuery(*handle); + qDestroyQueryInfo(*handle); } void* qOpenQueryMgmt(int32_t vgId) { @@ -6467,7 +6443,11 @@ void* qOpenQueryMgmt(int32_t vgId) { return pQueryHandle; } -void qSetQueryMgmtClosed(void* pQMgmt) { +static void queryMgmtKillQueryFn(void* handle) { + qKillQuery(handle); +} + +void qQueryMgmtNotifyClosed(void* pQMgmt) { if (pQMgmt == NULL) { return; } @@ -6479,7 +6459,7 @@ void qSetQueryMgmtClosed(void* pQMgmt) { pQueryMgmt->closed = true; pthread_mutex_unlock(&pQueryMgmt->lock); - taosCacheRefresh(pQueryMgmt->qinfoPool, freeqinfoFn); + taosCacheRefresh(pQueryMgmt->qinfoPool, queryMgmtKillQueryFn); } void qCleanupQueryMgmt(void* pQMgmt) { diff --git 
a/src/query/src/qtokenizer.c b/src/query/src/qtokenizer.c index 80d59a384e..784dd9af67 100644 --- a/src/query/src/qtokenizer.c +++ b/src/query/src/qtokenizer.c @@ -509,10 +509,11 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenType) { for (i = 1; isdigit(z[i]); i++) { } - /* here is the 1a/2s/3m/9y */ - if ((z[i] == 'a' || z[i] == 's' || z[i] == 'm' || z[i] == 'h' || z[i] == 'd' || z[i] == 'n' || z[i] == 'y' || - z[i] == 'w' || z[i] == 'A' || z[i] == 'S' || z[i] == 'M' || z[i] == 'H' || z[i] == 'D' || z[i] == 'N' || - z[i] == 'Y' || z[i] == 'W') && + /* here is the 1u/1a/2s/3m/9y */ + if ((z[i] == 'u' || z[i] == 'a' || z[i] == 's' || z[i] == 'm' || z[i] == 'h' || z[i] == 'd' || z[i] == 'n' || + z[i] == 'y' || z[i] == 'w' || + z[i] == 'U' || z[i] == 'A' || z[i] == 'S' || z[i] == 'M' || z[i] == 'H' || z[i] == 'D' || z[i] == 'N' || + z[i] == 'Y' || z[i] == 'W') && (isIdChar[(uint8_t)z[i + 1]] == 0)) { *tokenType = TK_VARIABLE; i += 1; diff --git a/src/query/src/qtsbuf.c b/src/query/src/qtsbuf.c index fedaa315d3..b84fbded38 100644 --- a/src/query/src/qtsbuf.c +++ b/src/query/src/qtsbuf.c @@ -79,7 +79,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { pTSBuf->numOfAlloc = header.numOfVnode; STSVnodeBlockInfoEx* tmp = realloc(pTSBuf->pData, sizeof(STSVnodeBlockInfoEx) * pTSBuf->numOfAlloc); if (tmp == NULL) { - tsBufDestory(pTSBuf); + tsBufDestroy(pTSBuf); return NULL; } @@ -92,7 +92,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { pTSBuf->tsOrder = header.tsOrder; if (pTSBuf->tsOrder != TSDB_ORDER_ASC && pTSBuf->tsOrder != TSDB_ORDER_DESC) { // tscError("invalid order info in buf:%d", pTSBuf->tsOrder); - tsBufDestory(pTSBuf); + tsBufDestroy(pTSBuf); return NULL; } @@ -100,7 +100,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { STSVnodeBlockInfo* buf = (STSVnodeBlockInfo*)calloc(1, infoSize); if (buf == NULL) { - tsBufDestory(pTSBuf); + tsBufDestroy(pTSBuf); return NULL; } @@ -120,7 +120,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { struct stat fileStat; if (fstat(fileno(pTSBuf->f), &fileStat) != 0) { - tsBufDestory(pTSBuf); + tsBufDestroy(pTSBuf); return NULL; } @@ -137,7 +137,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { return pTSBuf; } -void* tsBufDestory(STSBuf* pTSBuf) { +void* tsBufDestroy(STSBuf* pTSBuf) { if (pTSBuf == NULL) { return NULL; } @@ -920,13 +920,13 @@ static STSBuf* allocResForTSBuf(STSBuf* pTSBuf) { pTSBuf->numOfAlloc = INITIAL_VNODEINFO_SIZE; pTSBuf->pData = calloc(pTSBuf->numOfAlloc, sizeof(STSVnodeBlockInfoEx)); if (pTSBuf->pData == NULL) { - tsBufDestory(pTSBuf); + tsBufDestroy(pTSBuf); return NULL; } pTSBuf->tsData.rawBuf = malloc(MEM_BUF_SIZE); if (pTSBuf->tsData.rawBuf == NULL) { - tsBufDestory(pTSBuf); + tsBufDestroy(pTSBuf); return NULL; } @@ -936,13 +936,13 @@ static STSBuf* allocResForTSBuf(STSBuf* pTSBuf) { pTSBuf->assistBuf = malloc(MEM_BUF_SIZE); if (pTSBuf->assistBuf == NULL) { - tsBufDestory(pTSBuf); + tsBufDestroy(pTSBuf); return NULL; } pTSBuf->block.payload = malloc(MEM_BUF_SIZE); if (pTSBuf->block.payload == NULL) { - tsBufDestory(pTSBuf); + tsBufDestroy(pTSBuf); return NULL; } From 398c3e79884861debef9f5c10a8a7f7fc7414c48 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 11 Jul 2020 14:28:12 +0800 Subject: [PATCH 37/38] [td-225] refactor codes. 
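Together with the tokenizer change in the previous patch, this makes microsecond interval literals ('u') work end to end: getTimestampInUsFromStrImpl now folds the unit directly into microseconds instead of multiplying by 1000 as a final step, which is what previously made a bare 'u' unrepresentable. The conversion rule, as a self-contained sketch (toUs is a hypothetical stand-in for the real function):

    #include <stdint.h>

    /* 1u -> 1 us, 1a -> 1000 us, 1s -> 1000000 us, 1m -> 60000000 us */
    static int64_t toUs(int64_t val, char unit) {
      const int64_t MS = 1000; /* one millisecond in microseconds */
      switch (unit) {
        case 'u': return val;                  /* already in microseconds */
        case 'a': return val * MS;             /* milliseconds */
        case 's': return val * 1000 * MS;
        case 'm': return val * 60 * 1000 * MS;
        case 'h': return val * 3600 * 1000 * MS;
        case 'd': return val * 86400 * 1000 * MS;
        default:  return -1;                   /* 'w', 'n', 'y' elided */
      }
    }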
--- src/common/src/tglobal.c | 2 +- src/common/src/tname.c | 2 +- src/query/inc/qtsbuf.h | 2 +- src/util/src/ttime.c | 20 ++++++++++++-------- 4 files changed, 15 insertions(+), 11 deletions(-) diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 684fb71af9..fd5c6d8c25 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -560,7 +560,7 @@ static void doInitGlobalConfig() { cfg.ptr = &tsMinIntervalTime; cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; - cfg.minValue = 10; + cfg.minValue = 1; cfg.maxValue = 1000000; cfg.ptrLength = 0; cfg.unitType = TAOS_CFG_UTYPE_MS; diff --git a/src/common/src/tname.c b/src/common/src/tname.c index 8b85ecfbc7..15ec68fc66 100644 --- a/src/common/src/tname.c +++ b/src/common/src/tname.c @@ -82,7 +82,7 @@ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, in } int64_t start = ((startTime - intervalTime) / slidingTime + 1) * slidingTime; - if (!(timeUnit == 'a' || timeUnit == 'm' || timeUnit == 's' || timeUnit == 'h')) { + if (!(timeUnit == 'u' || timeUnit == 'a' || timeUnit == 'm' || timeUnit == 's' || timeUnit == 'h')) { /* * here we revised the start time of day according to the local time zone, * but in case of DST, the start time of one day need to be dynamically decided. diff --git a/src/query/inc/qtsbuf.h b/src/query/inc/qtsbuf.h index e437e1c4e5..59b224e096 100644 --- a/src/query/inc/qtsbuf.h +++ b/src/query/inc/qtsbuf.h @@ -107,7 +107,7 @@ STSBuf* tsBufCreate(bool autoDelete, int32_t order); STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete); STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_t len, int32_t tsOrder); -void* tsBufDestory(STSBuf* pTSBuf); +void* tsBufDestroy(STSBuf* pTSBuf); void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, int64_t tag, const char* pData, int32_t len); int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeIdx); diff --git a/src/util/src/ttime.c b/src/util/src/ttime.c index 6f67c4a136..018c0d640b 100644 --- a/src/util/src/ttime.c +++ b/src/util/src/ttime.c @@ -317,29 +317,34 @@ int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec) { static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* result) { *result = val; + int64_t factor = 1000L; + switch (unit) { case 's': - (*result) *= MILLISECOND_PER_SECOND; + (*result) *= MILLISECOND_PER_SECOND*factor; break; case 'm': - (*result) *= MILLISECOND_PER_MINUTE; + (*result) *= MILLISECOND_PER_MINUTE*factor; break; case 'h': - (*result) *= MILLISECOND_PER_HOUR; + (*result) *= MILLISECOND_PER_HOUR*factor; break; case 'd': - (*result) *= MILLISECOND_PER_DAY; + (*result) *= MILLISECOND_PER_DAY*factor; break; case 'w': - (*result) *= MILLISECOND_PER_WEEK; + (*result) *= MILLISECOND_PER_WEEK*factor; break; case 'n': - (*result) *= MILLISECOND_PER_MONTH; + (*result) *= MILLISECOND_PER_MONTH*factor; break; case 'y': - (*result) *= MILLISECOND_PER_YEAR; + (*result) *= MILLISECOND_PER_YEAR*factor; break; case 'a': + (*result) *= factor; + break; + case 'u': break; default: { ; @@ -348,7 +353,6 @@ static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* resu } /* get the value in microsecond */ - (*result) *= 1000L; return 0; } From f722d06abeb04d730f852a9860099dcafaf18d5f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 11 Jul 2020 14:28:37 +0800 Subject: [PATCH 38/38] [td-225] refactor codes. 
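Mechanical follow-up: the unit tests catch up with the tsBufDestory -> tsBufDestroy rename from the preceding patches. For reference, the lifecycle these cases exercise (a sketch; vnodeId, tag, tsList and len are placeholders):

    STSBuf *buf = tsBufCreate(true /* autoDelete */, TSDB_ORDER_ASC);
    tsBufAppend(buf, vnodeId, tag, (const char *)tsList, len);
    tsBufFlush(buf);         /* push the in-memory block to the backing file */
    buf = tsBufDestroy(buf); /* returns NULL; with autoDelete the file should be removed too */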
--- src/query/tests/tsBufTest.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/query/tests/tsBufTest.cpp b/src/query/tests/tsBufTest.cpp index 1d2d94f3c0..f813261957 100644 --- a/src/query/tests/tsBufTest.cpp +++ b/src/query/tests/tsBufTest.cpp @@ -47,7 +47,7 @@ void simpleTest() { EXPECT_EQ(pTSBuf->tsData.len, 0); EXPECT_EQ(pTSBuf->block.numOfElem, num); - tsBufDestory(pTSBuf); + tsBufDestroy(pTSBuf); } // one large list of ts, the ts list need to be split into several small blocks @@ -71,7 +71,7 @@ void largeTSTest() { EXPECT_EQ(pTSBuf->tsData.len, 0); EXPECT_EQ(pTSBuf->block.numOfElem, num); - tsBufDestory(pTSBuf); + tsBufDestroy(pTSBuf); } void multiTagsTest() { @@ -101,7 +101,7 @@ void multiTagsTest() { EXPECT_EQ(pTSBuf->tsData.len, 0); EXPECT_EQ(pTSBuf->block.numOfElem, num); - tsBufDestory(pTSBuf); + tsBufDestroy(pTSBuf); } void multiVnodeTagsTest() { @@ -139,7 +139,7 @@ void multiVnodeTagsTest() { EXPECT_EQ(pTSBuf->tsData.len, 0); EXPECT_EQ(pTSBuf->block.numOfElem, num); - tsBufDestory(pTSBuf); + tsBufDestroy(pTSBuf); } void loadDataTest() { @@ -386,8 +386,8 @@ void mergeDiffVnodeBufferTest() { tsBufDisplay(pTSBuf1); - tsBufDestory(pTSBuf2); - tsBufDestory(pTSBuf1); + tsBufDestroy(pTSBuf2); + tsBufDestroy(pTSBuf1); } void mergeIdenticalVnodeBufferTest() { @@ -432,8 +432,8 @@ void mergeIdenticalVnodeBufferTest() { printf("%d-%" PRIu64 "-%" PRIu64 "\n", elem.vnode, elem.tag, elem.ts); } - tsBufDestory(pTSBuf1); - tsBufDestory(pTSBuf2); + tsBufDestroy(pTSBuf1); + tsBufDestroy(pTSBuf2); } } // namespace
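The tsdbRead change at the top of this range applies the same allocate-once idea as the cache rework: the default load-column list is built once per query handle instead of once per data block. The shown hunks contain only the reuse and the cleanup; the one-time initialization (first line below) is inferred from them:

    /* built once when the query handle is created (inferred) */
    pQueryHandle->defaultLoadColumn = getDefaultLoadColumns(pQueryHandle, true);

    /* every block merge reuses the cached list ... */
    doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock, pQueryHandle->defaultLoadColumn);

    /* ... and it is freed exactly once, in tsdbCleanupQueryHandle() */
    taosArrayDestroy(pQueryHandle->defaultLoadColumn);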