From df5d62ed41b4dd0352cba813a9e193b33aae53b4 Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Fri, 18 Jun 2021 13:26:30 +0800 Subject: [PATCH 01/33] add test case for jdbc nano precision --- .../TimestampPrecisionInNanoInJniTest.java | 618 ++++++++++++++++++ 1 file changed, 618 insertions(+) create mode 100644 src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java new file mode 100644 index 0000000000..d733209f25 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java @@ -0,0 +1,618 @@ +package com.taosdata.jdbc.cases; + + +import com.taosdata.jdbc.TSDBDriver; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.After; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; +import java.text.Format; +import java.text.SimpleDateFormat; + +public class TimestampPrecisionInNanoInJniTest { + + private static final String host = "127.0.0.1"; + private static final String ns_timestamp_db = "ns_precision_test"; + private static final long timestamp1 = System.currentTimeMillis(); + private static final long timestamp2 = timestamp1 * 1000_000 + 123455; + private static final long timestamp3 = (timestamp1 + 10) * 1000_000 + 123456; + private static final Format format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"); + private static final String date1 = format.format(new Date(timestamp1)); + private static final String date4 = format.format(new Date(timestamp1 + 10l)); + private static final String date2 = date1 + "123455"; + private static final String date3 = date4 + "123456"; + + + private static Connection conn; + + @BeforeClass + public static void beforeClass() throws SQLException { + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + conn = DriverManager.getConnection(url, properties); + + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + ns_timestamp_db); + stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'"); + stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)"); + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)"); + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 127)"); + stmt.close(); + } + + @After + public void afterEach() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + ns_timestamp_db); + stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'"); + stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)"); + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)"); + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + 
timestamp2 + ", 127)"); + stmt.close(); + } + + @AfterClass + public static void afterClass() { + try { + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + private void checkCount(long count, ResultSet rs) throws SQLException { + rs.next(); + long test_count = rs.getLong(1); + Assert.assertEquals(count, test_count); + } + + private void checkTime(long ts, ResultSet rs) throws SQLException { + rs.next(); + int nanos = rs.getTimestamp(1).getNanos(); + Assert.assertEquals(ts % 1000_000_000l, nanos); + long test_ts = rs.getLong(1); + Assert.assertEquals(ts / 1000_000l, test_ts); + } + + @Test + public void canInsertTimestampAndQueryByEqualToInDateTypeInBothFirstAndSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + date3 + "'"); + checkTime(timestamp3, rs); + rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + date3 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canImportTimestampAndQueryByEqualToInDateTypeInBothFirstAndSecondCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date1 + "123123\", \"" + date1 + "123123\", 127)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'"); + checkTime(timestamp1 * 1000_000l + 123123l, rs); + rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "123123'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "123123'"); + checkTime(timestamp1 * 1000_000l + 123123l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canInsertTimestampAndQueryByEqualToInNumberTypeInBothFirstAndSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + timestamp2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + timestamp2 + "'"); + checkTime(timestamp2, rs); + rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp2 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canImportTimestampAndQueryByEqualToInNumberTypeInBothFirstAndSecondCol() { + try (Statement stmt = conn.createStatement()) { + long timestamp4 = timestamp1 * 1000_000 + 123123; + stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp4 + ", " + timestamp4 + ", 127)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + 
timestamp4 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'"); + checkTime(timestamp4, rs); + rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp4 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp4 + "'"); + checkTime(timestamp4, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canSelectLastRowFromWeatherForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select last(ts) from " + ns_timestamp_db + ".weather"); + // int nanos = rs.getTimestamp(1).getNanos(); + // Assert.assertEquals(timestamp3 % 1000_000l * 1000, nanos); + // java.lang.AssertionError: expected:<123456000> but was:<468123456> + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canSelectLastRowFromWeatherForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select last(ts2) from " + ns_timestamp_db + ".weather"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canSelectFirstRowFromWeatherForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select first(ts) from " + ns_timestamp_db + ".weather"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canSelectFirstRowFromWeatherForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select first(ts2) from " + ns_timestamp_db + ".weather"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanInDateTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts > '" + date2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts > '" + date2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 > '" + date2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 > '" + date2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanInNumberTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts > '" + timestamp2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts > '" + timestamp2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 > '" + timestamp2 + "'"); + checkCount(1l, rs); + rs = 
stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 > '" + timestamp2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanOrEqualToInDateTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts >= '" + date2 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanOrEqualToInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 >= '" + date2 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanOrEqualToInNumberTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts >= '" + timestamp2 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanOrEqualToInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 >= '" + timestamp2 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanInDateTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts < '" + date3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts < '" + date3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 < '" + date3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 < '" + date3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanInNumberTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts < '" + timestamp3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts < '" + timestamp3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 < '" + timestamp3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 < '" + timestamp3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanOrEqualToInDateTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + 
ns_timestamp_db + ".weather where ts <= '" + date3 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanOrEqualToInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanOrEqualToInNumberTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanOrEqualToInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryBetweenAndInDateTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "' AND ts > '" + date2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "' AND ts > '" + date2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryBetweenAndInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "' AND ts2 > '" + date2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "' AND ts2 > '" + date2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryBetweenAndInNumberTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "' AND ts > '" + timestamp2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "' AND ts > '" + timestamp2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryBetweenAndInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "' AND ts2 > '" + timestamp2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "' AND ts2 > '" + timestamp2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + // @Test + // public void canQueryNotEqualToInDateTypeForFirstCol() { + // try (Statement stmt = conn.createStatement()) { + // ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <> '" + date3 + "'"); + // checkCount(1l, rs); + // rs = stmt.executeQuery("select ts from " + ns_timestamp_db + 
".weather where ts <> '" + date3 + "'"); + // checkTime(timestamp2, rs); + // } catch (SQLException e) { + // e.printStackTrace(); + // } + // } + + @Test + public void canQueryNotEqualToInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <> '" + date3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <> '" + date3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + // @Test + // public void canQueryNotEqualToInNumberTypeForFirstCol() { + // try (Statement stmt = conn.createStatement()) { + // ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <> '" + timestamp3 + "'"); + // checkCount(1l, rs); + // rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts <> '" + timestamp3 + "'"); + // checkTime(timestamp2, rs); + // } catch (SQLException e) { + // e.printStackTrace(); + // } + // } + + @Test + public void canQueryNotEqualToInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <> '" + timestamp3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <> '" + timestamp3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + // @Test + // public void canQueryNotEqualInDateTypeForFirstCol() { + // try (Statement stmt = conn.createStatement()) { + // ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts != '" + date3 + "'"); + // checkCount(1l, rs); + // rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts != '" + date3 + "'"); + // checkTime(timestamp2, rs); + // } catch (SQLException e) { + // e.printStackTrace(); + // } + // } + + @Test + public void canQueryNotEqualInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 != '" + date3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 != '" + date3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + // @Test + // public void canQueryNotEqualInNumberTypeForFirstCol() { + // try (Statement stmt = conn.createStatement()) { + // ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts != '" + timestamp3 + "'"); + // checkCount(1l, rs); + // rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts != '" + timestamp3 + "'"); + // checkTime(timestamp2, rs); + // } catch (SQLException e) { + // e.printStackTrace(); + // } + // } + + @Test + public void canQueryNotEqualInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 != '" + timestamp3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 != '" + timestamp3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void 
canInsertTimestampWithNowAndNsOffsetInBothFirstAndSecondCol(){ + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(now + 1000b, now - 1000b, 128)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather"); + checkCount(3l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canIntervalAndSlidingAcceptNsUnitForFirstCol(){ + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)"); + rs.next(); + long sum = rs.getLong(2); + Assert.assertEquals(127l, sum); + rs.next(); + sum = rs.getLong(2); + Assert.assertEquals(128l, sum); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canIntervalAndSlidingAcceptNsUnitForSecondCol(){ + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts2 >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)"); + rs.next(); + long sum = rs.getLong(2); + Assert.assertEquals(127l, sum); + rs.next(); + sum = rs.getLong(2); + Assert.assertEquals(128l, sum); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testDataOutOfRangeExceptionForFirstCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(123456789012345678, 1234567890123456789, 127)"); + } catch (SQLException e) { + Assert.assertEquals("TDengine ERROR (8000060b): Timestamp data out of range", e.getMessage()); + } + } + + @Test + public void testDataOutOfRangeExceptionForSecondCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(1234567890123456789, 123456789012345678, 127)"); + } catch (SQLException e) { + Assert.assertEquals("TDengine ERROR (8000060b): Timestamp data out of range", e.getMessage()); + } + } + + @Test + public void willAutomaticallyFillToNsUnitWithZerosForFirstCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "', '" + date1 + "', 127)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "000000'"); + checkCount(1l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void willAutomaticallyFillToNsUnitWithZerosForSecondCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "', '" + date1 + "', 127)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "000000'"); + checkCount(1l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void willAutomaticallyDropDigitExceedNsDigitNumberForFirstCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "999999999', '" + date1 + "999999999', 127)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "999999'"); + checkCount(1l, 
rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void willAutomaticallyDropDigitExceedNsDigitNumberForSecondCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "999999999', '" + date1 + "999999999', 127)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "999999'"); + checkCount(1l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} From 5b2d908cb27586af7c2620d1319dcc04392d87f1 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 19 Jun 2021 09:16:35 +0800 Subject: [PATCH 02/33] [TD-4734] session_window and state window support main query --- src/client/src/tscSQLParser.c | 12 +++++++++++- src/query/src/qExecutor.c | 10 ++++++++-- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 9532d1e202..93d94c8711 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -7774,8 +7774,15 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf return code; } } + + int32_t timeWindowQuery = + (TPARSER_HAS_TOKEN(pSqlNode->interval.interval) || TPARSER_HAS_TOKEN(pSqlNode->sessionVal.gap)); - if (validateSelectNodeList(pCmd, pQueryInfo, pSqlNode->pSelNodeList, false, false, false) != TSDB_CODE_SUCCESS) { + if (validateSelectNodeList(pCmd, pQueryInfo, pSqlNode->pSelNodeList, false, false, timeWindowQuery) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + // parse the window_state + if (validateStateWindowNode(pCmd, pQueryInfo, pSqlNode, false) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -7815,6 +7822,9 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf if (validateIntervalNode(pSql, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } else { + if (validateSessionNode(pCmd, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } if (isTimeWindowQuery(pQueryInfo) || pQueryInfo->sessionWindow.gap > 0) { // check if the first column of the nest query result is timestamp column SColumn* pCol = taosArrayGetP(pQueryInfo->colList, 0); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 7b106c178d..f4c0a394d6 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1751,7 +1751,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf case OP_SessionWindow: { pRuntimeEnv->proot = createSWindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); - setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); + int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType; + if (opType != OP_DummyInput) { + setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); + } break; } case OP_MultiTableAggregate: { @@ -1787,7 +1790,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf } case OP_StateWindow: { pRuntimeEnv->proot = createStatewindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); - setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); + int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType; + if (opType != OP_DummyInput) { + 
setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); + } break; } From 998c5f18421a2cb5fd31c3b7e1187c438864fb56 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Sat, 19 Jun 2021 16:09:41 +0800 Subject: [PATCH 03/33] [TD-4735]:support select last_row from subquery --- src/client/src/tscSQLParser.c | 11 +++++++++-- tests/script/general/parser/lastrow.sim | 15 ++++++++++++++- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 9532d1e202..d7fbdd3ff8 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2125,7 +2125,10 @@ void setResultColName(char* name, tSqlExprItem* pItem, int32_t functionId, SStrT } static void updateLastScanOrderIfNeeded(SQueryInfo* pQueryInfo) { - if (pQueryInfo->sessionWindow.gap > 0 || tscGroupbyColumn(pQueryInfo)) { + if (pQueryInfo->sessionWindow.gap > 0 || + pQueryInfo->stateWindow || + taosArrayGetSize(pQueryInfo->pUpstream) > 0 || + tscGroupbyColumn(pQueryInfo)) { size_t numOfExpr = tscNumOfExprs(pQueryInfo); for (int32_t i = 0; i < numOfExpr; ++i) { SExprInfo* pExpr = tscExprGet(pQueryInfo, i); @@ -2385,7 +2388,9 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col // NOTE: has time range condition or normal column filter condition, the last_row query will be transferred to last query SConvertFunc cvtFunc = {.originFuncId = functionId, .execFuncId = functionId}; - if (functionId == TSDB_FUNC_LAST_ROW && ((!TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER)) || (hasNormalColumnFilter(pQueryInfo)))) { + if (functionId == TSDB_FUNC_LAST_ROW && ((!TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER)) || + (hasNormalColumnFilter(pQueryInfo)) || + taosArrayGetSize(pQueryInfo->pUpstream)>0)) { cvtFunc.execFuncId = TSDB_FUNC_LAST; } @@ -7836,6 +7841,8 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf if ((code = doFunctionsCompatibleCheck(pCmd, pQueryInfo, tscGetErrorMsgPayload(pCmd))) != TSDB_CODE_SUCCESS) { return code; } + + updateLastScanOrderIfNeeded(pQueryInfo); } else { pQueryInfo->command = TSDB_SQL_SELECT; diff --git a/tests/script/general/parser/lastrow.sim b/tests/script/general/parser/lastrow.sim index 2b8f294d5d..fea322ec16 100644 --- a/tests/script/general/parser/lastrow.sim +++ b/tests/script/general/parser/lastrow.sim @@ -70,4 +70,17 @@ sleep 100 run general/parser/lastrow_query.sim -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +print =================== last_row + nested query +sql use $db +sql create table lr_nested(ts timestamp, f int) +sql insert into lr_nested values(now, 1) +sql insert into lr_nested values(now+1s, null) +sql select last_row(*) from (select * from lr_nested) +if $rows != 1 then + return -1 +endi +if $data01 != NULL then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT From 5e82ff44cb423198a1647b519f6043bcb1a588eb Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Sat, 19 Jun 2021 16:29:40 +0800 Subject: [PATCH 04/33] [TD-4783]:support timestamp filter expression on master query --- src/client/src/tscSQLParser.c | 11 ++++++----- tests/script/general/parser/where.sim | 15 ++++++++++++++- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 9532d1e202..775b5f9022 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -4798,6 +4798,12 @@ int32_t 
validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq if ((ret = getColumnQueryCondInfo(&pSql->cmd, pQueryInfo, condExpr.pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) { goto PARSE_WHERE_EXIT; } + + if (taosArrayGetSize(pQueryInfo->pUpstream) > 0 ) { + if ((ret = getColumnQueryCondInfo(&pSql->cmd, pQueryInfo, condExpr.pTimewindow, TK_AND)) != TSDB_CODE_SUCCESS) { + goto PARSE_WHERE_EXIT; + } + } // 6. join condition if ((ret = getJoinCondInfo(&pSql->cmd, pQueryInfo, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) { @@ -7804,11 +7810,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } - - if (pTableMeta->tableInfo.precision == TSDB_TIME_PRECISION_MILLI) { - pQueryInfo->window.skey = pQueryInfo->window.skey / 1000; - pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000; - } } // validate the interval info diff --git a/tests/script/general/parser/where.sim b/tests/script/general/parser/where.sim index 6dfea3d2e7..fbd59aafba 100644 --- a/tests/script/general/parser/where.sim +++ b/tests/script/general/parser/where.sim @@ -352,5 +352,18 @@ if $rows != 0 then return -1 endi - +print ==========================> td-4783 +sql create table where_ts(ts timestamp, f int) +sql insert into where_ts values('2021-06-19 16:22:00', 1); +sql insert into where_ts values('2021-06-19 16:23:00', 2); +sql insert into where_ts values('2021-06-19 16:24:00', 3); +sql insert into where_ts values('2021-06-19 16:25:00', 1); +sql select * from (select * from where_ts) where ts<'2021-06-19 16:25:00' and ts>'2021-06-19 16:22:00' +if $row != 2 then + return -1 +endi +print $data00, $data01 +if $data01 != 2 then + return -1 +endi system sh/exec.sh -n dnode1 -s stop -x SIGINT From 65eeca1964fda463446eef892cbe1efd81206255 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sun, 20 Jun 2021 21:51:15 +0800 Subject: [PATCH 05/33] [TD-4734] session_window and state window support main query --- src/client/src/tscSQLParser.c | 11 +++++-- tests/pytest/query/querySession.py | 49 ++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+), 3 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 93d94c8711..3a8e2407c0 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -7792,6 +7792,12 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf int32_t f = pExpr->base.functionId; if (f == TSDB_FUNC_STDDEV || f == TSDB_FUNC_PERCT) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); + } + + if ((timeWindowQuery || pQueryInfo->stateWindow) && f == TSDB_FUNC_LAST) { + pExpr->base.numOfParams = 1; + pExpr->base.param[0].i64 = TSDB_ORDER_ASC; + pExpr->base.param[0].nType = TSDB_DATA_TYPE_INT; } } @@ -7825,7 +7831,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf if (validateSessionNode(pCmd, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } - if (isTimeWindowQuery(pQueryInfo) || pQueryInfo->sessionWindow.gap > 0) { + if (isTimeWindowQuery(pQueryInfo)) { // check if the first column of the nest query result is timestamp column SColumn* pCol = taosArrayGetP(pQueryInfo->colList, 0); if (pCol->info.type != TSDB_DATA_TYPE_TIMESTAMP) { @@ -7934,8 +7940,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf return TSDB_CODE_TSC_INVALID_OPERATION; } - if 
((isTimeWindowQuery(pQueryInfo) || pQueryInfo->sessionWindow.gap > 0) && - (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) { + if (isTimeWindowQuery(pQueryInfo) && (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) { return TSDB_CODE_TSC_INVALID_OPERATION; } diff --git a/tests/pytest/query/querySession.py b/tests/pytest/query/querySession.py index 620f755bcb..216ff68b71 100644 --- a/tests/pytest/query/querySession.py +++ b/tests/pytest/query/querySession.py @@ -51,38 +51,73 @@ class TDTestCase: tdSql.checkRows(15) tdSql.checkData(0, 1, 2) + # session(ts,5a) main query + tdSql.query("select count(*) from (select * from dev_001) session(ts,5a)") + tdSql.checkRows(15) + tdSql.checkData(0, 1, 2) + # session(ts,1s) tdSql.query("select count(*) from dev_001 session(ts,1s)") tdSql.checkRows(12) tdSql.checkData(0, 1, 5) + # session(ts,1s) main query + tdSql.query("select count(*) from (select * from dev_001) session(ts,1s)") + tdSql.checkRows(12) + tdSql.checkData(0, 1, 5) + tdSql.query("select count(*) from dev_001 session(ts,1000a)") tdSql.checkRows(12) tdSql.checkData(0, 1, 5) + tdSql.query("select count(*) from (select * from dev_001) session(ts,1000a)") + tdSql.checkRows(12) + tdSql.checkData(0, 1, 5) + # session(ts,1m) tdSql.query("select count(*) from dev_001 session(ts,1m)") tdSql.checkRows(9) tdSql.checkData(0, 1, 8) + # session(ts,1m) + tdSql.query("select count(*) from (select * from dev_001) session(ts,1m)") + tdSql.checkRows(9) + tdSql.checkData(0, 1, 8) + # session(ts,1h) tdSql.query("select count(*) from dev_001 session(ts,1h)") tdSql.checkRows(6) tdSql.checkData(0, 1, 11) + # session(ts,1h) + tdSql.query("select count(*) from (select * from dev_001) session(ts,1h)") + tdSql.checkRows(6) + tdSql.checkData(0, 1, 11) + # session(ts,1d) tdSql.query("select count(*) from dev_001 session(ts,1d)") tdSql.checkRows(4) tdSql.checkData(0, 1, 13) + # session(ts,1d) + tdSql.query("select count(*) from (select * from dev_001) session(ts,1d)") + tdSql.checkRows(4) + tdSql.checkData(0, 1, 13) + # session(ts,1w) tdSql.query("select count(*) from dev_001 session(ts,1w)") tdSql.checkRows(2) tdSql.checkData(0, 1, 15) + # session(ts,1w) + tdSql.query("select count(*) from (select * from dev_001) session(ts,1w)") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 15) + # session with where tdSql.query("select count(*),first(tagtype),last(tagtype),avg(tagtype),sum(tagtype),min(tagtype),max(tagtype),leastsquares(tagtype, 1, 1),spread(tagtype),stddev(tagtype),percentile(tagtype,0) from dev_001 where ts <'2020-05-20 0:0:0' session(ts,1d)") + tdSql.checkRows(2) tdSql.checkData(0, 1, 13) tdSql.checkData(0, 2, 1) @@ -97,6 +132,20 @@ class TDTestCase: tdSql.checkData(0, 11, 1) tdSql.checkData(1, 11, 14) + # session with where main + + tdSql.query("select count(*),first(tagtype),last(tagtype),avg(tagtype),sum(tagtype),min(tagtype),max(tagtype),leastsquares(tagtype, 1, 1) from (select * from dev_001 where ts <'2020-05-20 0:0:0') session(ts,1d)") + + tdSql.checkRows(2) + tdSql.checkData(0, 1, 13) + tdSql.checkData(0, 2, 1) + tdSql.checkData(0, 3, 13) + tdSql.checkData(0, 4, 7) + tdSql.checkData(0, 5, 91) + tdSql.checkData(0, 6, 1) + tdSql.checkData(0, 7, 13) + tdSql.checkData(0, 8, '{slop:1.000000, intercept:0.000000}') + # tdsql err tdSql.error("select * from dev_001 session(ts,1w)") tdSql.error("select count(*) from st session(ts,1w)") From c1cd4079db810ccb700eddcaeb2ed31d27b1b826 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 21 Jun 2021 
00:24:46 +0800 Subject: [PATCH 06/33] [TD-4734] session_window and state window support main query --- src/client/src/tscSQLParser.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 3a8e2407c0..b4115e5842 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -7785,7 +7785,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf if (validateStateWindowNode(pCmd, pQueryInfo, pSqlNode, false) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } - // todo NOT support yet for(int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) { SExprInfo* pExpr = tscExprGet(pQueryInfo, i); From 40cb2f55bb9e53353e24ac1e12855e95d8eb6ced Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Mon, 21 Jun 2021 10:13:27 +0800 Subject: [PATCH 07/33] change performance test tool to optimize write test --- tests/comparisonTest/tdengine/tdengineTest.c | 84 ++++++++++++++++++-- 1 file changed, 78 insertions(+), 6 deletions(-) diff --git a/tests/comparisonTest/tdengine/tdengineTest.c b/tests/comparisonTest/tdengine/tdengineTest.c index d1cf3a1f98..0de419e036 100644 --- a/tests/comparisonTest/tdengine/tdengineTest.c +++ b/tests/comparisonTest/tdengine/tdengineTest.c @@ -181,8 +181,8 @@ void writeDataImp(void *param) { if (lastMachineid != machineid) { lastMachineid = machineid; - sqlLen += sprintf(sql + sqlLen, " dev%d using devices tags(%d,'%s',%d) values", - machineid, machineid, machinename, machinegroup); + sqlLen += sprintf(sql + sqlLen, " dev%d values", + machineid); } sqlLen += sprintf(sql + sqlLen, "(%" PRId64 ",%d,%f)", timestamp, temperature, humidity); @@ -192,7 +192,8 @@ void writeDataImp(void *param) { result = taos_query(taos, sql); code = taos_errno(result); if (code != 0) { - printf("thread:%d error:%d reason:%s\n", pThread->threadId, code, taos_errstr(taos)); + printf("insert into dev%d values (%" PRId64 ",%d,%f)\n",machineid, timestamp, temperature, humidity); + printf("thread:%d error:%d reason:%s\n", pThread->threadId, code, taos_errstr(result)); } taos_free_result(result); @@ -210,6 +211,7 @@ void writeDataImp(void *param) { result = taos_query(taos, sql); code = taos_errno(result); if (code != 0) { + // printf("insert into dev%d using devices tags(%d,'%s',%d) values (%" PRId64 ",%d,%f)",machineid, machineid, machinename, machinegroup, timestamp, temperature, humidity); printf("thread:%d error:%d reason:%s\n", pThread->threadId, code, taos_errstr(taos)); } taos_free_result(result); @@ -246,7 +248,7 @@ void writeData() { taos_free_result(result); result = taos_query(taos, - "create table if not exists db.devices(ts timestamp, temperature int, humidity float) " + "create stable if not exists db.devices(ts timestamp, temperature int, humidity float) " "tags(devid int, devname binary(16), devgroup int)"); code = taos_errno(result); if (code != 0) { @@ -254,6 +256,77 @@ void writeData() { } taos_free_result(result); + //create tables before insert the data + result = taos_query(taos, "use db"); + code = taos_errno(result); + if (code != 0) { + taos_error(result, taos); + } + taos_free_result(result); + + char *sql = calloc(1, 8*1024*1024); + int sqlLen = 0; + int lastMachineid = 0; + int counter = 0; + int totalRecords = 0; + for (int i = 0; i < arguments.filesNum; i++) { + char fileName[300]; + sprintf(fileName, "%s/testdata%d.csv", arguments.dataDir, i); + + FILE *fp = fopen(fileName, "r"); + if (fp == NULL) { + printf("failed to open file %s\n", fileName); + exit(1); + } + 
printf("open file %s success\n", fileName); + + char *line = NULL; + size_t len = 0; + while (!feof(fp)) { + free(line); + line = NULL; + len = 0; + + getline(&line, &len, fp); + if (line == NULL) break; + + if (strlen(line) < 10) continue; + + int machineid; + char machinename[16]; + int machinegroup; + int64_t timestamp; + int temperature; + float humidity; + sscanf(line, "%d%s%d%" PRId64 "%d%f", &machineid, machinename, &machinegroup, ×tamp, &temperature, &humidity); + + if (counter == 0) { + sqlLen = sprintf(sql, "create table if not exists"); + } + + if (lastMachineid != machineid) { + lastMachineid = machineid; + sqlLen += sprintf(sql + sqlLen, " dev%d using devices tags(%d,'%s',%d)", machineid, machineid, machinename, machinegroup); + } + counter++; + + if (counter >= arguments.rowsPerRequest) { + result = taos_query(taos, sql); + code = taos_errno(result); + if (code != 0) { + printf("create table error:%d reason:%s\n", code, taos_errstr(result)); + } + taos_free_result(result); + + totalRecords += counter; + counter = 0; + lastMachineid = -1; + sqlLen = 0; + } + } + fclose(fp); + } + int64_t st = getTimeStampMs(); int a = arguments.filesNum / arguments.clients; @@ -379,5 +452,4 @@ void readData() { } free(threads); -} - +} \ No newline at end of file From 3dc8eca0148bf1783a3783121d6a54bf6f60d053 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 21 Jun 2021 10:18:05 +0800 Subject: [PATCH 08/33] [td-4754] --- src/client/inc/tscUtil.h | 3 +- src/client/src/tscSQLParser.c | 38 +- src/client/src/tscServer.c | 1 + src/client/src/tscSubquery.c | 17 +- src/client/src/tscUtil.c | 34 +- src/query/inc/qAggMain.h | 2 +- src/query/inc/qExecutor.h | 2 +- src/query/inc/sql.y | 2 +- src/query/src/qAggMain.c | 995 +--------------------- src/query/src/qExecutor.c | 180 ++-- src/query/src/sql.c | 50 +- tests/script/general/parser/groupby.sim | 134 +-- tests/script/general/parser/having.sim | 2 +- tests/script/general/parser/testSuite.sim | 1 - 14 files changed, 311 insertions(+), 1150 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 35f3b42811..950d81fb61 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -210,7 +210,8 @@ int32_t createProjectionExpr(SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaI void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta); SColumn* tscColumnClone(const SColumn* src); -bool tscColumnExists(SArray* pColumnList, int32_t columnIndex, uint64_t uid); +void tscColumnCopy(SColumn* pDest, const SColumn* pSrc); +int32_t tscColumnExists(SArray* pColumnList, int32_t columnId, uint64_t uid); SColumn* tscColumnListInsert(SArray* pColumnList, int32_t columnIndex, uint64_t uid, SSchema* pSchema); void tscColumnListDestroy(SArray* pColList); void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 0ab7c38186..cf94aaae35 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1668,7 +1668,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32 // arithmetic expression always return result in the format of double float pExprInfo->base.resBytes = sizeof(double); - pExprInfo->base.interBytes = sizeof(double); + pExprInfo->base.interBytes = 0; pExprInfo->base.resType = TSDB_DATA_TYPE_DOUBLE; pExprInfo->base.functionId = TSDB_FUNC_ARITHM; @@ -1903,14 +1903,14 @@ SExprInfo* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tab index.columnIndex = 
colIndex; } - return tscExprAppend(pQueryInfo, functionId, &index, pSchema->type, pSchema->bytes, colId, pSchema->bytes, + return tscExprAppend(pQueryInfo, functionId, &index, pSchema->type, pSchema->bytes, colId, 0, (functionId == TSDB_FUNC_TAGPRJ)); } SExprInfo* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex, SSchema* pColSchema, int16_t flag, int16_t colId) { SExprInfo* pExpr = tscExprInsert(pQueryInfo, outputColIndex, functionId, pIndex, pColSchema->type, - pColSchema->bytes, colId, pColSchema->bytes, TSDB_COL_IS_TAG(flag)); + pColSchema->bytes, colId, 0, TSDB_COL_IS_TAG(flag)); tstrncpy(pExpr->base.aliasName, pColSchema->name, sizeof(pExpr->base.aliasName)); tstrncpy(pExpr->base.token, pColSchema->name, sizeof(pExpr->base.token)); @@ -2065,7 +2065,7 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS type = pSchema->type; bytes = pSchema->bytes; } - + SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, getNewResColId(pCmd), bytes, false); tstrncpy(pExpr->base.aliasName, name, tListLen(pExpr->base.aliasName)); @@ -2134,6 +2134,17 @@ static void updateLastScanOrderIfNeeded(SQueryInfo* pQueryInfo) { } } +static UNUSED_FUNC void updateFunctionInterBuf(SQueryInfo* pQueryInfo, bool superTable) { + size_t numOfExpr = tscNumOfExprs(pQueryInfo); + for (int32_t i = 0; i < numOfExpr; ++i) { + SExprInfo* pExpr = tscExprGet(pQueryInfo, i); + + int32_t param = (int32_t)pExpr->base.param[0].i64; + getResultDataInfo(pExpr->base.colType, pExpr->base.colBytes, pExpr->base.functionId, param, &pExpr->base.resType, &pExpr->base.resBytes, + &pExpr->base.interBytes, 0, superTable); + } +} + int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSqlExprItem* pItem, bool finalResult) { STableMetaInfo* pTableMetaInfo = NULL; int32_t functionId = pItem->pNode->functionId; @@ -2562,7 +2573,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col // set the first column ts for top/bottom query SColumnIndex index1 = {index.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX}; pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pCmd), - TSDB_KEYSIZE, false); + 0, false); tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS].name, sizeof(pExpr->base.aliasName)); const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX; @@ -3077,15 +3088,10 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) return true; } - if (pQueryInfo->groupbyExpr.numOfGroupCols != 1) { + SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0); + if (pColIndex->colIndex != TSDB_TBNAME_COLUMN_INDEX) { invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); return true; - } else { - SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0); - if (pColIndex->colIndex != TSDB_TBNAME_COLUMN_INDEX) { - invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); - return true; - } } } else if (tscIsSessionWindowQuery(pQueryInfo)) { invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); @@ -3636,7 +3642,7 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); - if (!tscColumnExists(pTableMetaInfo->tagColList, index.columnIndex, pTableMetaInfo->pTableMeta->id.uid)) { + if 
(tscColumnExists(pTableMetaInfo->tagColList, pTagSchema1->colId, pTableMetaInfo->pTableMeta->id.uid) < 0) { tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema1); if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) { @@ -3668,7 +3674,7 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMeta); - if (!tscColumnExists(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid)) { + if (tscColumnExists(pTableMetaInfo->tagColList, pTagSchema2->colId, pTableMeta->id.uid) < 0) { tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema2); if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) { @@ -7830,6 +7836,9 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf if ((code = doFunctionsCompatibleCheck(pCmd, pQueryInfo, tscGetErrorMsgPayload(pCmd))) != TSDB_CODE_SUCCESS) { return code; } + +// updateFunctionInterBuf(pQueryInfo, false); + } else { pQueryInfo->command = TSDB_SQL_SELECT; @@ -7958,6 +7967,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf updateLastScanOrderIfNeeded(pQueryInfo); tscFieldInfoUpdateOffset(pQueryInfo); +// updateFunctionInterBuf(pQueryInfo, isSTable); if ((code = validateFillNode(pCmd, pQueryInfo, pSqlNode)) != TSDB_CODE_SUCCESS) { return code; diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 8c5e99474d..c3cf63bd26 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -795,6 +795,7 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo, pSqlExpr->colBytes = htons(pExpr->colBytes); pSqlExpr->resType = htons(pExpr->resType); pSqlExpr->resBytes = htons(pExpr->resBytes); + pSqlExpr->interBytes = htonl(pExpr->interBytes); pSqlExpr->functionId = htons(pExpr->functionId); pSqlExpr->numOfParams = htons(pExpr->numOfParams); pSqlExpr->resColId = htons(pExpr->resColId); diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 22a603b71e..c3df4773e1 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -103,13 +103,6 @@ bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) { pthread_mutex_lock(&subState->mutex); -// bool done = allSubqueryDone(pParentSql); -// if (done) { -// tscDebug("0x%"PRIx64" subquery:0x%"PRIx64",%d all subs already done", pParentSql->self, pSql->self, idx); -// pthread_mutex_unlock(&subState->mutex); -// return false; -// } - tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index:%d state set to 1", pParentSql->self, pSql->self, idx); subState->states[idx] = 1; @@ -2389,8 +2382,14 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { SColumn *pCol = taosArrayGetP(pColList, i); if (pCol->info.flist.numOfFilters > 0) { // copy to the pNew->cmd.colList if it is filtered. 
- SColumn *p = tscColumnClone(pCol); - taosArrayPush(pNewQueryInfo->colList, &p); + int32_t index1 = tscColumnExists(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid); + if (index1 >= 0) { + SColumn* x = taosArrayGetP(pNewQueryInfo->colList, index1); + tscColumnCopy(x, pCol); + } else { + SColumn *p = tscColumnClone(pCol); + taosArrayPush(pNewQueryInfo->colList, &p); + } } } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 9d2c500a92..83ec886e19 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -2270,18 +2270,14 @@ int32_t tscExprCopyAll(SArray* dst, const SArray* src, bool deepcopy) { return 0; } -bool tscColumnExists(SArray* pColumnList, int32_t columnIndex, uint64_t uid) { - // ignore the tbname columnIndex to be inserted into source list - if (columnIndex < 0) { - return false; - } - +// ignore the tbname columnIndex to be inserted into source list +int32_t tscColumnExists(SArray* pColumnList, int32_t columnId, uint64_t uid) { size_t numOfCols = taosArrayGetSize(pColumnList); int32_t i = 0; while (i < numOfCols) { SColumn* pCol = taosArrayGetP(pColumnList, i); - if ((pCol->columnIndex != columnIndex) || (pCol->tableUid != uid)) { + if ((pCol->info.colId != columnId) || (pCol->tableUid != uid)) { ++i; continue; } else { @@ -2290,10 +2286,10 @@ bool tscColumnExists(SArray* pColumnList, int32_t columnIndex, uint64_t uid) { } if (i >= numOfCols || numOfCols == 0) { - return false; + return -1; } - return true; + return i; } void tscExprAssign(SExprInfo* dst, const SExprInfo* src) { @@ -2379,13 +2375,7 @@ SColumn* tscColumnClone(const SColumn* src) { return NULL; } - dst->columnIndex = src->columnIndex; - dst->tableUid = src->tableUid; - dst->info.flist.numOfFilters = src->info.flist.numOfFilters; - dst->info.flist.filterInfo = tFilterInfoDup(src->info.flist.filterInfo, src->info.flist.numOfFilters); - dst->info.type = src->info.type; - dst->info.colId = src->info.colId; - dst->info.bytes = src->info.bytes; + tscColumnCopy(dst, src); return dst; } @@ -2394,6 +2384,18 @@ static void tscColumnDestroy(SColumn* pCol) { free(pCol); } +void tscColumnCopy(SColumn* pDest, const SColumn* pSrc) { + destroyFilterInfo(&pDest->info.flist); + + pDest->columnIndex = pSrc->columnIndex; + pDest->tableUid = pSrc->tableUid; + pDest->info.flist.numOfFilters = pSrc->info.flist.numOfFilters; + pDest->info.flist.filterInfo = tFilterInfoDup(pSrc->info.flist.filterInfo, pSrc->info.flist.numOfFilters); + pDest->info.type = pSrc->info.type; + pDest->info.colId = pSrc->info.colId; + pDest->info.bytes = pSrc->info.bytes; +} + void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid) { assert(src != NULL && dst != NULL); diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h index 57e7d2982f..044c538f47 100644 --- a/src/query/inc/qAggMain.h +++ b/src/query/inc/qAggMain.h @@ -204,7 +204,7 @@ typedef struct SAggFunctionInfo { bool (*init)(SQLFunctionCtx *pCtx); // setup the execute environment void (*xFunction)(SQLFunctionCtx *pCtx); // blocks version function - void (*xFunctionF)(SQLFunctionCtx *pCtx, int32_t position); // single-row function version, todo merge with blockwise function +// void (*xFunctionF)(SQLFunctionCtx *pCtx, int32_t position); // single-row function version, todo merge with blockwise function // finalizer must be called after all xFunction has been executed to generated final result. 
void (*xFinalize)(SQLFunctionCtx *pCtx); diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 955dd734cf..9cd1c5b033 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -295,7 +295,7 @@ enum OPERATOR_TYPE_E { OP_MultiTableAggregate = 14, OP_MultiTableTimeInterval = 15, OP_DummyInput = 16, //TODO remove it after fully refactor. - OP_MultiwayMergeSort = 17, // multi-way data merge into one input stream. + OP_MultiwayMergeSort = 17, // multi-way data merge into one input stream. OP_GlobalAggregate = 18, // global merge for the multi-way data sources. OP_Filter = 19, OP_Distinct = 20, diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index 63bfd85976..8f9b3a8e61 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -469,7 +469,7 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). { //////////////////////// The SELECT statement ///////////////////////////////// %type select {SSqlNode*} %destructor select {destroySqlNode($$);} -select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) windowstate_option(D) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). { +select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) windowstate_option(D) fill_opt(F) sliding_opt(S) groupby_opt(P) having_opt(N) orderby_opt(Z) slimit_opt(G) limit_opt(L). { A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &D, &S, F, &L, &G, N); } diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index bc14c75af5..3dff0f5d50 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -456,20 +456,6 @@ static void count_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, numOfElem, 1); } -static void count_function_f(SQLFunctionCtx *pCtx, int32_t index) { - char *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SET_VAL(pCtx, 1, 1); - *((int64_t *)pCtx->pOutput) += pCtx->size; - - // do not need it actually - SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx); - pInfo->hasResult = DATA_SET_FLAG; -} - static void count_func_merge(SQLFunctionCtx *pCtx) { int64_t *pData = (int64_t *)GET_INPUT_DATA_LIST(pCtx); for (int32_t i = 0; i < pCtx->size; ++i) { @@ -609,46 +595,6 @@ static void do_sum(SQLFunctionCtx *pCtx) { } } -static void do_sum_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SET_VAL(pCtx, 1, 1); - int64_t *res = (int64_t*) pCtx->pOutput; - - if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { - *res += GET_INT8_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { - *res += GET_INT16_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { - *res += GET_INT32_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { - *res += GET_INT64_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_UTINYINT) { - uint64_t *r = (uint64_t *)pCtx->pOutput; - *r += GET_UINT8_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_USMALLINT) { - uint64_t *r = (uint64_t *)pCtx->pOutput; - *r += GET_UINT16_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_UINT) { - uint64_t *r = (uint64_t *)pCtx->pOutput; - *r += GET_UINT32_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_UBIGINT) { - uint64_t *r = (uint64_t *)pCtx->pOutput; - *r += GET_UINT64_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { - double *retVal = (double*) 
pCtx->pOutput; - *retVal += GET_DOUBLE_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - double *retVal = (double*) pCtx->pOutput; - *retVal += GET_FLOAT_VAL(pData); - } - - GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG; -} - static void sum_function(SQLFunctionCtx *pCtx) { do_sum(pCtx); @@ -661,17 +607,6 @@ static void sum_function(SQLFunctionCtx *pCtx) { } } -static void sum_function_f(SQLFunctionCtx *pCtx, int32_t index) { - do_sum_f(pCtx, index); - - // keep the result data in output buffer, not in the intermediate buffer - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) { - SSumInfo *pSum = (SSumInfo *)pCtx->pOutput; - pSum->hasResult = DATA_SET_FLAG; - } -} - static void sum_func_merge(SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; @@ -847,53 +782,6 @@ static void avg_function(SQLFunctionCtx *pCtx) { } } -static void avg_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SET_VAL(pCtx, 1, 1); - - // NOTE: keep the intermediate result into the interResultBuf - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - - SAvgInfo *pAvgInfo = (SAvgInfo *)GET_ROWCELL_INTERBUF(pResInfo); - - if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { - pAvgInfo->sum += GET_INT8_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { - pAvgInfo->sum += GET_INT16_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { - pAvgInfo->sum += GET_INT32_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { - pAvgInfo->sum += GET_INT64_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { - pAvgInfo->sum += GET_DOUBLE_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - pAvgInfo->sum += GET_FLOAT_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_UTINYINT) { - pAvgInfo->sum += GET_UINT8_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_USMALLINT) { - pAvgInfo->sum += GET_UINT16_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_UINT) { - pAvgInfo->sum += GET_UINT32_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_UBIGINT) { - pAvgInfo->sum += GET_UINT64_VAL(pData); - } - - // restore sum and count of elements - pAvgInfo->num += 1; - - // set has result flag - pResInfo->hasResult = DATA_SET_FLAG; - - // keep the data into the final output buffer for super table query since this execution may be the last one - if (pCtx->stableQuery) { - memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SAvgInfo)); - } -} - static void avg_func_merge(SQLFunctionCtx *pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); @@ -1307,78 +1195,6 @@ static void max_func_merge(SQLFunctionCtx *pCtx) { } } -static void minMax_function_f(SQLFunctionCtx *pCtx, int32_t index, int32_t isMin) { - char *pData = GET_INPUT_DATA(pCtx, index); - TSKEY key = GET_TS_DATA(pCtx, index); - - int32_t num = 0; - if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { - int8_t *output = (int8_t *)pCtx->pOutput; - int8_t i = GET_INT8_VAL(pData); - - UPDATE_DATA(pCtx, *output, i, num, isMin, key); - } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { - int16_t *output = (int16_t*) pCtx->pOutput; - int16_t i = GET_INT16_VAL(pData); - - UPDATE_DATA(pCtx, *output, i, num, isMin, key); - } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { - int32_t *output = (int32_t*) pCtx->pOutput; - int32_t i = GET_INT32_VAL(pData); - - UPDATE_DATA(pCtx, 
*output, i, num, isMin, key); - } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { - int64_t *output = (int64_t*) pCtx->pOutput; - int64_t i = GET_INT64_VAL(pData); - - UPDATE_DATA(pCtx, *output, i, num, isMin, key); - } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - float *output = (float*) pCtx->pOutput; - float i = GET_FLOAT_VAL(pData); - - UPDATE_DATA(pCtx, *output, i, num, isMin, key); - } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { - double *output = (double*) pCtx->pOutput; - double i = GET_DOUBLE_VAL(pData); - - UPDATE_DATA(pCtx, *output, i, num, isMin, key); - } - - GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG; -} - -static void max_function_f(SQLFunctionCtx *pCtx, int32_t index) { - char *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SET_VAL(pCtx, 1, 1); - minMax_function_f(pCtx, index, 0); - - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) { - char *flag = pCtx->pOutput + pCtx->inputBytes; - *flag = DATA_SET_FLAG; - } -} - -static void min_function_f(SQLFunctionCtx *pCtx, int32_t index) { - char *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SET_VAL(pCtx, 1, 1); - minMax_function_f(pCtx, index, 1); - - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) { - char *flag = pCtx->pOutput + pCtx->inputBytes; - *flag = DATA_SET_FLAG; - } -} - #define LOOP_STDDEV_IMPL(type, r, d, ctx, delta, _type, num) \ for (int32_t i = 0; i < (ctx)->size; ++i) { \ if ((ctx)->hasNull && isNull((char *)&((type *)d)[i], (_type))) { \ @@ -1472,114 +1288,6 @@ static void stddev_function(SQLFunctionCtx *pCtx) { } } -static void stddev_function_f(SQLFunctionCtx *pCtx, int32_t index) { - // the second stage to calculate standard deviation - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SStddevInfo *pStd = GET_ROWCELL_INTERBUF(pResInfo); - - if (pCtx->currentStage == REPEAT_SCAN && pStd->stage == 0) { - pStd->stage++; - avg_finalizer(pCtx); - - pResInfo->initialized = true; // set it initialized to avoid re-initialization - - // save average value into tmpBuf, for second stage scan - SAvgInfo *pAvg = GET_ROWCELL_INTERBUF(pResInfo); - - pStd->avg = GET_DOUBLE_VAL(pCtx->pOutput); - assert((isnan(pAvg->sum) && pAvg->num == 0) || (pStd->num == pAvg->num && pStd->avg == pAvg->sum)); - } - - /* the first stage is to calculate average value */ - if (pStd->stage == 0) { - avg_function_f(pCtx, index); - } else if (pStd->num > 0) { - double avg = pStd->avg; - void * pData = GET_INPUT_DATA(pCtx, index); - - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_INT: { - pStd->res += POW2(GET_INT32_VAL(pData) - avg); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - pStd->res += POW2(GET_FLOAT_VAL(pData) - avg); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - pStd->res += POW2(GET_DOUBLE_VAL(pData) - avg); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - pStd->res += POW2(GET_INT64_VAL(pData) - avg); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - pStd->res += POW2(GET_INT16_VAL(pData) - avg); - break; - } - case TSDB_DATA_TYPE_TINYINT: { - pStd->res += POW2(GET_INT8_VAL(pData) - avg); - break; - } - case TSDB_DATA_TYPE_UINT: { - pStd->res += POW2(GET_UINT32_VAL(pData) - avg); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - pStd->res += POW2(GET_UINT64_VAL(pData) - avg); - 
break; - } - case TSDB_DATA_TYPE_USMALLINT: { - pStd->res += POW2(GET_UINT16_VAL(pData) - avg); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - pStd->res += POW2(GET_UINT8_VAL(pData) - avg); - break; - } - default: - qError("stddev function not support data type:%d", pCtx->inputType); - } - - SET_VAL(pCtx, 1, 1); - } -} - -static UNUSED_FUNC void stddev_next_step(SQLFunctionCtx *pCtx) { - /* - * the stddevInfo and the average info struct share the same buffer area - * And the position of each element in their struct is exactly the same matched - */ - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SStddevInfo *pStd = GET_ROWCELL_INTERBUF(pResInfo); - - if (pStd->stage == 0) { - /* - * stddev is calculated in two stage: - * 1. get the average value of all data; - * 2. get final result, based on the average values; - * so, if this routine is in second stage, no further step is required - */ - pStd->stage++; - avg_finalizer(pCtx); - - pResInfo->initialized = true; // set it initialized to avoid re-initialization - - // save average value into tmpBuf, for second stage scan - SAvgInfo *pAvg = GET_ROWCELL_INTERBUF(pResInfo); - - pStd->avg = GET_DOUBLE_VAL(pCtx->pOutput); - assert((isnan(pAvg->sum) && pAvg->num == 0) || (pStd->num == pAvg->num && pStd->avg == pAvg->sum)); - } else { - pResInfo->complete = true; - } -} - static void stddev_finalizer(SQLFunctionCtx *pCtx) { SStddevInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); @@ -1696,97 +1404,6 @@ static void stddev_dst_function(SQLFunctionCtx *pCtx) { memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)), sizeof(SAvgInfo)); } -static void stddev_dst_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - // the second stage to calculate standard deviation - SStddevdstInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); - double *retVal = &pStd->res; - - // all data are null, no need to proceed - SArray* resList = (SArray*) pCtx->param[0].pz; - if (resList == NULL) { - return; - } - - // find the correct group average results according to the tag value - int32_t len = (int32_t) taosArrayGetSize(resList); - assert(len > 0); - - double avg = 0; - if (len == 1) { - SResPair* p = taosArrayGet(resList, 0); - avg = p->avg; - } else { // todo opt performance by using iterator since the timestamp lsit is matched with the output result - SResPair* p = bsearch(&pCtx->startTs, resList->pData, len, sizeof(SResPair), tsCompare); - assert(p != NULL); - - avg = p->avg; - } - - int32_t num = 0; - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_INT: { - for (int32_t i = 0; i < pCtx->size; ++i) { - if (pCtx->hasNull && isNull((const char*) (&((int32_t *)pData)[i]), pCtx->inputType)) { - continue; - } - num += 1; - *retVal += POW2(((int32_t *)pData)[i] - avg); - } - break; - } - case TSDB_DATA_TYPE_FLOAT: { - LOOP_STDDEV_IMPL(float, *retVal, pData, pCtx, avg, pCtx->inputType, num); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - LOOP_STDDEV_IMPL(double, *retVal, pData, pCtx, avg, pCtx->inputType, num); - break; - } - case TSDB_DATA_TYPE_TINYINT: { - LOOP_STDDEV_IMPL(int8_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - LOOP_STDDEV_IMPL(int8_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - LOOP_STDDEV_IMPL(int16_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - 
LOOP_STDDEV_IMPL(uint16_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); - break; - } - case TSDB_DATA_TYPE_UINT: { - LOOP_STDDEV_IMPL(uint32_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - LOOP_STDDEV_IMPL(int64_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - LOOP_STDDEV_IMPL(uint64_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); - break; - } - default: - qError("stddev function not support data type:%d", pCtx->inputType); - } - - pStd->num += num; - SET_VAL(pCtx, num, 1); - - // copy to the final output buffer for super table - memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)), sizeof(SAvgInfo)); -} - - static void stddev_dst_merge(SQLFunctionCtx *pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); SStddevdstInfo* pRes = GET_ROWCELL_INTERBUF(pResInfo); @@ -1833,7 +1450,7 @@ static bool first_last_function_setup(SQLFunctionCtx *pCtx) { // todo opt for null block static void first_function(SQLFunctionCtx *pCtx) { - if (pCtx->order == TSDB_ORDER_DESC /*|| pCtx->preAggVals.dataBlockLoaded == false*/) { + if (pCtx->order == TSDB_ORDER_DESC) { return; } @@ -1862,27 +1479,6 @@ static void first_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, notNullElems, 1); } -static void first_function_f(SQLFunctionCtx *pCtx, int32_t index) { - if (pCtx->order == TSDB_ORDER_DESC) { - return; - } - - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SET_VAL(pCtx, 1, 1); - memcpy(pCtx->pOutput, pData, pCtx->inputBytes); - - TSKEY ts = GET_TS_DATA(pCtx, index); - DO_UPDATE_TAG_COLUMNS(pCtx, ts); - - SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx); - pInfo->hasResult = DATA_SET_FLAG; - pInfo->complete = true; // get the first not-null data, completed -} - static void first_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t index) { int64_t *timestamp = GET_TS_LIST(pCtx); @@ -1932,21 +1528,6 @@ static void first_dist_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, notNullElems, 1); } -static void first_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) { - char *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - if (pCtx->order == TSDB_ORDER_DESC) { - return; - } - - first_data_assign_impl(pCtx, pData, index); - - SET_VAL(pCtx, 1, 1); -} - static void first_dist_func_merge(SQLFunctionCtx *pCtx) { assert(pCtx->stableQuery); @@ -1978,70 +1559,55 @@ static void first_dist_func_merge(SQLFunctionCtx *pCtx) { * least one data in this block that is not null.(TODO opt for this case) */ static void last_function(SQLFunctionCtx *pCtx) { - if (pCtx->order != pCtx->param[0].i64/* || pCtx->preAggVals.dataBlockLoaded == false*/) { - return; - } - - int32_t notNullElems = 0; - - for (int32_t i = pCtx->size - 1; i >= 0; --i) { - char *data = GET_INPUT_DATA(pCtx, i); - if (pCtx->hasNull && isNull(data, pCtx->inputType)) { - if (!pCtx->requireNull) { - continue; - } - } - - memcpy(pCtx->pOutput, data, pCtx->inputBytes); - - TSKEY ts = GET_TS_DATA(pCtx, i); - DO_UPDATE_TAG_COLUMNS(pCtx, ts); - - SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx); - pInfo->hasResult = DATA_SET_FLAG; - - pInfo->complete = true; // set query completed on this column - notNullElems++; - break; - } - - SET_VAL(pCtx, notNullElems, 1); -} - -static void last_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, 
pCtx->inputType)) { - return; - } - - // the scan order is not the required order, ignore it if (pCtx->order != pCtx->param[0].i64) { return; } + SResultRowCellInfo* pResInfo = GET_RES_INFO(pCtx); + + int32_t notNullElems = 0; if (pCtx->order == TSDB_ORDER_DESC) { - SET_VAL(pCtx, 1, 1); - memcpy(pCtx->pOutput, pData, pCtx->inputBytes); - TSKEY ts = GET_TS_DATA(pCtx, index); - DO_UPDATE_TAG_COLUMNS(pCtx, ts); + for (int32_t i = pCtx->size - 1; i >= 0; --i) { + char *data = GET_INPUT_DATA(pCtx, i); + if (pCtx->hasNull && isNull(data, pCtx->inputType) && (!pCtx->requireNull)) { + continue; + } - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - pResInfo->hasResult = DATA_SET_FLAG; - pResInfo->complete = true; // set query completed - } else { // in case of ascending order check, all data needs to be checked - SResultRowCellInfo* pResInfo = GET_RES_INFO(pCtx); - TSKEY ts = GET_TS_DATA(pCtx, index); + memcpy(pCtx->pOutput, data, pCtx->inputBytes); - char* buf = GET_ROWCELL_INTERBUF(pResInfo); - if (pResInfo->hasResult != DATA_SET_FLAG || (*(TSKEY*)buf) < ts) { - pResInfo->hasResult = DATA_SET_FLAG; - memcpy(pCtx->pOutput, pData, pCtx->inputBytes); - - *(TSKEY*)buf = ts; + TSKEY ts = GET_TS_DATA(pCtx, i); DO_UPDATE_TAG_COLUMNS(pCtx, ts); + + pResInfo->hasResult = DATA_SET_FLAG; + pResInfo->complete = true; // set query completed on this column + notNullElems++; + break; + } + } else { // ascending order + for (int32_t i = pCtx->size - 1; i >= 0; --i) { + char *data = GET_INPUT_DATA(pCtx, i); + if (pCtx->hasNull && isNull(data, pCtx->inputType) && (!pCtx->requireNull)) { + continue; + } + + TSKEY ts = GET_TS_DATA(pCtx, i); + + char* buf = GET_ROWCELL_INTERBUF(pResInfo); + if (pResInfo->hasResult != DATA_SET_FLAG || (*(TSKEY*)buf) < ts) { + pResInfo->hasResult = DATA_SET_FLAG; + memcpy(pCtx->pOutput, data, pCtx->inputBytes); + + *(TSKEY*)buf = ts; + DO_UPDATE_TAG_COLUMNS(pCtx, ts); + } + + notNullElems++; + break; } } + + SET_VAL(pCtx, notNullElems, 1); } static void last_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t index) { @@ -2092,29 +1658,6 @@ static void last_dist_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, notNullElems, 1); } -static void last_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) { - if (pCtx->size == 0) { - return; - } - - char *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - /* - * 1. for scan data in asc order, no need to check data - * 2. 
for data blocks that are not loaded, no need to check data - */ - if (pCtx->order != pCtx->param[0].i64) { - return; - } - - last_data_assign_impl(pCtx, pData, index); - - SET_VAL(pCtx, 1, 1); -} - /* * in the secondary merge(local reduce), the output is limited by the * final output size, so the main difference between last_dist_func_merge and second_merge @@ -2616,28 +2159,6 @@ static void top_function(SQLFunctionCtx *pCtx) { } } -static void top_function_f(SQLFunctionCtx *pCtx, int32_t index) { - char *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - STopBotInfo *pRes = getTopBotOutputInfo(pCtx); - assert(pRes->num >= 0); - - if ((void *)pRes->res[0] != (void *)((char *)pRes + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) { - buildTopBotStruct(pRes, pCtx); - } - - SET_VAL(pCtx, 1, 1); - TSKEY ts = GET_TS_DATA(pCtx, index); - - do_top_function_add(pRes, (int32_t)pCtx->param[0].i64, pData, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0); - - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - pResInfo->hasResult = DATA_SET_FLAG; -} - static void top_func_merge(SQLFunctionCtx *pCtx) { STopBotInfo *pInput = (STopBotInfo *)GET_INPUT_DATA_LIST(pCtx); @@ -2695,27 +2216,6 @@ static void bottom_function(SQLFunctionCtx *pCtx) { } } -static void bottom_function_f(SQLFunctionCtx *pCtx, int32_t index) { - char *pData = GET_INPUT_DATA(pCtx, index); - TSKEY ts = GET_TS_DATA(pCtx, index); - - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - STopBotInfo *pRes = getTopBotOutputInfo(pCtx); - - if ((void *)pRes->res[0] != (void *)((char *)pRes + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) { - buildTopBotStruct(pRes, pCtx); - } - - SET_VAL(pCtx, 1, 1); - do_bottom_function_add(pRes, (int32_t)pCtx->param[0].i64, pData, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0); - - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - pResInfo->hasResult = DATA_SET_FLAG; -} - static void bottom_func_merge(SQLFunctionCtx *pCtx) { STopBotInfo *pInput = (STopBotInfo *)GET_INPUT_DATA_LIST(pCtx); @@ -2868,50 +2368,6 @@ static void percentile_function(SQLFunctionCtx *pCtx) { pResInfo->hasResult = DATA_SET_FLAG; } -static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SPercentileInfo *pInfo = (SPercentileInfo *)GET_ROWCELL_INTERBUF(pResInfo); - - if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) { - pInfo->stage += 1; - - // all data are null, set it completed - if (pInfo->numOfElems == 0) { - pResInfo->complete = true; - - return; - } else { - pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval); - } - } - - if (pInfo->stage == 0) { - double v = 0; - GET_TYPED_DATA(v, double, pCtx->inputType, pData); - - if (v < GET_DOUBLE_VAL(&pInfo->minval)) { - SET_DOUBLE_VAL(&pInfo->minval, v); - } - - if (v > GET_DOUBLE_VAL(&pInfo->maxval)) { - SET_DOUBLE_VAL(&pInfo->maxval, v); - } - - pInfo->numOfElems += 1; - return; - } - - tMemBucketPut(pInfo->pMemBucket, pData, 1); - - SET_VAL(pCtx, 1, 1); - pResInfo->hasResult = DATA_SET_FLAG; -} - static void percentile_finalizer(SQLFunctionCtx *pCtx) { double v = pCtx->param[0].nType == TSDB_DATA_TYPE_INT ? 
pCtx->param[0].i64 : pCtx->param[0].dKey; @@ -2930,24 +2386,6 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) { doFinalizer(pCtx); } -static UNUSED_FUNC void percentile_next_step(SQLFunctionCtx *pCtx) { - SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx); - SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); - - if (pInfo->stage == 0) { - // all data are null, set it completed - if (pInfo->numOfElems == 0) { - pResInfo->complete = true; - } else { - pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval); - } - - pInfo->stage += 1; - } else { - pResInfo->complete = true; - } -} - ////////////////////////////////////////////////////////////////////////////////// static void buildHistogramInfo(SAPercentileInfo* pInfo) { pInfo->pHisto = (SHistogramInfo*) ((char*) pInfo + sizeof(SAPercentileInfo)); @@ -3012,24 +2450,6 @@ static void apercentile_function(SQLFunctionCtx *pCtx) { } } -static void apercentile_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx); - SAPercentileInfo *pInfo = getAPerctInfo(pCtx); - - double v = 0; - GET_TYPED_DATA(v, double, pCtx->inputType, pData); - - tHistogramAdd(&pInfo->pHisto, v); - - SET_VAL(pCtx, 1, 1); - pResInfo->hasResult = DATA_SET_FLAG; -} - static void apercentile_func_merge(SQLFunctionCtx *pCtx) { SAPercentileInfo *pInput = (SAPercentileInfo *)GET_INPUT_DATA_LIST(pCtx); @@ -3213,60 +2633,6 @@ static void leastsquares_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, numOfElem, 1); } -static void leastsquares_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SLeastsquaresInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); - - double(*param)[3] = pInfo->mat; - - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_INT: { - int32_t *p = pData; - LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); - break; - } - case TSDB_DATA_TYPE_TINYINT: { - int8_t *p = pData; - LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - int16_t *p = pData; - LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - int64_t *p = pData; - LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - float *p = pData; - LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - double *p = pData; - LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); - break; - } - default: - qError("error data type in leastsquare function:%d", pCtx->inputType); - }; - - SET_VAL(pCtx, 1, 1); - pInfo->num += 1; - - if (pInfo->num > 0) { - pResInfo->hasResult = DATA_SET_FLAG; - } -} - static void leastsquares_finalizer(SQLFunctionCtx *pCtx) { // no data in query SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx); @@ -3319,7 +2685,6 @@ static void col_project_function(SQLFunctionCtx *pCtx) { INC_INIT_VAL(pCtx, pCtx->size); } - char *pData = GET_INPUT_DATA_LIST(pCtx); if (pCtx->order == TSDB_ORDER_ASC) { memcpy(pCtx->pOutput, pData, (size_t) pCtx->size * pCtx->inputBytes); @@ -3331,22 +2696,6 @@ static void col_project_function(SQLFunctionCtx *pCtx) { } } -static void 
col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - if (pCtx->numOfParams == 2) { // the number of output rows should not affect the final number of rows, so set it to be 0 - return; - } - - // only one output - if (pCtx->param[0].i64 == 1 && pResInfo->numOfRes >= 1) { - return; - } - - INC_INIT_VAL(pCtx, 1); - char *pData = GET_INPUT_DATA(pCtx, index); - memcpy(pCtx->pOutput, pData, pCtx->inputBytes); -} - /** * only used for tag projection query in select clause * @param pCtx @@ -3368,13 +2717,6 @@ static void tag_project_function(SQLFunctionCtx *pCtx) { } } -static void tag_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { - INC_INIT_VAL(pCtx, 1); - - tVariantDump(&pCtx->tag, pCtx->pOutput, pCtx->tag.nType, true); - pCtx->pOutput += pCtx->outputBytes; -} - /** * used in group by clause. when applying group by tags, the tags value is * assign by using tag function. @@ -3393,11 +2735,6 @@ static void tag_function(SQLFunctionCtx *pCtx) { } } -static void tag_function_f(SQLFunctionCtx *pCtx, int32_t index) { - SET_VAL(pCtx, 1, 1); - tVariantDump(&pCtx->tag, pCtx->pOutput, pCtx->outputType, true); -} - static void copy_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, pCtx->size, 1); @@ -3793,61 +3130,6 @@ static void diff_function(SQLFunctionCtx *pCtx) { } } -static void diff_function_f(SQLFunctionCtx *pCtx, int32_t index) { - char *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - // the output start from the second source element - if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is set - GET_RES_INFO(pCtx)->numOfRes += 1; - } - - int32_t step = 1/*GET_FORWARD_DIRECTION_FACTOR(pCtx->order)*/; - - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_INT: { - if (pCtx->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet - pCtx->param[1].nType = pCtx->inputType; - pCtx->param[1].i64 = *(int32_t *)pData; - } else { - *(int32_t *)pCtx->pOutput = *(int32_t *)pData - (int32_t)pCtx->param[1].i64; - pCtx->param[1].i64 = *(int32_t *)pData; - *(int64_t *)pCtx->ptsOutputBuf = GET_TS_DATA(pCtx, index); - } - break; - }; - case TSDB_DATA_TYPE_BIGINT: { - DIFF_IMPL(pCtx, pData, int64_t); - break; - }; - case TSDB_DATA_TYPE_DOUBLE: { - DIFF_IMPL(pCtx, pData, double); - break; - }; - case TSDB_DATA_TYPE_FLOAT: { - DIFF_IMPL(pCtx, pData, float); - break; - }; - case TSDB_DATA_TYPE_SMALLINT: { - DIFF_IMPL(pCtx, pData, int16_t); - break; - }; - case TSDB_DATA_TYPE_TINYINT: { - DIFF_IMPL(pCtx, pData, int8_t); - break; - }; - default: - qError("error input type"); - } - - if (GET_RES_INFO(pCtx)->numOfRes > 0) { - pCtx->pOutput += pCtx->outputBytes * step; - pCtx->ptsOutputBuf = (char *)pCtx->ptsOutputBuf + TSDB_KEYSIZE * step; - } -} - char *getArithColumnData(void *param, const char* name, int32_t colId) { SArithmeticSupport *pSupport = (SArithmeticSupport *)param; @@ -3870,16 +3152,6 @@ static void arithmetic_function(SQLFunctionCtx *pCtx) { arithmeticTreeTraverse(sas->pExprInfo->pExpr, pCtx->size, pCtx->pOutput, sas, pCtx->order, getArithColumnData); } -static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) { - INC_INIT_VAL(pCtx, 1); - SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[1].pz; - - sas->offset = index; - arithmeticTreeTraverse(sas->pExprInfo->pExpr, 1, pCtx->pOutput, sas, pCtx->order, getArithColumnData); - - pCtx->pOutput += pCtx->outputBytes; -} - #define LIST_MINMAX_N(ctx, minOutput, 
maxOutput, elemCnt, data, type, tsdbType, numOfNotNullElem) \ { \ type *inputData = (type *)data; \ @@ -3998,49 +3270,6 @@ static void spread_function(SQLFunctionCtx *pCtx) { } } -static void spread_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SET_VAL(pCtx, 1, 1); - - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SSpreadInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); - - double val = 0.0; - if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { - val = GET_INT8_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { - val = GET_INT16_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { - val = GET_INT32_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT || pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) { - val = (double)(GET_INT64_VAL(pData)); - } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { - val = GET_DOUBLE_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - val = GET_FLOAT_VAL(pData); - } - - // keep the result data in output buffer, not in the intermediate buffer - if (val > pInfo->max) { - pInfo->max = val; - } - - if (val < pInfo->min) { - pInfo->min = val; - } - - pResInfo->hasResult = DATA_SET_FLAG; - pInfo->hasResult = DATA_SET_FLAG; - - if (pCtx->stableQuery) { - memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SSpreadInfo)); - } -} - /* * here we set the result value back to the intermediate buffer, to apply the finalize the function * the final result is generated in spread_function_finalizer @@ -4393,26 +3622,6 @@ static void twa_function(SQLFunctionCtx *pCtx) { } } -static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - int32_t notNullElems = twa_function_impl(pCtx, index, 1); - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - - SET_VAL(pCtx, notNullElems, 1); - - if (notNullElems > 0) { - pResInfo->hasResult = DATA_SET_FLAG; - } - - if (pCtx->stableQuery) { - memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(STwaInfo)); - } -} - /* * To copy the input to interResBuf to avoid the input buffer space be over writen * by next input data. 
The TWA function only applies to each table, so no merge procedure @@ -4590,23 +3799,6 @@ static void ts_comp_function(SQLFunctionCtx *pCtx) { pResInfo->hasResult = DATA_SET_FLAG; } -static void ts_comp_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - STSCompInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); - - STSBuf *pTSbuf = pInfo->pTSBuf; - - tsBufAppend(pTSbuf, (int32_t)pCtx->param[0].i64, &pCtx->tag, pData, TSDB_KEYSIZE); - SET_VAL(pCtx, pCtx->size, 1); - - pResInfo->hasResult = DATA_SET_FLAG; -} - static void ts_comp_finalize(SQLFunctionCtx *pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); @@ -4736,46 +3928,6 @@ static void rate_function(SQLFunctionCtx *pCtx) { } } -static void rate_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - // NOTE: keep the intermediate result into the interResultBuf - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SRateInfo *pRateInfo = (SRateInfo *)GET_ROWCELL_INTERBUF(pResInfo); - TSKEY *primaryKey = GET_TS_LIST(pCtx); - - double v = 0; - GET_TYPED_DATA(v, double, pCtx->inputType, pData); - - if ((INT64_MIN == pRateInfo->firstValue) || (INT64_MIN == pRateInfo->firstKey)) { - pRateInfo->firstValue = v; - pRateInfo->firstKey = primaryKey[index]; - } - - if (INT64_MIN == pRateInfo->lastValue) { - pRateInfo->lastValue = v; - } else if (v < pRateInfo->lastValue) { - pRateInfo->correctionValue += pRateInfo->lastValue; - } - - pRateInfo->lastValue = v; - pRateInfo->lastKey = primaryKey[index]; - - SET_VAL(pCtx, 1, 1); - - // set has result flag - pRateInfo->hasResult = DATA_SET_FLAG; - pResInfo->hasResult = DATA_SET_FLAG; - - // keep the data into the final output buffer for super table query since this execution may be the last one - if (pCtx->stableQuery) { - memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); - } -} - static void rate_func_copy(SQLFunctionCtx *pCtx) { assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); @@ -4846,39 +3998,6 @@ static void irate_function(SQLFunctionCtx *pCtx) { } } -static void irate_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - // NOTE: keep the intermediate result into the interResultBuf - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SRateInfo *pRateInfo = (SRateInfo *)GET_ROWCELL_INTERBUF(pResInfo); - TSKEY *primaryKey = GET_TS_LIST(pCtx); - - double v = 0; - GET_TYPED_DATA(v, double, pCtx->inputType, pData); - - pRateInfo->firstKey = pRateInfo->lastKey; - pRateInfo->firstValue = pRateInfo->lastValue; - - pRateInfo->lastValue = v; - pRateInfo->lastKey = primaryKey[index]; - -// qDebug("====%p irate_function_f() index:%d lastValue:%" PRId64 " lastKey:%" PRId64 " firstValue:%" PRId64 " firstKey:%" PRId64, pCtx, index, pRateInfo->lastValue, pRateInfo->lastKey, pRateInfo->firstValue , pRateInfo->firstKey); - SET_VAL(pCtx, 1, 1); - - // set has result flag - pRateInfo->hasResult = DATA_SET_FLAG; - pResInfo->hasResult = DATA_SET_FLAG; - - // keep the data into the final output buffer for super table query since this execution may be the last one - if (pCtx->stableQuery) { - memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); - } -} - void 
blockInfo_func(SQLFunctionCtx* pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); STableBlockDist* pDist = (STableBlockDist*) GET_ROWCELL_INTERBUF(pResInfo); @@ -5062,7 +4181,7 @@ int32_t functionCompatList[] = { // tag, colprj, tagprj, arithmetic, diff, first_dist, last_dist, stddev_dst, interp rate irate 1, 1, 1, 1, -1, 1, 1, 1, 5, 1, 1, // tid_tag, blk_info - 6, 7 + 6, 7 }; SAggFunctionInfo aAggs[] = {{ @@ -5073,7 +4192,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO, function_setup, count_function, - count_function_f, doFinalizer, count_func_merge, countRequired, @@ -5086,7 +4204,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO, function_setup, sum_function, - sum_function_f, function_finalizer, sum_func_merge, statisRequired, @@ -5099,7 +4216,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO, function_setup, avg_function, - avg_function_f, avg_finalizer, avg_func_merge, statisRequired, @@ -5112,7 +4228,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY, min_func_setup, min_function, - min_function_f, function_finalizer, min_func_merge, statisRequired, @@ -5125,7 +4240,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY, max_func_setup, max_function, - max_function_f, function_finalizer, max_func_merge, statisRequired, @@ -5138,7 +4252,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF, function_setup, stddev_function, - stddev_function_f, stddev_finalizer, noop1, dataBlockRequired, @@ -5151,7 +4264,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF, percentile_function_setup, percentile_function, - percentile_function_f, percentile_finalizer, noop1, dataBlockRequired, @@ -5164,7 +4276,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_STABLE, apercentile_function_setup, apercentile_function, - apercentile_function_f, apercentile_finalizer, apercentile_func_merge, dataBlockRequired, @@ -5177,7 +4288,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY, function_setup, first_function, - first_function_f, function_finalizer, noop1, firstFuncRequired, @@ -5190,7 +4300,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY, function_setup, last_function, - last_function_f, function_finalizer, noop1, lastFuncRequired, @@ -5204,7 +4313,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SELECTIVITY, first_last_function_setup, last_row_function, - noop2, last_row_finalizer, last_dist_func_merge, dataBlockRequired, @@ -5218,7 +4326,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SELECTIVITY, top_bottom_function_setup, top_function, - top_function_f, top_bottom_func_finalizer, top_func_merge, dataBlockRequired, @@ -5232,7 +4339,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SELECTIVITY, top_bottom_function_setup, bottom_function, - bottom_function_f, top_bottom_func_finalizer, bottom_func_merge, dataBlockRequired, @@ -5245,7 +4351,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO, spread_function_setup, spread_function, - spread_function_f, spread_function_finalizer, spread_func_merge, countRequired, @@ -5258,7 +4363,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, twa_function_setup, twa_function, - twa_function_f, twa_function_finalizer, twa_function_copy, dataBlockRequired, @@ -5271,7 +4375,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | 
TSDB_FUNCSTATE_OF, leastsquares_function_setup, leastsquares_function, - leastsquares_function_f, leastsquares_finalizer, noop1, dataBlockRequired, @@ -5284,7 +4387,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, function_setup, date_col_output_function, - date_col_output_function_f, doFinalizer, copy_function, noDataRequired, @@ -5297,7 +4399,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, function_setup, noop1, - noop2, doFinalizer, copy_function, dataBlockRequired, @@ -5310,7 +4411,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO, function_setup, tag_function, - noop2, doFinalizer, copy_function, noDataRequired, @@ -5323,7 +4423,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_NEED_TS, ts_comp_function_setup, ts_comp_function, - ts_comp_function_f, ts_comp_finalize, copy_function, dataBlockRequired, @@ -5336,7 +4435,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO, function_setup, tag_function, - tag_function_f, doFinalizer, copy_function, noDataRequired, @@ -5349,7 +4447,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_MO | TSDB_FUNCSTATE_NEED_TS, function_setup, col_project_function, - col_project_function_f, doFinalizer, copy_function, dataBlockRequired, @@ -5362,7 +4459,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_MO, function_setup, tag_project_function, - tag_project_function_f, doFinalizer, copy_function, noDataRequired, @@ -5375,7 +4471,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS, function_setup, arithmetic_function, - arithmetic_function_f, doFinalizer, copy_function, dataBlockRequired, @@ -5388,7 +4483,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY, diff_function_setup, diff_function, - diff_function_f, doFinalizer, noop1, dataBlockRequired, @@ -5402,7 +4496,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY, first_last_function_setup, first_dist_function, - first_dist_function_f, function_finalizer, first_dist_func_merge, firstDistFuncRequired, @@ -5415,7 +4508,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY, first_last_function_setup, last_dist_function, - last_dist_function_f, function_finalizer, last_dist_func_merge, lastDistFuncRequired, @@ -5428,7 +4520,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STABLE, function_setup, stddev_dst_function, - stddev_dst_function_f, stddev_dst_finalizer, stddev_dst_merge, dataBlockRequired, @@ -5441,7 +4532,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS , function_setup, interp_function, - do_sum_f, // todo filter handle doFinalizer, copy_function, dataBlockRequired, @@ -5454,7 +4544,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, rate_function_setup, rate_function, - rate_function_f, rate_finalizer, rate_func_copy, dataBlockRequired, @@ -5467,7 +4556,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, rate_function_setup, irate_function, - irate_function_f, rate_finalizer, rate_func_copy, dataBlockRequired, @@ -5480,7 +4568,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE, function_setup, noop1, - noop2, noop1, noop1, dataBlockRequired, @@ -5492,7 +4579,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_MO | 
TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY, deriv_function_setup, deriv_function, - noop2, doFinalizer, noop1, dataBlockRequired, @@ -5505,7 +4591,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STABLE, function_setup, blockInfo_func, - noop2, blockinfo_func_finalizer, block_func_merge, dataBlockRequired, diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 7b106c178d..689ab08415 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -161,7 +161,7 @@ static void setResultOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRow* pResul int32_t numOfCols, int32_t* rowCellInfoOffset); void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowCellInfoOffset); -static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId); +static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx); static void setBlockStatisInfo(SQLFunctionCtx *pCtx, SSDataBlock* pSDataBlock, SColIndex* pColIndex); @@ -309,7 +309,7 @@ static bool isProjQuery(SQueryAttr *pQueryAttr) { return true; } -static bool hasNullRv(SColIndex* pColIndex, SDataStatis *pStatis) { +static bool hasNull(SColIndex* pColIndex, SDataStatis *pStatis) { if (TSDB_COL_IS_TAG(pColIndex->flag) || TSDB_COL_IS_UD_COL(pColIndex->flag) || pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { return false; } @@ -708,12 +708,13 @@ static int32_t getNumOfRowsInTimeWindow(SQueryRuntimeEnv* pRuntimeEnv, SDataBloc static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, STimeWindow* pWin, int32_t offset, int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput) { SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; - bool hasPrev = pCtx[0].preAggVals.isSet; + bool hasAggregates = pCtx[0].preAggVals.isSet; for (int32_t k = 0; k < numOfOutput; ++k) { - pCtx[k].size = forwardStep; + pCtx[k].size = forwardStep; pCtx[k].startTs = pWin->skey; + // keep it temprarily char* start = pCtx[k].pInput; int32_t pos = (QUERY_IS_ASC_QUERY(pQueryAttr)) ? 
offset : offset - (forwardStep - 1); @@ -725,20 +726,18 @@ static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx pCtx[k].ptsList = &tsCol[pos]; } - int32_t functionId = pCtx[k].functionId; - // not a whole block involved in query processing, statistics data can not be used // NOTE: the original value of isSet have been changed here if (pCtx[k].preAggVals.isSet && forwardStep < numOfTotal) { pCtx[k].preAggVals.isSet = false; } - if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { - aAggs[functionId].xFunction(&pCtx[k]); + if (functionNeedToExecute(pRuntimeEnv, &pCtx[k])) { + aAggs[pCtx[k].functionId].xFunction(&pCtx[k]); } // restore it - pCtx[k].preAggVals.isSet = hasPrev; + pCtx[k].preAggVals.isSet = hasAggregates; pCtx[k].pInput = start; } } @@ -847,9 +846,6 @@ static void setNotInterpoWindowKey(SQLFunctionCtx* pCtx, int32_t numOfOutput, in } } -// window start key interpolation - - static void saveDataBlockLastRow(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pDataBlockInfo, SArray* pDataBlock, int32_t rowIndex) { if (pDataBlock == NULL) { @@ -975,10 +971,9 @@ static void doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SQLFunction SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv; for (int32_t k = 0; k < pOperator->numOfOutput; ++k) { - int32_t functionId = pCtx[k].functionId; - if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { + if (functionNeedToExecute(pRuntimeEnv, &pCtx[k])) { pCtx[k].startTs = startTs;// this can be set during create the struct - aAggs[functionId].xFunction(&pCtx[k]); + aAggs[pCtx[k].functionId].xFunction(&pCtx[k]); } } } @@ -1287,6 +1282,15 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn return; } + int64_t* tsList = NULL; + SColumnInfoData* pFirstColData = taosArrayGet(pSDataBlock->pDataBlock, 0); + if (pFirstColData->info.type == TSDB_DATA_TYPE_TIMESTAMP) { + tsList = (int64_t*) pFirstColData->pData; + } + + STimeWindow w = TSWINDOW_INITIALIZER; + + int32_t num = 0; for (int32_t j = 0; j < pSDataBlock->info.rows; ++j) { char* val = ((char*)pColInfoData->pData) + bytes * j; if (isNull(val, type)) { @@ -1294,33 +1298,51 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn } // Compare with the previous row of this column, and do not set the output buffer again if they are identical. 
- if (pInfo->prevData == NULL || (memcmp(pInfo->prevData, val, bytes) != 0)) { - if (pInfo->prevData == NULL) { - pInfo->prevData = malloc(bytes); - } - + if (pInfo->prevData == NULL) { + pInfo->prevData = malloc(bytes); memcpy(pInfo->prevData, val, bytes); - - if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) { - setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val, bytes); - } - - int32_t ret = - setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, val, type, bytes, item->groupIndex); - if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code - longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR); - } + num++; + continue; } - - // todo opt perf - for (int32_t k = 0; k < pOperator->numOfOutput; ++k) { - pInfo->binfo.pCtx[k].size = 1; - int32_t functionId = pInfo->binfo.pCtx[k].functionId; - if (functionNeedToExecute(pRuntimeEnv, &pInfo->binfo.pCtx[k], functionId)) { - aAggs[functionId].xFunctionF(&pInfo->binfo.pCtx[k], j); - } + if (memcmp(pInfo->prevData, val, bytes) == 0) { + num++; + continue; } + + if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) { + setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo->prevData, + bytes); + } + + int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, bytes, + item->groupIndex); + if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code + longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR); + } + + doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, j - num, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput); + + num = 1; + memcpy(pInfo->prevData, val, bytes); + } + + if (num > 0) { + char* val = ((char*)pColInfoData->pData) + bytes * (pSDataBlock->info.rows - num); + memcpy(pInfo->prevData, val, bytes); + + if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) { + setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val, + bytes); + } + + int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, val, type, bytes, + item->groupIndex); + if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code + longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR); + } + + doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, pSDataBlock->info.rows - num, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput); } } @@ -1419,7 +1441,7 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasic // not assign result buffer yet, add new result buffer, TODO remove it char* d = pData; int16_t len = bytes; - if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { + if (IS_VAR_DATA_TYPE(type)) { d = varDataVal(pData); len = varDataLen(pData); } @@ -1461,11 +1483,12 @@ static int32_t getGroupbyColumnIndex(SGroupbyExpr *pGroupbyExpr, SSDataBlock* pD return -1; } -static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId) { +static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; // in case of timestamp column, always generated results. 
+ int32_t functionId = pCtx->functionId; if (functionId == TSDB_FUNC_TS) { return true; } @@ -1505,7 +1528,7 @@ void setBlockStatisInfo(SQLFunctionCtx *pCtx, SSDataBlock* pSDataBlock, SColInde pCtx->preAggVals.isSet = false; } - pCtx->hasNull = hasNullRv(pColIndex, pStatis); + pCtx->hasNull = hasNull(pColIndex, pStatis); // set the statistics data for primary time stamp column if (pCtx->functionId == TSDB_FUNC_SPREAD && pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { @@ -3470,6 +3493,7 @@ int32_t setTimestampListJoinInfo(SQueryRuntimeEnv* pRuntimeEnv, tVariant* pTag, return 0; } +// TODO refactor: this funciton should be merged with setparamForStableStddevColumnData function. void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExprInfo) { SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; @@ -4675,8 +4699,7 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SMultiwayMergeInfo* pInfo = calloc(1, sizeof(SMultiwayMergeInfo)); pInfo->resultRowFactor = - (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery, - false)); + (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery, false)); pRuntimeEnv->scanFlag = MERGE_STAGE; // TODO init when creating pCtx @@ -5248,6 +5271,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI doApplyFunctions(pRuntimeEnv, pBInfo->pCtx, &pInfo->curWindow, pInfo->start, pInfo->numOfRows, tsList, pSDataBlock->info.rows, pOperator->numOfOutput); } + static SSDataBlock* doStateWindowAgg(void *param, bool* newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; if (pOperator->status == OP_EXEC_DONE) { @@ -6260,7 +6284,7 @@ static bool validateQueryMsg(SQueryTableMsg *pQueryMsg) { return true; } -static UNUSED_FUNC bool validateQueryTableCols(SQueriedTableInfo* pTableInfo, SSqlExpr** pExpr, int32_t numOfOutput, +static bool validateQueryTableCols(SQueriedTableInfo* pTableInfo, SSqlExpr** pExpr, int32_t numOfOutput, SColumnInfo* pTagCols, void* pMsg) { int32_t numOfTotal = pTableInfo->numOfCols + pTableInfo->numOfTags; if (pTableInfo->numOfCols < 0 || pTableInfo->numOfTags < 0 || numOfTotal > TSDB_MAX_COLUMNS) { @@ -6445,6 +6469,7 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pExprMsg->resType = htons(pExprMsg->resType); pExprMsg->resBytes = htons(pExprMsg->resBytes); + pExprMsg->interBytes = htonl(pExprMsg->interBytes); pExprMsg->functionId = htons(pExprMsg->functionId); pExprMsg->numOfParams = htons(pExprMsg->numOfParams); @@ -6652,41 +6677,41 @@ _cleanup: return code; } - int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) { - if (filterNum <= 0) { - return TSDB_CODE_SUCCESS; - } - - *dst = calloc(filterNum, sizeof(*src)); - if (*dst == NULL) { - return TSDB_CODE_QRY_OUT_OF_MEMORY; - } - - memcpy(*dst, src, sizeof(*src) * filterNum); - - for (int32_t i = 0; i < filterNum; i++) { - if ((*dst)[i].filterstr && dst[i]->len > 0) { - void *pz = calloc(1, (size_t)(*dst)[i].len + 1); - - if (pz == NULL) { - if (i == 0) { - free(*dst); - } else { - freeColumnFilterInfo(*dst, i); - } - - return TSDB_CODE_QRY_OUT_OF_MEMORY; - } - - memcpy(pz, (void *)src->pz, (size_t)src->len + 1); - - (*dst)[i].pz = (int64_t)pz; - } - } - +int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) { + if (filterNum <= 0) { return TSDB_CODE_SUCCESS; } + *dst 
= calloc(filterNum, sizeof(*src)); + if (*dst == NULL) { + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } + + memcpy(*dst, src, sizeof(*src) * filterNum); + + for (int32_t i = 0; i < filterNum; i++) { + if ((*dst)[i].filterstr && dst[i]->len > 0) { + void *pz = calloc(1, (size_t)(*dst)[i].len + 1); + + if (pz == NULL) { + if (i == 0) { + free(*dst); + } else { + freeColumnFilterInfo(*dst, i); + } + + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } + + memcpy(pz, (void *)src->pz, (size_t)src->len + 1); + + (*dst)[i].pz = (int64_t)pz; + } + } + + return TSDB_CODE_SUCCESS; +} + int32_t buildArithmeticExprFromMsg(SExprInfo *pExprInfo, void *pQueryMsg) { qDebug("qmsg:%p create arithmetic expr from binary", pQueryMsg); @@ -6745,8 +6770,8 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp for (int32_t i = 0; i < numOfOutput; ++i) { pExprs[i].base = *pExprMsg[i]; - memset(pExprs[i].base.param, 0, sizeof(tVariant) * tListLen(pExprs[i].base.param)); + memset(pExprs[i].base.param, 0, sizeof(tVariant) * tListLen(pExprs[i].base.param)); for (int32_t j = 0; j < pExprMsg[i]->numOfParams; ++j) { tVariantAssign(&pExprs[i].base.param[j], &pExprMsg[i]->param[j]); } @@ -6821,6 +6846,7 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp return TSDB_CODE_QRY_INVALID_MSG; } + // todo remove it if (getResultDataInfo(type, bytes, pExprs[i].base.functionId, param, &pExprs[i].base.resType, &pExprs[i].base.resBytes, &pExprs[i].base.interBytes, 0, isSuperTable) != TSDB_CODE_SUCCESS) { tfree(pExprs); diff --git a/src/query/src/sql.c b/src/query/src/sql.c index 8764c6d0d3..8abeb23b0e 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -259,15 +259,15 @@ static const YYACTIONTYPE yy_action[] = { /* 500 */ 1019, 999, 265, 1036, 146, 150, 981, 274, 1035, 163, /* 510 */ 142, 269, 164, 157, 979, 733, 165, 224, 789, 263, /* 520 */ 166, 153, 894, 283, 290, 43, 191, 271, 39, 299, - /* 530 */ 890, 306, 73, 278, 1113, 996, 70, 47, 104, 154, + /* 530 */ 890, 306, 73, 278, 1113, 996, 70, 154, 104, 47, /* 540 */ 155, 1112, 281, 1109, 170, 313, 1105, 110, 279, 156, /* 550 */ 1104, 277, 158, 275, 273, 1101, 159, 171, 270, 912, /* 560 */ 40, 37, 44, 192, 878, 120, 876, 122, 123, 874, /* 570 */ 873, 250, 182, 871, 870, 869, 868, 867, 866, 185, /* 580 */ 187, 863, 861, 859, 857, 189, 854, 190, 46, 79, /* 590 */ 84, 272, 331, 1037, 115, 323, 324, 325, 326, 327, - /* 600 */ 328, 329, 214, 341, 234, 289, 831, 252, 251, 830, - /* 610 */ 211, 212, 254, 98, 99, 255, 829, 812, 811, 259, + /* 600 */ 328, 329, 214, 234, 289, 341, 831, 252, 251, 830, + /* 610 */ 211, 212, 254, 98, 99, 255, 829, 259, 812, 811, /* 620 */ 10, 264, 872, 284, 133, 710, 174, 134, 173, 913, /* 630 */ 172, 175, 177, 176, 135, 178, 865, 914, 864, 2, /* 640 */ 136, 950, 856, 855, 82, 30, 4, 266, 160, 161, @@ -335,15 +335,15 @@ static const YYCODETYPE yy_lookahead[] = { /* 500 */ 194, 240, 240, 267, 194, 194, 240, 194, 267, 244, /* 510 */ 60, 263, 194, 250, 194, 118, 194, 263, 112, 195, /* 520 */ 194, 254, 194, 124, 194, 194, 194, 263, 194, 194, - /* 530 */ 194, 194, 130, 263, 194, 255, 132, 129, 194, 253, + /* 530 */ 194, 194, 130, 263, 194, 255, 132, 253, 194, 129, /* 540 */ 252, 194, 128, 194, 194, 194, 194, 194, 123, 251, /* 550 */ 194, 122, 249, 121, 120, 194, 248, 194, 119, 194, /* 560 */ 194, 194, 194, 194, 194, 194, 194, 194, 194, 194, /* 570 */ 194, 194, 194, 194, 194, 194, 194, 194, 194, 194, /* 580 */ 194, 194, 194, 194, 194, 194, 194, 194, 134, 195, /* 590 */ 195, 195, 107, 195, 90, 89, 50, 86, 88, 54, - 
/* 600 */ 87, 85, 195, 82, 195, 195, 5, 5, 148, 5, - /* 610 */ 195, 195, 148, 201, 201, 5, 5, 94, 93, 137, + /* 600 */ 87, 85, 195, 195, 195, 82, 5, 5, 148, 5, + /* 610 */ 195, 195, 148, 201, 201, 5, 5, 137, 94, 93, /* 620 */ 80, 114, 195, 109, 196, 81, 206, 196, 210, 212, /* 630 */ 211, 209, 208, 207, 196, 205, 195, 214, 195, 202, /* 640 */ 196, 230, 195, 195, 115, 80, 197, 91, 247, 246, @@ -395,18 +395,18 @@ static const unsigned short int yy_shift_ofst[] = { /* 120 */ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, /* 130 */ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, /* 140 */ 9, 9, 9, 9, 450, 450, 450, 397, 397, 397, - /* 150 */ 450, 397, 450, 402, 404, 408, 399, 414, 425, 429, + /* 150 */ 450, 397, 450, 402, 404, 399, 410, 414, 425, 429, /* 160 */ 432, 434, 439, 454, 450, 450, 450, 485, 47, 47, /* 170 */ 450, 450, 504, 506, 546, 511, 510, 545, 513, 516, - /* 180 */ 485, 143, 450, 521, 521, 450, 521, 450, 521, 450, + /* 180 */ 485, 143, 450, 523, 523, 450, 523, 450, 523, 450, /* 190 */ 450, 734, 734, 27, 99, 99, 126, 99, 53, 180, /* 200 */ 283, 283, 283, 283, 193, 269, 273, 327, 327, 327, /* 210 */ 327, 230, 251, 250, 238, 172, 172, 234, 307, 342, /* 220 */ 348, 210, 317, 323, 338, 339, 341, 309, 297, 343, /* 230 */ 354, 355, 357, 358, 346, 359, 360, 70, 171, 406, /* 240 */ 362, 312, 316, 319, 455, 459, 325, 329, 364, 333, - /* 250 */ 400, 601, 460, 602, 604, 464, 610, 611, 523, 525, - /* 260 */ 482, 507, 514, 540, 529, 544, 565, 556, 561, 572, + /* 250 */ 400, 601, 460, 602, 604, 464, 610, 611, 524, 526, + /* 260 */ 480, 507, 514, 540, 529, 544, 565, 556, 561, 572, /* 270 */ 574, 575, 564, 577, 578, 580, 657, 581, 582, 584, /* 280 */ 571, 541, 576, 543, 585, 514, 589, 562, 590, 563, /* 290 */ 596, 665, 670, 671, 672, 673, 674, 597, 666, 606, @@ -435,8 +435,8 @@ static const short yy_reduce_ofst[] = { /* 120 */ 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, /* 130 */ 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, /* 140 */ 390, 391, 392, 393, 324, 394, 395, 248, 254, 264, - /* 150 */ 396, 270, 398, 280, 267, 286, 288, 298, 263, 303, - /* 160 */ 308, 401, 403, 405, 407, 409, 410, 411, 412, 413, + /* 150 */ 396, 270, 398, 280, 267, 284, 288, 298, 263, 303, + /* 160 */ 308, 401, 403, 405, 407, 408, 409, 411, 412, 413, /* 170 */ 415, 416, 417, 419, 418, 420, 422, 426, 424, 430, /* 180 */ 421, 423, 427, 428, 431, 441, 438, 443, 444, 447, /* 190 */ 448, 437, 449, @@ -457,7 +457,7 @@ static const YYACTIONTYPE yy_default[] = { /* 120 */ 877, 832, 875, 832, 832, 832, 832, 832, 832, 832, /* 130 */ 832, 832, 832, 832, 832, 832, 832, 862, 832, 832, /* 140 */ 832, 832, 832, 832, 853, 853, 853, 832, 832, 832, - /* 150 */ 853, 832, 853, 1047, 1051, 1045, 1033, 1041, 1032, 1028, + /* 150 */ 853, 832, 853, 1047, 1051, 1033, 1045, 1041, 1032, 1028, /* 160 */ 1026, 1024, 1023, 1055, 853, 853, 853, 893, 889, 889, /* 170 */ 853, 853, 911, 909, 907, 899, 905, 901, 903, 897, /* 180 */ 880, 832, 853, 887, 887, 853, 887, 853, 887, 853, @@ -1024,8 +1024,8 @@ static const char *const yyTokenName[] = { /* 249 */ "fill_opt", /* 250 */ "sliding_opt", /* 251 */ "groupby_opt", - /* 252 */ "orderby_opt", - /* 253 */ "having_opt", + /* 252 */ "having_opt", + /* 253 */ "orderby_opt", /* 254 */ "slimit_opt", /* 255 */ "limit_opt", /* 256 */ "union", @@ -1210,7 +1210,7 @@ static const char *const yyRuleName[] = { /* 158 */ "tagitem ::= MINUS FLOAT", /* 159 */ "tagitem ::= PLUS INTEGER", /* 160 */ "tagitem ::= PLUS FLOAT", - /* 161 */ "select ::= SELECT selcollist from where_opt interval_opt session_option windowstate_option fill_opt sliding_opt 
groupby_opt orderby_opt having_opt slimit_opt limit_opt", + /* 161 */ "select ::= SELECT selcollist from where_opt interval_opt session_option windowstate_option fill_opt sliding_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt", /* 162 */ "select ::= LP select RP", /* 163 */ "union ::= select", /* 164 */ "union ::= union UNION ALL select", @@ -1467,7 +1467,7 @@ tSqlExprListDestroy((yypminor->yy441)); case 239: /* tagNamelist */ case 249: /* fill_opt */ case 251: /* groupby_opt */ - case 252: /* orderby_opt */ + case 253: /* orderby_opt */ case 264: /* sortlist */ case 268: /* grouplist */ { @@ -1492,7 +1492,7 @@ destroyRelationInfo((yypminor->yy244)); } break; case 245: /* where_opt */ - case 253: /* having_opt */ + case 252: /* having_opt */ case 259: /* expr */ case 269: /* expritem */ { @@ -1961,7 +1961,7 @@ static const struct { { 242, -2 }, /* (158) tagitem ::= MINUS FLOAT */ { 242, -2 }, /* (159) tagitem ::= PLUS INTEGER */ { 242, -2 }, /* (160) tagitem ::= PLUS FLOAT */ - { 240, -14 }, /* (161) select ::= SELECT selcollist from where_opt interval_opt session_option windowstate_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + { 240, -14 }, /* (161) select ::= SELECT selcollist from where_opt interval_opt session_option windowstate_option fill_opt sliding_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ { 240, -3 }, /* (162) select ::= LP select RP */ { 256, -1 }, /* (163) union ::= select */ { 256, -4 }, /* (164) union ::= union UNION ALL select */ @@ -1998,8 +1998,8 @@ static const struct { { 249, -4 }, /* (195) fill_opt ::= FILL LP ID RP */ { 250, -4 }, /* (196) sliding_opt ::= SLIDING LP tmvar RP */ { 250, 0 }, /* (197) sliding_opt ::= */ - { 252, 0 }, /* (198) orderby_opt ::= */ - { 252, -3 }, /* (199) orderby_opt ::= ORDER BY sortlist */ + { 253, 0 }, /* (198) orderby_opt ::= */ + { 253, -3 }, /* (199) orderby_opt ::= ORDER BY sortlist */ { 264, -4 }, /* (200) sortlist ::= sortlist COMMA item sortorder */ { 264, -2 }, /* (201) sortlist ::= item sortorder */ { 266, -2 }, /* (202) item ::= ids cpxName */ @@ -2010,8 +2010,8 @@ static const struct { { 251, -3 }, /* (207) groupby_opt ::= GROUP BY grouplist */ { 268, -3 }, /* (208) grouplist ::= grouplist COMMA item */ { 268, -1 }, /* (209) grouplist ::= item */ - { 253, 0 }, /* (210) having_opt ::= */ - { 253, -2 }, /* (211) having_opt ::= HAVING expr */ + { 252, 0 }, /* (210) having_opt ::= */ + { 252, -2 }, /* (211) having_opt ::= HAVING expr */ { 255, 0 }, /* (212) limit_opt ::= */ { 255, -2 }, /* (213) limit_opt ::= LIMIT signed */ { 255, -4 }, /* (214) limit_opt ::= LIMIT signed OFFSET signed */ @@ -2677,9 +2677,9 @@ static void yy_reduce( } yymsp[-1].minor.yy506 = yylhsminor.yy506; break; - case 161: /* select ::= SELECT selcollist from where_opt interval_opt session_option windowstate_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + case 161: /* select ::= SELECT selcollist from where_opt interval_opt session_option windowstate_option fill_opt sliding_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ { - yylhsminor.yy236 = tSetQuerySqlNode(&yymsp[-13].minor.yy0, yymsp[-12].minor.yy441, yymsp[-11].minor.yy244, yymsp[-10].minor.yy166, yymsp[-4].minor.yy441, yymsp[-3].minor.yy441, &yymsp[-9].minor.yy340, &yymsp[-8].minor.yy259, &yymsp[-7].minor.yy348, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy441, &yymsp[0].minor.yy414, &yymsp[-1].minor.yy414, yymsp[-2].minor.yy166); + yylhsminor.yy236 = 
tSetQuerySqlNode(&yymsp[-13].minor.yy0, yymsp[-12].minor.yy441, yymsp[-11].minor.yy244, yymsp[-10].minor.yy166, yymsp[-4].minor.yy441, yymsp[-2].minor.yy441, &yymsp[-9].minor.yy340, &yymsp[-8].minor.yy259, &yymsp[-7].minor.yy348, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy441, &yymsp[0].minor.yy414, &yymsp[-1].minor.yy414, yymsp[-3].minor.yy166); } yymsp[-13].minor.yy236 = yylhsminor.yy236; break; diff --git a/tests/script/general/parser/groupby.sim b/tests/script/general/parser/groupby.sim index 124e76e85c..507431f536 100644 --- a/tests/script/general/parser/groupby.sim +++ b/tests/script/general/parser/groupby.sim @@ -654,53 +654,91 @@ if $data31 != @20-03-27 05:10:19.000@ then return -1 endi -#sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2; -#if $rows != 40 then -# return -1 -#endi -# -#if $data01 != 1.000000000 then -# return -1 -#endi -#if $data02 != t1 then -# return -1 -#endi -#if $data03 != 1 then -# return -1 -#endi -#if $data04 != 1 then -# return -1 -#endi -# -#if $data11 != 1.000000000 then -# return -1 -#endi -#if $data12 != t1 then -# return -1 -#endi -#if $data13 != 1 then -# return -1 -#endi -#if $data14 != 1 then -# return -1 -#endi -# -#sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2 limit 1; -#if $rows != 2 then -# return -1 -#endi -# -#if $data11 != 1.000000000 then -# return -1 -#endi -#if $data12 != t2 then -# return -1 -#endi -#if $data13 != 1 then -# return -1 -#endi -#if $data14 != 2 then -# return -1 -#endi +print ===============> +sql select stddev(c),c from st where t2=1 or t2=2 group by c; +if $rows != 4 then + return -1 +endi + +if $data00 != 0.000000000 then + return -1 +endi + +if $data01 != 1 then + return -1 +endi + +if $data10 != 0.000000000 then + return -1 +endi + +if $data11 != 2 then + return -1 +endi + +if $data20 != 0.000000000 then + return -1 +endi + +if $data21 != 3 then + return -1 +endi + +if $data30 != 0.000000000 then + return -1 +endi + +if $data31 != 4 then + return -1 +endi + +sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2; +if $rows != 40 then + return -1 +endi + +if $data01 != 1.000000000 then + return -1 +endi +if $data02 != t1 then + return -1 +endi +if $data03 != 1 then + return -1 +endi +if $data04 != 1 then + return -1 +endi + +if $data11 != 1.000000000 then + return -1 +endi +if $data12 != t1 then + return -1 +endi +if $data13 != 1 then + return -1 +endi +if $data14 != 1 then + return -1 +endi + +sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2 limit 1; +if $rows != 2 then + return -1 +endi + +if $data11 != 1.000000000 then + return -1 +endi +if $data12 != t2 then + return -1 +endi +if $data13 != 1 then + return -1 +endi +if $data14 != 2 then + return -1 +endi system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/having.sim b/tests/script/general/parser/having.sim index ddafdd7329..a8d2102bef 100644 --- a/tests/script/general/parser/having.sim +++ b/tests/script/general/parser/having.sim @@ -1,6 +1,6 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 system 
sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2 system sh/exec.sh -n dnode1 -s start diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim index 545e19edec..5f71138966 100644 --- a/tests/script/general/parser/testSuite.sim +++ b/tests/script/general/parser/testSuite.sim @@ -63,4 +63,3 @@ run general/parser/between_and.sim run general/parser/last_cache.sim run general/parser/nestquery.sim run general/parser/precision_ns.sim - From 9b4f437d49d581b02335ffb62afa24aa7f414e94 Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Mon, 21 Jun 2021 10:27:38 +0800 Subject: [PATCH 09/33] [fix] add jdbc nano second test cases --- .../TimestampPrecisionInNanoInJniTest.java | 54 ++----------------- 1 file changed, 3 insertions(+), 51 deletions(-) diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java index d733209f25..68541dbe9d 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java @@ -70,6 +70,9 @@ public class TimestampPrecisionInNanoInJniTest { } private void checkCount(long count, ResultSet rs) throws SQLException { + if (count == 0) { + Assert(0); + } rs.next(); long test_count = rs.getLong(1); Assert.assertEquals(count, test_count); @@ -154,9 +157,6 @@ public class TimestampPrecisionInNanoInJniTest { public void canSelectLastRowFromWeatherForFirstCol() { try (Statement stmt = conn.createStatement()) { ResultSet rs = stmt.executeQuery("select last(ts) from " + ns_timestamp_db + ".weather"); - // int nanos = rs.getTimestamp(1).getNanos(); - // Assert.assertEquals(timestamp3 % 1000_000l * 1000, nanos); - // java.lang.AssertionError: expected:<123456000> but was:<468123456> checkTime(timestamp3, rs); } catch (SQLException e) { e.printStackTrace(); @@ -417,18 +417,6 @@ public class TimestampPrecisionInNanoInJniTest { } } - // @Test - // public void canQueryNotEqualToInDateTypeForFirstCol() { - // try (Statement stmt = conn.createStatement()) { - // ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <> '" + date3 + "'"); - // checkCount(1l, rs); - // rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts <> '" + date3 + "'"); - // checkTime(timestamp2, rs); - // } catch (SQLException e) { - // e.printStackTrace(); - // } - // } - @Test public void canQueryNotEqualToInDateTypeForSecondCol() { try (Statement stmt = conn.createStatement()) { @@ -441,18 +429,6 @@ public class TimestampPrecisionInNanoInJniTest { } } - // @Test - // public void canQueryNotEqualToInNumberTypeForFirstCol() { - // try (Statement stmt = conn.createStatement()) { - // ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <> '" + timestamp3 + "'"); - // checkCount(1l, rs); - // rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts <> '" + timestamp3 + "'"); - // checkTime(timestamp2, rs); - // } catch (SQLException e) { - // e.printStackTrace(); - // } - // } - @Test public void canQueryNotEqualToInNumberTypeForSecondCol() { try (Statement stmt = conn.createStatement()) { @@ -465,18 +441,6 @@ public class TimestampPrecisionInNanoInJniTest { } } - // @Test - // public void canQueryNotEqualInDateTypeForFirstCol() { - // try (Statement stmt = 
conn.createStatement()) { - // ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts != '" + date3 + "'"); - // checkCount(1l, rs); - // rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts != '" + date3 + "'"); - // checkTime(timestamp2, rs); - // } catch (SQLException e) { - // e.printStackTrace(); - // } - // } - @Test public void canQueryNotEqualInDateTypeForSecondCol() { try (Statement stmt = conn.createStatement()) { @@ -489,18 +453,6 @@ public class TimestampPrecisionInNanoInJniTest { } } - // @Test - // public void canQueryNotEqualInNumberTypeForFirstCol() { - // try (Statement stmt = conn.createStatement()) { - // ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts != '" + timestamp3 + "'"); - // checkCount(1l, rs); - // rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts != '" + timestamp3 + "'"); - // checkTime(timestamp2, rs); - // } catch (SQLException e) { - // e.printStackTrace(); - // } - // } - @Test public void canQueryNotEqualInNumberTypeForSecondCol() { try (Statement stmt = conn.createStatement()) { From a37da4e731a1dc0bf77ddb5abf53a905b7288747 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 21 Jun 2021 10:37:59 +0800 Subject: [PATCH 10/33] [td-4776]:update the error message. --- src/client/src/tscSQLParser.c | 21 +++++++++------------ src/query/src/qAggMain.c | 3 +-- 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index cf94aaae35..d5496584e9 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2254,10 +2254,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col case TSDB_FUNC_LEASTSQR: { // 1. valid the number of parameters int32_t numOfParams = (pItem->pNode->pParam == NULL)? 0: (int32_t) taosArrayGetSize(pItem->pNode->pParam); + + // no parameters or more than one parameter for function if (pItem->pNode->pParam == NULL || (functionId != TSDB_FUNC_LEASTSQR && functionId != TSDB_FUNC_DERIVATIVE && numOfParams != 1) || ((functionId == TSDB_FUNC_LEASTSQR || functionId == TSDB_FUNC_DERIVATIVE) && numOfParams != 3)) { - /* no parameters or more than one parameter for function */ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } @@ -2271,14 +2272,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } - if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta); + + // functions can not be applied to tags + if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta))) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } // 2. 
check if sql function can be applied on this column data type - pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); - STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta); - SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); if (!IS_NUMERIC_TYPE(pSchema->type)) { @@ -2307,11 +2309,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr); } - // functions can not be applied to tags - if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); - } - SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), intermediateResSize, false); if (functionId == TSDB_FUNC_LEASTSQR) { // set the leastsquares parameters @@ -2340,9 +2337,9 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } if (info.precision == TSDB_TIME_PRECISION_MILLI) { - tickPerSec /= 1000000; + tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO); } else if (info.precision == TSDB_TIME_PRECISION_MICRO) { - tickPerSec /= 1000; + tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI); } if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) { diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 3dff0f5d50..150c58ed4d 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -4166,8 +4166,7 @@ void blockinfo_func_finalizer(SQLFunctionCtx* pCtx) { * function compatible list. * tag and ts are not involved in the compatibility check * - * 1. functions that are not simultaneously present with any other functions. e.g., - * diff/ts_z/top/bottom + * 1. functions that are not simultaneously present with any other functions. e.g., diff/ts_z/top/bottom * 2. functions that are only allowed to be present only with same functions. e.g., last_row, interp * 3. functions that are allowed to be present with other functions. * e.g., count/sum/avg/min/max/stddev/percentile/apercentile/first/last... From 349e14a73fef986e4a872f5ef3da6594b33e8897 Mon Sep 17 00:00:00 2001 From: Yang Zhao Date: Mon, 21 Jun 2021 10:59:45 +0800 Subject: [PATCH 11/33] Update TimestampPrecisionInNanoInJniTest.java --- .../taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java index 68541dbe9d..042d76d576 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java @@ -71,7 +71,7 @@ public class TimestampPrecisionInNanoInJniTest { private void checkCount(long count, ResultSet rs) throws SQLException { if (count == 0) { - Assert(0); + Assert.fail(); } rs.next(); long test_count = rs.getLong(1); From 5afd77489b504799748fb04061fada9e8fab0beb Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 21 Jun 2021 11:28:48 +0800 Subject: [PATCH 12/33] [td-225]fix compiler error. 
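The literals 1e3L, 1e6L and 1e9L are long double constants, so without the outer cast the macro evaluated to a floating-point value; that is presumably the source of the compiler error named in the subject, since the exact diagnostic is not shown here. Wrapping the whole conditional in (int64_t) keeps tick counts integral for callers. A minimal, self-contained sketch of the pattern; PRECISION_MILLI, PRECISION_MICRO and TICK_PER_SECOND are illustrative stand-ins, not the project's exact definitions:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real code uses TSDB_TIME_PRECISION_* and
 * TSDB_TICK_PER_SECOND. */
#define PRECISION_MILLI 0
#define PRECISION_MICRO 1

/* Without the (int64_t) cast the ternary yields a long double (1e3L etc.),
 * which is what the original macro produced. */
#define TICK_PER_SECOND(p) \
  ((int64_t)((p) == PRECISION_MILLI ? 1e3L : ((p) == PRECISION_MICRO ? 1e6L : 1e9L)))

int main(void) {
  int64_t ticks = 5 * TICK_PER_SECOND(PRECISION_MICRO); /* stays in integer math */
  printf("%lld\n", (long long)ticks);
  return 0;
}
```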
--- src/inc/taosdef.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index dac2dc84b6..365f24e126 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -100,7 +100,7 @@ extern const int32_t TYPE_BYTES[15]; #define TSDB_TIME_PRECISION_MICRO_STR "us" #define TSDB_TIME_PRECISION_NANO_STR "ns" -#define TSDB_TICK_PER_SECOND(precision) ((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L)) +#define TSDB_TICK_PER_SECOND(precision) ((int64_t)((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L))) #define T_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #define T_APPEND_MEMBER(dst, ptr, type, member) \ From 4239fd33c6e5ee5423fca48f16ae365055b2ae00 Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Mon, 21 Jun 2021 15:41:45 +0800 Subject: [PATCH 13/33] [TD-4746] add test case for irregular locale setting for java --- .../jdbc/cases/BadLocaleSettingTest.java | 58 +++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BadLocaleSettingTest.java diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BadLocaleSettingTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BadLocaleSettingTest.java new file mode 100644 index 0000000000..b351ee94bb --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BadLocaleSettingTest.java @@ -0,0 +1,58 @@ +package com.taosdata.jdbc.cases; + + +import com.taosdata.jdbc.TSDBDriver; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; +import java.io.IOException; + +import java.sql.*; +import java.util.Properties; + +public class BadLocaleSettingTest { + + private static final String host = "127.0.0.1"; + private static final String dbName = "bad_locale_test"; + private static Connection conn; + + @Test + public void canSetLocale() { + try { + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + conn = DriverManager.getConnection(url, properties); + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + dbName); + stmt.execute("create database if not exists " + dbName); + stmt.execute("use " + dbName); + stmt.execute("drop table if exists weather"); + stmt.execute("create table weather(ts timestamp, temperature float, humidity int)"); + stmt.executeUpdate("insert into weather values(1624071506435, 12.3, 4)"); + stmt.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @BeforeClass + public static void beforeClass() { + System.setProperty("sun.jnu.encoding", "ANSI_X3.4-1968"); + System.setProperty("file.encoding", "ANSI_X3.4-1968"); + } + + @AfterClass + public static void afterClass() { + try { + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} \ No newline at end of file From 5a9dbf41d46d140e1c62daa107848021d2c7ebaa Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 21 Jun 2021 15:46:15 +0800 Subject: [PATCH 14/33] [td-225]fix compiler error. 
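With TSDB_TICK_PER_SECOND now an int64_t expression, passing it straight into a function that takes a double argument can be flagged by conversion warnings, because not every 64-bit integer is exactly representable as a double; the explicit (double) cast states that narrowing on purpose. A small sketch of the call-site pattern; per_second below is a hypothetical stand-in for do_calc_rate's double parameter, not the project's function:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a routine that, like do_calc_rate, takes the
 * tick count as a double. */
static double per_second(double tick_per_second) { return 1.0 / tick_per_second; }

int main(void) {
  int64_t tick_per_second = 1000000000LL;          /* nanosecond precision */
  double r = per_second((double)tick_per_second);  /* explicit cast, as in the patch */
  printf("%.12f\n", r);
  return 0;
}
```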
--- src/query/src/qAggMain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 150c58ed4d..5044320cc7 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -3945,7 +3945,7 @@ static void rate_finalizer(SQLFunctionCtx *pCtx) { return; } - *(double*) pCtx->pOutput = do_calc_rate(pRateInfo, TSDB_TICK_PER_SECOND(pCtx->param[0].i64)); + *(double*) pCtx->pOutput = do_calc_rate(pRateInfo, (double) TSDB_TICK_PER_SECOND(pCtx->param[0].i64)); // cannot set the numOfIteratedElems again since it is set during previous iteration pResInfo->numOfRes = 1; From e53055b358aeb441df3adb7369cdd4b72d699929 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 21 Jun 2021 17:43:34 +0800 Subject: [PATCH 15/33] [TD-4807] : fix typos. --- documentation20/cn/08.connector/docs.md | 2 +- documentation20/en/02.getting-started/docs.md | 8 ++++---- documentation20/en/08.connector/docs.md | 16 ++++++++-------- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index f26928eec7..2d76c866d1 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -805,7 +805,7 @@ C#连接器支持的系统有:Linux 64/Windows x64/Windows x86 * 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。 * .NET接口文件TDengineDrivercs.cs和参考程序示例TDengineTest.cs均位于Windows客户端install_directory/examples/C#目录下。 -* 在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(dapper)框架驱动。 +* 在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(Dapper)框架驱动。 ### 安装验证 diff --git a/documentation20/en/02.getting-started/docs.md b/documentation20/en/02.getting-started/docs.md index fcfb88a6fe..19af3b5f31 100644 --- a/documentation20/en/02.getting-started/docs.md +++ b/documentation20/en/02.getting-started/docs.md @@ -2,7 +2,7 @@ ## Quick Install -TDegnine software consists of 3 parts: server, client, and alarm module. At the moment, TDengine server only runs on Linux (Windows, mac OS and more OS supports will come soon), but client can run on either Windows or Linux. TDengine client can be installed and run on Windows or Linux. Applications based-on any OSes can all connect to server taosd via a RESTful interface. About CPU, TDegnine supports X64/ARM64/MIPS64/Alpha64, and ARM32、RISC-V, other more CPU architectures will be supported soon. You can set up and install TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package). +TDengine software consists of 3 parts: server, client, and alarm module. At the moment, TDengine server only runs on Linux (Windows, mac OS and more OS supports will come soon), but client can run on either Windows or Linux. TDengine client can be installed and run on Windows or Linux. Applications based-on any OSes can all connect to server taosd via a RESTful interface. About CPU, TDengine supports X64/ARM64/MIPS64/Alpha64, and ARM32、RISC-V, other more CPU architectures will be supported soon. You can set up and install TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package). 
### Install from Source @@ -14,7 +14,7 @@ Please visit our [TDengine Official Docker Image: Distribution, Downloading, and ### Install from Package -It’s extremely easy to install for TDegnine, which takes only a few seconds from downloaded to successful installed. The server installation package includes clients and connectors. We provide 3 installation packages, which you can choose according to actual needs: +It’s extremely easy to install for TDengine, which takes only a few seconds from downloaded to successful installed. The server installation package includes clients and connectors. We provide 3 installation packages, which you can choose according to actual needs: Click [here](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) to download the install package. @@ -39,8 +39,8 @@ If the service is running successfully, you can play around through TDengine she **Note:** - The `systemctl` command needs the **root** privilege. Use **sudo** if you are not the **root** user. -- To get better product feedback and improve our solution, TDegnine will collect basic usage information, but you can modify the configuration parameter **telemetryReporting** in the system configuration file taos.cfg, and set it to 0 to turn it off. -- TDegnine uses FQDN (usually hostname) as the node ID. In order to ensure normal operation, you need to set hostname for the server running taosd, and configure DNS service or hosts file for the machine running client application, to ensure the FQDN can be resolved. +- To get better product feedback and improve our solution, TDengine will collect basic usage information, but you can modify the configuration parameter **telemetryReporting** in the system configuration file taos.cfg, and set it to 0 to turn it off. +- TDengine uses FQDN (usually hostname) as the node ID. In order to ensure normal operation, you need to set hostname for the server running taosd, and configure DNS service or hosts file for the machine running client application, to ensure the FQDN can be resolved. - TDengine supports installation on Linux systems with[ systemd ](https://en.wikipedia.org/wiki/Systemd)as the process service management, and uses `which systemctl` command to detect whether `systemd` packages exist in the system: ```bash diff --git a/documentation20/en/08.connector/docs.md b/documentation20/en/08.connector/docs.md index 36dc06a36e..3da9937f98 100644 --- a/documentation20/en/08.connector/docs.md +++ b/documentation20/en/08.connector/docs.md @@ -6,7 +6,7 @@ TDengine provides many connectors for development, including C/C++, JAVA, Python At present, TDengine connectors support a wide range of platforms, including hardware platforms such as X64/X86/ARM64/ARM32/MIPS/Alpha, and development environments such as Linux/Win64/Win32. 
The comparison matrix is as follows: -| **CPU** | **X64 64bit** | **X64 64bit** | **X64 64bit** | **X86 32bit** | **ARM64** | **ARM32** | **MIPS Godson** | **Alpha Whenwei** | **X64 TimecomTech** | +| **CPU** | **X64 64bit** | **X64 64bit** | **X64 64bit** | **X86 32bit** | **ARM64** | **ARM32** | **MIPS Godson** | **Alpha Sunway** | **X64 TimecomTech** | | ----------- | ------------- | ------------- | ------------- | ------------- | --------- | --------- | --------------- | ----------------- | ------------------- | | **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** | **Linux** | | **C/C++** | ● | ● | ● | ○ | ● | ● | ○ | ○ | ○ | @@ -75,9 +75,9 @@ Edit the taos.cfg file (default path/etc/taos/taos.cfg) and change firstEP to En * X64 hardware environment: TDengine-client-2.X.X.X-Windows-x64.exe * X86 hardware environment: TDengine-client-2.X.X.X-Windows-x86.exe -**2. Execute installation, select default vales as prompted to complete** +**2. Execute installation, select default values as prompted to complete** -**3. Installatino path** +**3. Installation path** Default installation path is: C:\TDengine, with following files(directories): @@ -327,7 +327,7 @@ typedef struct TAOS_BIND { } TAOS_BIND; ``` -Add the curren bound parameters to the batch. After calling this function, you can call `taos_stmt_bind_param` again to bind the new parameters. It should be noted that this function only supports insert/import statements, and if it is other SQL statements such as select, it will return errors. +Add the current bound parameters to the batch. After calling this function, you can call `taos_stmt_bind_param` again to bind the new parameters. It should be noted that this function only supports insert/import statements, and if it is other SQL statements such as select, it will return errors. - `int taos_stmt_execute(TAOS_STMT *stmt)` @@ -523,7 +523,7 @@ Users can directly view the usage information of the module through Python's hel Refer to help (taos.TDEngineConnection) in python. This class corresponds to a connection between the client and TDengine. In the scenario of client multithreading, it is recommended that each thread apply for an independent connection instance, but not recommended that multiple threads share a connection. -- *TDegnineCursor* class +- *TDengineCursor* class Refer to help (taos.TDengineCursor) in python. This class corresponds to the write and query operations performed by the client. In the scenario of client multithreading, this cursor instance must be kept exclusive to threads and cannot be used by threads, otherwise errors will occur in the returned results. @@ -685,7 +685,7 @@ Return value: } ``` -- Craete a database demo: +- Create a database demo: ```bash curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6041/rest/sql @@ -771,7 +771,7 @@ The C # connector supports: Linux 64/Windows x64/Windows x86. - For application driver installation, please refer to the[ steps of installing connector driver](https://www.taosdata.com/en/documentation/connector#driver). - . NET interface file TDengineDrivercs.cs and reference sample TDengineTest.cs are both located in the Windows client install_directory/examples/C# directory. -- On Windows, C # applications can use the native C interface of TDengine to perform all database operations, and future versions will provide the ORM (dapper) framework driver. 
+- On Windows, C # applications can use the native C interface of TDengine to perform all database operations, and future versions will provide the ORM (Dapper) framework driver. ### Installation verification @@ -908,7 +908,7 @@ Use Microsoft [windows-build-tools](https://github.com/felixrieseberg/windows-bu #### Solution 2 -Mannually install the following tools: +Manually install the following tools: - Install Visual Studio related tools: [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) or [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community) - Install [Python](https://www.python.org/downloads/) 2.7 (not supported in v3.x.x) and execute npm config set python python2.7 From 1dd0c56d876f3c7928c8ec95cfac3e379b0ad2a6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 21 Jun 2021 17:44:09 +0800 Subject: [PATCH 16/33] [td-225]fix compiler error. --- src/query/src/qAggMain.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 5044320cc7..85b4cb5e8f 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -2670,10 +2670,6 @@ static void date_col_output_function(SQLFunctionCtx *pCtx) { *(int64_t *)(pCtx->pOutput) = pCtx->startTs; } -static FORCE_INLINE void date_col_output_function_f(SQLFunctionCtx *pCtx, int32_t index) { - date_col_output_function(pCtx); -} - static void col_project_function(SQLFunctionCtx *pCtx) { // the number of output rows should not affect the final number of rows, so set it to be 0 if (pCtx->numOfParams == 2) { From 6a95d859e9dbae356bb4ae18b2cf8b4ab87b6673 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 21 Jun 2021 21:04:55 +0800 Subject: [PATCH 17/33] [TD-4827] --- src/client/src/tscSQLParser.c | 2 +- .../general/parser/select_with_tags.sim | 21 +++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index d78eb65f2e..08c840c1de 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -8151,7 +8151,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS int32_t colType = -1; STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta; if (pCols != NULL && taosArrayGetSize(pCols) > 0) { - SColIndex* idx = taosArrayGet(pCols, 0); + SColIndex* idx = taosArrayGet(pCols, taosArrayGetSize(pCols) - 1); SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx->colIndex); if (pSchema != NULL) { colType = pSchema->type; diff --git a/tests/script/general/parser/select_with_tags.sim b/tests/script/general/parser/select_with_tags.sim index 45f6f5c49f..f5c94d2ae6 100644 --- a/tests/script/general/parser/select_with_tags.sim +++ b/tests/script/general/parser/select_with_tags.sim @@ -68,6 +68,27 @@ endw sleep 100 + +#======================= only check first table tag, TD-4827 +sql select count(*) from $mt where t1 in (0) +if $rows != 1 then + return -1 +endi +if $data00 != $rowNum then + return -1; +endi + +$secTag = ' . abc +$secTag = $secTag . 0 +$secTag = $secTag . 
' +sql select count(*) from $mt where t2 =$secTag and t1 in (0) +if $rows != 1 then + return -1 +endi +if $data00 != $rowNum then + return -1; +endi + #================================ sql select ts from select_tags_mt0 print $rows From 41c7b062bb43e38e5cb73caca154bc96f07de65d Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 22 Jun 2021 08:29:58 +0800 Subject: [PATCH 18/33] [TD-4098] refactor IN filter --- src/client/src/tscSQLParser.c | 101 ++++++++++++---------------------- src/tsdb/src/tsdbRead.c | 2 +- 2 files changed, 36 insertions(+), 67 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index d78eb65f2e..4d189d0245 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -65,7 +65,6 @@ static char* getAccountId(SSqlObj* pSql); static int convertTimestampStrToInt64(tVariant *pVar, int32_t precision); static bool serializeExprListToVariant(SArray* pList, tVariant **dest, int16_t colType, uint8_t precision); -static int32_t validateParamOfRelationIn(tVariant *pVar, int32_t colType); static bool has(SArray* pFieldList, int32_t startIdx, const char* name); static char* cloneCurrentDBName(SSqlObj* pSql); @@ -156,78 +155,60 @@ bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType, return ret; } - tSqlExprItem* item = (tSqlExprItem *)taosArrayGet(pList, 0); - int32_t firstTokenType = item->pNode->token.type; - int32_t type = firstTokenType; + tSqlExpr* item = ((tSqlExprItem*)(taosArrayGet(pList, 0)))->pNode; + int32_t firstVarType = item->value.nType; - //nchar to binary and other xxint to bigint - toTSDBType(type); - if (colType != TSDB_DATA_TYPE_TIMESTAMP && !IS_UNSIGNED_NUMERIC_TYPE(colType)) { - if (type != colType && (type != TSDB_DATA_TYPE_BINARY || colType != TSDB_DATA_TYPE_NCHAR)) { - return false; - } - } - type = colType; - SBufferWriter bw = tbufInitWriter( NULL, false); - tbufEnsureCapacity(&bw, 512); + if (colType == TSDB_DATA_TYPE_TIMESTAMP) { + tbufWriteUint32(&bw, TSDB_DATA_TYPE_BIGINT); + } else { + tbufWriteUint32(&bw, colType); + } + tbufWriteInt32(&bw, (int32_t)(pList->size)); - int32_t size = (int32_t)(pList->size); - tbufWriteUint32(&bw, type); - tbufWriteInt32(&bw, size); - - for (int32_t i = 0; i < size; i++) { + for (int32_t i = 0; i < (int32_t)pList->size; i++) { tSqlExpr* pSub = ((tSqlExprItem*)(taosArrayGet(pList, i)))->pNode; + tVariant* var = &pSub->value; // check all the token type in expr list same or not - if (firstTokenType != pSub->token.type) { + if (firstVarType != var->nType) { break; } - toTSDBType(pSub->token.type); - - tVariant var; - tVariantCreate(&var, &pSub->token); - if (type == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(type)) { - tbufWriteInt64(&bw, var.i64); - } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { - // ugly code, refactor later - if (IS_UNSIGNED_NUMERIC_TYPE(pSub->token.type) || IS_SIGNED_NUMERIC_TYPE(pSub->token.type)) { - tbufWriteUint64(&bw, var.i64); + if ((colType == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(colType))) { + tbufWriteInt64(&bw, var->i64); + } else if (IS_UNSIGNED_NUMERIC_TYPE(colType)) { + tbufWriteUint64(&bw, var->u64); + } else if (colType == TSDB_DATA_TYPE_DOUBLE || colType == TSDB_DATA_TYPE_FLOAT) { + if (IS_SIGNED_NUMERIC_TYPE(var->nType) || IS_UNSIGNED_NUMERIC_TYPE(var->nType)) { + tbufWriteDouble(&bw, (double)(var->i64)); } else { - tVariantDestroy(&var); - break; + tbufWriteDouble(&bw, var->dKey); } - } - else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) { - tbufWriteDouble(&bw, var.dKey); 
- } else if (type == TSDB_DATA_TYPE_BINARY){ - tbufWriteBinary(&bw, var.pz, var.nLen); - } else if (type == TSDB_DATA_TYPE_NCHAR) { - char *buf = (char *)calloc(1, (var.nLen + 1)*TSDB_NCHAR_SIZE); - if (tVariantDump(&var, buf, type, false) != TSDB_CODE_SUCCESS) { + } else if (colType == TSDB_DATA_TYPE_BINARY) { + tbufWriteBinary(&bw, var->pz, var->nLen); + } else if (colType == TSDB_DATA_TYPE_NCHAR) { + char *buf = (char *)calloc(1, (var->nLen + 1)*TSDB_NCHAR_SIZE); + if (tVariantDump(var, buf, colType, false) != TSDB_CODE_SUCCESS) { free(buf); - tVariantDestroy(&var); break; } tbufWriteBinary(&bw, buf, twcslen((wchar_t *)buf) * TSDB_NCHAR_SIZE); free(buf); - } else if (type == TSDB_DATA_TYPE_TIMESTAMP) { - if (var.nType == TSDB_DATA_TYPE_BINARY) { - if (convertTimestampStrToInt64(&var, precision) < 0) { - tVariantDestroy(&var); + } else if (colType == TSDB_DATA_TYPE_TIMESTAMP) { + if (var->nType == TSDB_DATA_TYPE_BINARY) { + if (convertTimestampStrToInt64(var, precision) < 0) { break; } - tbufWriteInt64(&bw, var.i64); - } else if (var.nType == TSDB_DATA_TYPE_BIGINT) { - tbufWriteInt64(&bw, var.i64); + tbufWriteInt64(&bw, var->i64); + } else if (var->nType == TSDB_DATA_TYPE_BIGINT) { + tbufWriteInt64(&bw, var->i64); } + } else { + break; } - tVariantDestroy(&var); - - if (i == size - 1) { ret = true;} - } - + if (i == (int32_t)(pList->size - 1)) { ret = true;} + } if (ret == true) { if ((*dst = calloc(1, sizeof(tVariant))) != NULL) { tVariantCreateFromBinary(*dst, tbufGetData(&bw, false), tbufTell(&bw), TSDB_DATA_TYPE_BINARY); @@ -239,13 +220,6 @@ bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType, return ret; } -static int32_t validateParamOfRelationIn(tVariant *pVar, int32_t colType) { - if (pVar->nType != TSDB_DATA_TYPE_BINARY) { - return -1; - } - SBufferReader br = tbufInitReader(pVar->pz, pVar->nLen, false); - return colType == TSDB_DATA_TYPE_NCHAR ? 0 : (tbufReadUint32(&br) == colType ? 
0: -1); -} static uint8_t convertOptr(SStrToken *pToken) { switch (pToken->type) { @@ -3366,11 +3340,6 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, if (pRight->tokenId != TK_SET || !serializeExprListToVariant(pRight->pParam, &pVal, colType, timePrecision)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg); } - if (validateParamOfRelationIn(pVal, colType) != TSDB_CODE_SUCCESS) { - tVariantDestroy(pVal); - free(pVal); - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg); - } pColumnFilter->pz = (int64_t)calloc(1, pVal->nLen + 1); pColumnFilter->len = pVal->nLen; pColumnFilter->filterstr = 1; @@ -8151,7 +8120,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS int32_t colType = -1; STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta; if (pCols != NULL && taosArrayGetSize(pCols) > 0) { - SColIndex* idx = taosArrayGet(pCols, 0); + SColIndex* idx = taosArrayGet(pCols, taosArrayGetSize(pCols) - 1); SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx->colIndex); if (pSchema != NULL) { colType = pSchema->type; diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 92edd4d160..92a0d489b3 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -3364,7 +3364,7 @@ static bool tableFilterFp(const void* pNode, void* param) { GET_TYPED_DATA(v, uint64_t, pInfo->sch.type, val); return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); } - else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_DOUBLE) { + else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) { double v; GET_TYPED_DATA(v, double, pInfo->sch.type, val); return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); From b533fb04d6ef8fcc2c7e3cef2b6bc590a86be838 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 22 Jun 2021 08:48:44 +0800 Subject: [PATCH 19/33] [TD-4098] refactor IN filter --- tests/pytest/insert/in_function.py | 146 ++++++++++++++--------------- 1 file changed, 73 insertions(+), 73 deletions(-) diff --git a/tests/pytest/insert/in_function.py b/tests/pytest/insert/in_function.py index d1fbfd702a..79599de7a7 100644 --- a/tests/pytest/insert/in_function.py +++ b/tests/pytest/insert/in_function.py @@ -621,65 +621,65 @@ class TDTestCase: tdLog.info(cmd1) tdSql.execute(cmd1) - cmd2 = 'select * from in_stable_4 where in_float in (\'888\');' - tdLog.info(cmd2) - tdSql.error(cmd2) - try: - tdSql.execute(cmd2) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") + #cmd2 = 'select * from in_stable_4 where in_float in (\'888\');' + #tdLog.info(cmd2) + #tdSql.error(cmd2) + #try: + # tdSql.execute(cmd2) + # tdLog.exit("invalid operation: not supported filter condition") + #except Exception as e: + # tdLog.info(repr(e)) + # tdLog.info("invalid operation: not supported filter condition") - cmd3 = 'select * from in_stable_4 where in_double in (\'66666\');' - tdLog.info(cmd3) - tdSql.error(cmd3) - try: - tdSql.execute(cmd3) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") + #cmd3 = 'select * from in_stable_4 where in_double in (\'66666\');' + #tdLog.info(cmd3) + #tdSql.error(cmd3) + #try: + # tdSql.execute(cmd3) + # tdLog.exit("invalid operation: not supported filter condition") + #except Exception 
as e: + # tdLog.info(repr(e)) + # tdLog.info("invalid operation: not supported filter condition") - cmd4 = 'select * from in_stable_4 where tin_float in (\'666\');' - tdLog.info(cmd4) - tdSql.error(cmd4) - try: - tdSql.execute(cmd4) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") + #cmd4 = 'select * from in_stable_4 where tin_float in (\'666\');' + #tdLog.info(cmd4) + #tdSql.error(cmd4) + #try: + # tdSql.execute(cmd4) + # tdLog.exit("invalid operation: not supported filter condition") + #except Exception as e: + # tdLog.info(repr(e)) + # tdLog.info("invalid operation: not supported filter condition") - cmd5 = 'select * from in_stable_4 where tin_double in (\'88888\');' - tdLog.info(cmd5) - tdSql.error(cmd5) - try: - tdSql.execute(cmd5) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") + #cmd5 = 'select * from in_stable_4 where tin_double in (\'88888\');' + #tdLog.info(cmd5) + #tdSql.error(cmd5) + #try: + # tdSql.execute(cmd5) + # tdLog.exit("invalid operation: not supported filter condition") + #except Exception as e: + # tdLog.info(repr(e)) + # tdLog.info("invalid operation: not supported filter condition") - cmd6 = 'select * from in_float_double_1 where in_float in (\'888\');' - tdLog.info(cmd6) - tdSql.error(cmd6) - try: - tdSql.execute(cmd6) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") - - cmd7 = 'select * from in_float_double_1 where in_double in (\'66666\');' - tdLog.info(cmd7) - tdSql.error(cmd7) - try: - tdSql.execute(cmd7) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") + #cmd6 = 'select * from in_float_double_1 where in_float in (\'888\');' + #tdLog.info(cmd6) + #tdSql.error(cmd6) + #try: + # tdSql.execute(cmd6) + # tdLog.exit("invalid operation: not supported filter condition") + #except Exception as e: + # tdLog.info(repr(e)) + # tdLog.info("invalid operation: not supported filter condition") + # + #cmd7 = 'select * from in_float_double_1 where in_double in (\'66666\');' + #tdLog.info(cmd7) + #tdSql.error(cmd7) + #try: + # tdSql.execute(cmd7) + # tdLog.exit("invalid operation: not supported filter condition") + #except Exception as e: + # tdLog.info(repr(e)) + # tdLog.info("invalid operation: not supported filter condition") @@ -698,24 +698,24 @@ class TDTestCase: tdSql.execute(cmd1) cmd2 = 'select * from normal_in_float_double_1 where in_float in (\'888\');' - tdLog.info(cmd2) - tdSql.error(cmd2) - try: - tdSql.execute(cmd2) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") - - cmd3 = 'select * from normal_in_float_double_1 where in_double in (\'66666\');' - tdLog.info(cmd3) - tdSql.error(cmd3) - try: - tdSql.execute(cmd3) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") + #tdLog.info(cmd2) + #tdSql.error(cmd2) + #try: + # tdSql.execute(cmd2) + # tdLog.exit("invalid operation: not supported filter 
condition") + #except Exception as e: + # tdLog.info(repr(e)) + # tdLog.info("invalid operation: not supported filter condition") + # + #cmd3 = 'select * from normal_in_float_double_1 where in_double in (\'66666\');' + #tdLog.info(cmd3) + #tdSql.error(cmd3) + #try: + # tdSql.execute(cmd3) + # tdLog.exit("invalid operation: not supported filter condition") + #except Exception as e: + # tdLog.info(repr(e)) + # tdLog.info("invalid operation: not supported filter condition") def stop(self): tdSql.close() From ad651eff54a4d06ef6d3741e45240c8e7c03438a Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 22 Jun 2021 09:29:03 +0800 Subject: [PATCH 20/33] [TD-4098] refactor IN filter --- tests/script/general/parser/where.sim | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/script/general/parser/where.sim b/tests/script/general/parser/where.sim index 00a22eede6..2fe63b9892 100644 --- a/tests/script/general/parser/where.sim +++ b/tests/script/general/parser/where.sim @@ -139,18 +139,18 @@ sql_error select * from $mt where c1 like 1 sql create table wh_mt1 (ts timestamp, c1 smallint, c2 int, c3 bigint, c4 float, c5 double, c6 tinyint, c7 binary(10), c8 nchar(10), c9 bool, c10 timestamp) tags (t1 binary(10), t2 smallint, t3 int, t4 bigint, t5 float, t6 double) sql create table wh_mt1_tb1 using wh_mt1 tags ('tb11', 1, 1, 1, 1, 1) sql insert into wh_mt1_tb1 values (now, 1, 1, 1, 1, 1, 1, 'binary', 'nchar', true, '2019-01-01 00:00:00.000') -sql_error select last(*) from wh_mt1 where c1 in ('1') -sql_error select last(*) from wh_mt1_tb1 where c1 in ('1') -sql_error select last(*) from wh_mt1 where c2 in ('1') -sql_error select last(*) from wh_mt1_tb1 where c2 in ('1') -sql_error select last(*) from wh_mt1 where c3 in ('1') -sql_error select last(*) from wh_mt1_tb1 where c3 in ('1') -sql_error select last(*) from wh_mt1 where c4 in ('1') -sql_error select last(*) from wh_mt1_tb1 where c4 in ('1') -sql_error select last(*) from wh_mt1 where c5 in ('1') -sql_error select last(*) from wh_mt1_tb1 where c5 in ('1') -sql_error select last(*) from wh_mt1 where c6 in ('1') -sql_error select last(*) from wh_mt1_tb1 where c6 in ('1') +#sql_error select last(*) from wh_mt1 where c1 in ('1') +#sql_error select last(*) from wh_mt1_tb1 where c1 in ('1') +#sql_error select last(*) from wh_mt1 where c2 in ('1') +#sql_error select last(*) from wh_mt1_tb1 where c2 in ('1') +#sql_error select last(*) from wh_mt1 where c3 in ('1') +#sql_error select last(*) from wh_mt1_tb1 where c3 in ('1') +#sql_error select last(*) from wh_mt1 where c4 in ('1') +#sql_error select last(*) from wh_mt1_tb1 where c4 in ('1') +#sql_error select last(*) from wh_mt1 where c5 in ('1') +#sql_error select last(*) from wh_mt1_tb1 where c5 in ('1') +#sql_error select last(*) from wh_mt1 where c6 in ('1') +#sql_error select last(*) from wh_mt1_tb1 where c6 in ('1') #sql_error select last(*) from wh_mt1 where c7 in ('binary') #sql_error select last(*) from wh_mt1_tb1 where c7 in ('binary') #sql_error select last(*) from wh_mt1 where c8 in ('nchar') From 86f935d4b09da6614e03f1cf1f67dd6b2aef2818 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 22 Jun 2021 14:48:21 +0800 Subject: [PATCH 21/33] [td-225] --- src/query/src/qAggMain.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 85b4cb5e8f..8efc4aad4c 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -2675,6 +2675,8 @@ static void 
col_project_function(SQLFunctionCtx *pCtx) { if (pCtx->numOfParams == 2) { return; } + + // only one row is required. if (pCtx->param[0].i64 == 1) { SET_VAL(pCtx, pCtx->size, 1); } else { @@ -2683,7 +2685,8 @@ static void col_project_function(SQLFunctionCtx *pCtx) { char *pData = GET_INPUT_DATA_LIST(pCtx); if (pCtx->order == TSDB_ORDER_ASC) { - memcpy(pCtx->pOutput, pData, (size_t) pCtx->size * pCtx->inputBytes); + int32_t numOfRows = (pCtx->param[0].i64 == 1)? 1:pCtx->size; + memcpy(pCtx->pOutput, pData, (size_t) numOfRows * pCtx->inputBytes); } else { for(int32_t i = 0; i < pCtx->size; ++i) { memcpy(pCtx->pOutput + (pCtx->size - 1 - i) * pCtx->inputBytes, pData + i * pCtx->inputBytes, From 09650a42308ae30fa02927fb93661a6eca6f00c3 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Tue, 22 Jun 2021 15:48:43 +0800 Subject: [PATCH 22/33] [TD-4836] : emphasize setting firstEp in all dnode in cluster. --- documentation20/cn/10.cluster/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md index 62d709c279..db20ca4edb 100644 --- a/documentation20/cn/10.cluster/docs.md +++ b/documentation20/cn/10.cluster/docs.md @@ -85,7 +85,7 @@ taos> 将后续的数据节点添加到现有集群,具体有以下几步: -1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd; +1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd;(注意:每个物理节点都需要在 taos.cfg 文件中将 firstEP 参数配置为新集群首个节点的 End Point——在本例中是 h1.taos.com:6030) 2. 在第一个数据节点,使用CLI程序taos, 登录进TDengine系统, 执行命令: From 1acccacb242c4941e188619704f9c89b05249d7c Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Tue, 22 Jun 2021 15:54:25 +0800 Subject: [PATCH 23/33] [TD-4837] : "update" parameter can not be changed by "ALTER DATABASE". 
--- documentation20/cn/11.administrator/docs.md | 2 +- documentation20/cn/12.taos-sql/docs.md | 5 ----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index ae13a36f76..10951ed1fb 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -131,7 +131,7 @@ taosd -C - quorum:多副本环境下指令执行的确认数要求。取值范围:1、2,单位为个,默认值:1。(可通过 alter database 修改) - precision:时间戳精度标识。ms表示毫秒,us表示微秒,默认值:ms。(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。) - cacheLast:是否在内存中缓存子表的最近数据。0:关闭;1:缓存子表最近一行数据;2:缓存子表每一列的最近的非NULL值;3:同时打开缓存最近行和列功能。默认值:0。(可通过 alter database 修改)(从 2.1.2.0 版本开始此参数支持 0~3 的取值范围,在此之前取值只能是 [0, 1];而 2.0.11.0 之前的版本在 SQL 指令中不支持此参数。)(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。) -- update:是否允许更新。0:不允许;1:允许。默认值:0。(可通过 alter database 修改) +- update:是否允许更新。0:不允许;1:允许。默认值:0。 对于一个应用场景,可能有多种数据特征的数据并存,最佳的设计是将具有相同数据特征的表放在一个库里,这样一个应用有多个库,而每个库可以配置不同的存储参数,从而保证系统有最优的性能。TDengine允许应用在创建库时指定上述存储参数,如果指定,该参数就将覆盖对应的系统配置参数。举例,有下述SQL: diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index b0268a9ed4..278757b81b 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -139,11 +139,6 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 ``` FSYNC 参数控制执行 fsync 操作的周期。缺省值为 3000,单位是毫秒,取值范围为 [0, 180000]。如果设置为 0,表示每次写入,立即执行 fsync。该设置项主要用于调节 WAL 参数设为 2 时的系统行为。 - ```mysql - ALTER DATABASE db_name UPDATE 0; - ``` - UPDATE 参数控制是否允许更新数据。缺省值为 0,取值范围为 [0, 1]。0 表示会直接丢弃后写入的相同时间戳的数据;1 表示会使用后写入的数据覆盖已有的相同时间戳的数据。 - **Tips**: 以上所有参数修改后都可以用show databases来确认是否修改成功。另外,从 2.1.3.0 版本开始,修改这些参数后无需重启服务器即可生效。 - **显示系统所有数据库** From 565cc797d25361d04471b68dd4bae260e12c0151 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 22 Jun 2021 16:12:38 +0800 Subject: [PATCH 24/33] [td-225]upate the log. 
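Part of this change is to the trace output: the log lines now print the SqlObj's 64-bit self id with the 0x%"PRIx64" convention used elsewhere in the client, instead of the raw %p pointer, and the sub-object log prints pSubs[i]->self for the same reason. A minimal sketch of the format-string pattern; the id value below is made up:

```c
#include <inttypes.h>
#include <stdio.h>

int main(void) {
  uint64_t self = 0x7f0012abULL;  /* made-up object id, stands in for pSql->self */
  /* Same pattern as the updated tscDebugL/tscDebug calls: a stable 64-bit id
   * formatted with PRIx64 rather than a process-specific pointer. */
  printf("0x%" PRIx64 " SQL: %s\n", self, "select 1");
  return 0;
}
```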
--- src/client/src/tscPrepare.c | 27 +++------------------------ src/client/src/tscUtil.c | 4 ++-- 2 files changed, 5 insertions(+), 26 deletions(-) diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index bac8920d8f..8bb776ffee 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1156,27 +1156,6 @@ static void insertBatchClean(STscStmt* pStmt) { tfree(pCmd->insertParam.pTableNameList); -/* - STableDataBlocks** p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, NULL); - - STableDataBlocks* pOneTableBlock = *p; - - while (1) { - SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData; - - pOneTableBlock->size = sizeof(SSubmitBlk); - - pBlocks->numOfRows = 0; - - p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, p); - if (p == NULL) { - break; - } - - pOneTableBlock = *p; - } -*/ - pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pCmd->insertParam.pDataBlocks); pCmd->insertParam.numOfTables = 0; @@ -1499,7 +1478,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { pRes->numOfRows = 1; strtolower(pSql->sqlstr, sql); - tscDebugL("%p SQL: %s", pSql, pSql->sqlstr); + tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr); if (tscIsInsertData(pSql->sqlstr)) { pStmt->isInsert = true; @@ -1604,7 +1583,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags if (taosHashGetSize(pCmd->insertParam.pTableBlockHashList) > 0) { SHashObj* hashList = pCmd->insertParam.pTableBlockHashList; pCmd->insertParam.pTableBlockHashList = NULL; - tscResetSqlCmd(pCmd, true); + tscResetSqlCmd(pCmd, false); pCmd->insertParam.pTableBlockHashList = hashList; } @@ -1663,7 +1642,7 @@ int taos_stmt_close(TAOS_STMT* stmt) { } else { if (pStmt->multiTbInsert) { taosHashCleanup(pStmt->mtb.pTableHash); - pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, true); + pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, false); taosHashCleanup(pStmt->pSql->cmd.insertParam.pTableBlockHashList); pStmt->pSql->cmd.insertParam.pTableBlockHashList = NULL; taosArrayDestroy(pStmt->mtb.tags); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 83ec886e19..74dbe42eeb 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1332,7 +1332,7 @@ void tscFreeSubobj(SSqlObj* pSql) { tscDebug("0x%"PRIx64" start to free sub SqlObj, numOfSub:%d", pSql->self, pSql->subState.numOfSub); for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { - tscDebug("0x%"PRIx64" free sub SqlObj:%p, index:%d", pSql->self, pSql->pSubs[i], i); + tscDebug("0x%"PRIx64" free sub SqlObj:0x%"PRIx64", index:%d", pSql->self, pSql->pSubs[i]->self, i); taos_free_result(pSql->pSubs[i]); pSql->pSubs[i] = NULL; } @@ -1784,7 +1784,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl tscSortRemoveDataBlockDupRows(pOneTableBlock); char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1); - tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName), + tscDebug("0x%"PRIx64" name:%s, tid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName), pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey)); int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + 
expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); From c5c3ee4c3769511c9d99090f8119172529690582 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 22 Jun 2021 16:56:30 +0800 Subject: [PATCH 25/33] [td-4802]: fix crash caused by too many vgroups; --- src/mnode/src/mnodeTable.c | 52 ++++++++++++++++++++++++++++++-------- src/query/src/qExecutor.c | 20 ++++++++++----- 2 files changed, 56 insertions(+), 16 deletions(-) diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 032c6ee94b..ae37f74fa3 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -1740,16 +1740,22 @@ static int32_t mnodeGetSuperTableMeta(SMnodeMsg *pMsg) { return TSDB_CODE_SUCCESS; } -static int32_t calculateVgroupMsgLength(SSTableVgroupMsg* pInfo, int32_t numOfTable) { +static int32_t doGetVgroupInfoLength(char* name) { + SSTableObj *pTable = mnodeGetSuperTable(name); + int32_t len = 0; + if (pTable != NULL && pTable->vgHash != NULL) { + len = (taosHashGetSize(pTable->vgHash) * sizeof(SVgroupMsg) + sizeof(SVgroupsMsg)); + } + + mnodeDecTableRef(pTable); + return len; +} + +static int32_t getVgroupInfoLength(SSTableVgroupMsg* pInfo, int32_t numOfTable) { int32_t contLen = sizeof(SSTableVgroupRspMsg) + 32 * sizeof(SVgroupMsg) + sizeof(SVgroupsMsg); for (int32_t i = 0; i < numOfTable; ++i) { char *stableName = (char *)pInfo + sizeof(SSTableVgroupMsg) + (TSDB_TABLE_FNAME_LEN)*i; - SSTableObj *pTable = mnodeGetSuperTable(stableName); - if (pTable != NULL && pTable->vgHash != NULL) { - contLen += (taosHashGetSize(pTable->vgHash) * sizeof(SVgroupMsg) + sizeof(SVgroupsMsg)); - } - - mnodeDecTableRef(pTable); + contLen += doGetVgroupInfoLength(stableName); } return contLen; @@ -1820,7 +1826,7 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) { int32_t numOfTable = htonl(pInfo->numOfTables); // calculate the required space. 
- int32_t contLen = calculateVgroupMsgLength(pInfo, numOfTable); + int32_t contLen = getVgroupInfoLength(pInfo, numOfTable); SSTableVgroupRspMsg *pRsp = rpcMallocCont(contLen); if (pRsp == NULL) { return TSDB_CODE_MND_OUT_OF_MEMORY; @@ -2860,6 +2866,27 @@ static void mnodeProcessAlterTableRsp(SRpcMsg *rpcMsg) { } } +static SMultiTableMeta* ensureMsgBufferSpace(SMultiTableMeta *pMultiMeta, SArray* pList, int32_t* totalMallocLen, int32_t numOfVgroupList) { + int32_t len = 0; + for (int32_t i = 0; i < numOfVgroupList; ++i) { + char *name = taosArrayGetP(pList, i); + len += doGetVgroupInfoLength(name); + } + + if (len + pMultiMeta->contLen > (*totalMallocLen)) { + while (len + pMultiMeta->contLen > (*totalMallocLen)) { + (*totalMallocLen) *= 2; + } + + pMultiMeta = rpcReallocCont(pMultiMeta, *totalMallocLen); + if (pMultiMeta == NULL) { + return NULL; + } + } + + return pMultiMeta; +} + static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { SMultiTableInfoMsg *pInfo = pMsg->rpcMsg.pCont; @@ -2950,8 +2977,6 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { } } - char* msg = (char*) pMultiMeta + pMultiMeta->contLen; - // add the additional super table names that needs the vgroup info for(;t < num; ++t) { taosArrayPush(pList, &nameList[t]); @@ -2961,6 +2986,13 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { int32_t numOfVgroupList = (int32_t) taosArrayGetSize(pList); pMultiMeta->numOfVgroup = htonl(numOfVgroupList); + pMultiMeta = ensureMsgBufferSpace(pMultiMeta, pList, &totalMallocLen, numOfVgroupList); + if (pMultiMeta == NULL) { + code = TSDB_CODE_MND_OUT_OF_MEMORY; + goto _end; + } + + char* msg = (char*) pMultiMeta + pMultiMeta->contLen; for(int32_t i = 0; i < numOfVgroupList; ++i) { char* name = taosArrayGetP(pList, i); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 3145a7fc3d..719c1de252 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1305,9 +1305,16 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn continue; } - if (memcmp(pInfo->prevData, val, bytes) == 0) { - num++; - continue; + if (IS_VAR_DATA_TYPE(type)) { + if(varDataLen(val) == varDataLen(pInfo->prevData) && memcmp(pInfo->prevData, val, varDataLen(val)) == 0) { + num++; + continue; + } + } else { + if (memcmp(pInfo->prevData, val, bytes)) { + num++; + continue; + } } if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) { @@ -1416,9 +1423,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf } static void setResultRowKey(SResultRow* pResultRow, char* pData, int16_t type) { - int64_t v = -1; - GET_TYPED_DATA(v, int64_t, type, pData); - if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { + if (IS_VAR_DATA_TYPE(type)) { if (pResultRow->key == NULL) { pResultRow->key = malloc(varDataTLen(pData)); varDataCopy(pResultRow->key, pData); @@ -1426,6 +1431,9 @@ static void setResultRowKey(SResultRow* pResultRow, char* pData, int16_t type) { assert(memcmp(pResultRow->key, pData, varDataTLen(pData)) == 0); } } else { + int64_t v = -1; + GET_TYPED_DATA(v, int64_t, type, pData); + pResultRow->win.skey = v; pResultRow->win.ekey = v; } From 837c44ebfb557260d99cc7b667eefb858a70bfe6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 22 Jun 2021 17:28:26 +0800 Subject: [PATCH 26/33] [td-225]fix typo --- src/query/src/qExecutor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/src/qExecutor.c 
b/src/query/src/qExecutor.c index 719c1de252..b5960b9e58 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1311,7 +1311,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn continue; } } else { - if (memcmp(pInfo->prevData, val, bytes)) { + if (memcmp(pInfo->prevData, val, bytes) == 0) { num++; continue; } From aeaee423b6fefd6e321dfb071f9b0d483946f744 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 22 Jun 2021 21:48:39 +0800 Subject: [PATCH 27/33] [TD-4823]: taosdemo getTableName return empty. (#6571) * [TD-4823]: taosdemo getTableName return empty. * fix typo. --- src/kit/taosdemo/taosdemo.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 865224d2c3..33ee2a9bc2 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -6004,6 +6004,12 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n", __func__, __LINE__, pThreadInfo->threadID, tableSeq, tableName); + if (0 == strlen(tableName)) { + errorPrint("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + free(pThreadInfo->buffer); + return NULL; + } int64_t remainderBufLen = maxSqlLen; char *pstr = pThreadInfo->buffer; From 6766c3944851b6a0fd519a803d9e0ca4e444c492 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 22 Jun 2021 21:58:36 +0800 Subject: [PATCH 28/33] [TD-4838]: taosdump binary length bug. (#6581) --- src/kit/taosdump/taosdump.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 15db83297c..165bbdf990 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -1858,13 +1858,13 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); //pstr = stpcpy(pstr, tbuf); //*(pstr++) = '\''; - pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); break; } case TSDB_DATA_TYPE_NCHAR: { char tbuf[COMMAND_SIZE] = {0}; convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); - pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); break; } case TSDB_DATA_TYPE_TIMESTAMP: @@ -1897,7 +1897,8 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* total_sqlstr_len += curr_sqlstr_len; - if ((count >= arguments->data_batch) || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) { + if ((count >= arguments->data_batch) + || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) { fprintf(fp, ";\n"); count = 0; } //else { @@ -1905,6 +1906,8 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* //} } + printf("total_sqlstr_len: %d\n", total_sqlstr_len); + fprintf(fp, "\n"); atomic_add_fetch_64(&totalDumpOutRows, totalRows); From d76117d8304d30b16772e0b1a1341a2b22f22b4d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 22 Jun 2021 23:47:35 +0800 Subject: [PATCH 29/33] [td-225]fix bug by regression test. 
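[Editor's note] The IS_VAR_DATA_TYPE branch added to doHashGroupbyAgg in patch 25 is corrected twice more: the memcmp() result check in patch 26 above, and the payload/length handling in this patch and in patch 30 below. A minimal, self-contained sketch of the comparison the series converges on may help; the SVarData layout and the two macros are stand-ins assumed to match how varDataLen()/varDataVal() are used in the diffs, and this is an editorial illustration rather than code from the patch.

/* Editorial sketch: group-by key equality for var-length (BINARY/NCHAR)
 * columns, mirroring where the series ends up after patch 30. The layout
 * below is an assumption: a length header followed by the payload. */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

typedef struct { uint16_t len; char data[]; } SVarData;   /* stand-in layout */

#define varDataLen(v) (((const SVarData *)(v))->len)
#define varDataVal(v) (((const SVarData *)(v))->data)

/* Two var-data values are equal only when the stored lengths match and the
 * payload bytes match; comparing a fixed number of raw bytes (the original
 * code) also compares stale bytes past the actual string, so identical
 * strings can be split into different groups. */
static bool varDataEqual(const void *prev, const void *val) {
  int32_t len = varDataLen(val);
  return len == varDataLen(prev) &&
         memcmp(varDataVal(prev), varDataVal(val), len) == 0;
}

/* Fixed-length values keep the plain memcmp over the column width, with the
 * result compared against 0 (the typo fixed in patch 26). */
static bool fixedDataEqual(const void *prev, const void *val, int32_t bytes) {
  return memcmp(prev, val, bytes) == 0;
}

With helpers like these, the group-by loop only starts a new result row when the current value differs from prevData, which is the behavior the three commits restore.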
--- src/query/src/qExecutor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index b5960b9e58..9b356d57b8 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1306,7 +1306,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn } if (IS_VAR_DATA_TYPE(type)) { - if(varDataLen(val) == varDataLen(pInfo->prevData) && memcmp(pInfo->prevData, val, varDataLen(val)) == 0) { + if(varDataLen(val) == varDataLen(pInfo->prevData) && memcmp(varDataVal(pInfo->prevData), varDataVal(val), varDataLen(val)) == 0) { num++; continue; } From ed13f4e541b0a21c814dd697b20aeff2cb158837 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 23 Jun 2021 10:48:58 +0800 Subject: [PATCH 30/33] [td-225] refactor. --- src/query/src/qExecutor.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 9b356d57b8..97a6cf807c 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1306,7 +1306,8 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn } if (IS_VAR_DATA_TYPE(type)) { - if(varDataLen(val) == varDataLen(pInfo->prevData) && memcmp(varDataVal(pInfo->prevData), varDataVal(val), varDataLen(val)) == 0) { + int32_t len = varDataLen(val); + if(len == varDataLen(pInfo->prevData) && memcmp(varDataVal(pInfo->prevData), varDataVal(val), len) == 0) { num++; continue; } From 6bc71839b14e1d8abeb9f3606a7dcc404a916d58 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 23 Jun 2021 11:17:16 +0800 Subject: [PATCH 31/33] [TD-4098] refactor IN filter --- src/client/src/tscSQLParser.c | 18 ++++- tests/pytest/insert/in_function.py | 110 ++++++++++++++--------------- 2 files changed, 72 insertions(+), 56 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 4d189d0245..4ce2356f8b 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -176,18 +176,32 @@ bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType, break; } if ((colType == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(colType))) { + if (var->nType != TSDB_DATA_TYPE_BOOL && !IS_SIGNED_NUMERIC_TYPE(var->nType)) { + break; + } tbufWriteInt64(&bw, var->i64); } else if (IS_UNSIGNED_NUMERIC_TYPE(colType)) { + if (IS_SIGNED_NUMERIC_TYPE(var->nType) && IS_UNSIGNED_NUMERIC_TYPE(var->nType)) { + break; + } tbufWriteUint64(&bw, var->u64); } else if (colType == TSDB_DATA_TYPE_DOUBLE || colType == TSDB_DATA_TYPE_FLOAT) { if (IS_SIGNED_NUMERIC_TYPE(var->nType) || IS_UNSIGNED_NUMERIC_TYPE(var->nType)) { tbufWriteDouble(&bw, (double)(var->i64)); - } else { + } else if (var->nType == TSDB_DATA_TYPE_DOUBLE || var->nType == TSDB_DATA_TYPE_FLOAT){ tbufWriteDouble(&bw, var->dKey); + } else { + break; } } else if (colType == TSDB_DATA_TYPE_BINARY) { + if (var->nType != TSDB_DATA_TYPE_BINARY) { + break; + } tbufWriteBinary(&bw, var->pz, var->nLen); } else if (colType == TSDB_DATA_TYPE_NCHAR) { + if (var->nType != TSDB_DATA_TYPE_BINARY) { + break; + } char *buf = (char *)calloc(1, (var->nLen + 1)*TSDB_NCHAR_SIZE); if (tVariantDump(var, buf, colType, false) != TSDB_CODE_SUCCESS) { free(buf); @@ -203,6 +217,8 @@ bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType, tbufWriteInt64(&bw, var->i64); } else if (var->nType == TSDB_DATA_TYPE_BIGINT) { tbufWriteInt64(&bw, var->i64); + } else { + break; } } else { break; diff --git 
a/tests/pytest/insert/in_function.py b/tests/pytest/insert/in_function.py index 79599de7a7..263c8a78aa 100644 --- a/tests/pytest/insert/in_function.py +++ b/tests/pytest/insert/in_function.py @@ -621,65 +621,65 @@ class TDTestCase: tdLog.info(cmd1) tdSql.execute(cmd1) - #cmd2 = 'select * from in_stable_4 where in_float in (\'888\');' - #tdLog.info(cmd2) - #tdSql.error(cmd2) - #try: - # tdSql.execute(cmd2) - # tdLog.exit("invalid operation: not supported filter condition") - #except Exception as e: - # tdLog.info(repr(e)) - # tdLog.info("invalid operation: not supported filter condition") + cmd2 = 'select * from in_stable_4 where in_float in (\'888\');' + tdLog.info(cmd2) + tdSql.error(cmd2) + try: + tdSql.execute(cmd2) + tdLog.exit("invalid operation: not supported filter condition") + except Exception as e: + tdLog.info(repr(e)) + tdLog.info("invalid operation: not supported filter condition") - #cmd3 = 'select * from in_stable_4 where in_double in (\'66666\');' - #tdLog.info(cmd3) - #tdSql.error(cmd3) - #try: - # tdSql.execute(cmd3) - # tdLog.exit("invalid operation: not supported filter condition") - #except Exception as e: - # tdLog.info(repr(e)) - # tdLog.info("invalid operation: not supported filter condition") + cmd3 = 'select * from in_stable_4 where in_double in (\'66666\');' + tdLog.info(cmd3) + tdSql.error(cmd3) + try: + tdSql.execute(cmd3) + tdLog.exit("invalid operation: not supported filter condition") + except Exception as e: + tdLog.info(repr(e)) + tdLog.info("invalid operation: not supported filter condition") - #cmd4 = 'select * from in_stable_4 where tin_float in (\'666\');' - #tdLog.info(cmd4) - #tdSql.error(cmd4) - #try: - # tdSql.execute(cmd4) - # tdLog.exit("invalid operation: not supported filter condition") - #except Exception as e: - # tdLog.info(repr(e)) - # tdLog.info("invalid operation: not supported filter condition") + cmd4 = 'select * from in_stable_4 where tin_float in (\'666\');' + tdLog.info(cmd4) + tdSql.error(cmd4) + try: + tdSql.execute(cmd4) + tdLog.exit("invalid operation: not supported filter condition") + except Exception as e: + tdLog.info(repr(e)) + tdLog.info("invalid operation: not supported filter condition") - #cmd5 = 'select * from in_stable_4 where tin_double in (\'88888\');' - #tdLog.info(cmd5) - #tdSql.error(cmd5) - #try: - # tdSql.execute(cmd5) - # tdLog.exit("invalid operation: not supported filter condition") - #except Exception as e: - # tdLog.info(repr(e)) - # tdLog.info("invalid operation: not supported filter condition") + cmd5 = 'select * from in_stable_4 where tin_double in (\'88888\');' + tdLog.info(cmd5) + tdSql.error(cmd5) + try: + tdSql.execute(cmd5) + tdLog.exit("invalid operation: not supported filter condition") + except Exception as e: + tdLog.info(repr(e)) + tdLog.info("invalid operation: not supported filter condition") - #cmd6 = 'select * from in_float_double_1 where in_float in (\'888\');' - #tdLog.info(cmd6) - #tdSql.error(cmd6) - #try: - # tdSql.execute(cmd6) - # tdLog.exit("invalid operation: not supported filter condition") - #except Exception as e: - # tdLog.info(repr(e)) - # tdLog.info("invalid operation: not supported filter condition") - # - #cmd7 = 'select * from in_float_double_1 where in_double in (\'66666\');' - #tdLog.info(cmd7) - #tdSql.error(cmd7) - #try: - # tdSql.execute(cmd7) - # tdLog.exit("invalid operation: not supported filter condition") - #except Exception as e: - # tdLog.info(repr(e)) - # tdLog.info("invalid operation: not supported filter condition") + cmd6 = 'select * from in_float_double_1 
where in_float in (\'888\');' + tdLog.info(cmd6) + tdSql.error(cmd6) + try: + tdSql.execute(cmd6) + tdLog.exit("invalid operation: not supported filter condition") + except Exception as e: + tdLog.info(repr(e)) + tdLog.info("invalid operation: not supported filter condition") + + cmd7 = 'select * from in_float_double_1 where in_double in (\'66666\');' + tdLog.info(cmd7) + tdSql.error(cmd7) + try: + tdSql.execute(cmd7) + tdLog.exit("invalid operation: not supported filter condition") + except Exception as e: + tdLog.info(repr(e)) + tdLog.info("invalid operation: not supported filter condition") From 48a5f68b71ef2571d590fb98aa2bd92936d41930 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Wed, 23 Jun 2021 14:36:12 +0800 Subject: [PATCH 32/33] [TD-2569] : describe SESSION & STATE window. --- documentation20/cn/00.index/docs.md | 2 +- documentation20/cn/12.taos-sql/docs.md | 53 +++++++++++++++----------- 2 files changed, 31 insertions(+), 24 deletions(-) diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md index 4c37ce598c..c900cd373d 100644 --- a/documentation20/cn/00.index/docs.md +++ b/documentation20/cn/00.index/docs.md @@ -42,7 +42,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 * [数据写入](/taos-sql#insert):支持单表单条、多条、多表多条写入,支持历史数据写入 * [数据查询](/taos-sql#select):支持时间段、值过滤、排序、查询结果手动分页等 * [SQL函数](/taos-sql#functions):支持各种聚合函数、选择函数、计算函数,如avg, min, diff等 -* [时间维度聚合](/taos-sql#aggregation):将表中数据按照时间段进行切割后聚合,降维处理 +* [窗口切分聚合](/taos-sql#aggregation):将表中数据按照时间段等方式进行切割后聚合,降维处理 * [边界限制](/taos-sql#limitation):库、表、SQL等边界限制条件 * [错误码](/taos-sql/error-code):TDengine 2.0 错误码以及对应的十进制码 diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 278757b81b..5904abbbaa 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -476,9 +476,10 @@ Query OK, 1 row(s) in set (0.001091s) SELECT select_expr [, select_expr ...] 
FROM {tb_name_list} [WHERE where_condition] - [INTERVAL (interval_val [, interval_offset])] - [SLIDING sliding_val] - [FILL fill_val] + [SESSION(ts_col, tol_val)] + [STATE_WINDOW(col)] + [INTERVAL(interval_val [, interval_offset]) [SLIDING sliding_val]] + [FILL(fill_mod_and_val)] [GROUP BY col_list] [ORDER BY col_list { DESC | ASC }] [SLIMIT limit_val [SOFFSET offset_val]] @@ -1284,39 +1285,45 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 Query OK, 3 row(s) in set (0.001046s) ``` -## 时间维度聚合 +## 按窗口切分聚合 -TDengine支持按时间段进行聚合,可以将表中数据按照时间段进行切割后聚合生成结果,比如温度传感器每秒采集一次数据,但需查询每隔10分钟的温度平均值。这个聚合适合于降维(down sample)操作, 语法如下: +TDengine 支持按时间段等窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这类聚合适合于降维(down sample)操作,语法如下: ```mysql SELECT function_list FROM tb_name [WHERE where_condition] - INTERVAL (interval [, offset]) - [SLIDING sliding] - [FILL ({NONE | VALUE | PREV | NULL | LINEAR | NEXT})] + [SESSION(ts_col, tol_val)] + [STATE_WINDOW(col)] + [INTERVAL(interval [, offset]) [SLIDING sliding]] + [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})] SELECT function_list FROM stb_name [WHERE where_condition] - INTERVAL (interval [, offset]) - [SLIDING sliding] - [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})] + [SESSION(ts_col, tol_val)] + [STATE_WINDOW(col)] + [INTERVAL(interval [, offset]) [SLIDING sliding]] + [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})] [GROUP BY tags] ``` -- 聚合时间段的长度由关键词INTERVAL指定,最短时间间隔10毫秒(10a),并且支持偏移(偏移必须小于间隔)。聚合查询中,能够同时执行的聚合和选择函数仅限于单个输出的函数:count、avg、sum 、stddev、leastsquares、percentile、min、max、first、last,不能使用具有多行输出结果的函数(例如:top、bottom、diff以及四则运算)。 -- WHERE语句可以指定查询的起止时间和其他过滤条件。 -- SLIDING语句用于指定聚合时间段的前向增量。 -- FILL语句指定某一时间区间数据缺失的情况下的填充模式。填充模式包括以下几种: - 1. 不进行填充:NONE(默认填充模式)。 - 2. VALUE填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。 - 3. NULL填充:使用NULL填充数据。例如:FILL(NULL)。 - 4. PREV填充:使用前一个非NULL值填充数据。例如:FILL(PREV)。 - 5. NEXT填充:使用下一个非NULL值填充数据。例如:FILL(NEXT)。 +- 在聚合查询中,function_list 位置允许使用聚合和选择函数,并要求每个函数仅输出单个结果(例如:COUNT、AVG、SUM、STDDEV、LEASTSQUARES、PERCENTILE、MIN、MAX、FIRST、LAST),而不能使用具有多行输出结果的函数(例如:TOP、BOTTOM、DIFF 以及四则运算)。 +- 查询过滤、聚合等操作按照每个切分窗口为独立的单位执行。聚合查询目前支持三种窗口的划分方式: + 1. 时间窗口:聚合时间段的窗口宽度由关键词 INTERVAL 指定,最短时间间隔 10 毫秒(10a);并且支持偏移 offset(偏移必须小于间隔),也即时间窗口划分与“UTC 时刻 0”相比的偏移量。SLIDING 语句用于指定聚合时间段的前向增量,也即每次窗口向前滑动的时长。当 SLIDING 与 INTERVAL 取值相等的时候,滑动窗口即为翻转窗口。 + 2. 状态窗口:使用整数(布尔值)或字符串来标识产生记录时设备的状态量,产生的记录如果具有相同的状态量取值则归属于同一个状态窗口,数值改变后该窗口关闭。状态量所对应的列作为 STAT_WINDOW 语句的参数来指定。 + 3. 会话窗口:时间戳所在的列由 SESSION 语句的 ts_col 参数指定,会话窗口根据相邻两条记录的时间戳差值来确定是否属于同一个会话——如果时间戳差异在 tol_val 以内,则认为记录仍属于同一个窗口;如果时间变化超过 tol_val,则自动开启下一个窗口。 +- WHERE 语句可以指定查询的起止时间和其他过滤条件。 +- FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填充模式包括以下几种: + 1. 不进行填充:NONE(默认填充模式)。 + 2. VALUE 填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。 + 3. PREV 填充:使用前一个非 NULL 值填充数据。例如:FILL(PREV)。 + 4. NULL 填充:使用 NULL 填充数据。例如:FILL(NULL)。 + 5. LINEAR 填充:根据前后距离最近的非 NULL 值做线性插值填充。例如:FILL(LINEAR)。 + 6. NEXT 填充:使用下一个非 NULL 值填充数据。例如:FILL(NEXT)。 说明: - 1. 使用FILL语句的时候可能生成大量的填充输出,务必指定查询的时间区间。针对每次查询,系统可返回不超过1千万条具有插值的结果。 + 1. 使用 FILL 语句的时候可能生成大量的填充输出,务必指定查询的时间区间。针对每次查询,系统可返回不超过 1 千万条具有插值的结果。 2. 在时间维度聚合中,返回的结果中时间序列严格单调递增。 - 3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用GROUP BY语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了GROUP BY语句分组,则返回结果中每个GROUP内不按照时间序列严格单调递增。 + 3. 
如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用 GROUP BY 语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了 GROUP BY 语句分组,则返回结果中每个 GROUP 内不按照时间序列严格单调递增。 时间聚合也常被用于连续查询场景,可以参考文档 [连续查询(Continuous Query)](https://www.taosdata.com/cn/documentation/advanced-features#continuous-query)。 @@ -1326,7 +1333,7 @@ SELECT function_list FROM stb_name CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT); ``` -针对智能电表采集的数据,以10分钟为一个阶段,计算过去24小时的电流数据的平均值、最大值、电流的中位数、以及随着时间变化的电流走势拟合直线。如果没有计算值,用前一个非NULL值填充。使用的查询语句如下: +针对智能电表采集的数据,以 10 分钟为一个阶段,计算过去 24 小时的电流数据的平均值、最大值、电流的中位数、以及随着时间变化的电流走势拟合直线。如果没有计算值,用前一个非 NULL 值填充。使用的查询语句如下: ```mysql SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), PERCENTILE(current, 50) FROM meters From c2734a60dc829d50c122614386ac4cc950b1787f Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 23 Jun 2021 20:03:36 +0800 Subject: [PATCH 33/33] [TD-4874]: reset customScore to 0 --- src/mnode/src/mnodeDnode.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index a087b076a5..e3feea7d3a 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -101,6 +101,8 @@ static int32_t mnodeDnodeActionInsert(SSdbRow *pRow) { pDnode->offlineReason = TAOS_DN_OFF_STATUS_NOT_RECEIVED; } + pDnode->customScore = 0; + dnodeUpdateEp(pDnode->dnodeId, pDnode->dnodeEp, pDnode->dnodeFqdn, &pDnode->dnodePort); mnodeUpdateDnodeEps(); @@ -1296,4 +1298,4 @@ int32_t mnodeCompactDnodes() { mInfo("end to compact dnodes table..."); return 0; -} \ No newline at end of file +}
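[Editor's note] On TD-4838 (patch 28): the BINARY and NCHAR branches advanced the base pointer with pstr += sprintf(pstr + curr_sqlstr_len, ...) while every other branch accumulated into curr_sqlstr_len, so the running length no longer matched what had actually been written and the overflow check driven by total_sqlstr_len undercounted. The fix makes these branches accumulate the sprintf return value into curr_sqlstr_len like the rest. A small, self-contained sketch of that append idiom follows; the buffer and names are illustrative, not from taosdump, and the example assumes no truncation.

/* Editorial sketch for TD-4838: append into one buffer by adding the number
 * of characters written to a single running offset, instead of also moving
 * the base pointer. Illustrative names only. */
#include <stdio.h>
#include <string.h>

int main(void) {
  char sql[256];
  size_t len = 0;

  len += (size_t)snprintf(sql + len, sizeof(sql) - len, "INSERT INTO t VALUES (");
  len += (size_t)snprintf(sql + len, sizeof(sql) - len, "'%s'", "binary value");
  len += (size_t)snprintf(sql + len, sizeof(sql) - len, ", %d)", 42);

  /* len now equals strlen(sql); mixing `ptr += sprintf(ptr + len, ...)` with
   * a separately maintained len lets the two drift apart, which is what the
   * patch corrects. */
  printf("%zu %zu %s\n", len, strlen(sql), sql);
  return 0;
}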
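[Editor's note] On TD-4802 (patch 25): ensureMsgBufferSpace sums the vgroup info that still has to be appended, doubles totalMallocLen until the extra payload fits, and reallocates the response with rpcReallocCont; the msg write pointer is then taken only after the possible reallocation, since the buffer may move. A generic sketch of that growth step, with realloc() standing in for the rpc allocator and all other names illustrative:

/* Editorial sketch for TD-4802: grow a response buffer geometrically until a
 * known additional payload fits. Assumes *cap is non-zero on entry. */
#include <stdlib.h>

static void *ensureSpace(void *buf, size_t used, size_t extra, size_t *cap) {
  if (used + extra > *cap) {
    while (used + extra > *cap) {
      *cap *= 2;                 /* double until the payload fits */
    }
    void *p = realloc(buf, *cap);
    if (p == NULL) {
      free(buf);                 /* caller maps this to an out-of-memory code */
      return NULL;
    }
    buf = p;
  }
  return buf;                    /* may differ from the original pointer */
}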
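[Editor's note] On TD-4098 (patch 31): serializeExprListToVariant now breaks out early when an IN-list literal's parsed type cannot be represented in the column's type (for example a quoted string against a FLOAT or DOUBLE column), and the previously commented-out Python cases that expect such statements to be rejected are re-enabled. A condensed sketch of that kind of compatibility check; the type tags below are simplified stand-ins, not the TSDB_DATA_TYPE_* enums or macros.

/* Editorial sketch for TD-4098: decide whether an IN-list literal can be
 * serialized into the filter for a column of a given type. Simplified type
 * tags only; the real code also handles TIMESTAMP and NCHAR conversion. */
#include <stdbool.h>

typedef enum { T_BOOL, T_INT, T_UINT, T_DOUBLE, T_STRING } LitType;

static bool isCompatible(LitType colType, LitType litType) {
  switch (colType) {
    case T_BOOL:
    case T_INT:    return litType == T_BOOL || litType == T_INT;
    case T_UINT:   return litType == T_INT  || litType == T_UINT;
    case T_DOUBLE: return litType == T_INT  || litType == T_UINT || litType == T_DOUBLE;
    case T_STRING: return litType == T_STRING;   /* quoted literals only */
  }
  return false;                                  /* reject anything unexpected */
}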