diff --git a/Jenkinsfile2 b/Jenkinsfile2 index b074a0ee05..5321dd8f10 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -306,9 +306,9 @@ def pre_test_build_win() { cd %WIN_CONNECTOR_ROOT% python.exe -m pip install --upgrade pip python -m pip uninstall taospy -y - python -m pip install taospy==2.7.13 + python -m pip install taospy==2.7.16 python -m pip uninstall taos-ws-py -y - python -m pip install taos-ws-py==0.3.1 + python -m pip install taos-ws-py==0.3.3 xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32 ''' return 1 diff --git a/docs/en/08-develop/01-connect/index.md b/docs/en/08-develop/01-connect/index.md index ab35f6ad63..f4dbccba24 100644 --- a/docs/en/08-develop/01-connect/index.md +++ b/docs/en/08-develop/01-connect/index.md @@ -90,7 +90,7 @@ If `maven` is used to manage the projects, what needs to be done is only adding com.taosdata.jdbc taos-jdbcdriver - 3.3.2 + 3.3.3 ``` diff --git a/docs/examples/JDBC/JDBCDemo/pom.xml b/docs/examples/JDBC/JDBCDemo/pom.xml index a9b981a026..315b147cce 100644 --- a/docs/examples/JDBC/JDBCDemo/pom.xml +++ b/docs/examples/JDBC/JDBCDemo/pom.xml @@ -19,7 +19,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.3.2 + 3.3.3 org.locationtech.jts diff --git a/docs/examples/JDBC/connectionPools/pom.xml b/docs/examples/JDBC/connectionPools/pom.xml index a3705e6834..1791bfe8bc 100644 --- a/docs/examples/JDBC/connectionPools/pom.xml +++ b/docs/examples/JDBC/connectionPools/pom.xml @@ -18,7 +18,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.3.2 + 3.3.3 diff --git a/docs/examples/JDBC/consumer-demo/pom.xml b/docs/examples/JDBC/consumer-demo/pom.xml index 0db41bc33f..dcabfc1249 100644 --- a/docs/examples/JDBC/consumer-demo/pom.xml +++ b/docs/examples/JDBC/consumer-demo/pom.xml @@ -17,7 +17,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.3.2 + 3.3.3 com.google.guava diff --git a/docs/examples/JDBC/taosdemo/pom.xml b/docs/examples/JDBC/taosdemo/pom.xml index 8e61cbecdf..ffe159ea49 100644 --- 
a/docs/examples/JDBC/taosdemo/pom.xml +++ b/docs/examples/JDBC/taosdemo/pom.xml @@ -67,7 +67,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.3.2 + 3.3.3 diff --git a/docs/examples/java/pom.xml b/docs/examples/java/pom.xml index c44be4704d..f23b73177e 100644 --- a/docs/examples/java/pom.xml +++ b/docs/examples/java/pom.xml @@ -22,7 +22,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.3.2 + 3.3.3 diff --git a/docs/examples/java/src/main/java/com/taos/example/ParameterBindingFullDemo.java b/docs/examples/java/src/main/java/com/taos/example/ParameterBindingFullDemo.java index 5eb0cf0a61..dfb2915037 100644 --- a/docs/examples/java/src/main/java/com/taos/example/ParameterBindingFullDemo.java +++ b/docs/examples/java/src/main/java/com/taos/example/ParameterBindingFullDemo.java @@ -3,10 +3,7 @@ package com.taos.example; import com.taosdata.jdbc.TSDBPreparedStatement; import com.taosdata.jdbc.utils.StringUtils; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.sql.Statement; +import java.sql.*; import java.util.ArrayList; import java.util.List; import java.util.Random; @@ -16,15 +13,32 @@ public class ParameterBindingFullDemo { private static final String host = "127.0.0.1"; private static final Random random = new Random(System.currentTimeMillis()); - private static final int BINARY_COLUMN_SIZE = 50; + private static final int BINARY_COLUMN_SIZE = 100; private static final String[] schemaList = { - "create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)", - "create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)", - "create table stable3(ts timestamp, f1 bool) tags(t1 bool)", - "create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))", - "create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))", - "create table 
stable6(ts timestamp, f1 varbinary(" + BINARY_COLUMN_SIZE + ")) tags(t1 varbinary(" + BINARY_COLUMN_SIZE + "))", - "create table stable7(ts timestamp, f1 geometry(" + BINARY_COLUMN_SIZE + ")) tags(t1 geometry(" + BINARY_COLUMN_SIZE + "))", + "drop database if exists example_all_type_stmt", + "CREATE DATABASE IF NOT EXISTS example_all_type_stmt", + "USE example_all_type_stmt", + "CREATE STABLE IF NOT EXISTS stb_json (" + + "ts TIMESTAMP, " + + "int_col INT) " + + "tags (json_tag json)", + "CREATE STABLE IF NOT EXISTS stb (" + + "ts TIMESTAMP, " + + "int_col INT, " + + "double_col DOUBLE, " + + "bool_col BOOL, " + + "binary_col BINARY(100), " + + "nchar_col NCHAR(100), " + + "varbinary_col VARBINARY(100), " + + "geometry_col GEOMETRY(100)) " + + "tags (" + + "int_tag INT, " + + "double_tag DOUBLE, " + + "bool_tag BOOL, " + + "binary_tag BINARY(100), " + + "nchar_tag NCHAR(100), " + + "varbinary_tag VARBINARY(100), " + + "geometry_tag GEOMETRY(100))" }; private static final int numOfSubTable = 10, numOfRow = 10; @@ -34,55 +48,37 @@ public class ParameterBindingFullDemo { try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) { init(conn); + stmtJsonTag(conn); + stmtAll(conn); - bindInteger(conn); - bindFloat(conn); - bindBoolean(conn); - bindBytes(conn); - bindString(conn); - bindVarbinary(conn); - bindGeometry(conn); - - clean(conn); } catch (SQLException ex) { // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + System.out.println("Failed to insert data using stmt, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); throw ex; - } catch (Exception ex){ - System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + } catch (Exception ex) { + System.out.println("Failed to 
insert data using stmt, ErrMessage: " + ex.getMessage()); throw ex; } } private static void init(Connection conn) throws SQLException { - clean(conn); try (Statement stmt = conn.createStatement()) { - stmt.execute("create database if not exists test_parabind"); - stmt.execute("use test_parabind"); for (int i = 0; i < schemaList.length; i++) { stmt.execute(schemaList[i]); } } } - private static void clean(Connection conn) throws SQLException { - try (Statement stmt = conn.createStatement()) { - stmt.execute("drop database if exists test_parabind"); - } - } - private static void bindInteger(Connection conn) throws SQLException { - String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)"; + private static void stmtJsonTag(Connection conn) throws SQLException { + String sql = "INSERT INTO ? using stb_json tags(?) VALUES (?,?)"; try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { for (int i = 1; i <= numOfSubTable; i++) { // set table name - pstmt.setTableName("t1_" + i); + pstmt.setTableName("ntb_json_" + i); // set tags - pstmt.setTagByte(0, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); - pstmt.setTagShort(1, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); - pstmt.setTagInt(2, random.nextInt(Integer.MAX_VALUE)); - pstmt.setTagLong(3, random.nextLong()); + pstmt.setTagJson(0, "{\"device\":\"device_" + i + "\"}"); // set columns ArrayList tsList = new ArrayList<>(); long current = System.currentTimeMillis(); @@ -90,45 +86,42 @@ public class ParameterBindingFullDemo { tsList.add(current + j); pstmt.setTimestamp(0, tsList); - ArrayList f1List = new ArrayList<>(); + ArrayList f1List = new ArrayList<>(); for (int j = 0; j < numOfRow; j++) - f1List.add(Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); - pstmt.setByte(1, f1List); - - ArrayList f2List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - 
f2List.add(Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); - pstmt.setShort(2, f2List); - - ArrayList f3List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - f3List.add(random.nextInt(Integer.MAX_VALUE)); - pstmt.setInt(3, f3List); - - ArrayList f4List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - f4List.add(random.nextLong()); - pstmt.setLong(4, f4List); + f1List.add(random.nextInt(Integer.MAX_VALUE)); + pstmt.setInt(1, f1List); // add column pstmt.columnDataAddBatch(); } // execute column pstmt.columnDataExecuteBatch(); + System.out.println("Successfully inserted rows to example_all_type_stmt.ntb_json"); } } - private static void bindFloat(Connection conn) throws SQLException { - String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)"; + private static void stmtAll(Connection conn) throws SQLException { + String sql = "INSERT INTO ? using stb tags(?,?,?,?,?,?,?) VALUES (?,?,?,?,?,?,?,?)"; TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class); for (int i = 1; i <= numOfSubTable; i++) { // set table name - pstmt.setTableName("t2_" + i); + pstmt.setTableName("ntb" + i); // set tags - pstmt.setTagFloat(0, random.nextFloat()); - pstmt.setTagDouble(1, random.nextDouble()); + pstmt.setTagInt(0, i); + pstmt.setTagDouble(1, 1.1); + pstmt.setTagBoolean(2, true); + pstmt.setTagString(3, "binary_value"); + pstmt.setTagNString(4, "nchar_value"); + pstmt.setTagVarbinary(5, new byte[]{(byte) 0x98, (byte) 0xf4, 0x6e}); + pstmt.setTagGeometry(6, new byte[]{ + 0x01, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x59, + 0x40, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x59, 0x40}); + // set columns ArrayList tsList = new ArrayList<>(); long current = System.currentTimeMillis(); @@ -136,190 +129,54 @@ public class ParameterBindingFullDemo { tsList.add(current + j); pstmt.setTimestamp(0, tsList); - ArrayList f1List = new ArrayList<>(); + ArrayList f1List = new ArrayList<>(); 
for (int j = 0; j < numOfRow; j++) - f1List.add(random.nextFloat()); - pstmt.setFloat(1, f1List); + f1List.add(random.nextInt(Integer.MAX_VALUE)); + pstmt.setInt(1, f1List); ArrayList f2List = new ArrayList<>(); for (int j = 0; j < numOfRow; j++) f2List.add(random.nextDouble()); pstmt.setDouble(2, f2List); + ArrayList f3List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f3List.add(true); + pstmt.setBoolean(3, f3List); + + ArrayList f4List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f4List.add("binary_value"); + pstmt.setString(4, f4List, BINARY_COLUMN_SIZE); + + ArrayList f5List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f5List.add("nchar_value"); + pstmt.setNString(5, f5List, BINARY_COLUMN_SIZE); + + ArrayList f6List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f6List.add(new byte[]{(byte) 0x98, (byte) 0xf4, 0x6e}); + pstmt.setVarbinary(6, f6List, BINARY_COLUMN_SIZE); + + ArrayList f7List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f7List.add(new byte[]{ + 0x01, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x59, + 0x40, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x59, 0x40}); + pstmt.setGeometry(7, f7List, BINARY_COLUMN_SIZE); + // add column pstmt.columnDataAddBatch(); } // execute pstmt.columnDataExecuteBatch(); + System.out.println("Successfully inserted rows to example_all_type_stmt.ntb"); // close if no try-with-catch statement is used pstmt.close(); } - - private static void bindBoolean(Connection conn) throws SQLException { - String sql = "insert into ? using stable3 tags(?) 
values(?,?)"; - - try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("t3_" + i); - // set tags - pstmt.setTagBoolean(0, random.nextBoolean()); - // set columns - ArrayList tsList = new ArrayList<>(); - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) - tsList.add(current + j); - pstmt.setTimestamp(0, tsList); - - ArrayList f1List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - f1List.add(random.nextBoolean()); - pstmt.setBoolean(1, f1List); - - // add column - pstmt.columnDataAddBatch(); - } - // execute - pstmt.columnDataExecuteBatch(); - } - } - - private static void bindBytes(Connection conn) throws SQLException { - String sql = "insert into ? using stable4 tags(?) values(?,?)"; - - try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { - - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("t4_" + i); - // set tags - pstmt.setTagString(0, new String("abc")); - - // set columns - ArrayList tsList = new ArrayList<>(); - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) - tsList.add(current + j); - pstmt.setTimestamp(0, tsList); - - ArrayList f1List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) { - f1List.add(new String("abc")); - } - pstmt.setString(1, f1List, BINARY_COLUMN_SIZE); - - // add column - pstmt.columnDataAddBatch(); - } - // execute - pstmt.columnDataExecuteBatch(); - } - } - - private static void bindString(Connection conn) throws SQLException { - String sql = "insert into ? using stable5 tags(?) 
values(?,?)"; - - try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { - - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("t5_" + i); - // set tags - pstmt.setTagNString(0, "California.SanFrancisco"); - - // set columns - ArrayList tsList = new ArrayList<>(); - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) - tsList.add(current + j); - pstmt.setTimestamp(0, tsList); - - ArrayList f1List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) { - f1List.add("California.LosAngeles"); - } - pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE); - - // add column - pstmt.columnDataAddBatch(); - } - // execute - pstmt.columnDataExecuteBatch(); - } - } - - private static void bindVarbinary(Connection conn) throws SQLException { - String sql = "insert into ? using stable6 tags(?) values(?,?)"; - - try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { - - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("t6_" + i); - // set tags - byte[] bTag = new byte[]{0,2,3,4,5}; - bTag[0] = (byte) i; - pstmt.setTagVarbinary(0, bTag); - - // set columns - ArrayList tsList = new ArrayList<>(); - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) - tsList.add(current + j); - pstmt.setTimestamp(0, tsList); - - ArrayList f1List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) { - byte[] v = new byte[]{0,2,3,4,5,6}; - v[0] = (byte)j; - f1List.add(v); - } - pstmt.setVarbinary(1, f1List, BINARY_COLUMN_SIZE); - - // add column - pstmt.columnDataAddBatch(); - } - // execute - pstmt.columnDataExecuteBatch(); - } - } - - private static void bindGeometry(Connection conn) throws SQLException { - String sql = "insert into ? using stable7 tags(?) 
values(?,?)"; - - try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { - - byte[] g1 = StringUtils.hexToBytes("0101000000000000000000F03F0000000000000040"); - byte[] g2 = StringUtils.hexToBytes("0102000020E610000002000000000000000000F03F000000000000004000000000000008400000000000001040"); - List listGeo = new ArrayList<>(); - listGeo.add(g1); - listGeo.add(g2); - - for (int i = 1; i <= 2; i++) { - // set table name - pstmt.setTableName("t7_" + i); - // set tags - pstmt.setTagGeometry(0, listGeo.get(i - 1)); - - // set columns - ArrayList tsList = new ArrayList<>(); - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) - tsList.add(current + j); - pstmt.setTimestamp(0, tsList); - - ArrayList f1List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) { - f1List.add(listGeo.get(i - 1)); - } - pstmt.setGeometry(1, f1List, BINARY_COLUMN_SIZE); - - // add column - pstmt.columnDataAddBatch(); - } - // execute - pstmt.columnDataExecuteBatch(); - } - } } // ANCHOR_END: para_bind diff --git a/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java b/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java index ec94f2ded6..f23fb187f4 100644 --- a/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java +++ b/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java @@ -11,11 +11,30 @@ public class WSParameterBindingFullDemo { private static final Random random = new Random(System.currentTimeMillis()); private static final int BINARY_COLUMN_SIZE = 30; private static final String[] schemaList = { - "create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)", - "create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)", - "create table stable3(ts timestamp, f1 bool) tags(t1 bool)", - "create table 
stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))", - "create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))" + "drop database if exists example_all_type_stmt", + "CREATE DATABASE IF NOT EXISTS example_all_type_stmt", + "USE example_all_type_stmt", + "CREATE STABLE IF NOT EXISTS stb_json (" + + "ts TIMESTAMP, " + + "int_col INT) " + + "tags (json_tag json)", + "CREATE STABLE IF NOT EXISTS stb (" + + "ts TIMESTAMP, " + + "int_col INT, " + + "double_col DOUBLE, " + + "bool_col BOOL, " + + "binary_col BINARY(100), " + + "nchar_col NCHAR(100), " + + "varbinary_col VARBINARY(100), " + + "geometry_col GEOMETRY(100)) " + + "tags (" + + "int_tag INT, " + + "double_tag DOUBLE, " + + "bool_tag BOOL, " + + "binary_tag BINARY(100), " + + "nchar_tag NCHAR(100), " + + "varbinary_tag VARBINARY(100), " + + "geometry_tag GEOMETRY(100))" }; private static final int numOfSubTable = 10, numOfRow = 10; @@ -27,153 +46,91 @@ public class WSParameterBindingFullDemo { init(conn); - bindInteger(conn); + stmtJsonTag(conn); - bindFloat(conn); - - bindBoolean(conn); - - bindBytes(conn); - - bindString(conn); + stmtAll(conn); } catch (SQLException ex) { // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + System.out.println("Failed to insert data using stmt, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); throw ex; - } catch (Exception ex){ - System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + } catch (Exception ex) { + System.out.println("Failed to insert data using stmt, ErrMessage: " + ex.getMessage()); throw ex; } } private static void init(Connection conn) throws SQLException { try 
(Statement stmt = conn.createStatement()) { - stmt.execute("drop database if exists test_ws_parabind"); - stmt.execute("create database if not exists test_ws_parabind"); - stmt.execute("use test_ws_parabind"); for (int i = 0; i < schemaList.length; i++) { stmt.execute(schemaList[i]); } } } - private static void bindInteger(Connection conn) throws SQLException { - String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)"; + private static void stmtJsonTag(Connection conn) throws SQLException { + String sql = "INSERT INTO ? using stb_json tags(?) VALUES (?,?)"; try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { for (int i = 1; i <= numOfSubTable; i++) { // set table name - pstmt.setTableName("t1_" + i); + pstmt.setTableName("ntb_json_" + i); // set tags - pstmt.setTagByte(1, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); - pstmt.setTagShort(2, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); - pstmt.setTagInt(3, random.nextInt(Integer.MAX_VALUE)); - pstmt.setTagLong(4, random.nextLong()); + pstmt.setTagJson(1, "{\"device\":\"device_" + i + "\"}"); // set columns long current = System.currentTimeMillis(); for (int j = 0; j < numOfRow; j++) { pstmt.setTimestamp(1, new Timestamp(current + j)); - pstmt.setByte(2, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); - pstmt.setShort(3, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); - pstmt.setInt(4, random.nextInt(Integer.MAX_VALUE)); - pstmt.setLong(5, random.nextLong()); + pstmt.setInt(2, j); pstmt.addBatch(); } pstmt.executeBatch(); } + System.out.println("Successfully inserted rows to example_all_type_stmt.ntb_json"); } } - private static void bindFloat(Connection conn) throws SQLException { - String sql = "insert into ? using stable2 tags(?,?) 
values(?,?,?)"; - - try(TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { - - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("t2_" + i); - // set tags - pstmt.setTagFloat(1, random.nextFloat()); - pstmt.setTagDouble(2, random.nextDouble()); - // set columns - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) { - pstmt.setTimestamp(1, new Timestamp(current + j)); - pstmt.setFloat(2, random.nextFloat()); - pstmt.setDouble(3, random.nextDouble()); - pstmt.addBatch(); - } - pstmt.executeBatch(); - } - } - } - - private static void bindBoolean(Connection conn) throws SQLException { - String sql = "insert into ? using stable3 tags(?) values(?,?)"; - - try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("t3_" + i); - // set tags - pstmt.setTagBoolean(1, random.nextBoolean()); - // set columns - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) { - pstmt.setTimestamp(1, new Timestamp(current + j)); - pstmt.setBoolean(2, random.nextBoolean()); - pstmt.addBatch(); - } - pstmt.executeBatch(); - } - } - } - - private static void bindBytes(Connection conn) throws SQLException { - String sql = "insert into ? using stable4 tags(?) values(?,?)"; + private static void stmtAll(Connection conn) throws SQLException { + String sql = "INSERT INTO ? using stb tags(?,?,?,?,?,?,?) 
VALUES (?,?,?,?,?,?,?,?)"; try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("t4_" + i); - // set tags - pstmt.setTagString(1, new String("abc")); + // set table name + pstmt.setTableName("ntb"); + // set tags + pstmt.setTagInt(1, 1); + pstmt.setTagDouble(2, 1.1); + pstmt.setTagBoolean(3, true); + pstmt.setTagString(4, "binary_value"); + pstmt.setTagNString(5, "nchar_value"); + pstmt.setTagVarbinary(6, new byte[]{(byte) 0x98, (byte) 0xf4, 0x6e}); + pstmt.setTagGeometry(7, new byte[]{ + 0x01, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x59, + 0x40, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x59, 0x40}); - // set columns - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) { - pstmt.setTimestamp(1, new Timestamp(current + j)); - pstmt.setString(2, "abc"); - pstmt.addBatch(); - } - pstmt.executeBatch(); - } - } - } + long current = System.currentTimeMillis(); - private static void bindString(Connection conn) throws SQLException { - String sql = "insert into ? using stable5 tags(?) 
values(?,?)"; - try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { - - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("t5_" + i); - // set tags - pstmt.setTagNString(1, "California.SanFrancisco"); - - // set columns - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) { - pstmt.setTimestamp(0, new Timestamp(current + j)); - pstmt.setNString(1, "California.SanFrancisco"); - pstmt.addBatch(); - } - pstmt.executeBatch(); - } + pstmt.setTimestamp(1, new Timestamp(current)); + pstmt.setInt(2, 1); + pstmt.setDouble(3, 1.1); + pstmt.setBoolean(4, true); + pstmt.setString(5, "binary_value"); + pstmt.setNString(6, "nchar_value"); + pstmt.setVarbinary(7, new byte[]{(byte) 0x98, (byte) 0xf4, 0x6e}); + pstmt.setGeometry(8, new byte[]{ + 0x01, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x59, + 0x40, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x59, 0x40}); + pstmt.addBatch(); + pstmt.executeBatch(); + System.out.println("Successfully inserted rows to example_all_type_stmt.ntb"); } } } diff --git a/docs/examples/java/src/test/java/com/taos/test/TestAll.java b/docs/examples/java/src/test/java/com/taos/test/TestAll.java index 6a45c5fd5b..e014a3b315 100644 --- a/docs/examples/java/src/test/java/com/taos/test/TestAll.java +++ b/docs/examples/java/src/test/java/com/taos/test/TestAll.java @@ -50,36 +50,68 @@ public class TestAll { } @Test - public void testRestInsert() throws SQLException { - dropDB("power"); - RestInsertExample.main(args); - RestQueryExample.main(args); + public void testWsConnect() throws Exception { + WSConnectExample.main(args); } @Test - public void testStmtInsert() throws SQLException { + public void testBase() throws Exception { + JdbcCreatDBDemo.main(args); + JdbcInsertDataDemo.main(args); + JdbcQueryDemo.main(args); + dropDB("power"); - StmtInsertExample.main(args); } @Test - public void testSubscribe() { + public void 
testWsSchemaless() throws Exception { + dropDB("power"); + SchemalessWsTest.main(args); + } + @Test + public void testJniSchemaless() throws Exception { + dropDB("power"); + SchemalessJniTest.main(args); + } + + @Test + public void testJniStmtBasic() throws Exception { + dropDB("power"); + ParameterBindingBasicDemo.main(args); + } + + @Test + public void testJniStmtFull() throws Exception { + dropDB("power"); + ParameterBindingFullDemo.main(args); + } + + @Test + public void testWsStmtBasic() throws Exception { + dropDB("power"); + WSParameterBindingBasicDemo.main(args); + } + + @Test + public void testWsStmtFull() throws Exception { + dropDB("power"); + WSParameterBindingFullDemo.main(args); + } + + @Test + public void testConsumer() throws Exception { + dropDB("power"); SubscribeDemo.main(args); } - - @Test - public void testSubscribeOverWebsocket() { - WebsocketSubscribeDemo.main(args); - } - - @Test - public void testSchemaless() throws SQLException { - LineProtocolExample.main(args); - TelnetLineProtocolExample.main(args); - // for json protocol, tags may be double type. but for telnet protocol tag must be nchar type. - // To avoid type mismatch, we delete database test. 
- dropDB("test"); - JSONProtocolExample.main(args); - } +// @Test +// public void testSubscribeJni() throws SQLException, InterruptedException { +// dropDB("power"); +// ConsumerLoopFull.main(args); +// } +// @Test +// public void testSubscribeWs() throws SQLException, InterruptedException { +// dropDB("power"); +// WsConsumerLoopFull.main(args); +// } } diff --git a/docs/examples/node/websocketexample/all_type_query.js b/docs/examples/node/websocketexample/all_type_query.js new file mode 100644 index 0000000000..266d110d24 --- /dev/null +++ b/docs/examples/node/websocketexample/all_type_query.js @@ -0,0 +1,98 @@ +const taos = require("@tdengine/websocket"); + +let dsn = 'ws://localhost:6041'; +async function json_tag_example() { + let wsSql = null; + try { + let conf = new taos.WSConfig(dsn); + conf.setUser('root'); + conf.setPwd('taosdata'); + wsSql = await taos.sqlConnect(conf); + console.log("Connected to " + dsn + " successfully."); + + // create database + await wsSql.exec('CREATE DATABASE IF NOT EXISTS example_json_tag'); + console.log("Create database example_json_tag successfully."); + + // create table + await wsSql.exec('create table if not exists example_json_tag.stb (ts timestamp, v int) tags(jt json)'); + + console.log("Create stable example_json_tag.stb successfully"); + + let insertQuery = 'INSERT INTO ' + + 'example_json_tag.tb1 USING example_json_tag.stb TAGS(\'{"name":"value"}\') ' + + "values(now, 1) "; + taosResult = await wsSql.exec(insertQuery); + console.log("Successfully inserted " + taosResult.getAffectRows() + " rows to example_json_tag.stb."); + + let sql = 'SELECT ts, v, jt FROM example_json_tag.stb limit 100'; + wsRows = await wsSql.query(sql); + while (await wsRows.next()) { + let row = wsRows.getData(); + console.log('ts: ' + row[0] + ', v: ' + row[1] + ', jt: ' + row[2]); + } + + } catch (err) { + console.error(`Failed to create database example_json_tag or stable stb, ErrCode: ${err.code}, ErrMessage: ${err.message}`); + } finally 
{ + if (wsSql) { + await wsSql.close(); + } + } + +} + +async function all_type_example() { + let wsSql = null; + try { + let conf = new taos.WSConfig(dsn); + conf.setUser('root'); + conf.setPwd('taosdata'); + wsSql = await taos.sqlConnect(conf); + console.log("Connected to " + dsn + " successfully."); + + // create database + await wsSql.exec('CREATE DATABASE IF NOT EXISTS all_type_example'); + console.log("Create database all_type_example successfully."); + + // create table + await wsSql.exec('create table if not exists all_type_example.stb (ts timestamp, ' + + 'int_col INT, double_col DOUBLE, bool_col BOOL, binary_col BINARY(100),' + + 'nchar_col NCHAR(100), varbinary_col VARBINARY(100), geometry_col GEOMETRY(100)) ' + + 'tags(int_tag INT, double_tag DOUBLE, bool_tag BOOL, binary_tag BINARY(100),' + + 'nchar_tag NCHAR(100), varbinary_tag VARBINARY(100), geometry_tag GEOMETRY(100));'); + + console.log("Create stable all_type_example.stb successfully"); + + let insertQuery = "INSERT INTO all_type_example.tb1 using all_type_example.stb " + + "tags(1, 1.1, true, 'binary_value', 'nchar_value', '\\x98f46e', 'POINT(100 100)') " + + "values(now, 1, 1.1, true, 'binary_value', 'nchar_value', '\\x98f46e', 'POINT(100 100)')"; + taosResult = await wsSql.exec(insertQuery); + console.log("Successfully inserted " + taosResult.getAffectRows() + " rows to all_type_example.stb."); + + let sql = 'SELECT * FROM all_type_example.stb limit 100'; + let wsRows = await wsSql.query(sql); + let meta = wsRows.getMeta(); + console.log("wsRow:meta:=>", meta); + while (await wsRows.next()) { + let row = wsRows.getData(); + console.log(row); + } + + } catch (err) { + console.error(`Failed to create database all_type_example or stable stb, ErrCode: ${err.code}, ErrMessage: ${err.message}`); + } finally { + if (wsSql) { + await wsSql.close(); + } + } + +} + +async function test() { + await json_tag_example() + await all_type_example() + taos.destroy(); +} + +test() diff --git 
a/docs/examples/node/websocketexample/all_type_stmt.js b/docs/examples/node/websocketexample/all_type_stmt.js new file mode 100644 index 0000000000..8a0dcf21e1 --- /dev/null +++ b/docs/examples/node/websocketexample/all_type_stmt.js @@ -0,0 +1,149 @@ +const taos = require("@tdengine/websocket"); + + +let dsn = 'ws://localhost:6041'; +async function json_tag_example() { + let wsSql = null; + try { + let conf = new taos.WSConfig(dsn); + conf.setUser('root'); + conf.setPwd('taosdata'); + wsSql = await taos.sqlConnect(conf); + console.log("Connected to " + dsn + " successfully."); + + // create database + await wsSql.exec('CREATE DATABASE IF NOT EXISTS example_json_tag'); + console.log("Create database example_json_tag successfully."); + + await wsSql.exec('use example_json_tag'); + + // create table + await wsSql.exec('create table if not exists stb (ts timestamp, v int) tags(jt json)'); + + console.log("Create stable example_json_tag.stb successfully"); + + let stmt = await wsSql.stmtInit(); + await stmt.prepare("INSERT INTO ? using stb tags(?) 
VALUES (?,?)"); + await stmt.setTableName(`tb1`); + let tagParams = stmt.newStmtParam(); + tagParams.setJson(['{"name":"value"}']) + await stmt.setTags(tagParams); + let bindParams = stmt.newStmtParam(); + const currentMillis = new Date().getTime(); + bindParams.setTimestamp([currentMillis]); + bindParams.setInt([1]); + await stmt.bind(bindParams); + await stmt.batch(); + await stmt.exec(); + await stmt.close(); + + let sql = 'SELECT ts, v, jt FROM example_json_tag.stb limit 100'; + wsRows = await wsSql.query(sql); + while (await wsRows.next()) { + let row = wsRows.getData(); + console.log('ts: ' + row[0] + ', v: ' + row[1] + ', jt: ' + row[2]); + } + + } catch (err) { + console.error(`Failed to create database example_json_tag or stable stb, ErrCode: ${err.code}, ErrMessage: ${err.message}`); + } finally { + if (wsSql) { + await wsSql.close(); + } + } + +} + +async function all_type_example() { + let wsSql = null; + let stmt = null; + try { + let conf = new taos.WSConfig(dsn); + conf.setUser('root'); + conf.setPwd('taosdata'); + wsSql = await taos.sqlConnect(conf); + console.log("Connected to " + dsn + " successfully."); + + // create database + await wsSql.exec('CREATE DATABASE IF NOT EXISTS all_type_example'); + console.log("Create database all_type_example successfully."); + + await wsSql.exec('use all_type_example'); + + // create table + await wsSql.exec('create table if not exists stb (ts timestamp, ' + + 'int_col INT, double_col DOUBLE, bool_col BOOL, binary_col BINARY(100),' + + 'nchar_col NCHAR(100), varbinary_col VARBINARY(100), geometry_col GEOMETRY(100)) ' + + 'tags(int_tag INT, double_tag DOUBLE, bool_tag BOOL, binary_tag BINARY(100),' + + 'nchar_tag NCHAR(100), varbinary_tag VARBINARY(100), geometry_tag GEOMETRY(100));'); + + console.log("Create stable all_type_example.stb successfully"); + + let geometryData = new Uint8Array([0x01,0x01,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x59,0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x59,0x40,]).buffer; + + 
const encoder = new TextEncoder(); + let vbData = encoder.encode(`Hello, world!`).buffer; + + stmt = await wsSql.stmtInit(); + await stmt.prepare("INSERT INTO ? using stb tags(?,?,?,?,?,?,?) VALUES (?,?,?,?,?,?,?,?)"); + await stmt.setTableName(`tb1`); + let tagParams = stmt.newStmtParam(); + tagParams.setInt([1]); + tagParams.setDouble([1.1]); + tagParams.setBoolean([true]); + tagParams.setVarchar(["hello"]); + tagParams.setNchar(["stmt"]); + tagParams.setGeometry([geometryData]); + tagParams.setVarBinary([vbData]); + await stmt.setTags(tagParams); + + + let bindParams = stmt.newStmtParam(); + const currentMillis = new Date().getTime(); + bindParams.setTimestamp([currentMillis]); + bindParams.setInt([1]); + bindParams.setDouble([1.1]); + bindParams.setBoolean([true]); + bindParams.setVarchar(["hello"]); + bindParams.setNchar(["stmt"]); + bindParams.setGeometry([geometryData]); + bindParams.setVarBinary([vbData]); + + await stmt.bind(bindParams); + await stmt.batch(); + await stmt.exec(); + + let sql = 'SELECT * FROM all_type_example.stb limit 100'; + let wsRows = await wsSql.query(sql); + let meta = wsRows.getMeta(); + console.log("wsRow:meta:=>", meta); + while (await wsRows.next()) { + let row = wsRows.getData(); + console.log(row); + } + + } catch (err) { + console.error(`Failed to create database all_type_example or stable stb, ErrCode: ${err.code}, ErrMessage: ${err.message}`); + } finally { + if (stmt) { + await stmt.close(); + } + if (wsSql) { + await wsSql.close(); + } + } + +} + + + +async function test() { + taos.setLevel("debug") + await json_tag_example() + await all_type_example() + taos.destroy(); +} + +test() + diff --git a/docs/examples/node/websocketexample/sql_example.js b/docs/examples/node/websocketexample/sql_example.js index 5dc8ba6021..8eb8af989d 100644 --- a/docs/examples/node/websocketexample/sql_example.js +++ b/docs/examples/node/websocketexample/sql_example.js @@ -24,13 +24,18 @@ async function createConnect() { async function 
createDbAndTable() { let wsSql = null; try { - wsSql = await createConnect(); + let conf = new taos.WSConfig(dsn); + conf.setUser('root'); + conf.setPwd('taosdata'); + conf.setDb('power'); + wsSql = await taos.sqlConnect(conf); + console.log("Connected to " + dsn + " successfully."); // create database await wsSql.exec('CREATE DATABASE IF NOT EXISTS power'); console.log("Create database power successfully."); // create table await wsSql.exec('CREATE STABLE IF NOT EXISTS power.meters ' + - '(_ts timestamp, current float, voltage int, phase float) ' + + '(ts timestamp, current float, voltage int, phase float) ' + 'TAGS (location binary(64), groupId int);'); console.log("Create stable power.meters successfully"); diff --git a/docs/examples/rust/nativeexample/Cargo.toml b/docs/examples/rust/nativeexample/Cargo.toml index 13e68d6d9d..041ca4f617 100644 --- a/docs/examples/rust/nativeexample/Cargo.toml +++ b/docs/examples/rust/nativeexample/Cargo.toml @@ -8,6 +8,7 @@ edition = "2021" anyhow = "1" chrono = "0.4" serde = { version = "1", features = ["derive"] } +serde_json = "1.0" tokio = { version = "1", features = ["rt", "macros", "rt-multi-thread"] } log = "0.4" pretty_env_logger = "0.5.0" diff --git a/docs/examples/rust/nativeexample/examples/stmt_all.rs b/docs/examples/rust/nativeexample/examples/stmt_all.rs new file mode 100644 index 0000000000..6560d8a0ab --- /dev/null +++ b/docs/examples/rust/nativeexample/examples/stmt_all.rs @@ -0,0 +1,121 @@ +use taos::*; +use taos_query::util::hex::hex_string_to_bytes; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let dsn = "taos://"; + let taos = TaosBuilder::from_dsn(dsn)?.build().await?; + + taos.exec("DROP DATABASE IF EXISTS example_all_type_stmt") + .await?; + taos.create_database("example_all_type_stmt").await?; + taos.use_database("example_all_type_stmt").await?; + taos.exec( + r#" + CREATE STABLE IF NOT EXISTS stb ( + ts TIMESTAMP, + int_col INT, + double_col DOUBLE, + bool_col BOOL, + binary_col 
BINARY(100), + nchar_col NCHAR(100), + varbinary_col VARBINARY(100), + geometry_col GEOMETRY(100)) + TAGS ( + int_tag INT, + double_tag DOUBLE, + bool_tag BOOL, + binary_tag BINARY(100), + nchar_tag NCHAR(100)) + "#, + ) + .await?; + + let mut stmt = Stmt::init(&taos).await?; + stmt.prepare("INSERT INTO ? using stb tags(?,?,?,?,?) VALUES (?,?,?,?,?,?,?,?)") + .await?; + + const NUM_TABLES: usize = 10; + const NUM_ROWS: usize = 10; + for i in 0..NUM_TABLES { + let table_name = format!("d_bind_{}", i); + let tags = vec![ + Value::Int(i as i32), + Value::Double(1.1), + Value::Bool(true), + Value::VarChar("binary_value".into()), + Value::NChar("nchar_value".into()), + // Value::VarBinary(vec![0x98, 0xf4, 0x6e].into()), + // Value::Geometry( + // vec![ + // 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x40, + // 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x40, + // ] + // .into(), + // ), + ]; + + // set table name and tags for the prepared statement. + match stmt.set_tbname_tags(&table_name, &tags).await { + Ok(_) => {} + Err(err) => { + eprintln!( + "Failed to set table name and tags, table_name:{}, tags:{:?}, ErrMessage: {}", + table_name, tags, err + ); + return Err(err.into()); + } + } + for j in 0..NUM_ROWS { + let values = vec![ + ColumnView::from_millis_timestamp(vec![1648432611249 + j as i64]), + ColumnView::from_ints(vec![j as i32]), + ColumnView::from_doubles(vec![1.1]), + ColumnView::from_bools(vec![true]), + ColumnView::from_varchar(vec!["ABC"]), + ColumnView::from_nchar(vec!["涛思数据"]), + ColumnView::from_bytes(vec![hex_string_to_bytes("123456").to_vec()]), + ColumnView::from_geobytes(vec![hex_string_to_bytes( + "0101000000000000000000F03F0000000000000040", + ) + .to_vec()]), + ]; + // bind values to the prepared statement. 
+ match stmt.bind(&values).await { + Ok(_) => {} + Err(err) => { + eprintln!( + "Failed to bind values, values:{:?}, ErrMessage: {}", + values, err + ); + return Err(err.into()); + } + } + } + + match stmt.add_batch().await { + Ok(_) => {} + Err(err) => { + eprintln!("Failed to add batch, ErrMessage: {}", err); + return Err(err.into()); + } + } + } + + // execute. + match stmt.execute().await { + Ok(affected_rows) => println!( + "Successfully inserted {} rows to example_all_type_stmt.stb.", + affected_rows + ), + Err(err) => { + eprintln!( + "Failed to insert to table stb using stmt, ErrMessage: {}", + err + ); + return Err(err.into()); + } + } + + Ok(()) +} diff --git a/docs/examples/rust/nativeexample/examples/stmt_json_tag.rs b/docs/examples/rust/nativeexample/examples/stmt_json_tag.rs new file mode 100644 index 0000000000..7c1b26a0f5 --- /dev/null +++ b/docs/examples/rust/nativeexample/examples/stmt_json_tag.rs @@ -0,0 +1,94 @@ +use serde_json::json; +use taos::*; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let dsn = "taos://"; + let taos = TaosBuilder::from_dsn(dsn)?.build().await?; + + taos.exec("DROP DATABASE IF EXISTS example_all_type_stmt") + .await?; + taos.create_database("example_all_type_stmt").await?; + taos.use_database("example_all_type_stmt").await?; + taos.exec( + r#" +CREATE STABLE IF NOT EXISTS stb_json ( + ts TIMESTAMP, + int_col INT) +TAGS ( + json_tag JSON) +"#, + ) + .await?; + + let mut stmt = Stmt::init(&taos).await?; + stmt.prepare("INSERT INTO ? using stb_json tags(?) VALUES (?,?)") + .await?; + + const NUM_TABLES: usize = 1; + const NUM_ROWS: usize = 1; + for i in 0..NUM_TABLES { + let table_name = format!("d_bind_{}", i); + let json_value: serde_json::Value = json!({ + "name": "value" + }); + + dbg!(json_value.to_string()); + + let tags = vec![Value::Json(json_value)]; + + // set table name and tags for the prepared statement. 
+ match stmt.set_tbname_tags(&table_name, &tags).await { + Ok(_) => {} + Err(err) => { + eprintln!( + "Failed to set table name and tags, table_name:{}, tags:{:?}, ErrMessage: {}", + table_name, tags, err + ); + return Err(err.into()); + } + } + for j in 0..NUM_ROWS { + let values = vec![ + ColumnView::from_millis_timestamp(vec![1648432611249 + j as i64]), + ColumnView::from_ints(vec![j as i32]), + ]; + // bind values to the prepared statement. + match stmt.bind(&values).await { + Ok(_) => {} + Err(err) => { + eprintln!( + "Failed to bind values, values:{:?}, ErrMessage: {}", + values, err + ); + return Err(err.into()); + } + } + } + + match stmt.add_batch().await { + Ok(_) => {} + Err(err) => { + eprintln!("Failed to add batch, ErrMessage: {}", err); + return Err(err.into()); + } + } + } + + // execute. + match stmt.execute().await { + Ok(affected_rows) => println!( + "Successfully inserted {} rows to example_all_type_stmt.stb_json.", + affected_rows + ), + Err(err) => { + eprintln!( + "Failed to insert to table stb_json using stmt, ErrMessage: {}", + err + ); + return Err(err.into()); + } + } + + Ok(()) +} diff --git a/docs/examples/rust/restexample/examples/stmt_all.rs b/docs/examples/rust/restexample/examples/stmt_all.rs new file mode 100644 index 0000000000..07ab658bad --- /dev/null +++ b/docs/examples/rust/restexample/examples/stmt_all.rs @@ -0,0 +1,121 @@ +use taos::*; +use taos_query::util::hex::hex_string_to_bytes; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let dsn = "ws://"; + let taos = TaosBuilder::from_dsn(dsn)?.build().await?; + + taos.exec("DROP DATABASE IF EXISTS example_all_type_stmt") + .await?; + taos.create_database("example_all_type_stmt").await?; + taos.use_database("example_all_type_stmt").await?; + taos.exec( + r#" + CREATE STABLE IF NOT EXISTS stb ( + ts TIMESTAMP, + int_col INT, + double_col DOUBLE, + bool_col BOOL, + binary_col BINARY(100), + nchar_col NCHAR(100), + varbinary_col VARBINARY(100), + geometry_col 
GEOMETRY(100)) + TAGS ( + int_tag INT, + double_tag DOUBLE, + bool_tag BOOL, + binary_tag BINARY(100), + nchar_tag NCHAR(100)) + "#, + ) + .await?; + + let mut stmt = Stmt::init(&taos).await?; + stmt.prepare("INSERT INTO ? using stb tags(?,?,?,?,?) VALUES (?,?,?,?,?,?,?,?)") + .await?; + + const NUM_TABLES: usize = 10; + const NUM_ROWS: usize = 10; + for i in 0..NUM_TABLES { + let table_name = format!("d_bind_{}", i); + let tags = vec![ + Value::Int(i as i32), + Value::Double(1.1), + Value::Bool(true), + Value::VarChar("binary_value".into()), + Value::NChar("nchar_value".into()), + // Value::VarBinary(vec![0x98, 0xf4, 0x6e].into()), + // Value::Geometry( + // vec![ + // 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x40, + // 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x40, + // ] + // .into(), + // ), + ]; + + // set table name and tags for the prepared statement. + match stmt.set_tbname_tags(&table_name, &tags).await { + Ok(_) => {} + Err(err) => { + eprintln!( + "Failed to set table name and tags, table_name:{}, tags:{:?}, ErrMessage: {}", + table_name, tags, err + ); + return Err(err.into()); + } + } + for j in 0..NUM_ROWS { + let values = vec![ + ColumnView::from_millis_timestamp(vec![1648432611249 + j as i64]), + ColumnView::from_ints(vec![j as i32]), + ColumnView::from_doubles(vec![1.1]), + ColumnView::from_bools(vec![true]), + ColumnView::from_varchar(vec!["ABC"]), + ColumnView::from_nchar(vec!["涛思数据"]), + ColumnView::from_bytes(vec![hex_string_to_bytes("123456").to_vec()]), + ColumnView::from_geobytes(vec![hex_string_to_bytes( + "0101000000000000000000F03F0000000000000040", + ) + .to_vec()]), + ]; + // bind values to the prepared statement. 
+ match stmt.bind(&values).await { + Ok(_) => {} + Err(err) => { + eprintln!( + "Failed to bind values, values:{:?}, ErrMessage: {}", + values, err + ); + return Err(err.into()); + } + } + } + + match stmt.add_batch().await { + Ok(_) => {} + Err(err) => { + eprintln!("Failed to add batch, ErrMessage: {}", err); + return Err(err.into()); + } + } + } + + // execute. + match stmt.execute().await { + Ok(affected_rows) => println!( + "Successfully inserted {} rows to example_all_type_stmt.stb.", + affected_rows + ), + Err(err) => { + eprintln!( + "Failed to insert to table stb using stmt, ErrMessage: {}", + err + ); + return Err(err.into()); + } + } + + Ok(()) +} diff --git a/docs/zh/03-intro.md b/docs/zh/03-intro.md index 0167f9323b..4207ab4eb6 100644 --- a/docs/zh/03-intro.md +++ b/docs/zh/03-intro.md @@ -14,9 +14,9 @@ TDengine 是一个高性能、分布式的时序数据库。通过集成的缓 TDengine OSS 是一个开源的高性能时序数据库,与其他时序数据库相比,它的核心优势在于其集群开源、高性能和云原生架构。而且除了基础的写入、查询和存储功能外,TDengine OSS 还集成了缓存、流式计算和数据订阅等高级功能,这些功能显著简化了系统设计,降低了企业的研发和运营成本。 -在 TDengine OSS 的基础上,企业版 TDengine Enterprise 提供了增强的辅助功能,包括数据的备份恢复、异地容灾、多级存储、视图、权限控制、安全加密、IP 白名单、支持 MQTT、OPC-UA、OPC-DA、PI、Wonderware、Kafka 等各种数据源。这些功能为企业提供了更为全面、安全、可靠和高效的时序数据管理解决方案。 +在 TDengine OSS 的基础上,企业版 TDengine Enterprise 提供了增强的辅助功能,包括数据的备份恢复、异地容灾、多级存储、视图、权限控制、安全加密、IP 白名单、支持 MQTT、OPC-UA、OPC-DA、PI、Wonderware、Kafka 等各种数据源。这些功能为企业提供了更为全面、安全、可靠和高效的时序数据管理解决方案。更多的细节请看 [TDengine Enterprise](https://www.taosdata.com/tdengine-pro) -此外,TDengine Cloud 作为一种全托管的云服务,存储与计算分离,分开计费,为企业提供了企业级的工具和服务,彻底解决了运维难题,尤其适合中小规模的用户使用。 +此外,TDengine Cloud 作为一种全托管的云服务,存储与计算分离,分开计费,为企业提供了企业级的工具和服务,彻底解决了运维难题,尤其适合中小规模的用户使用。更多的细节请看[TDengine 云服务](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn) ## TDengine 主要功能与特性 @@ -135,9 +135,3 @@ TDengine 经过特别优化,以适应时间序列数据的独特需求,引 - [TDengine 与 InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse 等数据库的对比测试报告](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf) -## 主要产品 - -TDengine 有两个主要产品:TDengine Enterprise (即 TDengine 企业版)和 TDengine Cloud,关于它们的具体定义请参考 -- 
[TDengine 企业版](https://www.taosdata.com/tdengine-pro) -- [TDengine 云服务](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn) - diff --git a/docs/zh/04-get-started/01-docker.md b/docs/zh/04-get-started/01-docker.md index 1d425fed6b..cadde10e0c 100644 --- a/docs/zh/04-get-started/01-docker.md +++ b/docs/zh/04-get-started/01-docker.md @@ -90,7 +90,7 @@ taosBenchmark 提供了丰富的选项,允许用户自定义测试参数,如 taosBenchmark --help ``` -有关taosBenchmark 的详细使用方法,请参考[taosBenchmark 参考手册](../../reference/components/taosbenchmark) +有关taosBenchmark 的详细使用方法,请参考[taosBenchmark 参考手册](../../reference/tools/taosbenchmark) ### 体验查询 diff --git a/docs/zh/04-get-started/03-package.md b/docs/zh/04-get-started/03-package.md index de479fb06e..2a1f594b4f 100644 --- a/docs/zh/04-get-started/03-package.md +++ b/docs/zh/04-get-started/03-package.md @@ -263,7 +263,7 @@ SELECT * FROM t; Query OK, 2 row(s) in set (0.003128s) ``` -除执行 SQL 语句外,系统管理员还可以从 TDengine CLI 进行检查系统运行状态、添加删除用户账号等操作。TDengine CLI 连同应用驱动也可以独立安装在机器上运行,更多细节请参考 [TDengine 命令行](../../reference/components/taos-cli/)。 +除执行 SQL 语句外,系统管理员还可以从 TDengine CLI 进行检查系统运行状态、添加删除用户账号等操作。TDengine CLI 连同应用驱动也可以独立安装在机器上运行,更多细节请参考 [TDengine 命令行](../../reference/tools/taos-cli/)。 ## 快速体验 @@ -286,7 +286,7 @@ taosBenchmark 提供了丰富的选项,允许用户自定义测试参数,如 taosBenchmark --help ``` -有关taosBenchmark 的详细使用方法,请参考[taosBenchmark 参考手册](../../reference/components/taosbenchmark) +有关taosBenchmark 的详细使用方法,请参考[taosBenchmark 参考手册](../../reference/tools/taosbenchmark) ### 体验查询 diff --git a/docs/zh/06-advanced/05-data-in/07-mqtt.md b/docs/zh/06-advanced/05-data-in/07-mqtt.md index eb6753b7b0..af99cd3621 100644 --- a/docs/zh/06-advanced/05-data-in/07-mqtt.md +++ b/docs/zh/06-advanced/05-data-in/07-mqtt.md @@ -72,15 +72,30 @@ TDengine 可以通过 MQTT 连接器从 MQTT 代理订阅数据并将其写入 T taosX 可以使用 JSON 提取器解析数据,并允许用户在数据库中指定数据模型,包括,指定表名称和超级表名,设置普通列和标签列等。 - #### 6.1 解析 + 有三种获取示例数据的方法: 点击 **从服务器检索** 按钮,从 MQTT 获取示例数据。 点击 **文件上传** 按钮,上传 CSV 文件,获取示例数据。 -在 **消息体** 中填写 MQTT 消息体中的示例数据,例如:`{"id": 1, "message": 
"hello-word"}{"id": 2, "message": "hello-word"}`。之后会使用这条示例数据来配置提取和过滤条件。 +在 **消息体** 中填写 MQTT 消息体中的示例数据。 + +json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解析一下数据: + +``` json +{"id": 1, "message": "hello-word"} +{"id": 2, "message": "hello-word"} +``` + +或者 + +``` json +[{"id": 1, "message": "hello-word"},{"id": 2, "message": "hello-word"}] +``` + +解析结果如下所示: ![mqtt-06.png](./mqtt-06.png) diff --git a/docs/zh/06-advanced/05-data-in/08-kafka.md b/docs/zh/06-advanced/05-data-in/08-kafka.md index 92bfc031ec..e05c205f6e 100644 --- a/docs/zh/06-advanced/05-data-in/08-kafka.md +++ b/docs/zh/06-advanced/05-data-in/08-kafka.md @@ -80,13 +80,29 @@ TDengine 可以高效地从 Kafka 读取数据并将其写入 TDengine,以实 在 **Payload 解析** 区域填写 Payload 解析相关的配置参数。 #### 7.1 解析 + 有三种获取示例数据的方法: 点击 **从服务器检索** 按钮,从 Kafka 获取示例数据。 点击 **文件上传** 按钮,上传 CSV 文件,获取示例数据。 -在 **消息体** 中填写 Kafka 消息体中的示例数据,例如:`{"id": 1, "message": "hello-word"}{"id": 2, "message": "hello-word"}`。之后会使用这条示例数据来配置提取和过滤条件。 +在 **消息体** 中填写 Kafka 消息体中的示例数据。 + +json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解析一下数据: + +``` json +{"id": 1, "message": "hello-word"} +{"id": 2, "message": "hello-word"} +``` + +或者 + +``` json +[{"id": 1, "message": "hello-word"},{"id": 2, "message": "hello-word"}] +``` + +解析结果如下所示: ![kafka-07.png](./kafka-07.png) diff --git a/docs/zh/06-advanced/05-data-in/09-influxdb.md b/docs/zh/06-advanced/05-data-in/09-influxdb.md index 3be24c8d5f..d0b781667d 100644 --- a/docs/zh/06-advanced/05-data-in/09-influxdb.md +++ b/docs/zh/06-advanced/05-data-in/09-influxdb.md @@ -17,7 +17,7 @@ InfluxDB 是一种流行的开源时间序列数据库,它针对处理大量 在数据写入页面中点击左上角的 **+新增数据源** 按钮进入新增数据源页面,如下图所示: -![InfluxDB-01zh-EnterDataSourcePage.png](./pic/InfluxDB-01zh-EnterDataSourcePage.png "进入新增数据源页面") +![Common-zh00-EnterDataSourcePage.png](./pic/Common-zh00-EnterDataSourcePage.png "进入新增数据源页面") ### 2. 
配置基本信息 diff --git a/docs/zh/06-advanced/05-data-in/10-opentsdb.md b/docs/zh/06-advanced/05-data-in/10-opentsdb.md index e7f804f86f..3737f2a415 100644 --- a/docs/zh/06-advanced/05-data-in/10-opentsdb.md +++ b/docs/zh/06-advanced/05-data-in/10-opentsdb.md @@ -17,7 +17,7 @@ OpenTSDB 是一个架构在 HBase 系统之上的实时监控信息收集和展 在数据写入页面中点击左上角的 **+新增数据源** 按钮进入新增数据源页面,如下图所示: -![OpenTSDB-01zh-EnterDataSourcePage.png](./pic/OpenTSDB-01zh-EnterDataSourcePage.png "进入新增数据源页面") +![Common-zh00-EnterDataSourcePage.png](./pic/Common-zh00-EnterDataSourcePage.png "进入新增数据源页面") ### 2. 配置基本信息 diff --git a/docs/zh/06-advanced/05-data-in/13-mysql.md b/docs/zh/06-advanced/05-data-in/13-mysql.md index 14b015d67a..4cc84fbfa2 100644 --- a/docs/zh/06-advanced/05-data-in/13-mysql.md +++ b/docs/zh/06-advanced/05-data-in/13-mysql.md @@ -15,7 +15,7 @@ MySQL 是最流行的关系型数据库之一。很多系统都曾经或正在 在数据写入页面中点击左上角的 **+新增数据源** 按钮进入新增数据源页面,如下图所示: -![InfluxDB-01zh-EnterDataSourcePage.png](./pic/InfluxDB-01zh-EnterDataSourcePage.png "进入新增数据源页面") +![Common-zh00-EnterDataSourcePage.png](./pic/Common-zh00-EnterDataSourcePage.png "进入新增数据源页面") ### 2. 配置基本信息 @@ -56,11 +56,19 @@ MySQL 是最流行的关系型数据库之一。很多系统都曾经或正在 ### 6. 配置 SQL 查询 +**子表字段** 用于拆分子表的字段,它是一条 select distinct 的 SQL 语句,查询指定字段组合的非重复项,通常与 transform 中的 tag 相对应: +> 此项配置主要为了解决数据迁移乱序问题,需要结合**SQL 模板**共同使用,否则不能达到预期效果,使用示例如下: +> 1. 子表字段填写语句 `select distinct col_name1, col_name2 from table`,它表示使用源表中的字段 col_name1 与 col_name2 拆分目标超级表的子表 +> 2. 在**SQL 模板**中添加子表字段占位符,例如 `select * from table where ts >= ${start} and ts < ${end} and ${col_name1} and ${col_name2}` 中的 `${col_name1} and ${col_name2}` 部分 +> 3. 在 **transform** 中配置 `col_name1` 与 `col_name2` 两个 tag 映射 + **SQL 模板** 用于查询的 SQL 语句模板,SQL 语句中必须包含时间范围条件,且开始时间和结束时间必须成对出现。SQL 语句模板中定义的时间范围由源数据库中的某个代表时间的列和下面定义的占位符组成。 > SQL使用不同的占位符表示不同的时间格式要求,具体有以下占位符格式: > 1. `${start}`、`${end}`:表示 RFC3339 格式时间戳,如: 2024-03-14T08:00:00+0800 > 2. `${start_no_tz}`、`${end_no_tz}`: 表示不带时区的 RFC3339 字符串:2024-03-14T08:00:00 > 3. 
`${start_date}`、`${end_date}`:表示仅日期,如:2024-03-14 +> +> 为了解决迁移数据乱序的问题,应在查询语句中添加排序条件,例如 `order by ts asc`。 **起始时间** 迁移数据的起始时间,此项为必填字段。 diff --git a/docs/zh/06-advanced/05-data-in/14-postgres.md b/docs/zh/06-advanced/05-data-in/14-postgres.md index cf86ab6ee3..af8297bfff 100644 --- a/docs/zh/06-advanced/05-data-in/14-postgres.md +++ b/docs/zh/06-advanced/05-data-in/14-postgres.md @@ -17,7 +17,7 @@ TDengine 可以高效地从 PostgreSQL 读取数据并将其写入 TDengine, 在数据写入页面中点击左上角的 **+新增数据源** 按钮进入新增数据源页面,如下图所示: -![PostgreSQL-01zh-EnterDataSourcePage.png](./pic/PostgreSQL-01zh-EnterDataSourcePage.png "进入新增数据源页面") +![Common-zh00-EnterDataSourcePage.png](./pic/Common-zh00-EnterDataSourcePage.png "进入新增数据源页面") ### 2. 配置基本信息 @@ -57,11 +57,19 @@ TDengine 可以高效地从 PostgreSQL 读取数据并将其写入 TDengine, ### 6. 配置 SQL 查询 +**子表字段** 用于拆分子表的字段,它是一条 select distinct 的 SQL 语句,查询指定字段组合的非重复项,通常与 transform 中的 tag 相对应: +> 此项配置主要为了解决数据迁移乱序问题,需要结合**SQL 模板**共同使用,否则不能达到预期效果,使用示例如下: +> 1. 子表字段填写语句 `select distinct col_name1, col_name2 from table`,它表示使用源表中的字段 col_name1 与 col_name2 拆分目标超级表的子表 +> 2. 在**SQL 模板**中添加子表字段占位符,例如 `select * from table where ts >= ${start} and ts < ${end} and ${col_name1} and ${col_name2}` 中的 `${col_name1} and ${col_name2}` 部分 +> 3. 在 **transform** 中配置 `col_name1` 与 `col_name2` 两个 tag 映射 + **SQL 模板** 用于查询的 SQL 语句模板,SQL 语句中必须包含时间范围条件,且开始时间和结束时间必须成对出现。SQL 语句模板中定义的时间范围由源数据库中的某个代表时间的列和下面定义的占位符组成。 > SQL使用不同的占位符表示不同的时间格式要求,具体有以下占位符格式: > 1. `${start}`、`${end}`:表示 RFC3339 格式时间戳,如: 2024-03-14T08:00:00+0800 > 2. `${start_no_tz}`、`${end_no_tz}`: 表示不带时区的 RFC3339 字符串:2024-03-14T08:00:00 > 3. 
`${start_date}`、`${end_date}`:表示仅日期,如:2024-03-14 +> +> 为了解决迁移数据乱序的问题,应在查询语句中添加排序条件,例如 `order by ts asc`。 **起始时间** 迁移数据的起始时间,此项为必填字段。 diff --git a/docs/zh/06-advanced/05-data-in/15-oracle.md b/docs/zh/06-advanced/05-data-in/15-oracle.md index c99f6aae47..39bbab32d3 100644 --- a/docs/zh/06-advanced/05-data-in/15-oracle.md +++ b/docs/zh/06-advanced/05-data-in/15-oracle.md @@ -17,7 +17,7 @@ TDengine 可以高效地从 Oracle 读取数据并将其写入 TDengine,以实 在数据写入页面中点击左上角的 **+新增数据源** 按钮进入新增数据源页面,如下图所示: -![Oracle-01zh-EnterDataSourcePage.png](./pic/Oracle-01zh-EnterDataSourcePage.png "进入新增数据源页面") +![Common-zh00-EnterDataSourcePage.png](./pic/Common-zh00-EnterDataSourcePage.png "进入新增数据源页面") ### 2. 配置基本信息 @@ -49,11 +49,19 @@ TDengine 可以高效地从 Oracle 读取数据并将其写入 TDengine,以实 ### 5. 配置 SQL 查询 +**子表字段** 用于拆分子表的字段,它是一条 select distinct 的 SQL 语句,查询指定字段组合的非重复项,通常与 transform 中的 tag 相对应: +> 此项配置主要为了解决数据迁移乱序问题,需要结合**SQL 模板**共同使用,否则不能达到预期效果,使用示例如下: +> 1. 子表字段填写语句 `select distinct col_name1, col_name2 from table`,它表示使用源表中的字段 col_name1 与 col_name2 拆分目标超级表的子表 +> 2. 在**SQL 模板**中添加子表字段占位符,例如 `select * from table where ts >= ${start} and ts < ${end} and ${col_name1} and ${col_name2}` 中的 `${col_name1} and ${col_name2}` 部分 +> 3. 在 **transform** 中配置 `col_name1` 与 `col_name2` 两个 tag 映射 + **SQL 模板** 用于查询的 SQL 语句模板,SQL 语句中必须包含时间范围条件,且开始时间和结束时间必须成对出现。SQL 语句模板中定义的时间范围由源数据库中的某个代表时间的列和下面定义的占位符组成。 > SQL使用不同的占位符表示不同的时间格式要求,具体有以下占位符格式: > 1. `${start}`、`${end}`:表示 RFC3339 格式时间戳,如: 2024-03-14T08:00:00+0800 > 2. `${start_no_tz}`、`${end_no_tz}`: 表示不带时区的 RFC3339 字符串:2024-03-14T08:00:00 > 3. 
`${start_date}`、`${end_date}`:表示仅日期,但 Oracle 中没有纯日期类型,所以它会带零时零分零秒,如:2024-03-14 00:00:00,所以使用 `date <= ${end_date}` 时需要注意,它不能包含 2024-03-14 当天数据 +> +> 为了解决迁移数据乱序的问题,应在查询语句中添加排序条件,例如 `order by ts asc`。 **起始时间** 迁移数据的起始时间,此项为必填字段。 diff --git a/docs/zh/06-advanced/05-data-in/16-mssql.md b/docs/zh/06-advanced/05-data-in/16-mssql.md index ba5451434d..81e9e98013 100644 --- a/docs/zh/06-advanced/05-data-in/16-mssql.md +++ b/docs/zh/06-advanced/05-data-in/16-mssql.md @@ -15,7 +15,7 @@ Microsoft SQL Server 是最流行的关系型数据库之一。很多系统都 在数据写入页面中点击左上角的 **+新增数据源** 按钮进入新增数据源页面,如下图所示: -![InfluxDB-01zh-EnterDataSourcePage.png](./pic/InfluxDB-01zh-EnterDataSourcePage.png "进入新增数据源页面") +![Common-zh00-EnterDataSourcePage.png](./pic/Common-zh00-EnterDataSourcePage.png "进入新增数据源页面") ### 2. 配置基本信息 @@ -61,6 +61,12 @@ Microsoft SQL Server 是最流行的关系型数据库之一。很多系统都 ### 6. 配置 SQL 查询 +**子表字段** 用于拆分子表的字段,它是一条 select distinct 的 SQL 语句,查询指定字段组合的非重复项,通常与 transform 中的 tag 相对应: +> 此项配置主要为了解决数据迁移乱序问题,需要结合**SQL 模板**共同使用,否则不能达到预期效果,使用示例如下: +> 1. 子表字段填写语句 `select distinct col_name1, col_name2 from table`,它表示使用源表中的字段 col_name1 与 col_name2 拆分目标超级表的子表 +> 2. 在**SQL 模板**中添加子表字段占位符,例如 `select * from table where ts >= ${start} and ts < ${end} and ${col_name1} and ${col_name2}` 中的 `${col_name1} and ${col_name2}` 部分 +> 3. 在 **transform** 中配置 `col_name1` 与 `col_name2` 两个 tag 映射 + **SQL 模板** 用于查询的 SQL 语句模板,SQL 语句中必须包含时间范围条件,且开始时间和结束时间必须成对出现。SQL 语句模板中定义的时间范围由源数据库中的某个代表时间的列和下面定义的占位符组成。 > SQL使用不同的占位符表示不同的时间格式要求,具体有以下占位符格式: > 1. `${start}`、`${end}`:表示 RFC3339 格式时间戳,如: 2024-03-14T08:00:00+0800 @@ -68,6 +74,8 @@ Microsoft SQL Server 是最流行的关系型数据库之一。很多系统都 > 3. 
`${start_date}`、`${end_date}`:表示仅日期,如:2024-03-14 > > 注意:只有 `datetime2` 与 `datetimeoffset` 支持使用 start/end 查询,`datetime` 与 `smalldatetime` 只能使用 start_no_tz/end_no_tz 查询,而 `timestamp` 不能用作查询条件。 +> +> 为了解决迁移数据乱序的问题,应在查询语句中添加排序条件,例如 `order by ts asc`。 **起始时间** 迁移数据的起始时间,此项为必填字段。 diff --git a/docs/zh/06-advanced/05-data-in/17-mongodb.md b/docs/zh/06-advanced/05-data-in/17-mongodb.md new file mode 100644 index 0000000000..2434e2e122 --- /dev/null +++ b/docs/zh/06-advanced/05-data-in/17-mongodb.md @@ -0,0 +1,135 @@ +--- +title: "MongoDB" +sidebar_label: "MongoDB" +--- + +本节讲述如何通过 Explorer 界面创建数据迁移任务,从 MongoDB 迁移数据到当前 TDengine 集群。 + +## 功能概述 + +MongoDB 是一个介于关系型数据库与非关系型数据库之间的产品,被广泛应用于内容管理系统、移动应用与物联网等众多领域。从 TDengine 企业版 3.3.3.0 开始,TDengine 可以高效地从 MongoDB 读取数据并将其写入 TDengine,以实现历史数据迁移或实时数据同步,解决业务面临的技术痛点。 + +## 创建任务 + +### 1. 新增数据源 + +在数据写入页面中点击左上角的 **+新增数据源** 按钮进入新增数据源页面,如下图所示: + +![Common-zh00-EnterDataSourcePage.png](./pic/Common-zh00-EnterDataSourcePage.png "进入新增数据源页面") + +### 2. 配置基本信息 + +在 **名称** 字段中输入任务名称,例如 `test_mongodb_01` 。 + +选择 **类型** 下拉框中的 `MongoDB` ,如下图所示(选择完成后页面中的字段会发生变化)。 + +**代理** 是非必填项,如有需要,可以在下拉框中选择指定的代理,也可以先点击右侧的 **+创建新的代理** 按钮创建一个新的代理。 + +**目标数据库** 是必填项,可以在下拉框中选择指定的数据库,也可以先点击右侧的 **+创建数据库** 按钮创建一个新的数据库。 + +![mongodb-01.png](./pic/mongodb-01.png "选择数据源类型为 MongoDB") + +### 3. 配置连接信息 + +在 **连接配置** 区域填写 *`源 MongoDB 数据库的连接信息`*,如下图所示: + +![mongodb-02.png](./pic/mongodb-02.png "填写源 MongoDB 数据库的连接信息") + +### 4. 配置认证信息 + +**用户** 输入源 MongoDB 数据库的用户,该用户必须在 MongoDB 系统中拥有读取权限。 + +**密码** 输入源 MongoDB 数据库中上方用户的登陆密码。 + +**认证数据库** MongoDB 中存储用户信息的数据库,默认为 admin。 + +![mongodb-03.png](./pic/mongodb-03.png "配置认证信息") + +### 5. 配置连接选项 + +**应用名称** 设置应用程序名称,用于标识连接的应用程序。 + +**SSL 证书** 设置是否使用加密连接,默认关闭,如果开启,需要上传以下两个文件: + +  1. **CA 文件** 上传 SSL 加密的证书授权文件。 + +  2. **证书文件** 上传 SSL 加密的证书文件。 + +![ mongodb-04.png](./pic/mongodb-04.png "配置连接选项") + +然后点击 **检查连通性** 按钮,用户可以点击此按钮检查上方填写的信息是否可以正常获取源 MongoDB 数据库的数据。 + +### 6. 
配置数据查询 + +**数据库** MongoDB 中源数据库,可以使用占位符进行动态配置,例如 `database_${Y}`,可用占位符列表见下方表格。 + +**集合** MongoDB 中集合,可以使用占位符进行动态配置,例如 `collection_${md}`,可用占位符列表见下方表格。 + +|占位符|描述|示例数据| +| :-----: | :------------: |:--------:| +|Y|完整的公历年表示,零填充的 4 位整数|2024| +|y|公历年除以 100,零填充的 2 位整数|24| +|M|整数月份(1 - 12)|1| +|m|整数月份(01 - 12)|01| +|B|月份英文全拼|January| +|b|月份英文的缩写(3 个字母)|Jan| +|D|日期的数字表示(1 - 31)|1| +|d|日期的数字表示(01 - 31)|01| +|J|一年中的第几天(1 - 366)|1| +|j|一年中的第几天(001 - 366)|001| +|F|相当于 `${Y}-${m}-${d}`|2024-01-01| + +**子表字段** 用于拆分子表的字段,通常与 transform 中的 tag 相对应,多个字段使用英文逗号分隔,例如 col_name1,col_name2。 +此项配置主要为了解决数据迁移乱序问题,需要结合**查询模板**共同使用,否则不能达到预期效果,使用示例如下: +1. 配置两个子表字段 `col_name1,col_name2` +2. 在**查询模板**中添加子表字段占位符,例如 `{"ddate":{"$gte":${start_datetime},"$lt":${end_datetime}}, ${col_name1}, ${col_name2}}` 中的 `${col_name1}, ${col_name2}` 部分 +3. 在 **transform** 中配置 `col_name1` 与 `col_name2` 两个 tag 映射 + +**查询模板** 用于查询数据的查询语句,JSON格式,语句中必须包含时间范围条件,且开始时间和结束时间必须成对出现。模板中定义的时间范围由源数据库中的某个代表时间的列和下面定义的占位符组成。 +使用不同的占位符表示不同的时间格式要求,具体有以下占位符格式: +1. `${start_datetime}`、`${end_datetime}`:对应后端 datetime 类型字段的筛选,如:`{"ddate":{"$gte":${start_datetime},"$lt":${end_datetime}}}` 将被转换为 `{"ddate":{"$gte":{"$date":"2024-06-01T00:00:00+00:00"},"$lt":{"$date":"2024-07-01T00:00:00+00:00"}}}` +2. `${start_timestamp}`、`${end_timestamp}`: 对应后端 timestamp 类型字段的筛选,如:`{"ttime":{"$gte":${start_timestamp},"$lt":${end_timestamp}}}` 将被转换为 `{"ttime":{"$gte":{"$timestamp":{"t":123,"i":456}},"$lt":{"$timestamp":{"t":123,"i":456}}}}` + +**查询排序** 执行查询时的排序条件,JSON格式,它必须符合 MongoDB 排序条件的格式规范,使用示例如下: +1. `{"createtime":1}`:MongoDB 查询结果按 createtime 正序返回。 +2. `{"createdate":1, "createtime":1}`:MongoDB 查询结果按 createdate 正序、createtime 正序返回。 + +**起始时间** 迁移数据的起始时间,此项为必填字段。 + +**结束时间** 迁移数据的结束时间,可留空。如果设置,则迁移任务执行到结束时间后,任务完成自动停止;如果留空,则持续同步实时数据,任务不会自动停止。 + +**查询间隔** 分段查询数据的时间间隔,默认1天。为了避免查询数据量过大,一次数据同步子任务会使用查询间隔分时间段查询数据。 + +**延迟时长** 实时同步数据场景中,为了避免延迟写入的数据丢失,每次同步任务会读取延迟时长之前的数据。 + +![ mongodb-05.png](./pic/mongodb-05.png "配置数据查询") + +### 7. 
配置数据映射 + +在 **Payload 转换** 区域填写数据映射相关的配置参数。 + +点击 **从服务器检索** 按钮,从 MongoDB 服务器获取示例数据。 + +在 **解析** 中选择 JSON/Regex/UDT 三种规则解析原始消息体,配置完成后点击右侧的 **预览** 按钮可以查看解析的结果。 + +在 **从列中提取或拆分** 中填写从消息体中提取或拆分的字段,例如:将 vValue 字段拆分成 `vValue_0` 和 `vValue_1` 这 2 个字段,选择 split 提取器,seperator 填写分割符 `,`, number 填写 2,配置完成后点击右侧的 **预览** 按钮可以查看转换的结果。 + +在 **过滤** 中,填写过滤条件,例如:填写 `Value > 0`,则只有 Value 大于 0 的数据才会被写入 TDengine,配置完成后点击右侧的 **预览** 按钮可以查看过滤的结果。 + +在 **映射** 中,选择要映射到 TDengine 的超级表,以及映射到超级表的列,配置完成后点击右侧的 **预览** 按钮可以查看映射的结果。 + +![mongodb-06.png](pic/mongodb-06.png) + +### 8. 配置高级选项 + +**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示: + +**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。 + +**批次大小** 单次发送的最大消息数或行数。默认是 10000。 + +![mongodb-07.png](pic/mongodb-07.png) + +### 9. 创建完成 + +点击 **提交** 按钮,完成创建 MongoDB 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。 diff --git a/docs/zh/06-advanced/05-data-in/index.md b/docs/zh/06-advanced/05-data-in/index.md index 1a142fd47a..7e5c467010 100644 --- a/docs/zh/06-advanced/05-data-in/index.md +++ b/docs/zh/06-advanced/05-data-in/index.md @@ -38,7 +38,6 @@ TDengine Enterprise 配备了一个强大的可视化数据管理工具—taosEx 下面详细讲解数据转换规则 - ### 解析 仅非结构化的数据源需要这个步骤,目前 MQTT 和 Kafka 数据源会使用这个步骤提供的规则来解析非结构化数据,以初步获取结构化数据,即可以以字段描述的行列数据。在 explorer 中您需要提供示例数据和解析规则,来预览解析出以表格呈现的结构化数据。 @@ -53,13 +52,15 @@ TDengine Enterprise 配备了一个强大的可视化数据管理工具—taosEx 2. 点击右侧按钮 “从服务器检索” 则从配置的服务器获取示例数据,并追加到示例数据 textarea 中; 3. 
上传文件,将文件内容追加到示例数据 textarea 中。 +每一条示例数据以回车符结尾。 + #### 解析 解析就是通过解析规则,将非结构化字符串解析为结构化数据。消息体的解析规则目前支持 JSON、Regex 和 UDT。 ##### JSON 解析 -如下 JSON 示例数据,可自动解析出字段:`groupid`、`voltage`、`current`、`ts`、`inuse`、`location`。 +JSON 解析支持 JSONObject 或者 JSONArray。 如下 JSON 示例数据,可自动解析出字段:`groupid`、`voltage`、`current`、`ts`、`inuse`、`location`。 ``` json {"groupid": 170001, "voltage": "221V", "current": 12.3, "ts": "2023-12-18T22:12:00", "inuse": true, "location": "beijing.chaoyang.datun"} @@ -67,6 +68,16 @@ TDengine Enterprise 配备了一个强大的可视化数据管理工具—taosEx {"groupid": 170001, "voltage": "216V", "current": 12.5, "ts": "2023-12-18T22:12:04", "inuse": false, "location": "beijing.chaoyang.datun"} ``` +或者 + +``` json +[{"groupid": 170001, "voltage": "221V", "current": 12.3, "ts": "2023-12-18T22:12:00", "inuse": true, "location": "beijing.chaoyang.datun"}, +{"groupid": 170001, "voltage": "220V", "current": 12.2, "ts": "2023-12-18T22:12:02", "inuse": true, "location": "beijing.chaoyang.datun"}, +{"groupid": 170001, "voltage": "216V", "current": 12.5, "ts": "2023-12-18T22:12:04", "inuse": false, "location": "beijing.chaoyang.datun"}] +``` + +后续示例仅以JSONObject 为例说明。 + 如下嵌套结构的 JSON 数据,可自动解析出字段`groupid`、`data_voltage`、`data_current`、`ts`、`inuse`、`location_0_province`、`location_0_city`、`location_0_datun`,也可以选择要解析的字段,并设置解析的别名。 ``` json diff --git a/docs/zh/06-advanced/05-data-in/pic/Common-zh00-EnterDataSourcePage.png b/docs/zh/06-advanced/05-data-in/pic/Common-zh00-EnterDataSourcePage.png new file mode 100644 index 0000000000..04fa06ce73 Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/pic/Common-zh00-EnterDataSourcePage.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-01zh-EnterDataSourcePage.png b/docs/zh/06-advanced/05-data-in/pic/InfluxDB-01zh-EnterDataSourcePage.png deleted file mode 100644 index ca8107066f..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-01zh-EnterDataSourcePage.png and /dev/null differ diff --git 
a/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-01zh-EnterDataSourcePage.png b/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-01zh-EnterDataSourcePage.png deleted file mode 100644 index ca8107066f..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-01zh-EnterDataSourcePage.png and /dev/null differ diff --git a/docs/zh/06-advanced/05-data-in/pic/Oracle-01zh-EnterDataSourcePage.png b/docs/zh/06-advanced/05-data-in/pic/Oracle-01zh-EnterDataSourcePage.png deleted file mode 100644 index ca8107066f..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/pic/Oracle-01zh-EnterDataSourcePage.png and /dev/null differ diff --git a/docs/zh/06-advanced/05-data-in/pic/PostgreSQL-01zh-EnterDataSourcePage.png b/docs/zh/06-advanced/05-data-in/pic/PostgreSQL-01zh-EnterDataSourcePage.png deleted file mode 100644 index ca8107066f..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/pic/PostgreSQL-01zh-EnterDataSourcePage.png and /dev/null differ diff --git a/docs/zh/06-advanced/05-data-in/pic/mongodb-01.png b/docs/zh/06-advanced/05-data-in/pic/mongodb-01.png new file mode 100644 index 0000000000..570dd7692f Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/pic/mongodb-01.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/mongodb-02.png b/docs/zh/06-advanced/05-data-in/pic/mongodb-02.png new file mode 100644 index 0000000000..72ae02cce5 Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/pic/mongodb-02.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/mongodb-03.png b/docs/zh/06-advanced/05-data-in/pic/mongodb-03.png new file mode 100644 index 0000000000..639466c7c9 Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/pic/mongodb-03.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/mongodb-04.png b/docs/zh/06-advanced/05-data-in/pic/mongodb-04.png new file mode 100644 index 0000000000..55b4a718d9 Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/pic/mongodb-04.png differ diff --git 
a/docs/zh/06-advanced/05-data-in/pic/mongodb-05.png b/docs/zh/06-advanced/05-data-in/pic/mongodb-05.png new file mode 100644 index 0000000000..823dfaf0ce Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/pic/mongodb-05.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/mongodb-06.png b/docs/zh/06-advanced/05-data-in/pic/mongodb-06.png new file mode 100644 index 0000000000..cab2c80368 Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/pic/mongodb-06.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/mongodb-07.png b/docs/zh/06-advanced/05-data-in/pic/mongodb-07.png new file mode 100644 index 0000000000..2305ec3d2e Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/pic/mongodb-07.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/mssql-05.png b/docs/zh/06-advanced/05-data-in/pic/mssql-05.png index de12a55e06..a1e7fa3324 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/mssql-05.png and b/docs/zh/06-advanced/05-data-in/pic/mssql-05.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/mysql-05.png b/docs/zh/06-advanced/05-data-in/pic/mysql-05.png index e168d9e739..dd851ac193 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/mysql-05.png and b/docs/zh/06-advanced/05-data-in/pic/mysql-05.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/oracle-04.png b/docs/zh/06-advanced/05-data-in/pic/oracle-04.png index e168d9e739..650c807183 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/oracle-04.png and b/docs/zh/06-advanced/05-data-in/pic/oracle-04.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/postgres-05.png b/docs/zh/06-advanced/05-data-in/pic/postgres-05.png index e168d9e739..2604deddb7 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/postgres-05.png and b/docs/zh/06-advanced/05-data-in/pic/postgres-05.png differ diff --git a/docs/zh/07-develop/01-connect/index.md b/docs/zh/07-develop/01-connect/index.md index b16e96922f..d15f481b05 100644 --- 
a/docs/zh/07-develop/01-connect/index.md +++ b/docs/zh/07-develop/01-connect/index.md @@ -89,7 +89,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速 com.taosdata.jdbc taos-jdbcdriver - 3.3.2 + 3.3.3 ``` diff --git a/docs/zh/08-operation/tdengine-topology.png b/docs/zh/08-operation/tdengine-topology.png index 7c3f8e078e..f31443ef7f 100644 Binary files a/docs/zh/08-operation/tdengine-topology.png and b/docs/zh/08-operation/tdengine-topology.png differ diff --git a/docs/zh/20-third-party/01-collection/02-prometheus.md b/docs/zh/10-third-party/01-collection/02-prometheus.md similarity index 100% rename from docs/zh/20-third-party/01-collection/02-prometheus.md rename to docs/zh/10-third-party/01-collection/02-prometheus.md diff --git a/docs/zh/20-third-party/01-collection/03-telegraf.md b/docs/zh/10-third-party/01-collection/03-telegraf.md similarity index 100% rename from docs/zh/20-third-party/01-collection/03-telegraf.md rename to docs/zh/10-third-party/01-collection/03-telegraf.md diff --git a/docs/zh/20-third-party/01-collection/05-collectd.md b/docs/zh/10-third-party/01-collection/05-collectd.md similarity index 100% rename from docs/zh/20-third-party/01-collection/05-collectd.md rename to docs/zh/10-third-party/01-collection/05-collectd.md diff --git a/docs/zh/20-third-party/01-collection/06-statsd.md b/docs/zh/10-third-party/01-collection/06-statsd.md similarity index 100% rename from docs/zh/20-third-party/01-collection/06-statsd.md rename to docs/zh/10-third-party/01-collection/06-statsd.md diff --git a/docs/zh/20-third-party/01-collection/07-icinga2.md b/docs/zh/10-third-party/01-collection/07-icinga2.md similarity index 100% rename from docs/zh/20-third-party/01-collection/07-icinga2.md rename to docs/zh/10-third-party/01-collection/07-icinga2.md diff --git a/docs/zh/20-third-party/01-collection/08-tcollector.md b/docs/zh/10-third-party/01-collection/08-tcollector.md similarity index 100% rename from docs/zh/20-third-party/01-collection/08-tcollector.md rename to 
docs/zh/10-third-party/01-collection/08-tcollector.md diff --git a/docs/zh/20-third-party/01-collection/09-emq-broker.md b/docs/zh/10-third-party/01-collection/09-emq-broker.md similarity index 100% rename from docs/zh/20-third-party/01-collection/09-emq-broker.md rename to docs/zh/10-third-party/01-collection/09-emq-broker.md diff --git a/docs/zh/20-third-party/01-collection/10-hive-mq-broker.md b/docs/zh/10-third-party/01-collection/10-hive-mq-broker.md similarity index 100% rename from docs/zh/20-third-party/01-collection/10-hive-mq-broker.md rename to docs/zh/10-third-party/01-collection/10-hive-mq-broker.md diff --git a/docs/zh/20-third-party/01-collection/11-kafka.md b/docs/zh/10-third-party/01-collection/11-kafka.md similarity index 84% rename from docs/zh/20-third-party/01-collection/11-kafka.md rename to docs/zh/10-third-party/01-collection/11-kafka.md index d9a416aa40..75adefbc50 100644 --- a/docs/zh/20-third-party/01-collection/11-kafka.md +++ b/docs/zh/10-third-party/01-collection/11-kafka.md @@ -243,6 +243,7 @@ vi source-demo.json "topic.per.stable": true, "topic.ignore.db": false, "out.format": "line", + "data.precision": "ms", "key.converter": "org.apache.kafka.connect.storage.StringConverter", "value.converter": "org.apache.kafka.connect.storage.StringConverter" } @@ -331,14 +332,13 @@ curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector 1. 打开 KAFKA_HOME/config/producer.properties 配置文件。 2. 
参数说明及配置建议如下: - | **参数** | **参数说明** | **设置建议** | - | --------| --------------------------------- | -------------- | - | producer.type | 此参数用于设置消息的发送方式,默认值为 `sync` 表示同步发送,`async` 表示异步发送。采用异步发送能够提升消息发送的吞吐量。 | async | - | request.required.acks | 参数用于配置生产者发送消息后需要等待的确认数量。当设置为1时,表示只要领导者副本成功写入消息就会给生产者发送确认,而无需等待集群中的其他副本写入成功。这种设置可以在一定程度上保证消息的可靠性,同时也能保证一定的吞吐量。因为不需要等待所有副本都写入成功,所以可以减少生产者的等待时间,提高发送消息的效率。|1| - | max.request.size| 该参数决定了生产者在一次请求中可以发送的最大数据量。其默认值为 1048576,也就是 1M。如果设置得太小,可能会导致频繁的网络请求,降低吞吐量。如果设置得太大,可能会导致内存占用过高,或者在网络状况不佳时增加请求失败的概率。建议设置为 100M。|104857600| - |batch.size| 此参数用于设定 batch 的大小,默认值为 16384,即 16KB。在消息发送过程中,发送到 Kafka 缓冲区中的消息会被划分成一个个的 batch。故而减小 batch 大小有助于降低消息延迟,而增大 batch 大小则有利于提升吞吐量,可根据实际的数据量大小进行合理配置。可根据实际情况进行调整,建议设置为 512K。|524288| - | buffer.memory| 此参数用于设置生产者缓冲待发送消息的内存总量。较大的缓冲区可以允许生产者积累更多的消息后批量发送,提高吞吐量,但也会增加延迟和内存使用。可根据机器资源来配置,建议配置为 1G。|1073741824| - + | **参数** | **参数说明** | **设置建议** | + | --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ | + | producer.type | 此参数用于设置消息的发送方式,默认值为 `sync` 表示同步发送,`async` 表示异步发送。采用异步发送能够提升消息发送的吞吐量。 | async | + | request.required.acks | 参数用于配置生产者发送消息后需要等待的确认数量。当设置为1时,表示只要领导者副本成功写入消息就会给生产者发送确认,而无需等待集群中的其他副本写入成功。这种设置可以在一定程度上保证消息的可靠性,同时也能保证一定的吞吐量。因为不需要等待所有副本都写入成功,所以可以减少生产者的等待时间,提高发送消息的效率。 | 1 | + | max.request.size | 该参数决定了生产者在一次请求中可以发送的最大数据量。其默认值为 1048576,也就是 1M。如果设置得太小,可能会导致频繁的网络请求,降低吞吐量。如果设置得太大,可能会导致内存占用过高,或者在网络状况不佳时增加请求失败的概率。建议设置为 100M。 | 104857600 | + | batch.size | 此参数用于设定 batch 的大小,默认值为 16384,即 16KB。在消息发送过程中,发送到 Kafka 缓冲区中的消息会被划分成一个个的 batch。故而减小 batch 大小有助于降低消息延迟,而增大 batch 大小则有利于提升吞吐量,可根据实际的数据量大小进行合理配置。可根据实际情况进行调整,建议设置为 512K。 | 524288 | + | buffer.memory | 此参数用于设置生产者缓冲待发送消息的内存总量。较大的缓冲区可以允许生产者积累更多的消息后批量发送,提高吞吐量,但也会增加延迟和内存使用。可根据机器资源来配置,建议配置为 1G。 | 
1073741824 | ## 配置参考 @@ -370,7 +370,7 @@ curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector 7. `data.precision`: 使用 InfluxDB 行协议格式时,时间戳的精度。可选值为: 1. ms : 表示毫秒 2. us : 表示微秒 - 3. ns : 表示纳秒。默认为纳秒。 + 3. ns : 表示纳秒。 ### TDengine Source Connector 特有的配置 @@ -381,12 +381,16 @@ curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector 5. `fetch.max.rows` : 检索数据库时最大检索条数。 默认为 100。 6. `query.interval.ms`: 从 TDengine 一次读取数据的时间跨度,需要根据表中的数据特征合理配置,避免一次查询的数据量过大或过小;在具体的环境中建议通过测试设置一个较优值,默认值为 0,即获取到当前最新时间的所有数据。 7. `out.format` : 结果集输出格式。`line` 表示输出格式为 InfluxDB Line 协议格式,`json` 表示输出格式是 json。默认为 line。 -8. `topic.per.stable`: 如果设置为 true,表示一个超级表对应一个 Kafka topic,topic的命名规则 ``;如果设置为 false,则指定的 DB 中的所有数据进入一个 Kafka topic,topic 的命名规则为 `` -9. `topic.ignore.db`: topic 命名规则是否包含 database 名称,true 表示规则为 ``,false 表示规则为 ``,默认 false。此配置项在 `topic.per.stable` 设置为 false 时不生效。 -10. `topic.delimiter`: topic 名称分割符,默认为 `-`。 -11. `read.method`: 从 TDengine 读取数据方式,query 或是 subscription。默认为 subscription。 -12. `subscription.group.id`: 指定 TDengine 数据订阅的组 id,当 `read.method` 为 subscription 时,此项为必填项。 -13. `subscription.from`: 指定 TDengine 数据订阅起始位置,latest 或是 earliest。默认为 latest。 +8. `data.precision`: 使用 InfluxDB 行协议格式时,时间戳的精度。可选值为: + 1. ms : 表示毫秒, + 2. us : 表示微秒 + 3. ns : 表示纳秒。 +9. `topic.per.stable`: 如果设置为 true,表示一个超级表对应一个 Kafka topic,topic的命名规则 ``;如果设置为 false,则指定的 DB 中的所有数据进入一个 Kafka topic,topic 的命名规则为 `` +10. `topic.ignore.db`: topic 命名规则是否包含 database 名称,true 表示规则为 ``,false 表示规则为 ``,默认 false。此配置项在 `topic.per.stable` 设置为 false 时不生效。 +11. `topic.delimiter`: topic 名称分割符,默认为 `-`。 +12. `read.method`: 从 TDengine 读取数据方式,query 或是 subscription。默认为 subscription。 +13. `subscription.group.id`: 指定 TDengine 数据订阅的组 id,当 `read.method` 为 subscription 时,此项为必填项。 +14. 
`subscription.from`: 指定 TDengine 数据订阅起始位置,latest 或是 earliest。默认为 latest。 ## 其他说明 diff --git a/docs/zh/20-third-party/01-collection/emqx/add-action-handler.webp b/docs/zh/10-third-party/01-collection/emqx/add-action-handler.webp similarity index 100% rename from docs/zh/20-third-party/01-collection/emqx/add-action-handler.webp rename to docs/zh/10-third-party/01-collection/emqx/add-action-handler.webp diff --git a/docs/zh/20-third-party/01-collection/emqx/check-result-in-taos.webp b/docs/zh/10-third-party/01-collection/emqx/check-result-in-taos.webp similarity index 100% rename from docs/zh/20-third-party/01-collection/emqx/check-result-in-taos.webp rename to docs/zh/10-third-party/01-collection/emqx/check-result-in-taos.webp diff --git a/docs/zh/20-third-party/01-collection/emqx/check-rule-matched.webp b/docs/zh/10-third-party/01-collection/emqx/check-rule-matched.webp similarity index 100% rename from docs/zh/20-third-party/01-collection/emqx/check-rule-matched.webp rename to docs/zh/10-third-party/01-collection/emqx/check-rule-matched.webp diff --git a/docs/zh/20-third-party/01-collection/emqx/client-num.webp b/docs/zh/10-third-party/01-collection/emqx/client-num.webp similarity index 100% rename from docs/zh/20-third-party/01-collection/emqx/client-num.webp rename to docs/zh/10-third-party/01-collection/emqx/client-num.webp diff --git a/docs/zh/20-third-party/01-collection/emqx/create-resource.webp b/docs/zh/10-third-party/01-collection/emqx/create-resource.webp similarity index 100% rename from docs/zh/20-third-party/01-collection/emqx/create-resource.webp rename to docs/zh/10-third-party/01-collection/emqx/create-resource.webp diff --git a/docs/zh/20-third-party/01-collection/emqx/create-rule.webp b/docs/zh/10-third-party/01-collection/emqx/create-rule.webp similarity index 100% rename from docs/zh/20-third-party/01-collection/emqx/create-rule.webp rename to docs/zh/10-third-party/01-collection/emqx/create-rule.webp diff --git 
a/docs/zh/20-third-party/01-collection/emqx/edit-action.webp b/docs/zh/10-third-party/01-collection/emqx/edit-action.webp similarity index 100% rename from docs/zh/20-third-party/01-collection/emqx/edit-action.webp rename to docs/zh/10-third-party/01-collection/emqx/edit-action.webp diff --git a/docs/zh/20-third-party/01-collection/emqx/edit-resource.webp b/docs/zh/10-third-party/01-collection/emqx/edit-resource.webp similarity index 100% rename from docs/zh/20-third-party/01-collection/emqx/edit-resource.webp rename to docs/zh/10-third-party/01-collection/emqx/edit-resource.webp diff --git a/docs/zh/20-third-party/01-collection/emqx/login-dashboard.webp b/docs/zh/10-third-party/01-collection/emqx/login-dashboard.webp similarity index 100% rename from docs/zh/20-third-party/01-collection/emqx/login-dashboard.webp rename to docs/zh/10-third-party/01-collection/emqx/login-dashboard.webp diff --git a/docs/zh/20-third-party/01-collection/emqx/rule-engine.webp b/docs/zh/10-third-party/01-collection/emqx/rule-engine.webp similarity index 100% rename from docs/zh/20-third-party/01-collection/emqx/rule-engine.webp rename to docs/zh/10-third-party/01-collection/emqx/rule-engine.webp diff --git a/docs/zh/20-third-party/01-collection/emqx/rule-header-key-value.webp b/docs/zh/10-third-party/01-collection/emqx/rule-header-key-value.webp similarity index 100% rename from docs/zh/20-third-party/01-collection/emqx/rule-header-key-value.webp rename to docs/zh/10-third-party/01-collection/emqx/rule-header-key-value.webp diff --git a/docs/zh/20-third-party/01-collection/emqx/run-mock.webp b/docs/zh/10-third-party/01-collection/emqx/run-mock.webp similarity index 100% rename from docs/zh/20-third-party/01-collection/emqx/run-mock.webp rename to docs/zh/10-third-party/01-collection/emqx/run-mock.webp diff --git a/docs/zh/20-third-party/01-collection/index.md b/docs/zh/10-third-party/01-collection/index.md similarity index 100% rename from docs/zh/20-third-party/01-collection/index.md 
rename to docs/zh/10-third-party/01-collection/index.md diff --git a/docs/zh/20-third-party/01-collection/kafka/Kafka_Connect.webp b/docs/zh/10-third-party/01-collection/kafka/Kafka_Connect.webp similarity index 100% rename from docs/zh/20-third-party/01-collection/kafka/Kafka_Connect.webp rename to docs/zh/10-third-party/01-collection/kafka/Kafka_Connect.webp diff --git a/docs/zh/20-third-party/01-collection/kafka/confluentPlatform.webp b/docs/zh/10-third-party/01-collection/kafka/confluentPlatform.webp similarity index 100% rename from docs/zh/20-third-party/01-collection/kafka/confluentPlatform.webp rename to docs/zh/10-third-party/01-collection/kafka/confluentPlatform.webp diff --git a/docs/zh/20-third-party/01-collection/kafka/streaming-integration-with-kafka-connect.webp b/docs/zh/10-third-party/01-collection/kafka/streaming-integration-with-kafka-connect.webp similarity index 100% rename from docs/zh/20-third-party/01-collection/kafka/streaming-integration-with-kafka-connect.webp rename to docs/zh/10-third-party/01-collection/kafka/streaming-integration-with-kafka-connect.webp diff --git a/docs/zh/20-third-party/03-visual/01-grafana.mdx b/docs/zh/10-third-party/03-visual/01-grafana.mdx similarity index 100% rename from docs/zh/20-third-party/03-visual/01-grafana.mdx rename to docs/zh/10-third-party/03-visual/01-grafana.mdx diff --git a/docs/zh/20-third-party/03-visual/add_datasource1.webp b/docs/zh/10-third-party/03-visual/add_datasource1.webp similarity index 100% rename from docs/zh/20-third-party/03-visual/add_datasource1.webp rename to docs/zh/10-third-party/03-visual/add_datasource1.webp diff --git a/docs/zh/20-third-party/03-visual/add_datasource2.webp b/docs/zh/10-third-party/03-visual/add_datasource2.webp similarity index 100% rename from docs/zh/20-third-party/03-visual/add_datasource2.webp rename to docs/zh/10-third-party/03-visual/add_datasource2.webp diff --git a/docs/zh/20-third-party/03-visual/add_datasource3.webp 
b/docs/zh/10-third-party/03-visual/add_datasource3.webp similarity index 100% rename from docs/zh/20-third-party/03-visual/add_datasource3.webp rename to docs/zh/10-third-party/03-visual/add_datasource3.webp diff --git a/docs/zh/20-third-party/03-visual/add_datasource4.webp b/docs/zh/10-third-party/03-visual/add_datasource4.webp similarity index 100% rename from docs/zh/20-third-party/03-visual/add_datasource4.webp rename to docs/zh/10-third-party/03-visual/add_datasource4.webp diff --git a/docs/zh/20-third-party/03-visual/alert-annotations.webp b/docs/zh/10-third-party/03-visual/alert-annotations.webp similarity index 100% rename from docs/zh/20-third-party/03-visual/alert-annotations.webp rename to docs/zh/10-third-party/03-visual/alert-annotations.webp diff --git a/docs/zh/20-third-party/03-visual/alert-email.webp b/docs/zh/10-third-party/03-visual/alert-email.webp similarity index 100% rename from docs/zh/20-third-party/03-visual/alert-email.webp rename to docs/zh/10-third-party/03-visual/alert-email.webp diff --git a/docs/zh/20-third-party/03-visual/alert-evaluation.webp b/docs/zh/10-third-party/03-visual/alert-evaluation.webp similarity index 100% rename from docs/zh/20-third-party/03-visual/alert-evaluation.webp rename to docs/zh/10-third-party/03-visual/alert-evaluation.webp diff --git a/docs/zh/20-third-party/03-visual/alert-feishu1.webp b/docs/zh/10-third-party/03-visual/alert-feishu1.webp similarity index 100% rename from docs/zh/20-third-party/03-visual/alert-feishu1.webp rename to docs/zh/10-third-party/03-visual/alert-feishu1.webp diff --git a/docs/zh/20-third-party/03-visual/alert-feishu2.webp b/docs/zh/10-third-party/03-visual/alert-feishu2.webp similarity index 100% rename from docs/zh/20-third-party/03-visual/alert-feishu2.webp rename to docs/zh/10-third-party/03-visual/alert-feishu2.webp diff --git a/docs/zh/20-third-party/03-visual/alert-labels.webp b/docs/zh/10-third-party/03-visual/alert-labels.webp similarity index 100% rename from 
docs/zh/20-third-party/03-visual/alert-labels.webp rename to docs/zh/10-third-party/03-visual/alert-labels.webp diff --git a/docs/zh/20-third-party/03-visual/alert-notification1.webp b/docs/zh/10-third-party/03-visual/alert-notification1.webp similarity index 100% rename from docs/zh/20-third-party/03-visual/alert-notification1.webp rename to docs/zh/10-third-party/03-visual/alert-notification1.webp diff --git a/docs/zh/20-third-party/03-visual/alert-notification2.webp b/docs/zh/10-third-party/03-visual/alert-notification2.webp similarity index 100% rename from docs/zh/20-third-party/03-visual/alert-notification2.webp rename to docs/zh/10-third-party/03-visual/alert-notification2.webp diff --git a/docs/zh/20-third-party/03-visual/alert-rules1.webp b/docs/zh/10-third-party/03-visual/alert-rules1.webp similarity index 100% rename from docs/zh/20-third-party/03-visual/alert-rules1.webp rename to docs/zh/10-third-party/03-visual/alert-rules1.webp diff --git a/docs/zh/20-third-party/03-visual/create_dashboard1.webp b/docs/zh/10-third-party/03-visual/create_dashboard1.webp similarity index 100% rename from docs/zh/20-third-party/03-visual/create_dashboard1.webp rename to docs/zh/10-third-party/03-visual/create_dashboard1.webp diff --git a/docs/zh/20-third-party/03-visual/create_dashboard2.webp b/docs/zh/10-third-party/03-visual/create_dashboard2.webp similarity index 100% rename from docs/zh/20-third-party/03-visual/create_dashboard2.webp rename to docs/zh/10-third-party/03-visual/create_dashboard2.webp diff --git a/docs/zh/20-third-party/03-visual/create_dashboard3.webp b/docs/zh/10-third-party/03-visual/create_dashboard3.webp similarity index 100% rename from docs/zh/20-third-party/03-visual/create_dashboard3.webp rename to docs/zh/10-third-party/03-visual/create_dashboard3.webp diff --git a/docs/zh/20-third-party/03-visual/dashboard-15146.webp b/docs/zh/10-third-party/03-visual/dashboard-15146.webp similarity index 100% rename from 
docs/zh/20-third-party/03-visual/dashboard-15146.webp rename to docs/zh/10-third-party/03-visual/dashboard-15146.webp diff --git a/docs/zh/20-third-party/03-visual/grafana-data-source.png b/docs/zh/10-third-party/03-visual/grafana-data-source.png similarity index 100% rename from docs/zh/20-third-party/03-visual/grafana-data-source.png rename to docs/zh/10-third-party/03-visual/grafana-data-source.png diff --git a/docs/zh/20-third-party/03-visual/grafana-install-and-config.png b/docs/zh/10-third-party/03-visual/grafana-install-and-config.png similarity index 100% rename from docs/zh/20-third-party/03-visual/grafana-install-and-config.png rename to docs/zh/10-third-party/03-visual/grafana-install-and-config.png diff --git a/docs/zh/20-third-party/03-visual/grafana-plugin-search-tdengine.png b/docs/zh/10-third-party/03-visual/grafana-plugin-search-tdengine.png similarity index 100% rename from docs/zh/20-third-party/03-visual/grafana-plugin-search-tdengine.png rename to docs/zh/10-third-party/03-visual/grafana-plugin-search-tdengine.png diff --git a/docs/zh/20-third-party/03-visual/import_dashboard.webp b/docs/zh/10-third-party/03-visual/import_dashboard.webp similarity index 100% rename from docs/zh/20-third-party/03-visual/import_dashboard.webp rename to docs/zh/10-third-party/03-visual/import_dashboard.webp diff --git a/docs/zh/20-third-party/03-visual/import_dashboard1.webp b/docs/zh/10-third-party/03-visual/import_dashboard1.webp similarity index 100% rename from docs/zh/20-third-party/03-visual/import_dashboard1.webp rename to docs/zh/10-third-party/03-visual/import_dashboard1.webp diff --git a/docs/zh/20-third-party/03-visual/import_dashboard2.webp b/docs/zh/10-third-party/03-visual/import_dashboard2.webp similarity index 100% rename from docs/zh/20-third-party/03-visual/import_dashboard2.webp rename to docs/zh/10-third-party/03-visual/import_dashboard2.webp diff --git a/docs/zh/20-third-party/03-visual/index.md b/docs/zh/10-third-party/03-visual/index.md 
similarity index 100% rename from docs/zh/20-third-party/03-visual/index.md rename to docs/zh/10-third-party/03-visual/index.md diff --git a/docs/zh/20-third-party/05-bi/01-looker.md b/docs/zh/10-third-party/05-bi/01-looker.md similarity index 100% rename from docs/zh/20-third-party/05-bi/01-looker.md rename to docs/zh/10-third-party/05-bi/01-looker.md diff --git a/docs/zh/20-third-party/05-bi/03-powerbi.md b/docs/zh/10-third-party/05-bi/03-powerbi.md similarity index 80% rename from docs/zh/20-third-party/05-bi/03-powerbi.md rename to docs/zh/10-third-party/05-bi/03-powerbi.md index f4709da447..bd51b5591d 100644 --- a/docs/zh/20-third-party/05-bi/03-powerbi.md +++ b/docs/zh/10-third-party/05-bi/03-powerbi.md @@ -8,35 +8,35 @@ Power BI是由Microsoft提供的一种商业分析工具。通过配置使用ODB ## 前置条件 -安装完成Power BI Desktop软件并运行(如未安装,请从其官方地址下载最新的Windows操作系统X64版本)。 +安装完成Power BI Desktop软件并运行(如未安装,请从其官方地址下载最新的Windows操作系统 32/64 位版本)。 ## 安装 ODBC 驱动 -从TDengine官网下载最新的Windows操作系统X64客户端驱动程序,并安装在运行Power BI的机器上。安装成功后可在“ODBC数据源(64位)”管理工具中看到 TAOS_ODBC_DSN”驱动程序。 +从TDengine官网下载最新的Windows操作系统X64客户端驱动程序,并安装在运行Power BI的机器上。安装成功后可在“ODBC数据源(32位)”或者“ODBC数据源(64位)”管理工具中看到 TDengine 驱动程序。 ## 配置ODBC数据源 配置ODBC数据源的操作步骤如下。 -第1步,在Windows操作系统的开始菜单中搜索并打开“ODBC数据源(64位)”管理工具。 -第2步,点击“用户DSN”选项卡→“添加”按钮,进入“创建新数据源”对话框。 -第3步,选择想要添加的数据源后选择“TDengine”,点击“完成”按钮,进入TDengine ODBC数据源配置页面。填写如下必要信息。 +第1步,在Windows操作系统的开始菜单中搜索并打开“ODBC数据源(32位)”或者“ODBC数据源(64位)”管理工具。 +第2步,点击“用户DSN”选项卡→“添加”按钮,进入“创建新数据源”对话框。 +第3步,在“选择您想为其安装数据源的驱动程序”列表中选择“TDengine”,点击“完成”按钮,进入TDengine ODBC数据源配置页面。填写如下必要信息。 - DSN:数据源名称,必填,比如“MyTDengine”。 - 连接类型:勾选“WebSocket”复选框。 - - 服务地址:输入“taos://127.0.0.1:6041”。 + - URL:ODBC 数据源 URL,必填,比如“http://127.0.0.1:6041”。 - 数据库:表示需要连接的数据库,可选,比如“test”。 - 用户名:输入用户名,如果不填,默认为“root”。 - - 密码:输入用户密码,如果不填,默认为“taosdata”。 -第4步,点击“测试连接”按钮,测试连接情况,如果成功连接,则会提示“成功连接到taos://root:taosdata@127.0.0.1:6041”。 + - 密码:输入用户密码,如果不填,默认为“taosdata”。 + +第4步,点击“测试连接”按钮,测试连接情况,如果成功连接,则会提示“成功连接到http://127.0.0.1:6041”。 第5步,点击“确定”按钮,即可保存配置并退出。 ## 导入TDengine数据到Power BI 
-将TDengine数据导入Power BI的操作步骤如下。 - -第1步,打开Power BI并登录后,点击“主页”→“获取数据”→“其他”→“ODBC”→“连接”,添加数据源。 -第2步,选择刚才创建的数据源名称,比如“MyTDengine”,点击“确定”按钮。在弹出的“ODBC驱动程序”对话框中,在左侧导航栏中点击“默认或自定义”→“连接”按钮,即可连接到配置好的数据源。进入“导航器”后,可以浏览对应数据库的数据表并加载。 -第3步,如果需要输入SQL,则可以点击“高级选项”选项卡,在展开的对话框中输入并加载数据。 +将TDengine数据导入Power BI的操作步骤如下: +第1步,打开Power BI并登录后,点击“主页”→“获取数据”→“其他”→“ODBC”→“连接”,添加数据源。 +第2步,选择刚才创建的数据源名称,比如“MyTDengine”,如果需要输入SQL,则可以点击“高级选项”选项卡,在展开的对话框的编辑框中输入SQL语句。点击“确定”按钮,即可连接到配置好的数据源。 +第3步,进入“导航器”后,可以浏览对应数据库的数据表/视图并加载数据。 为了充分发挥Power BI在分析TDengine中数据方面的优势,用户需要先理解维度、度量、窗口切分查询、数据切分查询、时序和相关性等核心概念,之后通过自定义的SQL导入数据。 - 维度:通常是分类(文本)数据,描述设备、测点、型号等类别信息。在TDengine的超级表中,使用标签列存储数据的维度信息,可以通过形如“select distinct tbname, tag1, tag2 from supertable”的SQL语法快速获得维度信息。 @@ -54,25 +54,25 @@ TDengine采用了一种独特的数据模型,以优化时序数据的存储和 在Power BI中,用户可以将TDengine表中的标签列映射为维度列,以便对数据进行分组和筛选。同时,数据列的聚合结果可以导入为度量列,用于计算关键指标和生成报表。通过这种方式,Power BI能够帮助决策者快速获取所需的信息,深入了解业务运营情况,从而制定更加明智的决策。 -根据如下步骤,便可以体验通过Power BI生成时序数据报表的功能。 +根据如下步骤,便可以体验通过Power BI生成时序数据报表的功能。 第1步,使用TDengine的taosBenchMark快速生成1000块智能电表3天的数据,采集频率为1s。 ```shell - taosBenchmark-t1000-n259200-S1000-H200-y + taosBenchmark -t 1000 -n 259200 -S 1000 -y ``` 第2步,导入维度数据。在Power BI中导入表的标签列,取名为tags,通过如下SQL获取超级表下所有智能电表的标签数据。 ```sql - selectdistincttbnamedevice,groupId,locationfromtest.meters + select distinct tbname device, groupId, location from test.meters ``` 第3步,导入度量数据。在Power BI中,按照1小时的时间窗口,导入每块智能电表的电流均值、电压均值、相位均值,取名为data,SQL如下。 ```sql - 第3步,导入度量数据。在Power BI中,按照1小时的时间窗口,导入每块智能电表的电流均值、电压均值、相位均值,取名为data,SQL如下。 + select tbname, _wstart ws, avg(current), avg(voltage), avg(phase) from test.meters PARTITION by tbname interval(1h) ``` 第4步,导入日期数据。按照1天的时间窗口,获得时序数据的时间范围及数据计数,SQL如下。需要在Power Query编辑器中将date列的格式从“文本”转化为“日期”。 ```sql - select_wstartdate,count(*)fromtest.metersinterval(1d)havingcount(*)>0 + select _wstart date, count(*) from test.meters interval(1d) having count(*)>0 ``` -第5步,建立维度和度量的关联关系。打开模型视图,建立表tags和data的关联关系,将tbname设置为关联数据列。 -第6步,建立日期和度量的关联关系。打开模型视图,建立数据集date和data的关联关系,关联的数据列为date和datatime。 
-第7步,制作报告。在柱状图、饼图等控件中使用这些数据。 +第5步,建立维度和度量的关联关系。打开模型视图,建立表tags和data的关联关系,将tbname设置为关联数据列。 +第6步,建立日期和度量的关联关系。打开模型视图,建立数据集date和data的关联关系,关联的数据列为date和datatime。 +第7步,制作报告。在柱状图、饼图等控件中使用这些数据。 由于TDengine处理时序数据的超强性能,使得用户在数据导入及每日定期刷新数据时,都可以得到非常好的体验。更多有关Power BI视觉效果的构建方法,请参照Power BI的官方文档。 \ No newline at end of file diff --git a/docs/zh/20-third-party/05-bi/05-yhbi.md b/docs/zh/10-third-party/05-bi/05-yhbi.md similarity index 100% rename from docs/zh/20-third-party/05-bi/05-yhbi.md rename to docs/zh/10-third-party/05-bi/05-yhbi.md diff --git a/docs/zh/20-third-party/05-bi/09-seeq.md b/docs/zh/10-third-party/05-bi/09-seeq.md similarity index 100% rename from docs/zh/20-third-party/05-bi/09-seeq.md rename to docs/zh/10-third-party/05-bi/09-seeq.md diff --git a/docs/zh/20-third-party/05-bi/gds/gds-01.webp b/docs/zh/10-third-party/05-bi/gds/gds-01.webp similarity index 100% rename from docs/zh/20-third-party/05-bi/gds/gds-01.webp rename to docs/zh/10-third-party/05-bi/gds/gds-01.webp diff --git a/docs/zh/20-third-party/05-bi/gds/gds-02.png.webp b/docs/zh/10-third-party/05-bi/gds/gds-02.png.webp similarity index 100% rename from docs/zh/20-third-party/05-bi/gds/gds-02.png.webp rename to docs/zh/10-third-party/05-bi/gds/gds-02.png.webp diff --git a/docs/zh/20-third-party/05-bi/gds/gds-03.png.webp b/docs/zh/10-third-party/05-bi/gds/gds-03.png.webp similarity index 100% rename from docs/zh/20-third-party/05-bi/gds/gds-03.png.webp rename to docs/zh/10-third-party/05-bi/gds/gds-03.png.webp diff --git a/docs/zh/20-third-party/05-bi/gds/gds-04.png.webp b/docs/zh/10-third-party/05-bi/gds/gds-04.png.webp similarity index 100% rename from docs/zh/20-third-party/05-bi/gds/gds-04.png.webp rename to docs/zh/10-third-party/05-bi/gds/gds-04.png.webp diff --git a/docs/zh/20-third-party/05-bi/gds/gds-05.png.webp b/docs/zh/10-third-party/05-bi/gds/gds-05.png.webp similarity index 100% rename from docs/zh/20-third-party/05-bi/gds/gds-05.png.webp rename to 
docs/zh/10-third-party/05-bi/gds/gds-05.png.webp diff --git a/docs/zh/20-third-party/05-bi/gds/gds-06.png.webp b/docs/zh/10-third-party/05-bi/gds/gds-06.png.webp similarity index 100% rename from docs/zh/20-third-party/05-bi/gds/gds-06.png.webp rename to docs/zh/10-third-party/05-bi/gds/gds-06.png.webp diff --git a/docs/zh/20-third-party/05-bi/gds/gds-07.png.webp b/docs/zh/10-third-party/05-bi/gds/gds-07.png.webp similarity index 100% rename from docs/zh/20-third-party/05-bi/gds/gds-07.png.webp rename to docs/zh/10-third-party/05-bi/gds/gds-07.png.webp diff --git a/docs/zh/20-third-party/05-bi/gds/gds-08.png.webp b/docs/zh/10-third-party/05-bi/gds/gds-08.png.webp similarity index 100% rename from docs/zh/20-third-party/05-bi/gds/gds-08.png.webp rename to docs/zh/10-third-party/05-bi/gds/gds-08.png.webp diff --git a/docs/zh/20-third-party/05-bi/gds/gds-09.png.webp b/docs/zh/10-third-party/05-bi/gds/gds-09.png.webp similarity index 100% rename from docs/zh/20-third-party/05-bi/gds/gds-09.png.webp rename to docs/zh/10-third-party/05-bi/gds/gds-09.png.webp diff --git a/docs/zh/20-third-party/05-bi/gds/gds-10.png.webp b/docs/zh/10-third-party/05-bi/gds/gds-10.png.webp similarity index 100% rename from docs/zh/20-third-party/05-bi/gds/gds-10.png.webp rename to docs/zh/10-third-party/05-bi/gds/gds-10.png.webp diff --git a/docs/zh/20-third-party/05-bi/gds/gds-11.png.webp b/docs/zh/10-third-party/05-bi/gds/gds-11.png.webp similarity index 100% rename from docs/zh/20-third-party/05-bi/gds/gds-11.png.webp rename to docs/zh/10-third-party/05-bi/gds/gds-11.png.webp diff --git a/docs/zh/20-third-party/05-bi/index.md b/docs/zh/10-third-party/05-bi/index.md similarity index 100% rename from docs/zh/20-third-party/05-bi/index.md rename to docs/zh/10-third-party/05-bi/index.md diff --git a/docs/zh/20-third-party/05-bi/seeq/seeq-demo-schema.webp b/docs/zh/10-third-party/05-bi/seeq/seeq-demo-schema.webp similarity index 100% rename from 
docs/zh/20-third-party/05-bi/seeq/seeq-demo-schema.webp rename to docs/zh/10-third-party/05-bi/seeq/seeq-demo-schema.webp diff --git a/docs/zh/20-third-party/05-bi/seeq/seeq-demo-workbench.webp b/docs/zh/10-third-party/05-bi/seeq/seeq-demo-workbench.webp similarity index 100% rename from docs/zh/20-third-party/05-bi/seeq/seeq-demo-workbench.webp rename to docs/zh/10-third-party/05-bi/seeq/seeq-demo-workbench.webp diff --git a/docs/zh/20-third-party/05-bi/seeq/seeq-forecast-result.webp b/docs/zh/10-third-party/05-bi/seeq/seeq-forecast-result.webp similarity index 100% rename from docs/zh/20-third-party/05-bi/seeq/seeq-forecast-result.webp rename to docs/zh/10-third-party/05-bi/seeq/seeq-forecast-result.webp diff --git a/docs/zh/20-third-party/05-bi/seeq/seeq-workbench-with-tdengine-cloud.webp b/docs/zh/10-third-party/05-bi/seeq/seeq-workbench-with-tdengine-cloud.webp similarity index 100% rename from docs/zh/20-third-party/05-bi/seeq/seeq-workbench-with-tdengine-cloud.webp rename to docs/zh/10-third-party/05-bi/seeq/seeq-workbench-with-tdengine-cloud.webp diff --git a/docs/zh/20-third-party/07-tool/01-dbeaver.md b/docs/zh/10-third-party/07-tool/01-dbeaver.md similarity index 100% rename from docs/zh/20-third-party/07-tool/01-dbeaver.md rename to docs/zh/10-third-party/07-tool/01-dbeaver.md diff --git a/docs/zh/20-third-party/07-tool/03-qstudio.md b/docs/zh/10-third-party/07-tool/03-qstudio.md similarity index 100% rename from docs/zh/20-third-party/07-tool/03-qstudio.md rename to docs/zh/10-third-party/07-tool/03-qstudio.md diff --git a/docs/zh/20-third-party/07-tool/dbeaver/dbeaver-browse-cloud-data-zh.webp b/docs/zh/10-third-party/07-tool/dbeaver/dbeaver-browse-cloud-data-zh.webp similarity index 100% rename from docs/zh/20-third-party/07-tool/dbeaver/dbeaver-browse-cloud-data-zh.webp rename to docs/zh/10-third-party/07-tool/dbeaver/dbeaver-browse-cloud-data-zh.webp diff --git a/docs/zh/20-third-party/07-tool/dbeaver/dbeaver-browse-data-zh.webp 
b/docs/zh/10-third-party/07-tool/dbeaver/dbeaver-browse-data-zh.webp similarity index 100% rename from docs/zh/20-third-party/07-tool/dbeaver/dbeaver-browse-data-zh.webp rename to docs/zh/10-third-party/07-tool/dbeaver/dbeaver-browse-data-zh.webp diff --git a/docs/zh/20-third-party/07-tool/dbeaver/dbeaver-config-tdengine-zh.webp b/docs/zh/10-third-party/07-tool/dbeaver/dbeaver-config-tdengine-zh.webp similarity index 100% rename from docs/zh/20-third-party/07-tool/dbeaver/dbeaver-config-tdengine-zh.webp rename to docs/zh/10-third-party/07-tool/dbeaver/dbeaver-config-tdengine-zh.webp diff --git a/docs/zh/20-third-party/07-tool/dbeaver/dbeaver-connect-tdengine-cloud-test-zh.webp b/docs/zh/10-third-party/07-tool/dbeaver/dbeaver-connect-tdengine-cloud-test-zh.webp similarity index 100% rename from docs/zh/20-third-party/07-tool/dbeaver/dbeaver-connect-tdengine-cloud-test-zh.webp rename to docs/zh/10-third-party/07-tool/dbeaver/dbeaver-connect-tdengine-cloud-test-zh.webp diff --git a/docs/zh/20-third-party/07-tool/dbeaver/dbeaver-connect-tdengine-cloud-zh.webp b/docs/zh/10-third-party/07-tool/dbeaver/dbeaver-connect-tdengine-cloud-zh.webp similarity index 100% rename from docs/zh/20-third-party/07-tool/dbeaver/dbeaver-connect-tdengine-cloud-zh.webp rename to docs/zh/10-third-party/07-tool/dbeaver/dbeaver-connect-tdengine-cloud-zh.webp diff --git a/docs/zh/20-third-party/07-tool/dbeaver/dbeaver-connect-tdengine-test-zh.webp b/docs/zh/10-third-party/07-tool/dbeaver/dbeaver-connect-tdengine-test-zh.webp similarity index 100% rename from docs/zh/20-third-party/07-tool/dbeaver/dbeaver-connect-tdengine-test-zh.webp rename to docs/zh/10-third-party/07-tool/dbeaver/dbeaver-connect-tdengine-test-zh.webp diff --git a/docs/zh/20-third-party/07-tool/dbeaver/dbeaver-connect-tdengine-zh.webp b/docs/zh/10-third-party/07-tool/dbeaver/dbeaver-connect-tdengine-zh.webp similarity index 100% rename from docs/zh/20-third-party/07-tool/dbeaver/dbeaver-connect-tdengine-zh.webp rename to 
docs/zh/10-third-party/07-tool/dbeaver/dbeaver-connect-tdengine-zh.webp diff --git a/docs/zh/20-third-party/07-tool/dbeaver/dbeaver-sql-execution-cloud-zh.webp b/docs/zh/10-third-party/07-tool/dbeaver/dbeaver-sql-execution-cloud-zh.webp similarity index 100% rename from docs/zh/20-third-party/07-tool/dbeaver/dbeaver-sql-execution-cloud-zh.webp rename to docs/zh/10-third-party/07-tool/dbeaver/dbeaver-sql-execution-cloud-zh.webp diff --git a/docs/zh/20-third-party/07-tool/dbeaver/dbeaver-sql-execution-zh.webp b/docs/zh/10-third-party/07-tool/dbeaver/dbeaver-sql-execution-zh.webp similarity index 100% rename from docs/zh/20-third-party/07-tool/dbeaver/dbeaver-sql-execution-zh.webp rename to docs/zh/10-third-party/07-tool/dbeaver/dbeaver-sql-execution-zh.webp diff --git a/docs/zh/20-third-party/07-tool/dbeaver/tdengine-cloud-jdbc-dsn-zh.webp b/docs/zh/10-third-party/07-tool/dbeaver/tdengine-cloud-jdbc-dsn-zh.webp similarity index 100% rename from docs/zh/20-third-party/07-tool/dbeaver/tdengine-cloud-jdbc-dsn-zh.webp rename to docs/zh/10-third-party/07-tool/dbeaver/tdengine-cloud-jdbc-dsn-zh.webp diff --git a/docs/zh/20-third-party/07-tool/index.md b/docs/zh/10-third-party/07-tool/index.md similarity index 72% rename from docs/zh/20-third-party/07-tool/index.md rename to docs/zh/10-third-party/07-tool/index.md index 7142e7d425..d7959d48ab 100644 --- a/docs/zh/20-third-party/07-tool/index.md +++ b/docs/zh/10-third-party/07-tool/index.md @@ -1,6 +1,6 @@ --- -sidebar_label: 管理开发 -title: 与各种管理开发工具的集成 +sidebar_label: 数据库管理 +title: 与各种数据库管理开发工具的集成 toc_max_heading_level: 4 --- diff --git a/docs/zh/20-third-party/07-tool/qstudio/qstudio-browse-data.webp b/docs/zh/10-third-party/07-tool/qstudio/qstudio-browse-data.webp similarity index 100% rename from docs/zh/20-third-party/07-tool/qstudio/qstudio-browse-data.webp rename to docs/zh/10-third-party/07-tool/qstudio/qstudio-browse-data.webp diff --git a/docs/zh/20-third-party/07-tool/qstudio/qstudio-chart.webp 
b/docs/zh/10-third-party/07-tool/qstudio/qstudio-chart.webp similarity index 100% rename from docs/zh/20-third-party/07-tool/qstudio/qstudio-chart.webp rename to docs/zh/10-third-party/07-tool/qstudio/qstudio-chart.webp diff --git a/docs/zh/20-third-party/07-tool/qstudio/qstudio-connect-tdengine-test.webp b/docs/zh/10-third-party/07-tool/qstudio/qstudio-connect-tdengine-test.webp similarity index 100% rename from docs/zh/20-third-party/07-tool/qstudio/qstudio-connect-tdengine-test.webp rename to docs/zh/10-third-party/07-tool/qstudio/qstudio-connect-tdengine-test.webp diff --git a/docs/zh/20-third-party/07-tool/qstudio/qstudio-connect-tdengine.webp b/docs/zh/10-third-party/07-tool/qstudio/qstudio-connect-tdengine.webp similarity index 100% rename from docs/zh/20-third-party/07-tool/qstudio/qstudio-connect-tdengine.webp rename to docs/zh/10-third-party/07-tool/qstudio/qstudio-connect-tdengine.webp diff --git a/docs/zh/20-third-party/07-tool/qstudio/qstudio-jdbc-connector-download.webp b/docs/zh/10-third-party/07-tool/qstudio/qstudio-jdbc-connector-download.webp similarity index 100% rename from docs/zh/20-third-party/07-tool/qstudio/qstudio-jdbc-connector-download.webp rename to docs/zh/10-third-party/07-tool/qstudio/qstudio-jdbc-connector-download.webp diff --git a/docs/zh/20-third-party/07-tool/qstudio/qstudio-sql-execution.webp b/docs/zh/10-third-party/07-tool/qstudio/qstudio-sql-execution.webp similarity index 100% rename from docs/zh/20-third-party/07-tool/qstudio/qstudio-sql-execution.webp rename to docs/zh/10-third-party/07-tool/qstudio/qstudio-sql-execution.webp diff --git a/docs/zh/20-third-party/_75-powerbi.md b/docs/zh/10-third-party/_75-powerbi.md similarity index 100% rename from docs/zh/20-third-party/_75-powerbi.md rename to docs/zh/10-third-party/_75-powerbi.md diff --git a/docs/zh/20-third-party/_76-yonghongbi.md b/docs/zh/10-third-party/_76-yonghongbi.md similarity index 100% rename from docs/zh/20-third-party/_76-yonghongbi.md rename to 
docs/zh/10-third-party/_76-yonghongbi.md diff --git a/docs/zh/20-third-party/_deploytaosadapter.mdx b/docs/zh/10-third-party/_deploytaosadapter.mdx similarity index 100% rename from docs/zh/20-third-party/_deploytaosadapter.mdx rename to docs/zh/10-third-party/_deploytaosadapter.mdx diff --git a/docs/zh/20-third-party/index.md b/docs/zh/10-third-party/index.md similarity index 100% rename from docs/zh/20-third-party/index.md rename to docs/zh/10-third-party/index.md diff --git a/docs/zh/20-third-party/powerbi-step-zh.png b/docs/zh/10-third-party/powerbi-step-zh.png similarity index 100% rename from docs/zh/20-third-party/powerbi-step-zh.png rename to docs/zh/10-third-party/powerbi-step-zh.png diff --git a/docs/zh/20-third-party/yonghongbi-step-zh.png b/docs/zh/10-third-party/yonghongbi-step-zh.png similarity index 100% rename from docs/zh/20-third-party/yonghongbi-step-zh.png rename to docs/zh/10-third-party/yonghongbi-step-zh.png diff --git a/docs/zh/14-reference/01-components/01-taosd.md b/docs/zh/14-reference/01-components/01-taosd.md index ff6b27092d..fbf086bf6b 100644 --- a/docs/zh/14-reference/01-components/01-taosd.md +++ b/docs/zh/14-reference/01-components/01-taosd.md @@ -33,7 +33,6 @@ taosd 命令行参数如下 | secondEp | taosd 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,缺省值:无 | | fqdn | 启动 taosd 后所监听的服务地址,缺省值:所在服务器上配置的第一个 hostname | | serverPort | 启动 taosd 后所监听的端口,缺省值:6030 | -| maxShellConns | 一个 dnode 容许的连接数,取值范围为 10-5000000,缺省值:5000 | | numOfRpcSessions | 允许一个客户端能创建的最大连接数,取值范围 100-100000,缺省值:30000 | | timeToGetAvailableConn | 获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,缺省值:500000 | @@ -433,3 +432,30 @@ taosd 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeep | duration | VARCHAR | TAG | sql执行耗时,取值范围:3-10s,10-100s,100-1000s,1000s- | | cluster\_id | VARCHAR | TAG | cluster id | +## 日志相关 + +TDengine 通过日志文件记录系统运行状态,帮助用户监控系统运行情况,排查问题,这里主要介绍 taosc 和 taosd 两个系统日志的相关说明。 + +TDengine 的日志文件主要包括普通日志和慢日志两种类型。 + +1. 普通日志行为说明 + 1. 
同一台机器上可以起多个客户端进程,所以客户端日志命名方式为 taoslogX.Y,其中 X 为序号,为空或者 0 到 9,Y 为后缀 0 或者 1。 + 2. 同一台机器上只能有一个服务端进程。所以服务端日志命名方式为 taosdlog.Y,其中 Y 为后缀, 0 或者 1。 + + 序号和后缀确定规则如下(假设日志路径为 /var/log/taos/): + 1. 确定序号:使用 10 个序号作为日志命名方式,/var/log/taos/taoslog0.Y - /var/log/taos/taoslog9.Y,依次检测每个序号是否使用,找到第一个没使用的序号作为该进程的日志文件使用的序号。 如果 10 个序号都被进程使用,不使用序号,即 /var/log/taos/taoslog.Y,进程都往相同的文件里写(序号为空)。 + 2. 确定后缀:0 或者 1。比如确定序号为 3,备选的日志文件名就为 /var/log/taos/taoslog3.0 /var/log/taos/taoslog3.1。如果两个文件都不存在用后缀 0,一个存在一个不存在,用存在的后缀。两个都存在,用修改时间最近的那个后缀。 + 3. 如果日志文件超过配置的条数 numOfLogLines,会切换后缀名,继续写日志,比如/var/log/taos/taoslog3.0 写够了,切换到 /var/log/taos/taoslog3.1 继续写日志。/var/log/taos/taoslog3.0 会添加时间戳后缀重命名并压缩存储(异步线程操作)。 + 4. 通过配置 logKeepDays 控制日志文件保存几天,几天之外的日志会被删除。比如配置为 1,则一天之前的日志会在新日志压缩存储时检测删除。不是自然天。 + +系统除了记录普通日志以外,对于执行时间超过配置时间的 SQL 语句,会被记录到慢日志中。慢日志文件主要用于分析系统性能,排查性能问题。 + +2. 慢日志行为说明 + 1. 慢日志一方面会记录到本地慢日志文件中,另一方面会通过 taosAdapter 发送到 taosKeeper 进行结构化存储(需打开 monitor 开关)。 + 2. 慢日志文件存储规则为: + 1. 慢日志文件一天一个,如果当天没有慢日志,没有当天的文件。 + 2. 文件名为 taosSlowLog.yyyy-mm-dd(taosSlowLog.2024-08-02),日志存储路径通过 logDir 配置。 + 3. 多个客户端的日志存储在相应日志路径下的同一个 taosSlowLog.yyyy-mm-dd 文件里。 + 4. 慢日志文件不自动删除,不压缩。 + 5. 
使用和普通日志文件相同的三个参数 logDir, minimalLogDirGB, asyncLog。另外两个参数 numOfLogLines,logKeepDays 不适用于慢日志。 + \ No newline at end of file diff --git a/docs/zh/14-reference/01-components/08-taos-cli.md b/docs/zh/14-reference/01-components/08-taos-cli.md deleted file mode 100644 index c388e7edda..0000000000 --- a/docs/zh/14-reference/01-components/08-taos-cli.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: TDengine CLI 参考手册 -sidebar_label: taos -toc_max_heading_level: 4 ---- - -TDengine 命令行程序(以下简称 TDengine CLI)是用户操作 TDengine 实例并与之交互的最简洁最常用的方式。 使用前需要安装 TDengine Server 安装包或 TDengine Client 安装包。 - -## 启动 - -要进入 TDengine CLI,您只要在终端执行 `taos` 即可。 - -```bash -taos -``` - -如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息。 - -TDengine CLI 的提示符号如下: - -```shell -taos> -``` - -进入 TDengine CLI 后,你可执行各种 SQL 语句,包括插入、查询以及各种管理命令。 - -## 执行 SQL 脚本 - -在 TDengine CLI 里可以通过 `source` 命令来运行脚本文件中的多条 SQL 命令。 - -```sql -taos> source ; -``` - -## 在线修改显示字符宽度 - -可以在 TDengine CLI 里使用如下命令调整字符显示宽度 - -```sql -taos> SET MAX_BINARY_DISPLAY_WIDTH ; -``` - -如显示的内容后面以 ... 
结尾时,表示该内容已被截断,可通过本命令修改显示字符宽度以显示完整的内容。 - -## 命令行参数 - -您可通过配置命令行参数来改变 TDengine CLI 的行为。以下为常用的几个命令行参数: - -- -h HOST: 要连接的 TDengine 服务端所在服务器的 FQDN, 默认为连接本地服务 -- -P PORT: 指定服务端所用端口号 -- -u USER: 连接时使用的用户名 -- -p PASSWORD: 连接服务端时使用的密码 -- -?, --help: 打印出所有命令行参数 - -还有更多其他参数: - -- -a AUTHSTR: 连接服务端的授权信息 -- -A: 通过用户名和密码计算授权信息 -- -B: 设置 BI 工具显示模式,设置后所有输出都遵循 BI 工具的格式进行输出 -- -c CONFIGDIR: 指定配置文件目录,Linux 环境下默认为 `/etc/taos`,该目录下的配置文件默认名称为 `taos.cfg` -- -C: 打印 -c 指定的目录中 `taos.cfg` 的配置参数 -- -d DATABASE: 指定连接到服务端时使用的数据库 -- -E dsn: 使用 WebSocket DSN 连接云服务或者提供 WebSocket 连接的服务端 -- -f FILE: 以非交互模式执行 SQL 脚本文件。文件中一个 SQL 语句只能占一行 -- -k: 测试服务端运行状态,0: unavailable,1: network ok,2: service ok,3: service degraded,4: exiting -- -l PKTLEN: 网络测试时使用的测试包大小 -- -n NETROLE: 网络连接测试时的测试范围,默认为 `client`, 可选值为 `client`、`server` -- -N PKTNUM: 网络测试时使用的测试包数量 -- -r: 将时间输出出无符号 64 位整数类型(即 C 语音中 uint64_t) -- -R: 使用 RESTful 模式连接服务端 -- -s COMMAND: 以非交互模式执行的 SQL 命令 -- -t: 测试服务端启动状态,状态同-k -- -w DISPLAYWIDTH: 客户端列显示宽度 -- -z TIMEZONE: 指定时区,默认为本地时区 -- -V: 打印出当前版本号 - -示例: - -```bash -taos -h h1.taos.com -s "use db; show tables;" -``` - -## 配置文件 - -也可以通过配置文件中的参数设置来控制 TDengine CLI 的行为。可用配置参数请参考[客户端配置](../../components/taosc) - -## TDengine CLI 小技巧 - -- 可以使用上下光标键查看历史输入的指令 -- 在 TDengine CLI 中使用 `alter user` 命令可以修改用户密码,缺省密码为 `taosdata` -- Ctrl+C 中止正在进行中的查询 -- 执行 `RESET QUERY CACHE` 可清除本地表 Schema 的缓存 -- 批量执行 SQL 语句。可以将一系列的 TDengine CLI 命令(以英文 ; 结尾,每个 SQL 语句为一行)按行存放在文件里,在 TDengine CLI 里执行命令 `source ` 自动执行该文件里所有的 SQL 语句 -- 输入 `q` 或 `quit` 或 `exit` 回车,可以退出 TDengine CLI - -## TDengine CLI 导出查询结果到文件中 - -- 可以使用符号 “>>” 导出查询结果到某个文件中,语法为: sql 查询语句 >> ‘输出文件名’; 输出文件如果不写路径的话,将输出至当前目录下。如 select * from d0 >> ‘/root/d0.csv’; 将把查询结果输出到 /root/d0.csv 中。 - -## TDengine CLI 导入文件中的数据到表中 - -- 可以使用 insert into table_name file '输入文件名',把上一步中导出的数据文件再导入到指定表中。如 insert into d0 file '/root/d0.csv'; 表示把上面导出的数据全部再导致至 d0 表中。 diff --git a/docs/zh/14-reference/01-components/09-taosdump.md b/docs/zh/14-reference/01-components/09-taosdump.md deleted file mode 100644 
index 7afe8721ee..0000000000 --- a/docs/zh/14-reference/01-components/09-taosdump.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: taosdump 参考手册 -sidebar_label: taosdump -toc_max_heading_level: 4 ---- - -taosdump 是一个支持从运行中的 TDengine 集群备份数据并将备份的数据恢复到相同或另一个运行中的 TDengine 集群中的工具应用程序。 - -taosdump 可以用数据库、超级表或普通表作为逻辑数据单元进行备份,也可以对数据库、超级 -表和普通表中指定时间段内的数据记录进行备份。使用时可以指定数据备份的目录路径,如果 -不指定位置,taosdump 默认会将数据备份到当前目录。 - -如果指定的位置已经有数据文件,taosdump 会提示用户并立即退出,避免数据被覆盖。这意味着同一路径只能被用于一次备份。 -如果看到相关提示,请小心操作。 - -taosdump 是一个逻辑备份工具,它不应被用于备份任何原始数据、环境设置、 -硬件信息、服务端配置或集群的拓扑结构。taosdump 使用 -[ Apache AVRO ](https://avro.apache.org/)作为数据文件格式来存储备份数据。 - -## 安装 - -taosdump 有两种安装方式: - -- 安装 taosTools 官方安装包, 请从[发布历史页面](https://docs.taosdata.com/releases/tools/)页面找到 taosTools 并下载安装。 - -- 单独编译 taos-tools 并安装, 详情请参考 [taos-tools](https://github.com/taosdata/taos-tools) 仓库。 - -## 常用使用场景 - -### taosdump 备份数据 - -1. 备份所有数据库:指定 `-A` 或 `--all-databases` 参数; -2. 备份多个指定数据库:使用 `-D db1,db2,...` 参数; -3. 备份指定数据库中的某些超级表或普通表:使用 `dbname stbname1 stbname2 tbname1 tbname2 ...` 参数,注意这种输入序列第一个参数为数据库名称,且只支持一个数据库,第二个和之后的参数为该数据库中的超级表或普通表名称,中间以空格分隔; -4. 备份系统 log 库:TDengine 集群通常会包含一个系统数据库,名为 `log`,这个数据库内的数据为 TDengine 自我运行的数据,taosdump 默认不会对 log 库进行备份。如果有特定需求对 log 库进行备份,可以使用 `-a` 或 `--allow-sys` 命令行参数。 -5. “宽容”模式备份:taosdump 1.4.1 之后的版本提供 `-n` 参数和 `-L` 参数,用于备份数据时不使用转义字符和“宽容”模式,可以在表名、列名、标签名没使用转义字符的情况下减少备份数据时间和备份数据占用空间。如果不确定符合使用 `-n` 和 `-L` 条件时请使用默认参数进行“严格”模式进行备份。转义字符的说明请参考[官方文档](../../taos-sql/escape)。 - -:::tip -- taosdump 1.4.1 之后的版本提供 `-I` 参数,用于解析 avro 文件 schema 和数据,如果指定 `-s` 参数将只解析 schema。 -- taosdump 1.4.2 之后的备份使用 `-B` 参数指定的批次数,默认值为 16384,如果在某些环境下由于网络速度或磁盘性能不足导致 "Error actual dump .. batch .." 
可以通过 `-B` 参数调整为更小的值进行尝试。 -- taosdump 的导出不支持中断恢复,所以当进程意外终止后,正确的处理方式是删除当前已导出或生成的所有相关文件。 -- taosdump 的导入支持中断恢复,但是当进程重新启动时,会收到一些“表已经存在”的提示,可以忽视。 - -::: - -### taosdump 恢复数据 - -恢复指定路径下的数据文件:使用 `-i` 参数加上数据文件所在路径。如前面提及,不应该使用同一个目录备份不同数据集合,也不应该在同一路径多次备份同一数据集,否则备份数据会造成覆盖或多次备份。 - -:::tip -taosdump 内部使用 TDengine stmt binding API 进行恢复数据的写入,为提高数据恢复性能,目前使用 16384 为一次写入批次。如果备份数据中有比较多列数据,可能会导致产生 "WAL size exceeds limit" 错误,此时可以通过使用 `-B` 参数调整为一个更小的值进行尝试。 - -::: - -## 详细命令行参数列表 - -以下为 taosdump 详细命令行参数列表: - -``` -Usage: taosdump [OPTION...] dbname [tbname ...] - or: taosdump [OPTION...] --databases db1,db2,... - or: taosdump [OPTION...] --all-databases - or: taosdump [OPTION...] -i inpath - or: taosdump [OPTION...] -o outpath - - -h, --host=HOST Server host dumping data from. Default is - localhost. - -p, --password User password to connect to server. Default is - taosdata. - -P, --port=PORT Port to connect - -u, --user=USER User name used to connect to server. Default is - root. - -c, --config-dir=CONFIG_DIR Configure directory. Default is /etc/taos - -i, --inpath=INPATH Input file path. - -o, --outpath=OUTPATH Output file path. - -r, --resultFile=RESULTFILE DumpOut/In Result file path and name. - -a, --allow-sys Allow to dump system database - -A, --all-databases Dump all databases. - -D, --databases=DATABASES Dump inputted databases. Use comma to separate - databases' name. - -e, --escape-character Use escaped character for database name - -N, --without-property Dump database without its properties. - -s, --schemaonly Only dump tables' schema. - -d, --avro-codec=snappy Choose an avro codec among null, deflate, snappy, - and lzma. - -S, --start-time=START_TIME Start time to dump. Either epoch or - ISO8601/RFC3339 format is acceptable. ISO8601 - format example: 2017-10-01T00:00:00.000+0800 or - 2017-10-0100:00:00:000+0800 or '2017-10-01 - 00:00:00.000+0800' - -E, --end-time=END_TIME End time to dump. Either epoch or ISO8601/RFC3339 - format is acceptable. 
ISO8601 format example: - 2017-10-01T00:00:00.000+0800 or - 2017-10-0100:00:00.000+0800 or '2017-10-01 - 00:00:00.000+0800' - -B, --data-batch=DATA_BATCH Number of data per query/insert statement when - backup/restore. Default value is 16384. If you see - 'error actual dump .. batch ..' when backup or if - you see 'WAL size exceeds limit' error when - restore, please adjust the value to a smaller one - and try. The workable value is related to the - length of the row and type of table schema. - -I, --inspect inspect avro file content and print on screen - -L, --loose-mode Using loose mode if the table name and column name - use letter and number only. Default is NOT. - -n, --no-escape No escape char '`'. Default is using it. - -Q, --dot-replace Repalce dot character with underline character in - the table name.(Version 2.5.3) - -T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is - 8. - -C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service - -R, --restful Use RESTful interface to connect TDengine - -t, --timeout=SECONDS The timeout seconds for websocket to interact. - -g, --debug Print debug info. - -?, --help Give this help list - --usage Give a short usage message - -V, --version Print program version - -W, --rename=RENAME-LIST Rename database name with new name during - importing data. RENAME-LIST: - "db1=newDB1|db2=newDB2" means rename db1 to newDB1 - and rename db2 to newDB2 (Version 2.5.4) - -Mandatory or optional arguments to long options are also mandatory or optional -for any corresponding short options. - -Report bugs to . 
-``` diff --git a/docs/zh/14-reference/01-components/10-taosbenchmark.md b/docs/zh/14-reference/01-components/10-taosbenchmark.md deleted file mode 100644 index 3f15d6b8e3..0000000000 --- a/docs/zh/14-reference/01-components/10-taosbenchmark.md +++ /dev/null @@ -1,485 +0,0 @@ ---- -title: taosBenchmark 参考手册 -sidebar_label: taosBenchmark -toc_max_heading_level: 4 ---- - -taosBenchmark (曾用名 taosdemo ) 是一个用于测试 TDengine 产品性能的工具。taosBenchmark 可以测试 TDengine 的插入、查询和订阅等功能的性能,它可以模拟由大量设备产生的大量数据,还可以灵活地控制数据库、超级表、标签列的数量和类型、数据列的数量和类型、子表的数量、每张子表的数据量、插入数据的时间间隔、taosBenchmark 的工作线程数量、是否以及如何插入乱序数据等。为了兼容过往用户的使用习惯,安装包提供 了 taosdemo 作为 taosBenchmark 的软链接。 - -## 安装 - -taosBenchmark 有两种安装方式: - -- 安装 TDengine 官方安装包的同时会自动安装 taosBenchmark, 详情请参考[ TDengine 安装](../../../get-started/)。 - -- 单独编译 taos-tools 并安装, 详情请参考 [taos-tools](https://github.com/taosdata/taos-tools) 仓库。 - -## 运行 - -### 配置和运行方式 - -taosBenchmark 需要在操作系统的终端执行,该工具支持两种配置方式:[命令行参数](#命令行参数详解) 和 [JSON 配置文件](#配置文件参数详解)。这两种方式是互斥的,在使用配置文件时只能使用一个命令行参数 `-f ` 指定配置文件。在使用命令行参数运行 taosBenchmark 并控制其行为时则不能使用 `-f` 参数而要用其它参数来进行配置。除此之外,taosBenchmark 还提供了一种特殊的运行方式,即无参数运行。 - -taosBenchmark 支持对 TDengine 做完备的性能测试,其所支持的 TDengine 功能分为三大类:写入、查询和订阅。这三种功能之间是互斥的,每次运行 taosBenchmark 只能选择其中之一。值得注意的是,所要测试的功能类型在使用命令行配置方式时是不可配置的,命令行配置方式只能测试写入性能。若要测试 TDengine 的查询和订阅性能,必须使用配置文件的方式,通过配置文件中的参数 `filetype` 指定所要测试的功能类型。 - -**在运行 taosBenchmark 之前要确保 TDengine 集群已经在正确运行。** - -### 无命令行参数运行 - -执行下列命令即可快速体验 taosBenchmark 对 TDengine 进行基于默认配置的写入性能测试。 - -```bash -taosBenchmark -``` - -在无参数运行时,taosBenchmark 默认连接 `/etc/taos` 下指定的 TDengine 集群,并在 TDengine 中创建一个名为 test 的数据库,test 数据库下创建名为 meters 的一张超级表,超级表下创建 10000 张表,每张表中写入 10000 条记录。注意,如果已有 test 数据库,这个命令会先删除该数据库后建立一个全新的 test 数据库。 - -### 使用命令行配置参数运行 - -在使用命令行参数运行 taosBenchmark 并控制其行为时,`-f ` 参数不能使用。所有配置参数都必须通过命令行指定。以下是使用命令行方式测试 taosBenchmark 写入性能的一个示例。 - -```bash -taosBenchmark -I stmt -n 200 -t 100 -``` - -上面的命令 `taosBenchmark` 将创建一个名为`test`的数据库,在其中建立一张超级表`meters`,在该超级表中建立 100 张子表并使用参数绑定的方式为每张子表插入 200 条记录。 - -### 使用配置文件运行 - 
-taosBenchmark 安装包中提供了配置文件的示例,位于 `/examples/taosbenchmark-json` 下 - -使用如下命令行即可运行 taosBenchmark 并通过配置文件控制其行为。 - -```bash -taosBenchmark -f -``` - -**下面是几个配置文件的示例:** - -#### 插入场景 JSON 配置文件示例 - -
-insert.json - -```json -{{#include /taos-tools/example/insert.json}} -``` - -
- -#### 查询场景 JSON 配置文件示例 - -
-query.json - -```json -{{#include /taos-tools/example/query.json}} -``` - -
- -#### 订阅场景 JSON 配置文件示例 - -
-tmq.json - -```json -{{#include /taos-tools/example/tmq.json}} -``` - -
- -## 命令行参数详解 - -- **-f/--file \** : - 要使用的 JSON 配置文件,由该文件指定所有参数,本参数与命令行其他参数不能同时使用。没有默认值。 - -- **-c/--config-dir \** : - TDengine 集群配置文件所在的目录,默认路径是 /etc/taos 。 - -- **-h/--host \** : - 指定要连接的 TDengine 服务端的 FQDN,默认值为 localhost 。 - -- **-P/--port \** : - 要连接的 TDengine 服务器的端口号,默认值为 6030 。 - -- **-I/--interface \** : - 插入模式,可选项有 taosc, rest, stmt, sml, sml-rest, 分别对应普通写入、restful 接口写入、参数绑定接口写入、schemaless 接口写入、restful schemaless 接口写入 (由 taosAdapter 提供)。默认值为 taosc。 - -- **-u/--user \** : - 用于连接 TDengine 服务端的用户名,默认为 root 。 - -- **-U/--supplement-insert ** : - 写入数据而不提前建数据库和表,默认关闭。 - -- **-p/--password \** : - 用于连接 TDengine 服务端的密码,默认值为 taosdata。 - -- **-o/--output \** : - 结果输出文件的路径,默认值为 ./output.txt。 - -- **-T/--thread \** : - 插入数据的线程数量,默认为 8 。 - -- **-B/--interlace-rows \** : - 启用交错插入模式并同时指定向每个子表每次插入的数据行数。交错插入模式是指依次向每张子表插入由本参数所指定的行数并重复这个过程,直到所有子表的数据都插入完成。默认值为 0, 即向一张子表完成数据插入后才会向下一张子表进行数据插入。 - -- **-i/--insert-interval \** : - 指定交错插入模式的插入间隔,单位为 ms,默认值为 0。 只有当 `-B/--interlace-rows` 大于 0 时才起作用。意味着数据插入线程在为每个子表插入隔行扫描记录后,会等待该值指定的时间间隔后再进行下一轮写入。 - -- **-r/--rec-per-req \** : - 每次向 TDengine 请求写入的数据行数,默认值为 30000 。 - -- **-t/--tables \** : - 指定子表的数量,默认为 10000 。 - -- **-S/--timestampstep \** : - 每个子表中插入数据的时间戳步长,单位是 ms,默认值是 1。 - -- **-n/--records \** : - 每个子表插入的记录数,默认值为 10000 。 - -- **-d/--database \** : - 所使用的数据库的名称,默认值为 test 。 - -- **-b/--data-type \** : - 超级表的数据列的类型。如果不使用则默认为有三个数据列,其类型分别为 FLOAT, INT, FLOAT 。 - -- **-l/--columns \** : - 超级表的数据列的总数量。如果同时设置了该参数和 `-b/--data-type`,则最后的结果列数为两者取大。如果本参数指定的数量大于 `-b/--data-type` 指定的列数,则未指定的列类型默认为 INT, 例如: `-l 5 -b float,double`, 那么最后的列为 `FLOAT,DOUBLE,INT,INT,INT`。如果 columns 指定的数量小于或等于 `-b/--data-type` 指定的列数,则结果为 `-b/--data-type` 指定的列和类型,例如: `-l 3 -b float,double,float,bigint`,那么最后的列为 `FLOAT,DOUBLE,FLOAT,BIGINT` 。 - -- **-L/--partial-col-num \ **: - 指定某些列写入数据,其他列数据为 NULL。默认所有列都写入数据。 - -- **-A/--tag-type \** : - 超级表的标签列类型。nchar 和 binary 类型可以同时设置长度,例如: - -``` -taosBenchmark -A INT,DOUBLE,NCHAR,BINARY(16) -``` - -如果没有设置标签类型,默认是两个标签,其类型分别为 INT 和 
BINARY(16)。 -注意:在有的 shell 比如 bash 命令里面 “()” 需要转义,则上述指令应为: - -``` -taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) -``` - -- **-w/--binwidth \**: - nchar 和 binary 类型的默认长度,默认值为 64。 - -- **-m/--table-prefix \** : - 子表名称的前缀,默认值为 "d"。 - -- **-E/--escape-character** : - 开关参数,指定在超级表和子表名称中是否使用转义字符。默认值为不使用。 - -- **-C/--chinese** : - 开关参数,指定 nchar 和 binary 是否使用 Unicode 中文字符。默认值为不使用。 - -- **-N/--normal-table** : - 开关参数,指定只创建普通表,不创建超级表。默认值为 false。仅当插入模式为 taosc, stmt, rest 模式下可以使用。 - -- **-M/--random** : - 开关参数,插入数据为生成的随机值。默认值为 false。若配置此参数,则随机生成要插入的数据。对于数值类型的 标签列/数据列,其值为该类型取值范围内的随机值。对于 NCHAR 和 BINARY 类型的 标签列/数据列,其值为指定长度范围内的随机字符串。 - -- **-x/--aggr-func** : - 开关参数,指示插入后查询聚合函数。默认值为 false。 - -- **-y/--answer-yes** : - 开关参数,要求用户在提示后确认才能继续。默认值为 false 。 - -- **-O/--disorder \** : - 指定乱序数据的百分比概率,其值域为 [0,50]。默认为 0,即没有乱序数据。 - -- **-R/--disorder-range \** : - 指定乱序数据的时间戳回退范围。所生成的乱序时间戳为非乱序情况下应该使用的时间戳减去这个范围内的一个随机值。仅在 `-O/--disorder` 指定的乱序数据百分比大于 0 时有效。 - -- **-F/--prepare_rand \** : - 生成的随机数据中唯一值的数量。若为 1 则表示所有数据都相同。默认值为 10000 。 - -- **-a/--replica \** : - 创建数据库时指定其副本数,默认值为 1 。 - -- ** -k/--keep-trying \** : 失败后进行重试的次数,默认不重试。需使用 v3.0.9 以上版本。 - -- ** -z/--trying-interval \** : 失败重试间隔时间,单位为毫秒,仅在 -k 指定重试后有效。需使用 v3.0.9 以上版本。 - -- **-v/--vgroups \** : - 创建数据库时指定 vgroups 数,仅对 TDengine v3.0+ 有效。 - -- **-V/--version** : - 显示版本信息并退出。不能与其它参数混用。 - -- **-?/--help** : - 显示帮助信息并退出。不能与其它参数混用。 - -## 配置文件参数详解 - -### 通用配置参数 - -本节所列参数适用于所有功能模式。 - -- **filetype** : 要测试的功能,可选值为 `insert`, `query` 和 `subscribe`。分别对应插入、查询和订阅功能。每个配置文件中只能指定其中之一。 -- **cfgdir** : TDengine 客户端配置文件所在的目录,默认路径是 /etc/taos 。 - -- **host** : 指定要连接的 TDengine 服务端的 FQDN,默认值为 localhost。 - -- **port** : 要连接的 TDengine 服务器的端口号,默认值为 6030。 - -- **user** : 用于连接 TDengine 服务端的用户名,默认为 root。 - -- **password** : 用于连接 TDengine 服务端的密码,默认值为 taosdata。 - -### 插入场景配置参数 - -插入场景下 `filetype` 必须设置为 `insert`,该参数及其它通用参数详见[通用配置参数](#通用配置参数) - -- ** keep_trying ** : 失败后进行重试的次数,默认不重试。需使用 v3.0.9 以上版本。 - -- ** trying_interval ** : 失败重试间隔时间,单位为毫秒,仅在 keep_trying 指定重试后有效。需使用 
v3.0.9 以上版本。 -- ** childtable_from 和 childtable_to ** : 指定写入子表范围,开闭区间为 [childtable_from, childtable_to). -  -- ** continue_if_fail ** : 允许用户定义失败后行为 - - “continue_if_fail”:  “no”, 失败 taosBenchmark 自动退出,默认行为 - “continue_if_fail”: “yes”, 失败 taosBenchmark 警告用户,并继续写入 - “continue_if_fail”: “smart”, 如果子表不存在失败,taosBenchmark 会建立子表并继续写入 - -#### 数据库相关配置参数 - -创建数据库时的相关参数在 json 配置文件中的 `dbinfo` 中配置,个别具体参数如下。其余参数均与 TDengine 中 `create database` 时所指定的数据库参数相对应,详见[../../taos-sql/database] - -- **name** : 数据库名。 - -- **drop** : 插入前是否删除数据库,可选项为 "yes" 或者 "no", 为 "no" 时不创建。默认删除。 - -#### 流式计算相关配置参数 - -创建流式计算的相关参数在 json 配置文件中的 `stream` 中配置,具体参数如下。 - -- **stream_name** : 流式计算的名称,必填项。 - -- **stream_stb** : 流式计算对应的超级表名称,必填项。 - -- **stream_sql** : 流式计算的sql语句,必填项。 - -- **trigger_mode** : 流式计算的触发模式,可选项。 - -- **watermark** : 流式计算的水印,可选项。 - -- **drop** : 是否创建流式计算,可选项为 "yes" 或者 "no", 为 "no" 时不创建。 - -#### 超级表相关配置参数 - -创建超级表时的相关参数在 json 配置文件中的 `super_tables` 中配置,具体参数如下。 - -- **name**: 超级表名,必须配置,没有默认值。 - -- **child_table_exists** : 子表是否已经存在,默认值为 "no",可选值为 "yes" 或 "no"。 - -- **child_table_count** : 子表的数量,默认值为 10。 - -- **child_table_prefix** : 子表名称的前缀,必选配置项,没有默认值。 - -- **escape_character** : 超级表和子表名称中是否包含转义字符,默认值为 "no",可选值为 "yes" 或 "no"。 - -- **auto_create_table** : 仅当 insert_mode 为 taosc, rest, stmt 并且 child_table_exists 为 "no" 时生效,该参数为 "yes" 表示 taosBenchmark 在插入数据时会自动创建不存在的表;为 "no" 则表示先提前建好所有表再进行插入。 - -- **batch_create_tbl_num** : 创建子表时每批次的建表数量,默认为 10。注:实际的批数不一定与该值相同,当执行的 SQL 语句大于支持的最大长度时,会自动截断再执行,继续创建。 - -- **data_source** : 数据的来源,默认为 taosBenchmark 随机产生,可以配置为 "rand" 和 "sample"。为 "sample" 时使用 sample_file 参数指定的文件内的数据。 - -- **insert_mode** : 插入模式,可选项有 taosc, rest, stmt, sml, sml-rest, 分别对应普通写入、restful 接口写入、参数绑定接口写入、schemaless 接口写入、restful schemaless 接口写入 (由 taosAdapter 提供)。默认值为 taosc 。 - -- **non_stop_mode** : 指定是否持续写入,若为 "yes" 则 insert_rows 失效,直到 Ctrl + C 停止程序,写入才会停止。默认值为 "no",即写入指定数量的记录后停止。注:即使在持续写入模式下 insert_rows 失效,但其也必须被配置为一个非零正整数。 - -- **line_protocol** : 使用行协议插入数据,仅当 insert_mode 为 sml 或 sml-rest 
时生效,可选项为 line, telnet, json。 - -- **tcp_transfer** : telnet 模式下的通信协议,仅当 insert_mode 为 sml-rest 并且 line_protocol 为 telnet 时生效。如果不配置,则默认为 http 协议。 - -- **insert_rows** : 每个子表插入的记录数,默认为 0 。 - -- **childtable_offset** : 仅当 child_table_exists 为 yes 时生效,指定从超级表获取子表列表时的偏移量,即从第几个子表开始。 - -- **childtable_limit** : 仅当 child_table_exists 为 yes 时生效,指定从超级表获取子表列表的上限。 - -- **interlace_rows** : 启用交错插入模式并同时指定向每个子表每次插入的数据行数。交错插入模式是指依次向每张子表插入由本参数所指定的行数并重复这个过程,直到所有子表的数据都插入完成。默认值为 0, 即向一张子表完成数据插入后才会向下一张子表进行数据插入。 - -- **insert_interval** : 指定交错插入模式的插入间隔,单位为 ms,默认值为 0。 只有当 `-B/--interlace-rows` 大于 0 时才起作用。意味着数据插入线程在为每个子表插入隔行扫描记录后,会等待该值指定的时间间隔后再进行下一轮写入。 - -- **partial_col_num** : 若该值为正数 n 时, 则仅向前 n 列写入,仅当 insert_mode 为 taosc 和 rest 时生效,如果 n 为 0 则是向全部列写入。 - -- **disorder_ratio** : 指定乱序数据的百分比概率,其值域为 [0,50]。默认为 0,即没有乱序数据。 - -- **disorder_range** : 指定乱序数据的时间戳回退范围。所生成的乱序时间戳为非乱序情况下应该使用的时间戳减去这个范围内的一个随机值。仅在 `-O/--disorder` 指定的乱序数据百分比大于 0 时有效。 - -- **timestamp_step** : 每个子表中插入数据的时间戳步长,单位与数据库的 `precision` 一致,默认值是 1。 - -- **start_timestamp** : 每个子表的时间戳起始值,默认值是 now。 - -- **sample_format** : 样本数据文件的类型,现在只支持 "csv" 。 - -- **sample_file** : 指定 csv 格式的文件作为数据源,仅当 data_source 为 sample 时生效。若 csv 文件内的数据行数小于等于 prepared_rand,那么会循环读取 csv 文件数据直到与 prepared_rand 相同;否则则会只读取 prepared_rand 个数的行的数据。也即最终生成的数据行数为二者取小。 - -- **use_sample_ts** : 仅当 data_source 为 sample 时生效,表示 sample_file 指定的 csv 文件内是否包含第一列时间戳,默认为 no。 若设置为 yes, 则使用 csv 文件第一列作为时间戳,由于同一子表时间戳不能重复,生成的数据量取决于 csv 文件内的数据行数相同,此时 insert_rows 失效。 - -- **tags_file** : 仅当 insert_mode 为 taosc, rest 的模式下生效。 最终的 tag 的数值与 childtable_count 有关,如果 csv 文件内的 tag 数据行小于给定的子表数量,那么会循环读取 csv 文件数据直到生成 childtable_count 指定的子表数量;否则则只会读取 childtable_count 行 tag 数据。也即最终生成的子表数量为二者取小。 - -- **primary_key** : 指定超级表是否有复合主键,取值 1 和 0, 复合主键列只能是超级表的第二列,指定生成复合主键后要确保第二列符合复合主键的数据类型,否则会报错 -- **repeat_ts_min** : 数值类型,复合主键开启情况下指定生成相同时间戳记录的最小个数,生成相同时间戳记录的个数是在范围[repeat_ts_min, repeat_ts_max] 内的随机值, 最小值等于最大值时为固定个数 -- **repeat_ts_max** : 数值类型,复合主键开启情况下指定生成相同时间戳记录的最大个数 -- **sqls** : 字符串数组类型,指定超级表创建成功后要执行的 sql 
数组,sql 中指定表名前面要带数据库名,否则会报未指定数据库错误 - -#### tsma配置参数 - -指定tsma的配置参数在 `super_tables` 中的 `tsmas` 中,具体参数如下。 - -- **name** : 指定 tsma 的名字,必选项。 - -- **function** : 指定 tsma 的函数,必选项。 - -- **interval** : 指定 tsma 的时间间隔,必选项。 - -- **sliding** : 指定 tsma 的窗口时间位移,必选项。 - -- **custom** : 指定 tsma 的创建语句结尾追加的自定义配置,可选项。 - -- **start_when_inserted** : 指定当插入多少行时创建 tsma,可选项,默认为 0。 - -#### 标签列与数据列配置参数 - -指定超级表标签列与数据列的配置参数分别在 `super_tables` 中的 `columns` 和 `tag` 中。 - -- **type** : 指定列类型,可选值请参考 TDengine 支持的数据类型。 - 注:JSON 数据类型比较特殊,只能用于标签,当使用 JSON 类型作为 tag 时有且只能有这一个标签,此时 count 和 len 代表的意义分别是 JSON tag 内的 key-value pair 的个数和每个 KV pair 的 value 的值的长度,value 默认为 string。 - -- **len** : 指定该数据类型的长度,对 NCHAR,BINARY 和 JSON 数据类型有效。如果对其他数据类型配置了该参数,若为 0 , 则代表该列始终都是以 null 值写入;如果不为 0 则被忽略。 - -- **count** : 指定该类型列连续出现的数量,例如 "count": 4096 即可生成 4096 个指定类型的列。 - -- **name** : 列的名字,若与 count 同时使用,比如 "name":"current", "count":3, 则 3 个列的名字分别为 current, current_2. current_3。 - -- **min** : 数据类型的 列/标签 的最小值。生成的值将大于或等于最小值。 - -- **max** : 数据类型的 列/标签 的最大值。生成的值将小于最小值。 - -- **fun** : 此列数据以函数填充,目前只支持 sin 和 cos 两函数,输入参数为时间戳换算成角度值,换算公式: 角度 x = 输入的时间列ts值 % 360。同时支持系数调节,随机波动因子调节,以固定格式的表达式展现,如 fun=“10\*sin(x)+100\*random(5)” , x 表示角度,取值 0 ~ 360度,增长步长与时间列步长一致。10 表示乘的系数,100 表示加或减的系数,5 表示波动幅度在 5% 的随机范围内。目前支持的数据类型为 int, bigint, float, double 四种数据类型。注意:表达式为固定模式,不可前后颠倒。 - -- **values** : nchar/binary 列/标签的值域,将从值中随机选择。 - -- **sma**: 将该列加入 SMA 中,值为 "yes" 或者 "no",默认为 "no"。 - -- **encode**: 字符串类型,指定此列两级压缩中的第一级编码算法,详细参见创建超级表 - -- **compress**: 字符串类型,指定此列两级压缩中的第二级加密算法,详细参见创建超级表 - -- **level**: 字符串类型,指定此列两级压缩中的第二级加密算法的压缩率高低,详细参见创建超级表 - -- **gen**: 字符串类型,指定此列生成数据的方式,不指定为随机,若指定为 “order”, 会按自然数顺序增长 - -- **fillNull**: 字符串类型,指定此列是否随机插入 NULL 值,可指定为 “true” 或 "false", 只有当 generate_row_rule 为 2 时有效 - -#### 插入行为配置参数 - -- **thread_count** : 插入数据的线程数量,默认为 8。 - -- **thread_bind_vgroup** : 写入时 vgroup 是否和写入线程绑定,绑定后可提升写入速度, 取值为 "yes" 或 "no",默认值为 “no”, 设置为 “no” 后与原来行为一致。 当设为 “yes” 时,如果 thread_count 数量大小写入数据库的 vgroups 数量, thread_count 自动调整为 vgroups 数量;如果 thread_count 
数量小于 vgroups 数量,写入线程数量不做调整,一个线程写完一个 vgroup 数据后再写下一个,同时保持一个 vgroup 同时只能由一个线程写入的规则。 - -- **create_table_thread_count** : 建表的线程数量,默认为 8。 - -- **connection_pool_size** : 预先建立的与 TDengine 服务端之间的连接的数量。若不配置,则与所指定的线程数相同。 - -- **result_file** : 结果输出文件的路径,默认值为 ./output.txt。 - -- **confirm_parameter_prompt** : 开关参数,要求用户在提示后确认才能继续。默认值为 false 。 - -- **interlace_rows** : 启用交错插入模式并同时指定向每个子表每次插入的数据行数。交错插入模式是指依次向每张子表插入由本参数所指定的行数并重复这个过程,直到所有子表的数据都插入完成。默认值为 0, 即向一张子表完成数据插入后才会向下一张子表进行数据插入。 - 在 `super_tables` 中也可以配置该参数,若配置则以 `super_tables` 中的配置为高优先级,覆盖全局设置。 - -- **insert_interval** : - 指定交错插入模式的插入间隔,单位为 ms,默认值为 0。 只有当 `-B/--interlace-rows` 大于 0 时才起作用。意味着数据插入线程在为每个子表插入隔行扫描记录后,会等待该值指定的时间间隔后再进行下一轮写入。 - 在 `super_tables` 中也可以配置该参数,若配置则以 `super_tables` 中的配置为高优先级,覆盖全局设置。 - -- **num_of_records_per_req** : - 每次向 TDengine 请求写入的数据行数,默认值为 30000 。当其设置过大时,TDengine 客户端驱动会返回相应的错误信息,此时需要调低这个参数的设置以满足写入要求。 - -- **prepare_rand** : 生成的随机数据中唯一值的数量。若为 1 则表示所有数据都相同。默认值为 10000 。 - -- **pre_load_tb_meta** :是否提前加载子表的 meta 数据,取值为 “yes” or "no"。当子表数量非常多时,打开此选项可提高写入速度。 - -### 查询场景配置参数 - -查询场景下 `filetype` 必须设置为 `query`。 -`query_times` 指定运行查询的次数,数值类型 - -查询场景可以通过设置 `kill_slow_query_threshold` 和 `kill_slow_query_interval` 参数来控制杀掉慢查询语句的执行,threshold 控制如果 exec_usec 超过指定时间的查询将被 taosBenchmark 杀掉,单位为秒;interval 控制休眠时间,避免持续查询慢查询消耗 CPU ,单位为秒。 - -其它通用参数详见[通用配置参数](#通用配置参数)。 - -#### 执行指定查询语句的配置参数 - -查询指定表(可以指定超级表、子表或普通表)的配置参数在 `specified_table_query` 中设置。 - -- **query_interval** : 查询时间间隔,单位是秒,默认值为 0。 - -- **threads** : 执行查询 SQL 的线程数,默认值为 1。 - -- **sqls**: - - **sql**: 执行的 SQL 命令,必填。 - - **result**: 保存查询结果的文件,未指定则不保存。 - -#### 查询超级表的配置参数 - -查询超级表的配置参数在 `super_table_query` 中设置。 - -- **stblname** : 指定要查询的超级表的名称,必填。 - -- **query_interval** : 查询时间间隔,单位是秒,默认值为 0。 - -- **threads** : 执行查询 SQL 的线程数,默认值为 1。 - -- **sqls** : - - **sql** : 执行的 SQL 命令,必填;对于超级表的查询 SQL,在 SQL 命令中保留 "xxxx",程序会自动将其替换为超级表的所有子表名。 - 替换为超级表中所有的子表名。 - - **result** : 保存查询结果的文件,未指定则不保存。 - -### 订阅场景配置参数 - -订阅场景下 `filetype` 必须设置为 `subscribe`,该参数及其它通用参数详见[通用配置参数](#通用配置参数) - 
-#### 执行指定订阅语句的配置参数 - -订阅指定表(可以指定超级表、子表或者普通表)的配置参数在 `specified_table_query` 中设置。 - -- **threads/concurrent** : 执行 SQL 的线程数,默认为 1。 - -- **sqls** : - - **sql** : 执行的 SQL 命令,必填。 - - -#### 配置文件中数据类型书写对照表 - -| # | **引擎** | **taosBenchmark** -| --- | :----------------: | :---------------: -| 1 | TIMESTAMP | timestamp -| 2 | INT | int -| 3 | INT UNSIGNED | uint -| 4 | BIGINT | bigint -| 5 | BIGINT UNSIGNED | ubigint -| 6 | FLOAT | float -| 7 | DOUBLE | double -| 8 | BINARY | binary -| 9 | SMALLINT | smallint -| 10 | SMALLINT UNSIGNED | usmallint -| 11 | TINYINT | tinyint -| 12 | TINYINT UNSIGNED | utinyint -| 13 | BOOL | bool -| 14 | NCHAR | nchar -| 15 | VARCHAR | varchar -| 15 | JSON | json - -注意:taosBenchmark 配置文件中数据类型必须小写方可识别 - - - diff --git a/docs/zh/14-reference/03-taos-sql/20-keywords.md b/docs/zh/14-reference/03-taos-sql/20-keywords.md index 4d2454ed5a..1ef0d07e14 100644 --- a/docs/zh/14-reference/03-taos-sql/20-keywords.md +++ b/docs/zh/14-reference/03-taos-sql/20-keywords.md @@ -131,6 +131,7 @@ description: TDengine 保留关键字的详细列表 ### H - HAVING +- HOST ### I diff --git a/docs/zh/14-reference/05-connector/14-java.mdx b/docs/zh/14-reference/05-connector/14-java.mdx index ec24f1329d..ba4cb38afd 100644 --- a/docs/zh/14-reference/05-connector/14-java.mdx +++ b/docs/zh/14-reference/05-connector/14-java.mdx @@ -33,6 +33,7 @@ REST 连接支持所有能运行 Java 的平台。 | taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 | | :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: | +| 3.3.3 | 1. 解决了 Websocket statement 关闭导致的内存泄漏 | - | | 3.3.2 | 1. 优化 Websocket 连接下的参数绑定性能;2. 优化了对 mybatis 的支持 | - | | 3.3.0 | 1. 优化 Websocket 连接下的数据传输性能;2. 
支持跳过 SSL 验证,默认关闭 | 3.3.2.0 及更高版本 | | 3.2.11 | 解决了 Native 连接关闭结果集 bug | - | diff --git a/docs/zh/14-reference/05-connector/20-go.mdx b/docs/zh/14-reference/05-connector/20-go.mdx index 8d65fba321..61048f7b85 100644 --- a/docs/zh/14-reference/05-connector/20-go.mdx +++ b/docs/zh/14-reference/05-connector/20-go.mdx @@ -63,10 +63,15 @@ TDengine 其他功能模块的报错,请参考 [错误码](../../../reference/ | BINARY | string | | NCHAR | string | | JSON | []byte | +| GEOMETRY | []byte | +| VARBINARY | []byte | **注意**:JSON 类型仅在 tag 中支持。 +GEOMETRY类型是 little endian 字节序的二进制数据,符合 WKB 规范。详细信息请参考 [数据类型](../../taos-sql/data-type/#数据类型) +WKB规范请参考[Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/) ## 示例程序汇总 + 示例程序源码请参考:[示例程序](https://github.com/taosdata/driver-go/tree/main/examples) ## 常见问题 diff --git a/docs/zh/14-reference/05-connector/26-rust.mdx b/docs/zh/14-reference/05-connector/26-rust.mdx index 88be297ac6..c5d2a165d4 100644 --- a/docs/zh/14-reference/05-connector/26-rust.mdx +++ b/docs/zh/14-reference/05-connector/26-rust.mdx @@ -80,6 +80,8 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Rust 对 | BINARY | Vec\ | | NCHAR | String | | JSON | serde_json::Value | +| VARBINARY | Bytes | +| GEOMETRY | Bytes | **注意**:JSON 类型仅在 tag 中支持。 diff --git a/docs/zh/14-reference/05-connector/30-python.mdx b/docs/zh/14-reference/05-connector/30-python.mdx index 1a805c692e..8e08bfc103 100644 --- a/docs/zh/14-reference/05-connector/30-python.mdx +++ b/docs/zh/14-reference/05-connector/30-python.mdx @@ -103,7 +103,8 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Python 对 |BINARY|str| |NCHAR|str| |JSON|str| - +|GEOMETRY|bytearray| +|VARBINARY|bytearray| ## 示例程序汇总 | 示例程序链接 | 示例程序内容 | @@ -113,6 +114,13 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Python 对 | [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB 行协议写入 | | [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | 使用 JSON 类型的标签 | | 
[tmq_consumer.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/tmq_consumer.py) | tmq 订阅 | +| [native_all_type_query.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/native_all_type_query.py) | 支持全部类型示例 | +| [native_all_type_stmt.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/native_all_type_stmt.py) | 参数绑定支持全部类型示例 | + +示例程序源码请参考: + +1. [原生更多示例程序](https://github.com/taosdata/taos-connector-python/tree/main/examples) +2. [WebSocket 更多示例程序](https://github.com/taosdata/taos-connector-python/tree/main/taos-ws-py/examples) ## 关于纳秒 (nanosecond) diff --git a/docs/zh/14-reference/05-connector/35-node.mdx b/docs/zh/14-reference/05-connector/35-node.mdx index a3d5099235..3d8e82086e 100644 --- a/docs/zh/14-reference/05-connector/35-node.mdx +++ b/docs/zh/14-reference/05-connector/35-node.mdx @@ -88,6 +88,8 @@ Node.js 连接器目前仅支持 Websocket 连接器, 其通过 taosAdapter | [telnet_line_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/telnet_line_example.js) | OpenTSDB Telnet 行协议写入示例。 | | [json_line_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/json_line_example.js) | OpenTSDB JSON 行协议写入示例。 | | [tmq_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/tmq_example.js) | 订阅的使用示例。 | +| [all_type_query](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/all_type_query.js) | 支持全部类型示例。 | +| [all_type_stmt](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/all_type_stmt.js) | 参数绑定支持全部类型示例。 | ## 使用限制 diff --git a/docs/zh/14-reference/05-connector/40-csharp.mdx b/docs/zh/14-reference/05-connector/40-csharp.mdx index 3cfb14b21e..93f592fdd0 100644 --- a/docs/zh/14-reference/05-connector/40-csharp.mdx +++ b/docs/zh/14-reference/05-connector/40-csharp.mdx @@ -67,9 +67,13 @@ TDengine 其他功能模块的报错,请参考 [错误码](../../../reference/ | VARBINARY | byte[] 
| | GEOMETRY | byte[] | -:::note -JSON 类型仅在 tag 中支持。 -::: +**注意**:JSON 类型仅在 tag 中支持。 +GEOMETRY类型是 little endian 字节序的二进制数据,符合 WKB 规范。详细信息请参考 [数据类型](../../taos-sql/data-type/#数据类型) +WKB规范请参考[Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/) + +## 示例程序汇总 + +示例程序源码请参考:[示例程序](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples) ## API 参考 diff --git a/docs/zh/14-reference/05-connector/50-odbc.mdx b/docs/zh/14-reference/05-connector/50-odbc.mdx index 244a3f8d4e..ee69cf9364 100644 --- a/docs/zh/14-reference/05-connector/50-odbc.mdx +++ b/docs/zh/14-reference/05-connector/50-odbc.mdx @@ -3,11 +3,11 @@ sidebar_label: ODBC title: TDengine ODBC --- -TDengine ODBC 是为 TDengine 实现的 ODBC 驱动程序,支持 Windows 系统的应用(如 [PowerBI](https://powerbi.microsoft.com/zh-cn/) 等)通过 ODBC 标准接口访问本地、远程和云服务的 TDengine 数据库。 +TDengine ODBC 是为 TDengine 实现的 ODBC 驱动程序,支持 Windows 系统的应用(如 [PowerBI](https://powerbi.microsoft.com/zh-cn/) 等)通过 ODBC 标准接口访问本地、远程和云服务的 TDengine 数据库的数据表/视图。 TDengine ODBC 提供基于 WebSocket(推荐)和 原生连接两种方式连接 TDengine 数据库,使用时可以为 TDengine 数据源设置不同的连接方式。访问云服务时必须使用 WebSocket 连接方式。 -注意:TDengine ODBC 只支持 64 位系统,调用 TDengine ODBC 必须通过 64 位的 ODBC 驱动管理器进行。因此调用 ODBC 的程序不能使用 32 位版本。 +注意:TDengine ODBC 支持 32/64 位 Windows 系统,调用 TDengine ODBC 需要通过相应位数的 ODBC 驱动管理器进行。在 32 位 Windows 系统或者 64 位 Windows 系统的 32 位应用程序中,仅支持使用 WebSocket 连接方式访问 TDengine 数据库。 想更多了解 TDengine 时序时序数据库的使用,可访问 [TDengine官方文档](https://docs.taosdata.com/intro/)。 @@ -18,7 +18,7 @@ TDengine ODBC 提供基于 WebSocket(推荐)和 原生连接两种方式连 1. 仅支持 Windows 平台。Windows 上需要安装过 VC 运行时库,可在此下载安装 [VC运行时库](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170) 如果已经安装VS开发工具可忽略。 -2. 安装 TDengine Windows 客户端,版本在 3.2.1.0 或以上,都会包含 TDengine 的 ODBC 驱动。 +2. 安装 TDengine Windows 客户端。版本在 3.2.1.0 或以上,包含 TDengine 的 ODBC 64 位驱动;版本在 3.3.3.0 或以上,包含 TDengine 的 ODBC 32/64 位驱动。 ## 配置数据源 @@ -26,17 +26,19 @@ TDengine ODBC 提供基于 WebSocket(推荐)和 原生连接两种方式连 TDengine ODBC 支持两种连接 TDengine 数据库方式:Websocket 连接与 Native 连接,其区别如下: -1. 
只有 Websocket 支持连接云服务 +1. 访问云服务仅支持使用 Websocket 连接方式。 -2. Websocket 连接的兼容性更好,一般不需要随着 TDengine 数据库服务端升级而升级客户端的库。 +2. 32 位应用程序仅支持使用 WebSocket 连接方式。 -3. Native 连接通常性能更好一点,但是必须与 TDengine 数据库服务端的版本保持一致。 +3. Websocket 连接的兼容性更好,一般不需要随着 TDengine 数据库服务端升级而升级客户端的库。 -4. 对于一般用户,建议使用 **Websocket** 连接方式,性能与 Native 差别不大,兼容性更好。 +4. Native 连接通常性能更好一点,但是必须与 TDengine 数据库服务端的版本保持一致。 + +5. 对于一般用户,建议使用 **Websocket** 连接方式,性能与 Native 差别不大,兼容性更好。 ### WebSocket 连接 -1. 【开始】菜单搜索打开【ODBC 数据源(64 位)】管理工具(注意不要选择ODBC 数据源(32 位)) +1. 【开始】菜单搜索打开【ODBC 数据源(32 位)】或者【ODBC 数据源(64 位)】管理工具 2. 选中【用户 DSN】标签页,通过【添加(D)】按钮进入"创建数据源"界面 @@ -64,7 +66,7 @@ TDengine ODBC 支持两种连接 TDengine 数据库方式:Websocket 连接与 7. 也可以在第2步选择已经配置好的数据源名通过【配置】按钮进入配置页面,修改已有配置 -### 原生连接(不支持云服务) +### 原生连接(不支持云服务和 32 位应用程序) 1. 【开始】菜单搜索打开【ODBC 数据源(64 位)】管理工具(注意不要选择ODBC 数据源(32 位)) diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h index 1179e710cd..d8a88d038d 100644 --- a/include/common/tdataformat.h +++ b/include/common/tdataformat.h @@ -116,7 +116,7 @@ typedef struct { int32_t tValueColumnInit(SValueColumn *valCol); int32_t tValueColumnDestroy(SValueColumn *valCol); -int32_t tValueColumnClear(SValueColumn *valCol); +void tValueColumnClear(SValueColumn *valCol); int32_t tValueColumnAppend(SValueColumn *valCol, const SValue *value); int32_t tValueColumnUpdate(SValueColumn *valCol, int32_t idx, const SValue *value); int32_t tValueColumnGet(SValueColumn *valCol, int32_t idx, SValue *value); diff --git a/include/libs/function/function.h b/include/libs/function/function.h index a71a2a6b7f..ec01cf1f6f 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -258,6 +258,7 @@ typedef struct SqlFunctionCtx { SFuncInputRowIter rowIter; bool bInputFinished; bool hasWindowOrGroup; // denote that the function is used with time window or group + bool needCleanup; // denote that the function need to be cleaned up } SqlFunctionCtx; typedef struct tExprNode { diff --git a/include/libs/wal/wal.h 
b/include/libs/wal/wal.h index 5a2cf3a3a0..74ab0bf484 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -177,7 +177,7 @@ int32_t walRollback(SWal *, int64_t ver); int32_t walBeginSnapshot(SWal *, int64_t ver, int64_t logRetention); int32_t walEndSnapshot(SWal *); int32_t walRestoreFromSnapshot(SWal *, int64_t ver); -int32_t walApplyVer(SWal *, int64_t ver); +void walApplyVer(SWal *, int64_t ver); // wal reader SWalReader *walOpenReader(SWal *, SWalFilterCond *pCond, int64_t id); diff --git a/include/util/tcompression.h b/include/util/tcompression.h index cea648a3a6..182465548b 100644 --- a/include/util/tcompression.h +++ b/include/util/tcompression.h @@ -268,7 +268,7 @@ typedef struct { uint8_t lvl[3]; // l[0] = 'low', l[1] = 'mid', l[2] = 'high' } TCmprLvlSet; -int32_t tcompressDebug(uint32_t cmprAlg, uint8_t *l1Alg, uint8_t *l2Alg, uint8_t *level); +void tcompressDebug(uint32_t cmprAlg, uint8_t *l1Alg, uint8_t *l2Alg, uint8_t *level); #define DEFINE_VAR(cmprAlg) \ uint8_t l1 = COMPRESS_L1_TYPE_U32(cmprAlg); \ diff --git a/include/util/tutil.h b/include/util/tutil.h index 6a8f58e360..87710b091d 100644 --- a/include/util/tutil.h +++ b/include/util/tutil.h @@ -176,6 +176,15 @@ static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen, } \ } while (0) +#define TAOS_CHECK_RETURN_WITH_RELEASE(CMD, PTR1, PTR2) \ + do { \ + int32_t __c = (CMD); \ + if (__c != TSDB_CODE_SUCCESS) { \ + sdbRelease(PTR1, PTR2); \ + TAOS_RETURN(__c); \ + } \ + } while (0) + #define TAOS_CHECK_RETURN_WITH_FREE(CMD, PTR) \ do { \ int32_t __c = (CMD); \ diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 18cdd58639..5b559451da 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -2895,6 +2895,10 @@ TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly, int8_t s taosMemoryFree(param); return NULL; } + code = tsem_destroy(¶m->sem); + if(TSDB_CODE_SUCCESS != code) { + 
tscError("failed to destroy semaphore since %s", tstrerror(code)); + } SRequestObj* pRequest = NULL; if (param->pRequest != NULL) { diff --git a/source/client/src/clientMonitor.c b/source/client/src/clientMonitor.c index dbcc046b4a..f424e4d1a2 100644 --- a/source/client/src/clientMonitor.c +++ b/source/client/src/clientMonitor.c @@ -73,7 +73,7 @@ static void destroyMonitorClient(void* data) { } taosHashCleanup(pMonitor->counters); int ret = taos_collector_registry_destroy(pMonitor->registry); - if (ret){ + if (ret) { tscError("failed to destroy registry, pMonitor:%p ret:%d", pMonitor, ret); } taosMemoryFree(pMonitor); @@ -192,7 +192,7 @@ static void generateClusterReport(taos_collector_registry_t* registry, void* pTr if (strlen(pCont) != 0 && sendReport(pTransporter, epSet, pCont, MONITOR_TYPE_COUNTER, NULL) == 0) { int ret = taos_collector_registry_clear_batch(registry); - if (ret){ + if (ret) { tscError("failed to clear registry, ret:%d", ret); } } @@ -215,7 +215,8 @@ static void reportSendProcess(void* param, void* tmrId) { SEpSet ep = getEpSet_s(&pInst->mgmtEp); generateClusterReport(pMonitor->registry, pInst->pTransporter, &ep); - bool reset = taosTmrReset(reportSendProcess, pInst->monitorParas.tsMonitorInterval * 1000, param, monitorTimer, &tmrId); + bool reset = + taosTmrReset(reportSendProcess, pInst->monitorParas.tsMonitorInterval * 1000, param, monitorTimer, &tmrId); tscDebug("reset timer, pMonitor:%p, %d", pMonitor, reset); taosRUnLockLatch(&monitorLock); } @@ -265,7 +266,7 @@ void monitorCreateClient(int64_t clusterId) { } int r = taos_collector_registry_register_collector(pMonitor->registry, pMonitor->colector); - if (r){ + if (r) { tscError("failed to register collector, ret:%d", r); goto fail; } @@ -318,7 +319,7 @@ void monitorCreateClientCounter(int64_t clusterId, const char* name, const char* if (taos_collector_add_metric(pMonitor->colector, newCounter) != 0) { tscError("failed to add metric to collector"); int r = taos_counter_destroy(newCounter); 
- if (r){ + if (r) { tscError("failed to destroy counter, code: %d", r); } goto end; @@ -326,7 +327,7 @@ void monitorCreateClientCounter(int64_t clusterId, const char* name, const char* if (taosHashPut(pMonitor->counters, name, strlen(name), &newCounter, POINTER_BYTES) != 0) { tscError("failed to put counter to monitor"); int r = taos_counter_destroy(newCounter); - if (r){ + if (r) { tscError("failed to destroy counter, code: %d", r); } goto end; @@ -394,7 +395,7 @@ static void monitorWriteSlowLog2File(MonitorSlowLogData* slowLogData, char* tmpP if (pClient == NULL) { tscError("failed to allocate memory for slow log client"); int32_t ret = taosCloseFile(&pFile); - if (ret != 0){ + if (ret != 0) { tscError("failed to close file:%p ret:%d", pFile, ret); } return; @@ -406,7 +407,7 @@ static void monitorWriteSlowLog2File(MonitorSlowLogData* slowLogData, char* tmpP if (taosHashPut(monitorSlowLogHash, &slowLogData->clusterId, LONG_BYTES, &pClient, POINTER_BYTES) != 0) { tscError("failed to put clusterId:%" PRId64 " to hash table", slowLogData->clusterId); int32_t ret = taosCloseFile(&pFile); - if (ret != 0){ + if (ret != 0) { tscError("failed to close file:%p ret:%d", pFile, ret); } taosMemoryFree(pClient); @@ -635,7 +636,7 @@ static void processFileRemoved(SlowLogClient* pClient) { return; } int32_t ret = taosCloseFile(&(pClient->pFile)); - if (ret != 0){ + if (ret != 0) { tscError("failed to close file:%p ret:%d", pClient->pFile, ret); return; } @@ -728,7 +729,7 @@ static void monitorSendAllSlowLogFromTempDir(int64_t clusterId) { if (taosLockFile(pFile) < 0) { tscInfo("failed to lock file:%s since %s, maybe used by other process", filename, terrstr()); int32_t ret = taosCloseFile(&pFile); - if (ret != 0){ + if (ret != 0) { tscError("failed to close file:%p ret:%d", pFile, ret); } continue; @@ -749,7 +750,7 @@ static void monitorSendAllSlowLogFromTempDir(int64_t clusterId) { } int32_t ret = taosCloseDir(&pDir); - if (ret != 0){ + if (ret != 0) { tscError("failed to 
close dir, ret:%d", ret); } } @@ -831,7 +832,7 @@ static int32_t tscMonitortInit() { static void tscMonitorStop() { if (taosCheckPthreadValid(monitorThread)) { (void)taosThreadJoin(monitorThread, NULL); - (void)taosThreadClear(&monitorThread); + taosThreadClear(&monitorThread); } } @@ -897,7 +898,7 @@ void monitorClose() { taosHashCleanup(monitorSlowLogHash); taosTmrCleanUp(monitorTimer); taosCloseQueue(monitorQueue); - if(tsem2_destroy(&monitorSem) != 0) { + if (tsem2_destroy(&monitorSem) != 0) { tscError("failed to destroy semaphore"); } taosWUnLockLatch(&monitorLock); @@ -921,7 +922,7 @@ int32_t monitorPutData2MonitorQueue(MonitorSlowLogData data) { tscDebug("[monitor] write slow log to queue, clusterId:%" PRIx64 " type:%s, data:%s", slowLogData->clusterId, queueTypeStr[slowLogData->type], slowLogData->data); if (taosWriteQitem(monitorQueue, slowLogData) == 0) { - if(tsem2_post(&monitorSem) != 0) { + if (tsem2_post(&monitorSem) != 0) { tscError("failed to post semaphore"); } } else { diff --git a/source/common/src/rsync.c b/source/common/src/rsync.c index 77787db3ce..47a452eab7 100644 --- a/source/common/src/rsync.c +++ b/source/common/src/rsync.c @@ -32,10 +32,14 @@ static void removeEmptyDir() { empty = false; } if (empty) taosRemoveDir(filename); - (void)taosCloseDir(&pDirTmp); + if (taosCloseDir(&pDirTmp) != 0) { + uError("[rsync] close dir error," ERRNO_ERR_FORMAT, ERRNO_ERR_DATA); + } } - (void)taosCloseDir(&pDir); + if (taosCloseDir(&pDir) != 0) { + uError("[rsync] close dir error," ERRNO_ERR_FORMAT, ERRNO_ERR_DATA); + } } #ifdef WINDOWS @@ -297,7 +301,7 @@ int32_t downloadByRsync(const char* id, const char* path, int64_t checkpointId) path, el); } - if (code != TSDB_CODE_SUCCESS) { // if failed, try to load it from data directory + if (code != TSDB_CODE_SUCCESS) { // if failed, try to load it from data directory #ifdef WINDOWS memset(pathTransform, 0, PATH_MAX); changeDirFromWindowsToLinux(path, pathTransform); diff --git a/source/common/src/tcol.c 
b/source/common/src/tcol.c index 17972c6777..84027c25b6 100644 --- a/source/common/src/tcol.c +++ b/source/common/src/tcol.c @@ -238,7 +238,7 @@ const char* columnLevelStr(uint8_t type) { bool checkColumnEncode(char encode[TSDB_CL_COMPRESS_OPTION_LEN]) { if (0 == strlen(encode)) return true; - (void)strtolower(encode, encode); + TAOS_UNUSED(strtolower(encode, encode)); for (int i = 0; i < supportedEncodeNum; ++i) { if (0 == strcmp((const char*)encode, supportedEncode[i])) { return true; @@ -255,7 +255,7 @@ bool checkColumnEncodeOrSetDefault(uint8_t type, char encode[TSDB_CL_COMPRESS_OP } bool checkColumnCompress(char compress[TSDB_CL_COMPRESS_OPTION_LEN]) { if (0 == strlen(compress)) return true; - (void)strtolower(compress, compress); + TAOS_UNUSED(strtolower(compress, compress)); for (int i = 0; i < supportedCompressNum; ++i) { if (0 == strcmp((const char*)compress, supportedCompress[i])) { return true; @@ -273,7 +273,7 @@ bool checkColumnCompressOrSetDefault(uint8_t type, char compress[TSDB_CL_COMPRES } bool checkColumnLevel(char level[TSDB_CL_COMPRESS_OPTION_LEN]) { if (0 == strlen(level)) return true; - (void)strtolower(level, level); + TAOS_UNUSED(strtolower(level, level)); if (1 == strlen(level)) { if ('h' == level[0] || 'm' == level[0] || 'l' == level[0]) return true; } else { diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c index 4b44e4af43..4ef9bf481c 100644 --- a/source/common/src/tdataformat.c +++ b/source/common/src/tdataformat.c @@ -481,6 +481,10 @@ int32_t tRowBuildFromBind(SBindInfo *infos, int32_t numOfInfos, bool infoSorted, }; if (IS_VAR_DATA_TYPE(infos[iInfo].type)) { value.nData = infos[iInfo].bind->length[iRow]; + if (value.nData > pTSchema->columns[iInfo].bytes - VARSTR_HEADER_SIZE) { + code = TSDB_CODE_INVALID_PARA; + goto _exit; + } value.pData = (uint8_t *)infos[iInfo].bind->buffer + infos[iInfo].bind->buffer_length * iRow; } else { (void)memcpy(&value.val, (uint8_t *)infos[iInfo].bind->buffer + 
infos[iInfo].bind->buffer_length * iRow, @@ -1704,7 +1708,7 @@ bool tTagGet(const STag *pTag, STagVal *pTagVal) { offset = pTag->idx[midx]; } - (void)tGetTagVal(p + offset, &tv, isJson); + int32_t nt = tGetTagVal(p + offset, &tv, isJson); if (isJson) { c = tTagValJsonCmprFn(pTagVal, &tv); } else { @@ -1754,7 +1758,7 @@ int32_t tTagToValArray(const STag *pTag, SArray **ppArray) { } else { offset = pTag->idx[iTag]; } - (void)tGetTagVal(p + offset, &tv, pTag->flags & TD_TAG_JSON); + int32_t nt = tGetTagVal(p + offset, &tv, pTag->flags & TD_TAG_JSON); if (taosArrayPush(*ppArray, &tv) == NULL) { code = terrno; goto _err; @@ -1784,7 +1788,7 @@ void tTagSetCid(const STag *pTag, int16_t iTag, int16_t cid) { offset = pTag->idx[iTag]; } - (void)tPutI16v(p + offset, cid); + int32_t nt = tPutI16v(p + offset, cid); } // STSchema ======================================== @@ -3279,6 +3283,10 @@ int32_t tRowBuildFromBind2(SBindInfo2 *infos, int32_t numOfInfos, bool infoSorte int32_t length = infos[iInfo].bind->length[iRow]; uint8_t **data = &((uint8_t **)TARRAY_DATA(bufArray))[iInfo]; value.nData = length; + if (value.nData > pTSchema->columns[iInfo].bytes - VARSTR_HEADER_SIZE) { + code = TSDB_CODE_INVALID_PARA; + goto _exit; + } value.pData = *data; *data += length; // value.pData = (uint8_t *)infos[iInfo].bind->buffer + infos[iInfo].bind->buffer_length * iRow; @@ -3352,16 +3360,16 @@ static int32_t tColDataCopyRowSingleCol(SColData *pFromColData, int32_t iFromRow SET_BIT1(pToColData->pBitMap, iToRow, GET_BIT1(pFromColData->pBitMap, iFromRow)); } break; case HAS_VALUE: { - (void)tColDataCopyRowCell(pFromColData, iFromRow, pToColData, iToRow); + TAOS_CHECK_RETURN(tColDataCopyRowCell(pFromColData, iFromRow, pToColData, iToRow)); } break; case (HAS_VALUE | HAS_NONE): case (HAS_VALUE | HAS_NULL): { SET_BIT1(pToColData->pBitMap, iToRow, GET_BIT1(pFromColData->pBitMap, iFromRow)); - (void)tColDataCopyRowCell(pFromColData, iFromRow, pToColData, iToRow); + 
TAOS_CHECK_RETURN(tColDataCopyRowCell(pFromColData, iFromRow, pToColData, iToRow)); } break; case (HAS_VALUE | HAS_NULL | HAS_NONE): { SET_BIT2(pToColData->pBitMap, iToRow, GET_BIT2(pFromColData->pBitMap, iFromRow)); - (void)tColDataCopyRowCell(pFromColData, iFromRow, pToColData, iToRow); + TAOS_CHECK_RETURN(tColDataCopyRowCell(pFromColData, iFromRow, pToColData, iToRow)); } break; default: return -1; @@ -3434,24 +3442,24 @@ static int32_t tColDataMergeSortMerge(SColData *aColData, int32_t start, int32_t tColDataArrGetRowKey(aColData, nColData, j, &keyj); while (i <= mid && j <= end) { if (tRowKeyCompare(&keyi, &keyj) <= 0) { - (void)tColDataCopyRowAppend(aColData, i++, aDstColData, nColData); + TAOS_CHECK_RETURN(tColDataCopyRowAppend(aColData, i++, aDstColData, nColData)); tColDataArrGetRowKey(aColData, nColData, i, &keyi); } else { - (void)tColDataCopyRowAppend(aColData, j++, aDstColData, nColData); + TAOS_CHECK_RETURN(tColDataCopyRowAppend(aColData, j++, aDstColData, nColData)); tColDataArrGetRowKey(aColData, nColData, j, &keyj); } } while (i <= mid) { - (void)tColDataCopyRowAppend(aColData, i++, aDstColData, nColData); + TAOS_CHECK_RETURN(tColDataCopyRowAppend(aColData, i++, aDstColData, nColData)); } while (j <= end) { - (void)tColDataCopyRowAppend(aColData, j++, aDstColData, nColData); + TAOS_CHECK_RETURN(tColDataCopyRowAppend(aColData, j++, aDstColData, nColData)); } for (i = start, k = 0; i <= end; ++i, ++k) { - (void)tColDataCopyRow(aDstColData, k, aColData, i, nColData); + TAOS_CHECK_RETURN(tColDataCopyRow(aDstColData, k, aColData, i, nColData)); } if (aDstColData) { @@ -3600,7 +3608,7 @@ int32_t tColDataSortMerge(SArray **arr) { // sort ------- if (doSort) { - (void)tColDataSort(aColData, nColData); + TAOS_CHECK_RETURN(tColDataSort(aColData, nColData)); } if (doMerge != 1) { @@ -4171,12 +4179,12 @@ int32_t tValueColumnDestroy(SValueColumn *valCol) { return 0; } -int32_t tValueColumnClear(SValueColumn *valCol) { +void tValueColumnClear(SValueColumn 
*valCol) { valCol->type = TSDB_DATA_TYPE_NULL; valCol->numOfValues = 0; tBufferClear(&valCol->data); tBufferClear(&valCol->offsets); - return 0; + return; } int32_t tValueColumnAppend(SValueColumn *valCol, const SValue *value) { @@ -4247,17 +4255,17 @@ int32_t tValueColumnGet(SValueColumn *valCol, int32_t idx, SValue *value) { int32_t offset, nextOffset; SBufferReader reader = BUFFER_READER_INITIALIZER(idx * sizeof(offset), &valCol->offsets); - (void)tBufferGetI32(&reader, &offset); + TAOS_CHECK_RETURN(tBufferGetI32(&reader, &offset)); if (idx == valCol->numOfValues - 1) { nextOffset = tBufferGetSize(&valCol->data); } else { - (void)tBufferGetI32(&reader, &nextOffset); + TAOS_CHECK_RETURN(tBufferGetI32(&reader, &nextOffset)); } value->nData = nextOffset - offset; value->pData = (uint8_t *)tBufferGetDataAt(&valCol->data, offset); } else { SBufferReader reader = BUFFER_READER_INITIALIZER(idx * tDataTypes[value->type].bytes, &valCol->data); - (void)tBufferGet(&reader, tDataTypes[value->type].bytes, &value->val); + TAOS_CHECK_RETURN(tBufferGet(&reader, tDataTypes[value->type].bytes, &value->val)); } return 0; } @@ -4309,7 +4317,7 @@ int32_t tValueColumnDecompress(void *input, const SValueColumnCompressInfo *info SBuffer *assist) { int32_t code; - (void)tValueColumnClear(valCol); + tValueColumnClear(valCol); valCol->type = info->type; // offset if (IS_VAR_DATA_TYPE(valCol->type)) { diff --git a/source/common/src/trow.c b/source/common/src/trow.c index 40b4f863cb..626d1141e0 100644 --- a/source/common/src/trow.c +++ b/source/common/src/trow.c @@ -54,6 +54,7 @@ bool tdSTSRowIterFetch(STSRowIter *pIter, col_id_t colId, col_type_t colType, SC return true; } + bool ret = true; if (TD_IS_TP_ROW(pIter->pRow)) { STColumn *pCol = NULL; STSchema *pSchema = pIter->pSchema; @@ -68,16 +69,16 @@ bool tdSTSRowIterFetch(STSRowIter *pIter, col_id_t colId, col_type_t colType, SC return false; } } - (void)tdSTSRowIterGetTpVal(pIter, pCol->type, pCol->offset, pVal); + ret = 
tdSTSRowIterGetTpVal(pIter, pCol->type, pCol->offset, pVal); ++pIter->colIdx; } else if (TD_IS_KV_ROW(pIter->pRow)) { - return tdSTSRowIterGetKvVal(pIter, colId, &pIter->kvIdx, pVal); + ret = tdSTSRowIterGetKvVal(pIter, colId, &pIter->kvIdx, pVal); } else { pVal->valType = TD_VTYPE_NONE; terrno = TSDB_CODE_INVALID_PARA; - if (COL_REACH_END(colId, pIter->maxColId)) return false; + if (COL_REACH_END(colId, pIter->maxColId)) ret = false; } - return true; + return ret; } bool tdSTSRowIterGetTpVal(STSRowIter *pIter, col_type_t colType, int32_t offset, SCellVal *pVal) { @@ -138,7 +139,7 @@ int32_t tdGetBitmapValTypeII(const void *pBitmap, int16_t colIdx, TDRowValT *pVa int32_t tdGetBitmapValType(const void *pBitmap, int16_t colIdx, TDRowValT *pValType, int8_t bitmapMode) { switch (bitmapMode) { case 0: - (void)tdGetBitmapValTypeII(pBitmap, colIdx, pValType); + return tdGetBitmapValTypeII(pBitmap, colIdx, pValType); break; #if 0 case -1: @@ -365,7 +366,7 @@ bool tdSTpRowGetVal(STSRow *pRow, col_id_t colId, col_type_t colType, int32_t fl return true; } void *pBitmap = tdGetBitmapAddrTp(pRow, flen); - (void)tdGetTpRowValOfCol(pVal, pRow, pBitmap, colType, offset, colIdx); + if (tdGetTpRowValOfCol(pVal, pRow, pBitmap, colType, offset, colIdx)) return false; return true; } @@ -383,16 +384,17 @@ bool tdSTSRowIterNext(STSRowIter *pIter, SCellVal *pVal) { return true; } + bool ret = true; if (TD_IS_TP_ROW(pIter->pRow)) { - (void)tdSTSRowIterGetTpVal(pIter, pCol->type, pCol->offset, pVal); + ret = tdSTSRowIterGetTpVal(pIter, pCol->type, pCol->offset, pVal); } else if (TD_IS_KV_ROW(pIter->pRow)) { - (void)tdSTSRowIterGetKvVal(pIter, pCol->colId, &pIter->kvIdx, pVal); + ret = tdSTSRowIterGetKvVal(pIter, pCol->colId, &pIter->kvIdx, pVal); } else { return false; } ++pIter->colIdx; - return true; + return ret; } int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow, int8_t rowType) { @@ -488,8 +490,8 @@ int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow 
**ppRow, int8_t r SRowBuilder rb = {.rowType = rowType}; tdSRowInit(&rb, pTSchema->version); - (void)tdSRowSetInfo(&rb, pTSchema->numOfCols, nBound, pTSchema->flen); - (void)tdSRowResetBuf(&rb, *ppRow); + TAOS_CHECK_GOTO(tdSRowSetInfo(&rb, pTSchema->numOfCols, nBound, pTSchema->flen), NULL, _exit); + TAOS_CHECK_GOTO(tdSRowResetBuf(&rb, *ppRow), NULL, _exit); int32_t iBound = 0; iColVal = 0; @@ -566,6 +568,7 @@ bool tdSTSRowGetVal(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCell return true; } + bool ret = true; STSRow *pRow = pIter->pRow; int16_t colIdx = -1; if (TD_IS_TP_ROW(pRow)) { @@ -580,7 +583,7 @@ bool tdSTSRowGetVal(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCell #ifdef TD_SUPPORT_BITMAP colIdx = POINTER_DISTANCE(pCol, pSchema->columns) / sizeof(STColumn); #endif - (void)tdGetTpRowValOfCol(pVal, pRow, pIter->pBitmap, pCol->type, pCol->offset, colIdx - 1); + if (tdGetTpRowValOfCol(pVal, pRow, pIter->pBitmap, pCol->type, pCol->offset, colIdx - 1)) ret = false; } else if (TD_IS_KV_ROW(pRow)) { SKvRowIdx *pIdx = (SKvRowIdx *)taosbsearch(&colId, TD_ROW_COL_IDX(pRow), tdRowGetNCols(pRow), sizeof(SKvRowIdx), compareKvRowColId, TD_EQ); @@ -589,13 +592,13 @@ bool tdSTSRowGetVal(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCell colIdx = POINTER_DISTANCE(pIdx, TD_ROW_COL_IDX(pRow)) / sizeof(SKvRowIdx); } #endif - (void)tdGetKvRowValOfCol(pVal, pRow, pIter->pBitmap, pIdx ? pIdx->offset : -1, colIdx); + if (tdGetKvRowValOfCol(pVal, pRow, pIter->pBitmap, pIdx ? 
pIdx->offset : -1, colIdx)) ret = false; } else { if (COL_REACH_END(colId, pIter->maxColId)) return false; pVal->valType = TD_VTYPE_NONE; } - return true; + return ret; } int32_t tdGetKvRowValOfCol(SCellVal *output, STSRow *pRow, void *pBitmap, int32_t offset, int16_t colIdx) { @@ -694,9 +697,9 @@ int32_t tdAppendColValToRow(SRowBuilder *pBuilder, col_id_t colId, int8_t colTyp } if (TD_IS_TP_ROW(pRow)) { - (void)tdAppendColValToTpRow(pBuilder, valType, val, isCopyVarData, colType, colIdx, offset); + TAOS_CHECK_RETURN(tdAppendColValToTpRow(pBuilder, valType, val, isCopyVarData, colType, colIdx, offset)); } else { - (void)tdAppendColValToKvRow(pBuilder, valType, val, isCopyVarData, colType, colIdx, offset, colId); + TAOS_CHECK_RETURN(tdAppendColValToKvRow(pBuilder, valType, val, isCopyVarData, colType, colIdx, offset, colId)); } TAOS_RETURN(TSDB_CODE_SUCCESS); } @@ -947,9 +950,9 @@ int32_t tTSRowGetVal(STSRow *pRow, STSchema *pTSchema, int16_t iCol, SColVal *pC } if (TD_IS_TP_ROW(pRow)) { - (void)tdSTpRowGetVal(pRow, pTColumn->colId, pTColumn->type, pTSchema->flen, pTColumn->offset, iCol - 1, &cv); + TAOS_UNUSED(tdSTpRowGetVal(pRow, pTColumn->colId, pTColumn->type, pTSchema->flen, pTColumn->offset, iCol - 1, &cv)); } else if (TD_IS_KV_ROW(pRow)) { - (void)tdSKvRowGetVal(pRow, pTColumn->colId, iCol - 1, &cv); + TAOS_UNUSED(tdSKvRowGetVal(pRow, pTColumn->colId, iCol - 1, &cv)); } else { TAOS_RETURN(TSDB_CODE_INVALID_PARA); } diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c b/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c index 4cfadc8f59..1ed7c9ecd9 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c @@ -347,14 +347,14 @@ int32_t dmStartAuditThread(SDnodeMgmt *pMgmt) { void dmStopMonitorThread(SDnodeMgmt *pMgmt) { if (taosCheckPthreadValid(pMgmt->monitorThread)) { (void)taosThreadJoin(pMgmt->monitorThread, NULL); - (void)taosThreadClear(&pMgmt->monitorThread); + taosThreadClear(&pMgmt->monitorThread); } } void 
dmStopAuditThread(SDnodeMgmt *pMgmt) { if (taosCheckPthreadValid(pMgmt->auditThread)) { (void)taosThreadJoin(pMgmt->auditThread, NULL); - (void)taosThreadClear(&pMgmt->auditThread); + taosThreadClear(&pMgmt->auditThread); } } @@ -385,7 +385,7 @@ void dmStopCrashReportThread(SDnodeMgmt *pMgmt) { if (taosCheckPthreadValid(pMgmt->crashReportThread)) { (void)taosThreadJoin(pMgmt->crashReportThread, NULL); - (void)taosThreadClear(&pMgmt->crashReportThread); + taosThreadClear(&pMgmt->crashReportThread); } } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index 54e2bac66d..3d6ff48dd1 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -235,7 +235,7 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) { pNode->nodePort = pCreate->replicas[pCfg->syncCfg.replicaNum].port; pNode->nodeRole = TAOS_SYNC_ROLE_VOTER; tstrncpy(pNode->nodeFqdn, pCreate->replicas[pCfg->syncCfg.replicaNum].fqdn, TSDB_FQDN_LEN); - (void)tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); + bool ret = tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); pCfg->syncCfg.replicaNum++; } if (pCreate->selfIndex != -1) { @@ -247,7 +247,7 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) { pNode->nodePort = pCreate->learnerReplicas[pCfg->syncCfg.totalReplicaNum].port; pNode->nodeRole = TAOS_SYNC_ROLE_LEARNER; tstrncpy(pNode->nodeFqdn, pCreate->learnerReplicas[pCfg->syncCfg.totalReplicaNum].fqdn, TSDB_FQDN_LEN); - (void)tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); + bool ret = tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); pCfg->syncCfg.totalReplicaNum++; } pCfg->syncCfg.totalReplicaNum += pCfg->syncCfg.replicaNum; diff --git a/source/dnode/mnode/impl/src/mndCluster.c 
b/source/dnode/mnode/impl/src/mndCluster.c index 5cec3a8f27..5e38a91ab0 100644 --- a/source/dnode/mnode/impl/src/mndCluster.c +++ b/source/dnode/mnode/impl/src/mndCluster.c @@ -14,8 +14,8 @@ */ #define _DEFAULT_SOURCE -#include "mndCluster.h" #include "audit.h" +#include "mndCluster.h" #include "mndGrant.h" #include "mndPrivilege.h" #include "mndShow.h" @@ -257,7 +257,11 @@ static int32_t mndCreateDefaultCluster(SMnode *pMnode) { code = terrno; TAOS_RETURN(code); } - (void)sdbSetRawStatus(pRaw, SDB_STATUS_READY); + code = sdbSetRawStatus(pRaw, SDB_STATUS_READY); + if (code != 0) { + sdbFreeRaw(pRaw); + TAOS_RETURN(code); + } mInfo("cluster:%" PRId64 ", will be created when deploying, raw:%p", clusterObj.id, pRaw); @@ -275,7 +279,12 @@ static int32_t mndCreateDefaultCluster(SMnode *pMnode) { mndTransDrop(pTrans); TAOS_RETURN(code); } - (void)sdbSetRawStatus(pRaw, SDB_STATUS_READY); + code = sdbSetRawStatus(pRaw, SDB_STATUS_READY); + if (code != 0) { + sdbFreeRaw(pRaw); + mndTransDrop(pTrans); + TAOS_RETURN(code); + } if ((code = mndTransPrepare(pMnode, pTrans)) != 0) { mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); @@ -317,7 +326,6 @@ static int32_t mndRetrieveClusters(SRpcMsg *pMsg, SShowObj *pShow, SSDataBlock * pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); COL_DATA_SET_VAL_GOTO((const char *)&pCluster->createdTime, false, pCluster, _OVER); - char ver[12] = {0}; STR_WITH_MAXSIZE_TO_VARSTR(ver, tsVersionName, pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); @@ -386,7 +394,12 @@ static int32_t mndProcessUptimeTimer(SRpcMsg *pReq) { mndTransDrop(pTrans); TAOS_RETURN(code); } - (void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); + code = sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); + if (code != 0) { + sdbFreeRaw(pCommitRaw); + mndTransDrop(pTrans); + TAOS_RETURN(code); + } if ((code = mndTransPrepare(pMnode, pTrans)) != 0) { mError("trans:%d, failed to prepare since %s", pTrans->id, 
terrstr()); diff --git a/source/dnode/mnode/impl/src/mndCompactDetail.c b/source/dnode/mnode/impl/src/mndCompactDetail.c index d1bdb4734d..cbd0df7e68 100644 --- a/source/dnode/mnode/impl/src/mndCompactDetail.c +++ b/source/dnode/mnode/impl/src/mndCompactDetail.c @@ -67,22 +67,28 @@ int32_t mndRetrieveCompactDetail(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB char tmpBuf[TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE] = {0}; pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pCompactDetail->compactId, false); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)&pCompactDetail->compactId, false), + pSdb, pCompactDetail); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pCompactDetail->vgId, false); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)&pCompactDetail->vgId, false), pSdb, + pCompactDetail); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pCompactDetail->dnodeId, false); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)&pCompactDetail->dnodeId, false), + pSdb, pCompactDetail); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pCompactDetail->numberFileset, false); + TAOS_CHECK_RETURN_WITH_RELEASE( + colDataSetVal(pColInfo, numOfRows, (const char *)&pCompactDetail->numberFileset, false), pSdb, pCompactDetail); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pCompactDetail->finished, false); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)&pCompactDetail->finished, false), + pSdb, pCompactDetail); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pCompactDetail->startTime, 
false); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)&pCompactDetail->startTime, false), + pSdb, pCompactDetail); numOfRows++; sdbRelease(pSdb, pCompactDetail); @@ -302,7 +308,7 @@ int32_t mndAddCompactDetailToTran(SMnode *pMnode, STrans *pTrans, SCompactObj *p if (terrno != 0) code = terrno; TAOS_RETURN(code); } - (void)sdbSetRawStatus(pVgRaw, SDB_STATUS_READY); + code = sdbSetRawStatus(pVgRaw, SDB_STATUS_READY); TAOS_RETURN(code); } diff --git a/source/dnode/mnode/impl/src/mndFunc.c b/source/dnode/mnode/impl/src/mndFunc.c index 9e97aa674b..4c5a695402 100644 --- a/source/dnode/mnode/impl/src/mndFunc.c +++ b/source/dnode/mnode/impl/src/mndFunc.c @@ -621,8 +621,8 @@ static void *mnodeGenTypeStr(char *buf, int32_t buflen, uint8_t type, int32_t le return msg; } - if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_VARBINARY || - type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_GEOMETRY) { + if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_VARBINARY || type == TSDB_DATA_TYPE_BINARY || + type == TSDB_DATA_TYPE_GEOMETRY) { int32_t bytes = len > 0 ? (int32_t)(len - VARSTR_HEADER_SIZE) : len; (void)snprintf(buf, buflen - 1, "%s(%d)", tDataTypes[type].name, type == TSDB_DATA_TYPE_NCHAR ? 
bytes / 4 : bytes); @@ -640,6 +640,7 @@ static int32_t mndRetrieveFuncs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl int32_t numOfRows = 0; SFuncObj *pFunc = NULL; int32_t cols = 0; + int32_t code = 0; char buf[TSDB_TYPE_STR_MAX_LEN]; while (numOfRows < rows) { @@ -652,40 +653,51 @@ static int32_t mndRetrieveFuncs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl STR_WITH_MAXSIZE_TO_VARSTR(b1, pFunc->name, pShow->pMeta->pSchemas[cols].bytes); SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)b1, false); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)b1, false), pSdb, pFunc); if (pFunc->pComment) { char *b2 = taosMemoryCalloc(1, pShow->pMeta->pSchemas[cols].bytes); STR_WITH_MAXSIZE_TO_VARSTR(b2, pFunc->pComment, pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)b2, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)b2, false); + if (code != 0) { + sdbRelease(pSdb, pFunc); + taosMemoryFree(b2); + TAOS_RETURN(code); + } taosMemoryFree(b2); } else { pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, NULL, true); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, NULL, true), pSdb, pFunc); + if (code != 0) { + sdbRelease(pSdb, pFunc); + TAOS_RETURN(code); + } } int32_t isAgg = (pFunc->funcType == TSDB_FUNC_TYPE_AGGREGATE) ? 
1 : 0; pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&isAgg, false); - + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)&isAgg, false), pSdb, pFunc); char b3[TSDB_TYPE_STR_MAX_LEN + 1] = {0}; STR_WITH_MAXSIZE_TO_VARSTR(b3, mnodeGenTypeStr(buf, TSDB_TYPE_STR_MAX_LEN, pFunc->outputType, pFunc->outputLen), pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)b3, false); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)b3, false), pSdb, pFunc); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pFunc->createdTime, false); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)&pFunc->createdTime, false), pSdb, + pFunc); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pFunc->codeSize, false); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)&pFunc->codeSize, false), pSdb, + pFunc); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pFunc->bufSize, false); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)&pFunc->bufSize, false), pSdb, + pFunc); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); char *language = ""; @@ -697,7 +709,7 @@ static int32_t mndRetrieveFuncs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl char varLang[TSDB_TYPE_STR_MAX_LEN + 1] = {0}; varDataSetLen(varLang, strlen(language)); strcpy(varDataVal(varLang), language); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)varLang, false); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)varLang, false), pSdb, pFunc); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); 
int32_t varCodeLen = (pFunc->codeSize + VARSTR_HEADER_SIZE) > TSDB_MAX_BINARY_LEN @@ -706,11 +718,17 @@ static int32_t mndRetrieveFuncs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl char *b4 = taosMemoryMalloc(varCodeLen); (void)memcpy(varDataVal(b4), pFunc->pCode, varCodeLen - VARSTR_HEADER_SIZE); varDataSetLen(b4, varCodeLen - VARSTR_HEADER_SIZE); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)b4, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)b4, false); + if (code < 0) { + sdbRelease(pSdb, pFunc); + taosMemoryFree(b4); + TAOS_RETURN(code); + } taosMemoryFree(b4); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pFunc->funcVersion, false); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)&pFunc->funcVersion, false), pSdb, + pFunc); numOfRows++; sdbRelease(pSdb, pFunc); diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index 6e1a745cfa..eb855d28a8 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -85,7 +85,9 @@ static void *mndBuildTimerMsg(int32_t *pContLen) { void *pReq = rpcMallocCont(contLen); if (pReq == NULL) return NULL; - (void)tSerializeSMTimerMsg(pReq, contLen, &timerReq); + if (tSerializeSMTimerMsg(pReq, contLen, &timerReq) < 0) { + mError("failed to serialize timer msg since %s", terrstr()); + } *pContLen = contLen; return pReq; } @@ -97,7 +99,9 @@ static void mndPullupTrans(SMnode *pMnode) { if (pReq != NULL) { SRpcMsg rpcMsg = {.msgType = TDMT_MND_TRANS_TIMER, .pCont = pReq, .contLen = contLen}; // TODO check return value - (void)tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + if (tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg) < 0) { + mError("failed to put into write-queue since %s, line:%d", terrstr(), __LINE__); + } } } @@ -108,7 +112,9 @@ static void mndPullupCompacts(SMnode *pMnode) { if (pReq != NULL) { SRpcMsg rpcMsg 
= {.msgType = TDMT_MND_COMPACT_TIMER, .pCont = pReq, .contLen = contLen}; // TODO check return value - (void)tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + if (tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg) < 0) { + mError("failed to put into write-queue since %s, line:%d", terrstr(), __LINE__); + } } } @@ -118,7 +124,9 @@ static void mndPullupTtl(SMnode *pMnode) { void *pReq = mndBuildTimerMsg(&contLen); SRpcMsg rpcMsg = {.msgType = TDMT_MND_TTL_TIMER, .pCont = pReq, .contLen = contLen}; // TODO check return value - (void)tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + if (tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg) < 0) { + mError("failed to put into write-queue since %s, line:%d", terrstr(), __LINE__); + } } static void mndPullupTrimDb(SMnode *pMnode) { @@ -127,7 +135,9 @@ static void mndPullupTrimDb(SMnode *pMnode) { void *pReq = mndBuildTimerMsg(&contLen); SRpcMsg rpcMsg = {.msgType = TDMT_MND_TRIM_DB_TIMER, .pCont = pReq, .contLen = contLen}; // TODO check return value - (void)tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + if (tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg) < 0) { + mError("failed to put into write-queue since %s, line:%d", terrstr(), __LINE__); + } } static void mndPullupS3MigrateDb(SMnode *pMnode) { @@ -136,7 +146,9 @@ static void mndPullupS3MigrateDb(SMnode *pMnode) { void *pReq = mndBuildTimerMsg(&contLen); // TODO check return value SRpcMsg rpcMsg = {.msgType = TDMT_MND_S3MIGRATE_DB_TIMER, .pCont = pReq, .contLen = contLen}; - (void)tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + if (tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg) < 0) { + mError("failed to put into write-queue since %s, line:%d", terrstr(), __LINE__); + } } static int32_t mndPullupArbHeartbeat(SMnode *pMnode) { @@ -160,7 +172,9 @@ static void mndCalMqRebalance(SMnode *pMnode) { void *pReq = mndBuildTimerMsg(&contLen); if (pReq != NULL) { SRpcMsg rpcMsg = {.msgType = TDMT_MND_TMQ_TIMER, .pCont = pReq, .contLen = 
contLen}; - (void)tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + if (tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg) < 0) { + mError("failed to put into write-queue since %s, line:%d", terrstr(), __LINE__); + } } } @@ -170,7 +184,9 @@ static void mndStreamCheckpointTimer(SMnode *pMnode) { int32_t size = sizeof(SMStreamDoCheckpointMsg); SRpcMsg rpcMsg = {.msgType = TDMT_MND_STREAM_BEGIN_CHECKPOINT, .pCont = pMsg, .contLen = size}; // TODO check return value - (void)tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + if (tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg) < 0) { + mError("failed to put into write-queue since %s, line:%d", terrstr(), __LINE__); + } } } @@ -180,7 +196,9 @@ static void mndStreamCheckNode(SMnode *pMnode) { if (pReq != NULL) { SRpcMsg rpcMsg = {.msgType = TDMT_MND_NODECHECK_TIMER, .pCont = pReq, .contLen = contLen}; // TODO check return value - (void)tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg); + if (tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg) < 0) { + mError("failed to put into read-queue since %s, line:%d", terrstr(), __LINE__); + } } } @@ -190,7 +208,9 @@ static void mndStreamConsensusChkpt(SMnode *pMnode) { if (pReq != NULL) { SRpcMsg rpcMsg = {.msgType = TDMT_MND_STREAM_CONSEN_TIMER, .pCont = pReq, .contLen = contLen}; // TODO check return value - (void)tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + if (tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg) < 0) { + mError("failed to put into write-queue since %s, line:%d", terrstr(), __LINE__); + } } } @@ -201,7 +221,9 @@ static void mndPullupTelem(SMnode *pMnode) { if (pReq != NULL) { SRpcMsg rpcMsg = {.msgType = TDMT_MND_TELEM_TIMER, .pCont = pReq, .contLen = contLen}; // TODO check return value - (void)tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg); + if (tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg) < 0) { + mError("failed to put into read-queue since %s, line:%d", terrstr(), __LINE__); + } } } @@ -216,7 +238,9 @@ static void 
mndPullupGrant(SMnode *pMnode) { .info.notFreeAhandle = 1, .info.ahandle = (void *)0x9527}; // TODO check return value - (void)tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + if (tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg) < 0) { + mError("failed to put into write-queue since %s, line:%d", terrstr(), __LINE__); + } } } @@ -231,7 +255,9 @@ static void mndIncreaseUpTime(SMnode *pMnode) { .info.notFreeAhandle = 1, .info.ahandle = (void *)0x9527}; // TODO check return value - (void)tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + if (tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg) < 0) { + mError("failed to put into write-queue since %s, line:%d", terrstr(), __LINE__); + } } } @@ -682,7 +708,13 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) { } char timestr[24] = "1970-01-01 00:00:00.00"; - (void)taosParseTime(timestr, &pMnode->checkTime, (int32_t)strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0); + code = taosParseTime(timestr, &pMnode->checkTime, (int32_t)strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0); + if (code < 0) { + mError("failed to parse time since %s", tstrerror(code)); + (void)taosThreadRwlockDestroy(&pMnode->lock); + taosMemoryFree(pMnode); + return NULL; + } mndSetOptions(pMnode, pOption); pMnode->deploy = pOption->deploy; @@ -727,10 +759,17 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) { void mndPreClose(SMnode *pMnode) { if (pMnode != NULL) { + int32_t code = 0; // TODO check return value - (void)syncLeaderTransfer(pMnode->syncMgmt.sync); + code = syncLeaderTransfer(pMnode->syncMgmt.sync); + if (code < 0) { + mError("failed to transfer leader since %s", tstrerror(code)); + } syncPreStop(pMnode->syncMgmt.sync); - (void)sdbWriteFile(pMnode->pSdb, 0); + code = sdbWriteFile(pMnode->pSdb, 0); + if (code < 0) { + mError("failed to write sdb since %s", tstrerror(code)); + } } } @@ -878,7 +917,9 @@ _OVER: int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet); pMsg->info.rsp = rpcMallocCont(contLen); if 
(pMsg->info.rsp != NULL) { - (void)tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet); + if (tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet) < 0) { + mError("failed to serialize ep set"); + } pMsg->info.hasEpSet = 1; pMsg->info.rspLen = contLen; } @@ -1045,7 +1086,12 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr desc.vgroup_id = pVgroup->vgId; SName name = {0}; - (void)tNameFromString(&name, pVgroup->dbName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); + code = tNameFromString(&name, pVgroup->dbName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); + if (code < 0) { + mError("failed to get db name since %s", tstrerror(code)); + sdbRelease(pSdb, pVgroup); + TAOS_RETURN(code); + } (void)tNameGetDbName(&name, desc.database_name); desc.tables_num = pVgroup->numOfTables; @@ -1083,11 +1129,21 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr SMonStbDesc desc = {0}; SName name1 = {0}; - (void)tNameFromString(&name1, pStb->db, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); + code = tNameFromString(&name1, pStb->db, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); + if (code < 0) { + mError("failed to get db name since %s", tstrerror(code)); + sdbRelease(pSdb, pStb); + TAOS_RETURN(code); + } (void)tNameGetDbName(&name1, desc.database_name); SName name2 = {0}; - (void)tNameFromString(&name2, pStb->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); + code = tNameFromString(&name2, pStb->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); + if (code < 0) { + mError("failed to get table name since %s", tstrerror(code)); + sdbRelease(pSdb, pStb); + TAOS_RETURN(code); + } tstrncpy(desc.stb_name, tNameGetTableName(&name2), TSDB_TABLE_NAME_LEN); if (taosArrayPush(pStbInfo->stbs, &desc) == NULL) { diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index 645a187abb..4dc2f093e8 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -14,12 +14,12 @@ */ 
#define _DEFAULT_SOURCE -#include "mndProfile.h" #include "audit.h" #include "mndDb.h" #include "mndDnode.h" #include "mndMnode.h" #include "mndPrivilege.h" +#include "mndProfile.h" #include "mndQnode.h" #include "mndShow.h" #include "mndSma.h" @@ -65,7 +65,7 @@ typedef struct { int64_t ipWhiteListVer; } SConnPreparedObj; -#define CACHE_OBJ_KEEP_TIME 3 // s +#define CACHE_OBJ_KEEP_TIME 3 // s static SConnObj *mndCreateConn(SMnode *pMnode, const char *user, int8_t connType, uint32_t ip, uint16_t port, int32_t pid, const char *app, int64_t startTime); @@ -377,7 +377,8 @@ static SAppObj *mndCreateApp(SMnode *pMnode, uint32_t clientIp, SAppHbReq *pReq) (void)memcpy(&app.summary, &pReq->summary, sizeof(pReq->summary)); app.lastAccessTimeMs = taosGetTimestampMs(); - SAppObj *pApp = taosCachePut(pMgmt->appCache, &pReq->appId, sizeof(pReq->appId), &app, sizeof(app), CACHE_OBJ_KEEP_TIME * 1000); + SAppObj *pApp = + taosCachePut(pMgmt->appCache, &pReq->appId, sizeof(pReq->appId), &app, sizeof(app), CACHE_OBJ_KEEP_TIME * 1000); if (pApp == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; mError("failed to app %" PRIx64 " into cache since %s", pReq->appId, terrstr()); @@ -841,6 +842,7 @@ static int32_t mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl SSdb *pSdb = pMnode->pSdb; int32_t numOfRows = 0; int32_t cols = 0; + int32_t code = 0; SConnObj *pConn = NULL; if (pShow->pIter == NULL) { @@ -863,32 +865,60 @@ static int32_t mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl cols = 0; SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pConn->id, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)&pConn->id, false); + if (code != 0) { + mError("failed to set conn id:%u since %s", pConn->id, tstrerror(code)); + return code; + } char user[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0}; STR_TO_VARSTR(user, pConn->user); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); 
- (void)colDataSetVal(pColInfo, numOfRows, (const char *)user, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)user, false); + if (code != 0) { + mError("failed to set user since %s", tstrerror(code)); + return code; + } char app[TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE]; STR_TO_VARSTR(app, pConn->app); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)app, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)app, false); + if (code != 0) { + mError("failed to set app since %s", tstrerror(code)); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pConn->pid, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)&pConn->pid, false); + if (code != 0) { + mError("failed to set conn id:%u since %s", pConn->id, tstrerror(code)); + return code; + } char endpoint[TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE] = {0}; (void)sprintf(&endpoint[VARSTR_HEADER_SIZE], "%s:%d", taosIpStr(pConn->ip), pConn->port); varDataLen(endpoint) = strlen(&endpoint[VARSTR_HEADER_SIZE]); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)endpoint, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)endpoint, false); + if (code != 0) { + mError("failed to set endpoint since %s", tstrerror(code)); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pConn->loginTimeMs, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)&pConn->loginTimeMs, false); + if (code != 0) { + mError("failed to set login time since %s", tstrerror(code)); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pConn->lastAccessTimeMs, false); + code = colDataSetVal(pColInfo, numOfRows, (const char 
*)&pConn->lastAccessTimeMs, false); + if (code != 0) { + mError("failed to set last access time since %s", tstrerror(code)); + return code; + } numOfRows++; } @@ -907,6 +937,7 @@ static int32_t mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl static int32_t packQueriesIntoBlock(SShowObj *pShow, SConnObj *pConn, SSDataBlock *pBlock, uint32_t offset, uint32_t rowsToPack) { int32_t cols = 0; + int32_t code = 0; taosRLockLatch(&pConn->queryLock); int32_t numOfQueries = taosArrayGetSize(pConn->pQueries); if (NULL == pConn->pQueries || numOfQueries <= offset) { @@ -924,47 +955,107 @@ static int32_t packQueriesIntoBlock(SShowObj *pShow, SConnObj *pConn, SSDataBloc (void)sprintf(&queryId[VARSTR_HEADER_SIZE], "%x:%" PRIx64, pConn->id, pQuery->reqRid); varDataLen(queryId) = strlen(&queryId[VARSTR_HEADER_SIZE]); SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, curRowIndex, (const char *)queryId, false); + code = colDataSetVal(pColInfo, curRowIndex, (const char *)queryId, false); + if (code != 0) { + mError("failed to set query id:%s since %s", queryId, tstrerror(code)); + taosRUnLockLatch(&pConn->queryLock); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->queryId, false); + code = colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->queryId, false); + if (code != 0) { + mError("failed to set query id:%" PRIx64 " since %s", pQuery->queryId, tstrerror(code)); + taosRUnLockLatch(&pConn->queryLock); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, curRowIndex, (const char *)&pConn->id, false); + code = colDataSetVal(pColInfo, curRowIndex, (const char *)&pConn->id, false); + if (code != 0) { + mError("failed to set conn id:%u since %s", pConn->id, tstrerror(code)); + taosRUnLockLatch(&pConn->queryLock); + return code; + } char app[TSDB_APP_NAME_LEN + 
VARSTR_HEADER_SIZE]; STR_TO_VARSTR(app, pConn->app); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, curRowIndex, (const char *)app, false); + code = colDataSetVal(pColInfo, curRowIndex, (const char *)app, false); + if (code != 0) { + mError("failed to set app since %s", tstrerror(code)); + taosRUnLockLatch(&pConn->queryLock); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, curRowIndex, (const char *)&pConn->pid, false); + code = colDataSetVal(pColInfo, curRowIndex, (const char *)&pConn->pid, false); + if (code != 0) { + mError("failed to set conn id:%u since %s", pConn->id, tstrerror(code)); + taosRUnLockLatch(&pConn->queryLock); + return code; + } char user[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0}; STR_TO_VARSTR(user, pConn->user); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, curRowIndex, (const char *)user, false); + code = colDataSetVal(pColInfo, curRowIndex, (const char *)user, false); + if (code != 0) { + mError("failed to set user since %s", tstrerror(code)); + taosRUnLockLatch(&pConn->queryLock); + return code; + } char endpoint[TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE] = {0}; (void)sprintf(&endpoint[VARSTR_HEADER_SIZE], "%s:%d", taosIpStr(pConn->ip), pConn->port); varDataLen(endpoint) = strlen(&endpoint[VARSTR_HEADER_SIZE]); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, curRowIndex, (const char *)endpoint, false); + code = colDataSetVal(pColInfo, curRowIndex, (const char *)endpoint, false); + if (code != 0) { + mError("failed to set endpoint since %s", tstrerror(code)); + taosRUnLockLatch(&pConn->queryLock); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->stime, false); + code = colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->stime, false); + if (code != 0) { + mError("failed to set 
start time since %s", tstrerror(code)); + taosRUnLockLatch(&pConn->queryLock); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->useconds, false); + code = colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->useconds, false); + if (code != 0) { + mError("failed to set useconds since %s", tstrerror(code)); + taosRUnLockLatch(&pConn->queryLock); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->stableQuery, false); + code = colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->stableQuery, false); + if (code != 0) { + mError("failed to set stable query since %s", tstrerror(code)); + taosRUnLockLatch(&pConn->queryLock); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->isSubQuery, false); + code = colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->isSubQuery, false); + if (code != 0) { + mError("failed to set sub query since %s", tstrerror(code)); + taosRUnLockLatch(&pConn->queryLock); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->subPlanNum, false); + code = colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->subPlanNum, false); + if (code != 0) { + mError("failed to set sub plan num since %s", tstrerror(code)); + taosRUnLockLatch(&pConn->queryLock); + return code; + } char subStatus[TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE] = {0}; int64_t reserve = 64; @@ -983,12 +1074,22 @@ static int32_t packQueriesIntoBlock(SShowObj *pShow, SConnObj *pConn, SSDataBloc } varDataLen(subStatus) = strlen(&subStatus[VARSTR_HEADER_SIZE]); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, curRowIndex, subStatus, (varDataLen(subStatus) == 0) ? 
true : false); + code = colDataSetVal(pColInfo, curRowIndex, subStatus, (varDataLen(subStatus) == 0) ? true : false); + if (code != 0) { + mError("failed to set sub status since %s", tstrerror(code)); + taosRUnLockLatch(&pConn->queryLock); + return code; + } char sql[TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE] = {0}; STR_TO_VARSTR(sql, pQuery->sql); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, curRowIndex, (const char *)sql, false); + code = colDataSetVal(pColInfo, curRowIndex, (const char *)sql, false); + if (code != 0) { + mError("failed to set sql since %s", tstrerror(code)); + taosRUnLockLatch(&pConn->queryLock); + return code; + } pBlock->info.rows++; } @@ -1040,6 +1141,7 @@ static int32_t mndRetrieveApps(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlo int32_t numOfRows = 0; int32_t cols = 0; SAppObj *pApp = NULL; + int32_t code = 0; if (pShow->pIter == NULL) { SProfileMgmt *pMgmt = &pMnode->profileMgmt; @@ -1057,55 +1159,115 @@ static int32_t mndRetrieveApps(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlo cols = 0; SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->appId, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->appId, false); + if (code != 0) { + mError("failed to set app id since %s", tstrerror(code)); + return code; + } char ip[TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE] = {0}; (void)sprintf(&ip[VARSTR_HEADER_SIZE], "%s", taosIpStr(pApp->ip)); varDataLen(ip) = strlen(&ip[VARSTR_HEADER_SIZE]); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)ip, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)ip, false); + if (code != 0) { + mError("failed to set ip since %s", tstrerror(code)); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->pid, false); + 
code = colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->pid, false); + if (code != 0) { + mError("failed to set pid since %s", tstrerror(code)); + return code; + } char name[TSDB_APP_NAME_LEN + 6 + VARSTR_HEADER_SIZE] = {0}; (void)sprintf(&name[VARSTR_HEADER_SIZE], "%s", pApp->name); varDataLen(name) = strlen(&name[VARSTR_HEADER_SIZE]); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)name, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)name, false); + if (code != 0) { + mError("failed to set app name since %s", tstrerror(code)); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->startTime, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->startTime, false); + if (code != 0) { + mError("failed to set start time since %s", tstrerror(code)); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->summary.numOfInsertsReq, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->summary.numOfInsertsReq, false); + if (code != 0) { + mError("failed to set insert req since %s", tstrerror(code)); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->summary.numOfInsertRows, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->summary.numOfInsertRows, false); + if (code != 0) { + mError("failed to set insert rows since %s", tstrerror(code)); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->summary.insertElapsedTime, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->summary.insertElapsedTime, false); + if (code != 0) { + mError("failed to set insert elapsed time since %s", 
tstrerror(code)); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->summary.insertBytes, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->summary.insertBytes, false); + if (code != 0) { + mError("failed to set insert bytes since %s", tstrerror(code)); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->summary.fetchBytes, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->summary.fetchBytes, false); + if (code != 0) { + mError("failed to set fetch bytes since %s", tstrerror(code)); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->summary.queryElapsedTime, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->summary.queryElapsedTime, false); + if (code != 0) { + mError("failed to set query elapsed time since %s", tstrerror(code)); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->summary.numOfSlowQueries, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->summary.numOfSlowQueries, false); + if (code != 0) { + mError("failed to set slow queries since %s", tstrerror(code)); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->summary.totalRequests, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->summary.totalRequests, false); + if (code != 0) { + mError("failed to set total requests since %s", tstrerror(code)); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->summary.currentRequests, false); + code = colDataSetVal(pColInfo, numOfRows, (const char 
*)&pApp->summary.currentRequests, false); + if (code != 0) { + mError("failed to set current requests since %s", tstrerror(code)); + return code; + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->lastAccessTimeMs, false); + code = colDataSetVal(pColInfo, numOfRows, (const char *)&pApp->lastAccessTimeMs, false); + if (code != 0) { + mError("failed to set last access time since %s", tstrerror(code)); + return code; + } numOfRows++; } diff --git a/source/dnode/mnode/impl/src/mndQnode.c b/source/dnode/mnode/impl/src/mndQnode.c index 41d292a62f..ba1a88aab3 100644 --- a/source/dnode/mnode/impl/src/mndQnode.c +++ b/source/dnode/mnode/impl/src/mndQnode.c @@ -14,13 +14,13 @@ */ #define _DEFAULT_SOURCE -#include "mndQnode.h" +#include "audit.h" #include "mndDnode.h" #include "mndPrivilege.h" +#include "mndQnode.h" #include "mndShow.h" #include "mndTrans.h" #include "mndUser.h" -#include "audit.h" #define QNODE_VER_NUMBER 1 #define QNODE_RESERVE_SIZE 64 @@ -209,9 +209,7 @@ int32_t mndSetCreateQnodeCommitLogs(STrans *pTrans, SQnodeObj *pObj) { TAOS_RETURN(code); } -bool mndQnodeInDnode(SQnodeObj *pQnode, int32_t dnodeId) { - return pQnode->pDnode->id == dnodeId; -} +bool mndQnodeInDnode(SQnodeObj *pQnode, int32_t dnodeId) { return pQnode->pDnode->id == dnodeId; } int32_t mndSetCreateQnodeRedoActions(STrans *pTrans, SDnodeObj *pDnode, SQnodeObj *pObj) { int32_t code = 0; @@ -224,8 +222,10 @@ int32_t mndSetCreateQnodeRedoActions(STrans *pTrans, SDnodeObj *pDnode, SQnodeOb terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } - (void)tSerializeSCreateDropMQSNodeReq(pReq, contLen, &createReq); - + code = tSerializeSCreateDropMQSNodeReq(pReq, contLen, &createReq); + if (code < 0) { + mError("qnode:%d, failed to serialize create drop qnode request since %s", createReq.dnodeId, terrstr()); + } STransAction action = {0}; action.epSet = mndGetDnodeEpset(pDnode); action.pCont = pReq; @@ -252,7 +252,10 @@ static int32_t 
mndSetCreateQnodeUndoActions(STrans *pTrans, SDnodeObj *pDnode, S code = terrno; TAOS_RETURN(code); } - (void)tSerializeSCreateDropMQSNodeReq(pReq, contLen, &dropReq); + code = tSerializeSCreateDropMQSNodeReq(pReq, contLen, &dropReq); + if (code < 0) { + mError("qnode:%d, failed to serialize create drop qnode request since %s", dropReq.dnodeId, terrstr()); + } STransAction action = {0}; action.epSet = mndGetDnodeEpset(pDnode); @@ -383,7 +386,10 @@ static int32_t mndSetDropQnodeRedoActions(STrans *pTrans, SDnodeObj *pDnode, SQn code = terrno; TAOS_RETURN(code); } - (void)tSerializeSCreateDropMQSNodeReq(pReq, contLen, &dropReq); + code = tSerializeSCreateDropMQSNodeReq(pReq, contLen, &dropReq); + if (code < 0) { + mError("qnode:%d, failed to serialize create drop qnode request since %s", dropReq.dnodeId, terrstr()); + } STransAction action = {0}; action.epSet = mndGetDnodeEpset(pDnode); @@ -536,7 +542,10 @@ static int32_t mndProcessQnodeListReq(SRpcMsg *pReq) { goto _OVER; } - (void)tSerializeSQnodeListRsp(pRsp, rspLen, &qlistRsp); + code = tSerializeSQnodeListRsp(pRsp, rspLen, &qlistRsp); + if (code < 0) { + mError("failed to serialize qnode list response since %s", terrstr()); + } pReq->info.rspLen = rspLen; pReq->info.rsp = pRsp; @@ -561,15 +570,16 @@ static int32_t mndRetrieveQnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB cols = 0; SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pObj->id, false); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)&pObj->id, false), pSdb, pObj); char ep[TSDB_EP_LEN + VARSTR_HEADER_SIZE] = {0}; STR_WITH_MAXSIZE_TO_VARSTR(ep, pObj->pDnode->ep, pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)ep, false); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)ep, false), pSdb, pObj); pColInfo = 
taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pObj->createdTime, false); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)&pObj->createdTime, false), pSdb, + pObj); numOfRows++; sdbRelease(pSdb, pObj); diff --git a/source/dnode/mnode/impl/src/mndSnode.c b/source/dnode/mnode/impl/src/mndSnode.c index 4616f50a79..2fb3d377c4 100644 --- a/source/dnode/mnode/impl/src/mndSnode.c +++ b/source/dnode/mnode/impl/src/mndSnode.c @@ -14,10 +14,10 @@ */ #define _DEFAULT_SOURCE -#include "mndSnode.h" #include "mndDnode.h" #include "mndPrivilege.h" #include "mndShow.h" +#include "mndSnode.h" #include "mndTrans.h" #include "mndUser.h" @@ -223,7 +223,10 @@ static int32_t mndSetCreateSnodeRedoActions(STrans *pTrans, SDnodeObj *pDnode, S code = terrno; TAOS_RETURN(code); } - (void)tSerializeSCreateDropMQSNodeReq(pReq, contLen, &createReq); + code = tSerializeSCreateDropMQSNodeReq(pReq, contLen, &createReq); + if (code < 0) { + mError("snode:%d, failed to serialize create drop snode request since %s", createReq.dnodeId, terrstr()); + } STransAction action = {0}; action.epSet = mndGetDnodeEpset(pDnode); @@ -251,7 +254,10 @@ static int32_t mndSetCreateSnodeUndoActions(STrans *pTrans, SDnodeObj *pDnode, S code = terrno; TAOS_RETURN(code); } - (void)tSerializeSCreateDropMQSNodeReq(pReq, contLen, &dropReq); + code = tSerializeSCreateDropMQSNodeReq(pReq, contLen, &dropReq); + if (code < 0) { + mError("snode:%d, failed to serialize create drop snode request since %s", dropReq.dnodeId, terrstr()); + } STransAction action = {0}; action.epSet = mndGetDnodeEpset(pDnode); @@ -320,7 +326,7 @@ static int32_t mndProcessCreateSnodeReq(SRpcMsg *pReq) { // goto _OVER; // } - if (sdbGetSize(pMnode->pSdb, SDB_SNODE) >= 1){ + if (sdbGetSize(pMnode->pSdb, SDB_SNODE) >= 1) { code = TSDB_CODE_MND_SNODE_ALREADY_EXIST; goto _OVER; } @@ -340,7 +346,7 @@ _OVER: TAOS_RETURN(code); } -// mndReleaseSnode(pMnode, pObj); + // 
mndReleaseSnode(pMnode, pObj); mndReleaseDnode(pMnode, pDnode); tFreeSMCreateQnodeReq(&createReq); TAOS_RETURN(code); @@ -383,7 +389,10 @@ static int32_t mndSetDropSnodeRedoActions(STrans *pTrans, SDnodeObj *pDnode, SSn code = terrno; TAOS_RETURN(code); } - (void)tSerializeSCreateDropMQSNodeReq(pReq, contLen, &dropReq); + code = tSerializeSCreateDropMQSNodeReq(pReq, contLen, &dropReq); + if (code < 0) { + mError("snode:%d, failed to serialize create drop snode request since %s", dropReq.dnodeId, terrstr()); + } STransAction action = {0}; action.epSet = mndGetDnodeEpset(pDnode); @@ -482,16 +491,17 @@ static int32_t mndRetrieveSnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB cols = 0; SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pObj->id, false); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)&pObj->id, false), pSdb, pObj); char ep[TSDB_EP_LEN + VARSTR_HEADER_SIZE] = {0}; STR_WITH_MAXSIZE_TO_VARSTR(ep, pObj->pDnode->ep, pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)ep, false); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)ep, false), pSdb, pObj); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - (void)colDataSetVal(pColInfo, numOfRows, (const char *)&pObj->createdTime, false); + TAOS_CHECK_RETURN_WITH_RELEASE(colDataSetVal(pColInfo, numOfRows, (const char *)&pObj->createdTime, false), pSdb, + pObj); numOfRows++; sdbRelease(pSdb, pObj); diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index e73cc1b5db..50b8b3e275 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -497,11 +497,9 @@ int32_t mndInitSync(SMnode *pMnode) { pNode->nodePort = pMgmt->replicas[i].port; tstrncpy(pNode->nodeFqdn, pMgmt->replicas[i].fqdn, 
sizeof(pNode->nodeFqdn)); pNode->nodeRole = pMgmt->nodeRoles[i]; - if (tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort) != true) { - mError("failed to open sync, tmsgUpdateDnodeInfo is false"); - } - mInfo("vgId:1, index:%d ep:%s:%u dnode:%d cluster:%" PRId64, i, pNode->nodeFqdn, pNode->nodePort, pNode->nodeId, - pNode->clusterId); + bool update = tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); + mInfo("vgId:1, index:%d ep:%s:%u dnode:%d cluster:%" PRId64 ", update:%d", i, pNode->nodeFqdn, pNode->nodePort, + pNode->nodeId, pNode->clusterId, update); } int32_t code = 0; diff --git a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c index 8ca3d59868..99472ca457 100644 --- a/source/dnode/mnode/impl/src/mndUser.c +++ b/source/dnode/mnode/impl/src/mndUser.c @@ -220,14 +220,18 @@ _OVER: TAOS_RETURN(code); } int32_t ipWhiteMgtRemove(char *user) { - bool update = true; + bool update = true; + int32_t code = 0; (void)taosThreadRwlockWrlock(&ipWhiteMgt.rw); SIpWhiteList **ppList = taosHashGet(ipWhiteMgt.pIpWhiteTab, user, strlen(user)); if (ppList == NULL || *ppList == NULL) { update = false; } else { taosMemoryFree(*ppList); - (void)taosHashRemove(ipWhiteMgt.pIpWhiteTab, user, strlen(user)); + code = taosHashRemove(ipWhiteMgt.pIpWhiteTab, user, strlen(user)); + if (code != 0) { + update = false; + } } if (update) ipWhiteMgt.ver++; @@ -391,7 +395,9 @@ int32_t mndUpdateIpWhiteImpl(SHashObj *pIpWhiteTab, char *user, char *fqdn, int8 if (pList != NULL) { if (isRangeInWhiteList(pList, &range)) { if (pList->num == 1) { - (void)taosHashRemove(pIpWhiteTab, user, strlen(user)); + if (taosHashRemove(pIpWhiteTab, user, strlen(user)) < 0) { + mError("failed to remove ip-white-list for user: %s at line %d", user, lino); + } taosMemoryFree(pList); } else { int32_t idx = 0; @@ -842,6 +848,7 @@ static int32_t createDefaultIpWhiteList(SIpWhiteList **ppWhiteList) { static int32_t 
mndCreateDefaultUser(SMnode *pMnode, char *acct, char *user, char *pass) { int32_t code = 0; + int32_t lino = 0; SUserObj userObj = {0}; taosEncryptPass_c((uint8_t *)pass, strlen(pass), userObj.pass); tstrncpy(userObj.user, user, TSDB_USER_LEN); @@ -859,7 +866,7 @@ static int32_t mndCreateDefaultUser(SMnode *pMnode, char *acct, char *user, char SSdbRaw *pRaw = mndUserActionEncode(&userObj); if (pRaw == NULL) goto _ERROR; - (void)sdbSetRawStatus(pRaw, SDB_STATUS_READY); + TAOS_CHECK_GOTO(sdbSetRawStatus(pRaw, SDB_STATUS_READY), &lino, _ERROR); mInfo("user:%s, will be created when deploying, raw:%p", userObj.user, pRaw); @@ -876,7 +883,7 @@ static int32_t mndCreateDefaultUser(SMnode *pMnode, char *acct, char *user, char mndTransDrop(pTrans); goto _ERROR; } - (void)sdbSetRawStatus(pRaw, SDB_STATUS_READY); + TAOS_CHECK_GOTO(sdbSetRawStatus(pRaw, SDB_STATUS_READY), &lino, _ERROR); if (mndTransPrepare(pMnode, pTrans) != 0) { mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); @@ -1775,7 +1782,7 @@ static int32_t mndCreateUser(SMnode *pMnode, char *acct, SCreateUserReq *pCreate mndTransDrop(pTrans); TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, &lino, _OVER); } - (void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); + TAOS_CHECK_GOTO(sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY), &lino, _OVER); if (mndTransPrepare(pMnode, pTrans) != 0) { mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); @@ -1990,7 +1997,11 @@ static int32_t mndAlterUser(SMnode *pMnode, SUserObj *pOld, SUserObj *pNew, SRpc mndTransDrop(pTrans); TAOS_RETURN(terrno); } - (void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); + code = sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); + if (code < 0) { + mndTransDrop(pTrans); + TAOS_RETURN(code); + } if (mndTransPrepare(pMnode, pTrans) != 0) { mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); @@ -2189,7 +2200,10 @@ static int32_t mndProcessAlterUserPrivilegesReq(SAlterUserReq *pAlterReq, SMnode 
mndReleaseDb(pMnode, pDb); TAOS_CHECK_GOTO(terrno, &lino, _OVER); // TODO: refactor the terrno to code } - (void)taosHashRemove(pNewUser->readDbs, pAlterReq->objname, len); + code = taosHashRemove(pNewUser->readDbs, pAlterReq->objname, len); + if (code < 0) { + mError("read db:%s, failed to remove db:%s since %s", pNewUser->user, pAlterReq->objname, terrstr()); + } mndReleaseDb(pMnode, pDb); } else { taosHashClear(pNewUser->readDbs); @@ -2205,7 +2219,10 @@ static int32_t mndProcessAlterUserPrivilegesReq(SAlterUserReq *pAlterReq, SMnode mndReleaseDb(pMnode, pDb); TAOS_CHECK_GOTO(terrno, &lino, _OVER); // TODO: refactor the terrno to code } - (void)taosHashRemove(pNewUser->writeDbs, pAlterReq->objname, len); + code = taosHashRemove(pNewUser->writeDbs, pAlterReq->objname, len); + if (code < 0) { + mError("user:%s, failed to remove db:%s since %s", pNewUser->user, pAlterReq->objname, terrstr()); + } mndReleaseDb(pMnode, pDb); } else { taosHashClear(pNewUser->writeDbs); @@ -2275,7 +2292,10 @@ static int32_t mndProcessAlterUserPrivilegesReq(SAlterUserReq *pAlterReq, SMnode mndReleaseTopic(pMnode, pTopic); TAOS_CHECK_GOTO(code, &lino, _OVER); } - (void)taosHashRemove(pNewUser->topics, pAlterReq->objname, len); + code = taosHashRemove(pNewUser->topics, pAlterReq->objname, len); + if (code < 0) { + mError("user:%s, failed to remove topic:%s since %s", pNewUser->user, pAlterReq->objname, tstrerror(code)); + } mndReleaseTopic(pMnode, pTopic); } @@ -2461,7 +2481,7 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) { ALTER_USER_ADD_ALL_TB_PRIV(alterReq.alterType, alterReq.privileges, alterReq.tabName)) { if (strcmp(alterReq.objname, "1.*") != 0) { SName name = {0}; - (void)tNameFromString(&name, alterReq.objname, T_NAME_ACCT | T_NAME_DB); + TAOS_CHECK_GOTO(tNameFromString(&name, alterReq.objname, T_NAME_ACCT | T_NAME_DB), &lino, _OVER); auditRecord(pReq, pMnode->clusterId, "GrantPrivileges", name.dbname, alterReq.user, alterReq.sql, alterReq.sqlLen); } else { @@ -2476,7 
+2496,7 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) { } else { if (strcmp(alterReq.objname, "1.*") != 0) { SName name = {0}; - (void)tNameFromString(&name, alterReq.objname, T_NAME_ACCT | T_NAME_DB); + TAOS_CHECK_GOTO(tNameFromString(&name, alterReq.objname, T_NAME_ACCT | T_NAME_DB), &lino, _OVER); auditRecord(pReq, pMnode->clusterId, "RevokePrivileges", name.dbname, alterReq.user, alterReq.sql, alterReq.sqlLen); } else { @@ -2511,7 +2531,10 @@ static int32_t mndDropUser(SMnode *pMnode, SRpcMsg *pReq, SUserObj *pUser) { mndTransDrop(pTrans); TAOS_RETURN(terrno); } - (void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_DROPPED); + if (sdbSetRawStatus(pCommitRaw, SDB_STATUS_DROPPED) < 0) { + mndTransDrop(pTrans); + TAOS_RETURN(terrno); + } if (mndTransPrepare(pMnode, pTrans) != 0) { mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); @@ -2982,7 +3005,11 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock SName name = {0}; char objName[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; - (void)tNameFromString(&name, db, T_NAME_ACCT | T_NAME_DB); + code = tNameFromString(&name, db, T_NAME_ACCT | T_NAME_DB); + if (code < 0) { + sdbRelease(pSdb, pUser); + TAOS_CHECK_GOTO(code, &lino, _exit); + } (void)tNameGetDbName(&name, varDataVal(objName)); varDataSetLen(objName, strlen(varDataVal(objName))); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); @@ -3026,7 +3053,11 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock SName name = {0}; char objName[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; - (void)tNameFromString(&name, db, T_NAME_ACCT | T_NAME_DB); + code = tNameFromString(&name, db, T_NAME_ACCT | T_NAME_DB); + if (code < 0) { + sdbRelease(pSdb, pUser); + TAOS_CHECK_GOTO(code, &lino, _exit); + } (void)tNameGetDbName(&name, varDataVal(objName)); varDataSetLen(objName, strlen(varDataVal(objName))); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); @@ -3213,6 +3244,7 @@ _OVER: int32_t 
mndUserRemoveDb(SMnode *pMnode, STrans *pTrans, char *db) { int32_t code = 0; + int32_t lino = 0; SSdb *pSdb = pMnode->pSdb; int32_t len = strlen(db) + 1; void *pIter = NULL; @@ -3230,15 +3262,21 @@ int32_t mndUserRemoveDb(SMnode *pMnode, STrans *pTrans, char *db) { bool inRead = (taosHashGet(newUser.readDbs, db, len) != NULL); bool inWrite = (taosHashGet(newUser.writeDbs, db, len) != NULL); if (inRead || inWrite) { - (void)taosHashRemove(newUser.readDbs, db, len); - (void)taosHashRemove(newUser.writeDbs, db, len); + code = taosHashRemove(newUser.readDbs, db, len); + if (code < 0) { + mError("failed to remove readDbs:%s from user:%s", db, pUser->user); + } + code = taosHashRemove(newUser.writeDbs, db, len); + if (code < 0) { + mError("failed to remove writeDbs:%s from user:%s", db, pUser->user); + } SSdbRaw *pCommitRaw = mndUserActionEncode(&newUser); if (pCommitRaw == NULL || (code = mndTransAppendCommitlog(pTrans, pCommitRaw)) != 0) { code = TSDB_CODE_OUT_OF_MEMORY; break; } - (void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); + TAOS_CHECK_GOTO(sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY), &lino, _OVER); } mndUserFreeObj(&newUser); @@ -3272,16 +3310,30 @@ int32_t mndUserRemoveStb(SMnode *pMnode, STrans *pTrans, char *stb) { bool inWrite = (taosHashGet(newUser.writeTbs, stb, len) != NULL); bool inAlter = (taosHashGet(newUser.alterTbs, stb, len) != NULL); if (inRead || inWrite || inAlter) { - (void)taosHashRemove(newUser.readTbs, stb, len); - (void)taosHashRemove(newUser.writeTbs, stb, len); - (void)taosHashRemove(newUser.alterTbs, stb, len); + code = taosHashRemove(newUser.readTbs, stb, len); + if (code < 0) { + mError("failed to remove readTbs:%s from user:%s", stb, pUser->user); + } + code = taosHashRemove(newUser.writeTbs, stb, len); + if (code < 0) { + mError("failed to remove writeTbs:%s from user:%s", stb, pUser->user); + } + code = taosHashRemove(newUser.alterTbs, stb, len); + if (code < 0) { + mError("failed to remove alterTbs:%s from user:%s", stb, 
pUser->user); + } SSdbRaw *pCommitRaw = mndUserActionEncode(&newUser); if (pCommitRaw == NULL || (code = mndTransAppendCommitlog(pTrans, pCommitRaw)) != 0) { code = TSDB_CODE_OUT_OF_MEMORY; break; } - (void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); + code = sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); + if (code != 0) { + mndUserFreeObj(&newUser); + sdbRelease(pSdb, pUser); + TAOS_RETURN(code); + } } mndUserFreeObj(&newUser); @@ -3314,16 +3366,30 @@ int32_t mndUserRemoveView(SMnode *pMnode, STrans *pTrans, char *view) { bool inWrite = (taosHashGet(newUser.writeViews, view, len) != NULL); bool inAlter = (taosHashGet(newUser.alterViews, view, len) != NULL); if (inRead || inWrite || inAlter) { - (void)taosHashRemove(newUser.readViews, view, len); - (void)taosHashRemove(newUser.writeViews, view, len); - (void)taosHashRemove(newUser.alterViews, view, len); + code = taosHashRemove(newUser.readViews, view, len); + if (code < 0) { + mError("failed to remove readViews:%s from user:%s", view, pUser->user); + } + code = taosHashRemove(newUser.writeViews, view, len); + if (code < 0) { + mError("failed to remove writeViews:%s from user:%s", view, pUser->user); + } + code = taosHashRemove(newUser.alterViews, view, len); + if (code < 0) { + mError("failed to remove alterViews:%s from user:%s", view, pUser->user); + } SSdbRaw *pCommitRaw = mndUserActionEncode(&newUser); if (pCommitRaw == NULL || (code = mndTransAppendCommitlog(pTrans, pCommitRaw)) != 0) { code = TSDB_CODE_OUT_OF_MEMORY; break; } - (void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); + code = sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); + if (code < 0) { + mndUserFreeObj(&newUser); + sdbRelease(pSdb, pUser); + TAOS_RETURN(code); + } } mndUserFreeObj(&newUser); @@ -3356,13 +3422,21 @@ int32_t mndUserRemoveTopic(SMnode *pMnode, STrans *pTrans, char *topic) { bool inTopic = (taosHashGet(newUser.topics, topic, len) != NULL); if (inTopic) { - (void)taosHashRemove(newUser.topics, topic, len); + code = 
taosHashRemove(newUser.topics, topic, len); + if (code < 0) { + mError("failed to remove topic:%s from user:%s", topic, pUser->user); + } SSdbRaw *pCommitRaw = mndUserActionEncode(&newUser); if (pCommitRaw == NULL || (code = mndTransAppendCommitlog(pTrans, pCommitRaw)) != 0) { code = TSDB_CODE_OUT_OF_MEMORY; break; } - (void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); + code = sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); + if (code < 0) { + mndUserFreeObj(&newUser); + sdbRelease(pSdb, pUser); + TAOS_RETURN(code); + } } mndUserFreeObj(&newUser); diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index e50ced2ebb..1bd4317234 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -143,7 +143,7 @@ typedef struct STbUidStore STbUidStore; int metaOpen(SVnode* pVnode, SMeta** ppMeta, int8_t rollback); int metaUpgrade(SVnode* pVnode, SMeta** ppMeta); -int metaClose(SMeta** pMeta); +void metaClose(SMeta** pMeta); int metaBegin(SMeta* pMeta, int8_t fromSys); TXN* metaGetTxn(SMeta* pMeta); int metaCommit(SMeta* pMeta, TXN* txn); @@ -207,7 +207,7 @@ int32_t metaGetInfo(SMeta* pMeta, int64_t uid, SMetaInfo* pInfo, SMetaReader* pR // tsdb int32_t tsdbOpen(SVnode* pVnode, STsdb** ppTsdb, const char* dir, STsdbKeepCfg* pKeepCfg, int8_t rollback, bool force); -int32_t tsdbClose(STsdb** pTsdb); +void tsdbClose(STsdb** pTsdb); int32_t tsdbBegin(STsdb* pTsdb); // int32_t tsdbPrepareCommit(STsdb* pTsdb); // int32_t tsdbCommit(STsdb* pTsdb, SCommitInfo* pInfo); @@ -284,7 +284,7 @@ int32_t tqProcessTaskConsenChkptIdReq(STQ* pTq, SRpcMsg* pMsg); int32_t smaInit(); void smaCleanUp(); int32_t smaOpen(SVnode* pVnode, int8_t rollback, bool force); -int32_t smaClose(SSma* pSma); +void smaClose(SSma* pSma); int32_t smaBegin(SSma* pSma); int32_t smaPrepareAsyncCommit(SSma* pSma); int32_t smaCommit(SSma* pSma, SCommitInfo* pInfo); @@ -314,7 +314,7 @@ int32_t metaSnapWriterClose(SMetaSnapWriter** ppWriter, 
int8_t rollback); // STsdbSnapReader ======================================== int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, int8_t type, void* pRanges, STsdbSnapReader** ppReader); -int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader); +void tsdbSnapReaderClose(STsdbSnapReader** ppReader); int32_t tsdbSnapRead(STsdbSnapReader* pReader, uint8_t** ppData); // STsdbSnapWriter ======================================== int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, void* pRanges, STsdbSnapWriter** ppWriter); @@ -323,7 +323,7 @@ int32_t tsdbSnapWriterPrepareClose(STsdbSnapWriter* pWriter, bool rollback); int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback); // STsdbSnapRAWReader ======================================== int32_t tsdbSnapRAWReaderOpen(STsdb* pTsdb, int64_t ever, int8_t type, STsdbSnapRAWReader** ppReader); -int32_t tsdbSnapRAWReaderClose(STsdbSnapRAWReader** ppReader); +void tsdbSnapRAWReaderClose(STsdbSnapRAWReader** ppReader); int32_t tsdbSnapRAWRead(STsdbSnapRAWReader* pReader, uint8_t** ppData); // STsdbSnapRAWWriter ======================================== int32_t tsdbSnapRAWWriterOpen(STsdb* pTsdb, int64_t ever, STsdbSnapRAWWriter** ppWriter); @@ -368,7 +368,7 @@ int32_t streamStateLoadTasks(SStreamStateWriter* pWriter); // SStreamStateReader ===================================== // SRSmaSnapReader ======================================== int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRSmaSnapReader** ppReader); -int32_t rsmaSnapReaderClose(SRSmaSnapReader** ppReader); +void rsmaSnapReaderClose(SRSmaSnapReader** ppReader); int32_t rsmaSnapRead(SRSmaSnapReader* pReader, uint8_t** ppData); // SRSmaSnapWriter ======================================== int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, void** ppRanges, SRSmaSnapWriter** ppWriter); diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 
06576c0671..16cf9335fe 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -495,7 +495,7 @@ static int checkAllEntriesInCache(const STagFilterResEntry* pEntry, SArray* pInv return terrno; } } else { - (void)taosLRUCacheRelease(pCache, pRes, false); + bool ret = taosLRUCacheRelease(pCache, pRes, false); } } @@ -562,7 +562,7 @@ int32_t metaGetCachedTableUidList(void* pVnode, tb_uid_t suid, const uint8_t* pK ((double)(*pEntry)->hitTimes) / acc); } - (void)taosLRUCacheRelease(pCache, pHandle, false); + bool ret = taosLRUCacheRelease(pCache, pHandle, false); // unlock meta (void)taosThreadMutexUnlock(pLock); @@ -618,7 +618,7 @@ static int32_t addNewEntry(SHashObj* pTableEntry, const void* pKey, int32_t keyL p->hitTimes = 0; tdListInit(&p->list, keyLen); TAOS_CHECK_RETURN(taosHashPut(pTableEntry, &suid, sizeof(uint64_t), &p, POINTER_BYTES)); - (void)tdListAppend(&p->list, pKey); + TAOS_CHECK_RETURN(tdListAppend(&p->list, pKey)); return 0; } @@ -662,7 +662,10 @@ int32_t metaUidFilterCachePut(void* pVnode, uint64_t suid, const void* pKey, int } else { // check if it exists or not size_t size = listNEles(&(*pEntry)->list); if (size == 0) { - (void)tdListAppend(&(*pEntry)->list, pKey); + code = tdListAppend(&(*pEntry)->list, pKey); + if (code) { + goto _end; + } } else { SListNode* pNode = listHead(&(*pEntry)->list); uint64_t* p = (uint64_t*)pNode->data; @@ -671,7 +674,10 @@ int32_t metaUidFilterCachePut(void* pVnode, uint64_t suid, const void* pKey, int (void)taosThreadMutexUnlock(pLock); return TSDB_CODE_SUCCESS; } else { // not equal, append it - (void)tdListAppend(&(*pEntry)->list, pKey); + code = tdListAppend(&(*pEntry)->list, pKey); + if (code) { + goto _end; + } } } } @@ -761,7 +767,7 @@ int32_t metaGetCachedTbGroup(void* pVnode, tb_uid_t suid, const uint8_t* pKey, i ((double)(*pEntry)->hitTimes) / acc); } - (void)taosLRUCacheRelease(pCache, pHandle, false); + bool ret = taosLRUCacheRelease(pCache, pHandle, false); // 
unlock meta (void)taosThreadMutexUnlock(pLock); @@ -839,7 +845,10 @@ int32_t metaPutTbGroupToCache(void* pVnode, uint64_t suid, const void* pKey, int } else { // check if it exists or not size_t size = listNEles(&(*pEntry)->list); if (size == 0) { - (void)tdListAppend(&(*pEntry)->list, pKey); + code = tdListAppend(&(*pEntry)->list, pKey); + if (code) { + goto _end; + } } else { SListNode* pNode = listHead(&(*pEntry)->list); uint64_t* p = (uint64_t*)pNode->data; @@ -848,7 +857,10 @@ int32_t metaPutTbGroupToCache(void* pVnode, uint64_t suid, const void* pKey, int (void)taosThreadMutexUnlock(pLock); return TSDB_CODE_SUCCESS; } else { // not equal, append it - (void)tdListAppend(&(*pEntry)->list, pKey); + code = tdListAppend(&(*pEntry)->list, pKey); + if (code) { + goto _end; + } } } } diff --git a/source/dnode/vnode/src/meta/metaCommit.c b/source/dnode/vnode/src/meta/metaCommit.c index ec9e2d90ec..d8afd6aeaf 100644 --- a/source/dnode/vnode/src/meta/metaCommit.c +++ b/source/dnode/vnode/src/meta/metaCommit.c @@ -66,7 +66,10 @@ int metaPrepareAsyncCommit(SMeta *pMeta) { int32_t lino; metaWLock(pMeta); - TAOS_UNUSED(ttlMgrFlush(pMeta->pTtlMgr, pMeta->txn)); + int32_t ret = ttlMgrFlush(pMeta->pTtlMgr, pMeta->txn); + if (ret < 0) { + metaError("vgId:%d, failed to flush ttl since %s", TD_VID(pMeta->pVnode), tstrerror(ret)); + } metaULock(pMeta); code = tdbCommit(pMeta->pEnv, pMeta->txn); @@ -91,12 +94,7 @@ int metaAbort(SMeta *pMeta) { return 0; } - int code = tdbAbort(pMeta->pEnv, pMeta->txn); - if (code) { - metaError("vgId:%d, failed to abort meta since %s", TD_VID(pMeta->pVnode), tstrerror(terrno)); - } else { - pMeta->txn = NULL; - } - - return code; + tdbAbort(pMeta->pEnv, pMeta->txn); + pMeta->txn = NULL; + return 0; } diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index 591c40332a..d1ffe82e32 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -65,7 +65,8 @@ int32_t 
metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { pMeta->pVnode = pVnode; // create path if not created yet - (void)taosMkDir(pMeta->path); + code = taosMkDir(pMeta->path); + TSDB_CHECK_CODE(code, lino, _exit); // open env code = tdbOpen(pMeta->path, pVnode->config.szPage, pVnode->config.szCache, &pMeta->pEnv, rollback, @@ -169,9 +170,9 @@ _exit: return code; } -int metaClose(SMeta **ppMeta) { +void metaClose(SMeta **ppMeta) { metaCleanup(ppMeta); - return 0; + return; } int metaAlterCache(SMeta *pMeta, int32_t nPage) { diff --git a/source/dnode/vnode/src/meta/metaSnapshot.c b/source/dnode/vnode/src/meta/metaSnapshot.c index 12ef5088b8..ecd5feeb10 100644 --- a/source/dnode/vnode/src/meta/metaSnapshot.c +++ b/source/dnode/vnode/src/meta/metaSnapshot.c @@ -87,7 +87,9 @@ int32_t metaSnapRead(SMetaSnapReader* pReader, uint8_t** ppData) { if (key.version < pReader->sver // || metaGetInfo(pReader->pMeta, key.uid, &info, NULL) == TSDB_CODE_NOT_FOUND) { - (void)tdbTbcMoveToNext(pReader->pTbc); + if (tdbTbcMoveToNext(pReader->pTbc) != 0) { + metaTrace("vgId:%d, vnode snapshot meta read data done", TD_VID(pReader->pMeta->pVnode)); + } continue; } @@ -110,7 +112,9 @@ int32_t metaSnapRead(SMetaSnapReader* pReader, uint8_t** ppData) { metaDebug("vgId:%d, vnode snapshot meta read data, version:%" PRId64 " uid:%" PRId64 " blockLen:%d", TD_VID(pReader->pMeta->pVnode), key.version, key.uid, nData); - (void)tdbTbcMoveToNext(pReader->pTbc); + if (tdbTbcMoveToNext(pReader->pTbc) != 0) { + metaTrace("vgId:%d, vnode snapshot meta read data done", TD_VID(pReader->pMeta->pVnode)); + } break; } @@ -233,7 +237,9 @@ static int32_t MoveToSnapShotVersion(SSnapContext* ctx) { return TAOS_GET_TERRNO(code); } if (c < 0) { - (void)tdbTbcMoveToPrev((TBC*)ctx->pCur); + if (tdbTbcMoveToPrev((TBC*)ctx->pCur) != 0) { + metaTrace("vgId:%d, vnode snapshot move to prev failed", TD_VID(ctx->pMeta->pVnode)); + } } return 0; } diff --git a/source/dnode/vnode/src/meta/metaTable.c 
b/source/dnode/vnode/src/meta/metaTable.c index 0fb8ca3fb1..1024803083 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -22,7 +22,7 @@ static int metaDelJsonVarFromIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, con static int metaSaveToTbDb(SMeta *pMeta, const SMetaEntry *pME); static int metaUpdateUidIdx(SMeta *pMeta, const SMetaEntry *pME); static int metaUpdateNameIdx(SMeta *pMeta, const SMetaEntry *pME); -static int metaUpdateTtl(SMeta *pMeta, const SMetaEntry *pME); +static void metaUpdateTtl(SMeta *pMeta, const SMetaEntry *pME); static int metaUpdateChangeTime(SMeta *pMeta, tb_uid_t uid, int64_t changeTimeMs); static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME); static int metaUpdateCtbIdx(SMeta *pMeta, const SMetaEntry *pME); @@ -1441,8 +1441,8 @@ static int metaBuildNColIdxKey(SNcolIdxKey *ncolKey, const SMetaEntry *pME) { return 0; } -static int metaDeleteTtl(SMeta *pMeta, const SMetaEntry *pME) { - if (pME->type != TSDB_CHILD_TABLE && pME->type != TSDB_NORMAL_TABLE) return 0; +static void metaDeleteTtl(SMeta *pMeta, const SMetaEntry *pME) { + if (pME->type != TSDB_CHILD_TABLE && pME->type != TSDB_NORMAL_TABLE) return; STtlDelTtlCtx ctx = {.uid = pME->uid, .pTxn = pMeta->txn}; if (pME->type == TSDB_CHILD_TABLE) { @@ -1451,7 +1451,12 @@ static int metaDeleteTtl(SMeta *pMeta, const SMetaEntry *pME) { ctx.ttlDays = pME->ntbEntry.ttlDays; } - return ttlMgrDeleteTtl(pMeta->pTtlMgr, &ctx); + int32_t ret = ttlMgrDeleteTtl(pMeta->pTtlMgr, &ctx); + if (ret < 0) { + metaError("vgId:%d, failed to delete ttl for table:%s uid:%" PRId64 " since %s", TD_VID(pMeta->pVnode), pME->name, + pME->uid, tstrerror(ret)); + } + return; } static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type, tb_uid_t *pSuid, int8_t *pSysTbl) { @@ -1831,12 +1836,19 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl if (!TSDB_CACHE_NO(pMeta->pVnode->config)) { int16_t cid = 
pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].colId; int8_t col_type = pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].type; - (void)tsdbCacheNewNTableColumn(pMeta->pVnode->pTsdb, entry.uid, cid, col_type); + int32_t ret = tsdbCacheNewNTableColumn(pMeta->pVnode->pTsdb, entry.uid, cid, col_type); + if (ret < 0) { + terrno = ret; + goto _err; + } } SSchema *pCol = &pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1]; uint32_t compress = pAlterTbReq->action == TSDB_ALTER_TABLE_ADD_COLUMN ? createDefaultColCmprByType(pCol->type) : pAlterTbReq->compress; - (void)updataTableColCmpr(&entry.colCmpr, pCol, 1, compress); + if (updataTableColCmpr(&entry.colCmpr, pCol, 1, compress) != 0) { + metaError("vgId:%d, failed to update table col cmpr:%s uid:%" PRId64, TD_VID(pMeta->pVnode), entry.name, + entry.uid); + } freeColCmpr = true; if (entry.colCmpr.nCols != pSchema->nCols) { if (pNewSchema) taosMemoryFree(pNewSchema); @@ -1876,10 +1888,16 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl if (!TSDB_CACHE_NO(pMeta->pVnode->config)) { int16_t cid = pColumn->colId; - (void)tsdbCacheDropNTableColumn(pMeta->pVnode->pTsdb, entry.uid, cid, hasPrimayKey); + if (tsdbCacheDropNTableColumn(pMeta->pVnode->pTsdb, entry.uid, cid, hasPrimayKey) != 0) { + metaError("vgId:%d, failed to drop ntable column:%s uid:%" PRId64, TD_VID(pMeta->pVnode), entry.name, + entry.uid); + } } - (void)updataTableColCmpr(&entry.colCmpr, &tScheam, 0, 0); + if (updataTableColCmpr(&entry.colCmpr, &tScheam, 0, 0) != 0) { + metaError("vgId:%d, failed to update table col cmpr:%s uid:%" PRId64, TD_VID(pMeta->pVnode), entry.name, + entry.uid); + } if (entry.colCmpr.nCols != pSchema->nCols) { terrno = TSDB_CODE_VND_INVALID_TABLE_ACTION; goto _err; @@ -1928,20 +1946,36 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl // do actual write metaWLock(pMeta); - (void)metaDeleteNcolIdx(pMeta, &oldEntry); - (void)metaUpdateNcolIdx(pMeta, &entry); + 
if (metaDeleteNcolIdx(pMeta, &oldEntry) < 0) { + metaError("vgId:%d, failed to delete ncol idx:%s uid:%" PRId64, TD_VID(pMeta->pVnode), entry.name, entry.uid); + } + + if (metaUpdateNcolIdx(pMeta, &entry) < 0) { + metaError("vgId:%d, failed to update ncol idx:%s uid:%" PRId64, TD_VID(pMeta->pVnode), entry.name, entry.uid); + } + // save to table db - (void)metaSaveToTbDb(pMeta, &entry); + if (metaSaveToTbDb(pMeta, &entry) < 0) { + metaError("vgId:%d, failed to save to tb db:%s uid:%" PRId64, TD_VID(pMeta->pVnode), entry.name, entry.uid); + } - (void)metaUpdateUidIdx(pMeta, &entry); + if (metaUpdateUidIdx(pMeta, &entry) < 0) { + metaError("vgId:%d, failed to update uid idx:%s uid:%" PRId64, TD_VID(pMeta->pVnode), entry.name, entry.uid); + } - (void)metaSaveToSkmDb(pMeta, &entry); + if (metaSaveToSkmDb(pMeta, &entry) < 0) { + metaError("vgId:%d, failed to save to skm db:%s uid:%" PRId64, TD_VID(pMeta->pVnode), entry.name, entry.uid); + } - (void)metaUpdateChangeTime(pMeta, entry.uid, pAlterTbReq->ctimeMs); + if (metaUpdateChangeTime(pMeta, entry.uid, pAlterTbReq->ctimeMs) < 0) { + metaError("vgId:%d, failed to update change time:%s uid:%" PRId64, TD_VID(pMeta->pVnode), entry.name, entry.uid); + } metaULock(pMeta); - (void)metaUpdateMetaRsp(uid, pAlterTbReq->tbName, pSchema, pMetaRsp); + if (metaUpdateMetaRsp(uid, pAlterTbReq->tbName, pSchema, pMetaRsp) < 0) { + metaError("vgId:%d, failed to update meta rsp:%s uid:%" PRId64, TD_VID(pMeta->pVnode), entry.name, entry.uid); + } for (int32_t i = 0; i < entry.colCmpr.nCols; i++) { SColCmpr *p = &entry.colCmpr.pColCmpr[i]; pMetaRsp->pSchemaExt[i].colId = p->id; @@ -1997,14 +2031,18 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA TBC *pUidIdxc = NULL; TAOS_CHECK_RETURN(tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, NULL)); - (void)tdbTbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c); + if (tdbTbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c) < 0) { + metaTrace("meta/table: failed to move to uid index, uid:%" 
PRId64, uid); + } if (c != 0) { tdbTbcClose(pUidIdxc); metaError("meta/table: invalide c: %" PRId32 " update tb tag val failed.", c); return terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST; } - (void)tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData); + if (tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData) != 0) { + metaError("meta/table: failed to get uid index, uid:%" PRId64, uid); + } oversion = ((SUidIdxVal *)pData)[0].version; // search table.db @@ -2014,7 +2052,9 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA /* get ctbEntry */ TAOS_CHECK_RETURN(tdbTbcOpen(pMeta->pTbDb, &pTbDbc, NULL)); - (void)tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = uid, .version = oversion}), sizeof(STbDbKey), &c); + if (tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = uid, .version = oversion}), sizeof(STbDbKey), &c) != 0) { + metaError("meta/table: failed to move to tb db, uid:%" PRId64, uid); + } if (c != 0) { tdbTbcClose(pUidIdxc); tdbTbcClose(pTbDbc); @@ -2022,29 +2062,43 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA return terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST; } - (void)tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData); + if (tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData) != 0) { + metaError("meta/table: failed to get tb db, uid:%" PRId64, uid); + } if ((ctbEntry.pBuf = taosMemoryMalloc(nData)) == NULL) { - (void)tdbTbcClose(pUidIdxc); - (void)tdbTbcClose(pTbDbc); + tdbTbcClose(pUidIdxc); + tdbTbcClose(pTbDbc); return terrno; } memcpy(ctbEntry.pBuf, pData, nData); tDecoderInit(&dc1, ctbEntry.pBuf, nData); - (void)metaDecodeEntry(&dc1, &ctbEntry); + ret = metaDecodeEntry(&dc1, &ctbEntry); + if (ret < 0) { + terrno = ret; + goto _err; + } /* get stbEntry*/ - (void)tdbTbGet(pMeta->pUidIdx, &ctbEntry.ctbEntry.suid, sizeof(tb_uid_t), &pVal, &nVal); + if (tdbTbGet(pMeta->pUidIdx, &ctbEntry.ctbEntry.suid, sizeof(tb_uid_t), &pVal, &nVal) != 0) { + metaError("meta/table: failed to get uid index, uid:%" PRId64, ctbEntry.ctbEntry.suid); + } if 
(!pVal) { terrno = TSDB_CODE_INVALID_MSG; goto _err; } - (void)tdbTbGet(pMeta->pTbDb, &((STbDbKey){.uid = ctbEntry.ctbEntry.suid, .version = ((SUidIdxVal *)pVal)[0].version}), - sizeof(STbDbKey), (void **)&stbEntry.pBuf, &nVal); + if (tdbTbGet(pMeta->pTbDb, &((STbDbKey){.uid = ctbEntry.ctbEntry.suid, .version = ((SUidIdxVal *)pVal)[0].version}), + sizeof(STbDbKey), (void **)&stbEntry.pBuf, &nVal) != 0) { + metaError("meta/table: failed to get tb db, uid:%" PRId64, ctbEntry.ctbEntry.suid); + } tdbFree(pVal); tDecoderInit(&dc2, stbEntry.pBuf, nVal); - (void)metaDecodeEntry(&dc2, &stbEntry); + ret = metaDecodeEntry(&dc2, &stbEntry); + if (ret < 0) { + terrno = ret; + goto _err; + } SSchemaWrapper *pTagSchema = &stbEntry.stbEntry.schemaTag; SSchema *pColumn = NULL; @@ -2122,12 +2176,18 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA metaWLock(pMeta); // save to table.db - (void)metaSaveToTbDb(pMeta, &ctbEntry); + if (metaSaveToTbDb(pMeta, &ctbEntry) < 0) { + metaError("meta/table: failed to save to tb db:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid); + } // save to uid.idx - (void)metaUpdateUidIdx(pMeta, &ctbEntry); + if (metaUpdateUidIdx(pMeta, &ctbEntry) < 0) { + metaError("meta/table: failed to update uid idx:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid); + } - (void)metaUpdateTagIdx(pMeta, &ctbEntry); + if (metaUpdateTagIdx(pMeta, &ctbEntry) < 0) { + metaError("meta/table: failed to update tag idx:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid); + } if (NULL == ctbEntry.ctbEntry.pTags) { metaError("meta/table: null tags, update tag val failed."); @@ -2135,13 +2195,22 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA } SCtbIdxKey ctbIdxKey = {.suid = ctbEntry.ctbEntry.suid, .uid = uid}; - (void)tdbTbUpsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), ctbEntry.ctbEntry.pTags, - ((STag *)(ctbEntry.ctbEntry.pTags))->len, pMeta->txn); + if (tdbTbUpsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), 
ctbEntry.ctbEntry.pTags, + ((STag *)(ctbEntry.ctbEntry.pTags))->len, pMeta->txn) < 0) { + metaError("meta/table: failed to upsert ctb idx:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid); + } - (void)metaUidCacheClear(pMeta, ctbEntry.ctbEntry.suid); - (void)metaTbGroupCacheClear(pMeta, ctbEntry.ctbEntry.suid); + if (metaUidCacheClear(pMeta, ctbEntry.ctbEntry.suid) < 0) { + metaError("meta/table: failed to clear uid cache:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid); + } - (void)metaUpdateChangeTime(pMeta, ctbEntry.uid, pAlterTbReq->ctimeMs); + if (metaTbGroupCacheClear(pMeta, ctbEntry.ctbEntry.suid) < 0) { + metaError("meta/table: failed to clear group cache:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid); + } + + if (metaUpdateChangeTime(pMeta, ctbEntry.uid, pAlterTbReq->ctimeMs) < 0) { + metaError("meta/table: failed to update change time:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid); + } metaULock(pMeta); @@ -2189,21 +2258,27 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p TBC *pUidIdxc = NULL; TAOS_CHECK_RETURN(tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, NULL)); - (void)tdbTbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c); + if (tdbTbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c) < 0) { + metaError("meta/table: failed to move to uid index, uid:%" PRId64, uid); + } if (c != 0) { tdbTbcClose(pUidIdxc); metaError("meta/table: invalide c: %" PRId32 " update tb options failed.", c); return TSDB_CODE_FAILED; } - (void)tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData); + if (tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData) < 0) { + metaError("meta/table: failed to get uid index, uid:%" PRId64, uid); + } oversion = ((SUidIdxVal *)pData)[0].version; // search table.db TBC *pTbDbc = NULL; TAOS_CHECK_RETURN(tdbTbcOpen(pMeta->pTbDb, &pTbDbc, NULL)); - (void)tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = uid, .version = oversion}), sizeof(STbDbKey), &c); + if (tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = uid, .version = oversion}), sizeof(STbDbKey), &c) < 0) { + 
metaError("meta/table: failed to move to tb db, uid:%" PRId64, uid); + } if (c != 0) { tdbTbcClose(pUidIdxc); tdbTbcClose(pTbDbc); @@ -2211,13 +2286,15 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p return TSDB_CODE_FAILED; } - (void)tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData); + if (tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData) < 0) { + metaError("meta/table: failed to get tb db, uid:%" PRId64, uid); + } // get table entry SDecoder dc = {0}; if ((entry.pBuf = taosMemoryMalloc(nData)) == NULL) { - (void)tdbTbcClose(pUidIdxc); - (void)tdbTbcClose(pTbDbc); + tdbTbcClose(pUidIdxc); + tdbTbcClose(pTbDbc); return terrno; } memcpy(entry.pBuf, pData, nData); @@ -2236,9 +2313,9 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p // build SMetaEntry if (entry.type == TSDB_CHILD_TABLE) { if (pAlterTbReq->updateTTL) { - (void)metaDeleteTtl(pMeta, &entry); + metaDeleteTtl(pMeta, &entry); entry.ctbEntry.ttlDays = pAlterTbReq->newTTL; - (void)metaUpdateTtl(pMeta, &entry); + metaUpdateTtl(pMeta, &entry); } if (pAlterTbReq->newCommentLen >= 0) { entry.ctbEntry.commentLen = pAlterTbReq->newCommentLen; @@ -2246,9 +2323,9 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p } } else { if (pAlterTbReq->updateTTL) { - (void)metaDeleteTtl(pMeta, &entry); + metaDeleteTtl(pMeta, &entry); entry.ntbEntry.ttlDays = pAlterTbReq->newTTL; - (void)metaUpdateTtl(pMeta, &entry); + metaUpdateTtl(pMeta, &entry); } if (pAlterTbReq->newCommentLen >= 0) { entry.ntbEntry.commentLen = pAlterTbReq->newCommentLen; @@ -2257,9 +2334,17 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p } // save to table db - (void)metaSaveToTbDb(pMeta, &entry); - (void)metaUpdateUidIdx(pMeta, &entry); - (void)metaUpdateChangeTime(pMeta, entry.uid, pAlterTbReq->ctimeMs); + if (metaSaveToTbDb(pMeta, &entry) < 0) { + metaError("meta/table: failed to save to tb db:%s uid:%" PRId64, entry.name, 
entry.uid); + } + + if (metaUpdateUidIdx(pMeta, &entry) < 0) { + metaError("meta/table: failed to update uid idx:%s uid:%" PRId64, entry.name, entry.uid); + } + + if (metaUpdateChangeTime(pMeta, entry.uid, pAlterTbReq->ctimeMs) < 0) { + metaError("meta/table: failed to update change time:%s uid:%" PRId64, entry.name, entry.uid); + } metaULock(pMeta); @@ -2305,7 +2390,10 @@ static int metaAddTagIndex(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTb STbDbKey tbDbKey = {0}; tbDbKey.uid = suid; tbDbKey.version = ((SUidIdxVal *)pVal)[0].version; - (void)tdbTbGet(pMeta->pTbDb, &tbDbKey, sizeof(tbDbKey), &pVal, &nVal); + ret = tdbTbGet(pMeta->pTbDb, &tbDbKey, sizeof(tbDbKey), &pVal, &nVal); + if (ret < 0) { + goto _err; + } tDecoderInit(&dc, pVal, nVal); ret = metaDecodeEntry(&dc, &stbEntry); if (ret < 0) { @@ -2384,7 +2472,10 @@ static int metaAddTagIndex(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTb tdbTbcClose(pCtbIdxc); goto _err; } - (void)tdbTbUpsert(pMeta->pTagIdx, pTagIdxKey, nTagIdxKey, NULL, 0, pMeta->txn); + ret = tdbTbUpsert(pMeta->pTagIdx, pTagIdxKey, nTagIdxKey, NULL, 0, pMeta->txn); + if (ret < 0) { + metaError("meta/table: failed to upsert tag idx:%s uid:%" PRId64, stbEntry.name, stbEntry.uid); + } metaDestroyTagIdxKey(pTagIdxKey); pTagIdxKey = NULL; } @@ -2439,7 +2530,10 @@ static int metaDropTagIndex(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterT STbDbKey tbDbKey = {0}; tbDbKey.uid = suid; tbDbKey.version = ((SUidIdxVal *)pVal)[0].version; - (void)tdbTbGet(pMeta->pTbDb, &tbDbKey, sizeof(tbDbKey), &pVal, &nVal); + ret = tdbTbGet(pMeta->pTbDb, &tbDbKey, sizeof(tbDbKey), &pVal, &nVal); + if (ret < 0) { + goto _err; + } tDecoderInit(&dc, pVal, nVal); ret = metaDecodeEntry(&dc, &stbEntry); @@ -2507,7 +2601,10 @@ static int metaDropTagIndex(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterT metaWLock(pMeta); for (int i = 0; i < taosArrayGetSize(tagIdxList); i++) { SMetaPair *pair = taosArrayGet(tagIdxList, i); - 
(void)tdbTbDelete(pMeta->pTagIdx, pair->key, pair->nkey, pMeta->txn); + ret = tdbTbDelete(pMeta->pTagIdx, pair->key, pair->nkey, pMeta->txn); + if (ret < 0) { + metaError("meta/table: failed to delete tag idx:%s uid:%" PRId64, stbEntry.name, stbEntry.uid); + } } metaULock(pMeta); @@ -2594,9 +2691,17 @@ int32_t metaUpdateTableColCompress(SMeta *pMeta, int64_t version, SVAlterTbReq * tbEntry.version = version; metaWLock(pMeta); - (void)metaSaveToTbDb(pMeta, &tbEntry); - (void)metaUpdateUidIdx(pMeta, &tbEntry); - (void)metaUpdateChangeTime(pMeta, suid, pReq->ctimeMs); + if (metaSaveToTbDb(pMeta, &tbEntry) < 0) { + metaError("meta/table: failed to save to tb db:%s uid:%" PRId64, tbEntry.name, tbEntry.uid); + } + + if (metaUpdateUidIdx(pMeta, &tbEntry) < 0) { + metaError("meta/table: failed to update uid idx:%s uid:%" PRId64, tbEntry.name, tbEntry.uid); + } + + if (metaUpdateChangeTime(pMeta, suid, pReq->ctimeMs) < 0) { + metaError("meta/table: failed to update change time:%s uid:%" PRId64, tbEntry.name, tbEntry.uid); + } metaULock(pMeta); @@ -2691,7 +2796,10 @@ static int metaUpdateUidIdx(SMeta *pMeta, const SMetaEntry *pME) { // upsert cache SMetaInfo info; metaGetEntryInfo(pME, &info); - (void)metaCacheUpsert(pMeta, &info); + int32_t ret = metaCacheUpsert(pMeta, &info); + if (ret < 0) { + metaError("vgId:%d, failed to upsert cache, uid: %" PRId64 " %s", TD_VID(pMeta->pVnode), pME->uid, tstrerror(ret)); + } SUidIdxVal uidIdxVal = {.suid = info.suid, .version = info.version, .skmVer = info.skmVer}; @@ -2706,8 +2814,8 @@ static int metaUpdateNameIdx(SMeta *pMeta, const SMetaEntry *pME) { return tdbTbUpsert(pMeta->pNameIdx, pME->name, strlen(pME->name) + 1, &pME->uid, sizeof(tb_uid_t), pMeta->txn); } -static int metaUpdateTtl(SMeta *pMeta, const SMetaEntry *pME) { - if (pME->type != TSDB_CHILD_TABLE && pME->type != TSDB_NORMAL_TABLE) return 0; +static void metaUpdateTtl(SMeta *pMeta, const SMetaEntry *pME) { + if (pME->type != TSDB_CHILD_TABLE && pME->type != 
TSDB_NORMAL_TABLE) return; STtlUpdTtlCtx ctx = {.uid = pME->uid, .pTxn = pMeta->txn}; if (pME->type == TSDB_CHILD_TABLE) { @@ -2718,7 +2826,12 @@ static int metaUpdateTtl(SMeta *pMeta, const SMetaEntry *pME) { ctx.changeTimeMs = pME->ntbEntry.btime; } - return ttlMgrInsertTtl(pMeta->pTtlMgr, &ctx); + int32_t ret = ttlMgrInsertTtl(pMeta->pTtlMgr, &ctx); + if (ret < 0) { + metaError("vgId:%d, failed to insert ttl, uid: %" PRId64 " %s", TD_VID(pMeta->pVnode), pME->uid, tstrerror(ret)); + } + + return; } static int metaUpdateChangeTime(SMeta *pMeta, tb_uid_t uid, int64_t changeTimeMs) { @@ -2806,7 +2919,11 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { } tbDbKey.uid = pCtbEntry->ctbEntry.suid; tbDbKey.version = ((SUidIdxVal *)pData)[0].version; - (void)tdbTbGet(pMeta->pTbDb, &tbDbKey, sizeof(tbDbKey), &pData, &nData); + ret = tdbTbGet(pMeta->pTbDb, &tbDbKey, sizeof(tbDbKey), &pData, &nData); + if (ret < 0) { + metaError("vgId:%d, failed to get stable for update. version:%" PRId64, TD_VID(pMeta->pVnode), pCtbEntry->version); + goto end; + } tDecoderInit(&dc, pData, nData); ret = metaDecodeEntry(&dc, &stbEntry); @@ -2854,7 +2971,9 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { ret = -1; goto end; } - (void)tdbTbUpsert(pMeta->pTagIdx, pTagIdxKey, nTagIdxKey, NULL, 0, pMeta->txn); + if (tdbTbUpsert(pMeta->pTagIdx, pTagIdxKey, nTagIdxKey, NULL, 0, pMeta->txn) < 0) { + metaError("vgId:%d, failed to update tag index. 
version:%" PRId64, TD_VID(pMeta->pVnode), pCtbEntry->version); + } metaDestroyTagIdxKey(pTagIdxKey); pTagIdxKey = NULL; } @@ -2905,7 +3024,11 @@ static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME) { } tEncoderInit(&coder, pVal, vLen); - (void)tEncodeSSchemaWrapper(&coder, pSW); + ret = tEncodeSSchemaWrapper(&coder, pSW); + if (ret < 0) { + rcode = -1; + goto _exit; + } if (tdbTbInsert(pMeta->pSkmDb, &skmDbKey, sizeof(skmDbKey), pVal, vLen, pMeta->txn) < 0) { rcode = -1; @@ -2966,8 +3089,7 @@ int metaHandleEntry(SMeta *pMeta, const SMetaEntry *pME) { } if (pME->type != TSDB_SUPER_TABLE) { - code = metaUpdateTtl(pMeta, pME); - VND_CHECK_CODE(code, line, _err); + metaUpdateTtl(pMeta, pME); } if (pME->type == TSDB_SUPER_TABLE || pME->type == TSDB_NORMAL_TABLE) { @@ -2985,7 +3107,7 @@ _err: return TSDB_CODE_FAILED; } -int32_t colCompressDebug(SHashObj *pColCmprObj) { +static void colCompressDebug(SHashObj *pColCmprObj) { void *p = taosHashIterate(pColCmprObj, NULL); while (p) { uint32_t cmprAlg = *(uint32_t *)p; @@ -2993,14 +3115,14 @@ int32_t colCompressDebug(SHashObj *pColCmprObj) { p = taosHashIterate(pColCmprObj, p); uint8_t l1, l2, lvl; - (void)tcompressDebug(cmprAlg, &l1, &l2, &lvl); + tcompressDebug(cmprAlg, &l1, &l2, &lvl); const char *l1str = columnEncodeStr(l1); const char *l2str = columnCompressStr(l2); const char *lvlstr = columnLevelStr(lvl); metaDebug("colId: %d, encode:%s, compress:%s,level:%s", colId, l1str, l2str, lvlstr); } - return 0; + return; } int32_t metaGetColCmpr(SMeta *pMeta, tb_uid_t uid, SHashObj **ppColCmprObj) { int rc = 0; @@ -3063,7 +3185,7 @@ int32_t metaGetColCmpr(SMeta *pMeta, tb_uid_t uid, SHashObj **ppColCmprObj) { metaULock(pMeta); *ppColCmprObj = pColCmprObj; - (void)colCompressDebug(pColCmprObj); + colCompressDebug(pColCmprObj); return 0; } diff --git a/source/dnode/vnode/src/meta/metaTtl.c b/source/dnode/vnode/src/meta/metaTtl.c index e3d6e2cf9b..4077e9fa5d 100644 --- a/source/dnode/vnode/src/meta/metaTtl.c +++ 
b/source/dnode/vnode/src/meta/metaTtl.c @@ -144,7 +144,7 @@ static void ttlMgrCleanup(STtlManger *pTtlMgr) { taosMemoryFree(pTtlMgr->logPrefix); taosHashCleanup(pTtlMgr->pTtlCache); taosHashCleanup(pTtlMgr->pDirtyUids); - (void)tdbTbClose(pTtlMgr->pTtlIdx); + tdbTbClose(pTtlMgr->pTtlIdx); taosMemoryFree(pTtlMgr); } @@ -302,7 +302,10 @@ int32_t ttlMgrInsertTtl(STtlManger *pTtlMgr, const STtlUpdTtlCtx *updCtx) { } if (ttlMgrNeedFlush(pTtlMgr)) { - (void)ttlMgrFlush(pTtlMgr, updCtx->pTxn); + int32_t ret = ttlMgrFlush(pTtlMgr, updCtx->pTxn); + if (ret < 0) { + metaError("%s, ttlMgr insert failed to flush since %s", pTtlMgr->logPrefix, tstrerror(ret)); + } } code = TSDB_CODE_SUCCESS; @@ -326,7 +329,10 @@ int32_t ttlMgrDeleteTtl(STtlManger *pTtlMgr, const STtlDelTtlCtx *delCtx) { } if (ttlMgrNeedFlush(pTtlMgr)) { - (void)ttlMgrFlush(pTtlMgr, delCtx->pTxn); + int32_t ret = ttlMgrFlush(pTtlMgr, delCtx->pTxn); + if (ret < 0) { + metaError("%s, ttlMgr del failed to flush since %s", pTtlMgr->logPrefix, tstrerror(ret)); + } } code = TSDB_CODE_SUCCESS; @@ -350,7 +356,8 @@ int32_t ttlMgrUpdateChangeTime(STtlManger *pTtlMgr, const STtlUpdCtimeCtx *pUpdC .changeTimeMsDirty = pUpdCtimeCtx->changeTimeMs}; STtlDirtyEntry dirtryEntry = {.type = ENTRY_TYPE_UPSERT}; - code = taosHashPut(pTtlMgr->pTtlCache, &pUpdCtimeCtx->uid, sizeof(pUpdCtimeCtx->uid), &cacheEntry, sizeof(cacheEntry)); + code = + taosHashPut(pTtlMgr->pTtlCache, &pUpdCtimeCtx->uid, sizeof(pUpdCtimeCtx->uid), &cacheEntry, sizeof(cacheEntry)); if (TSDB_CODE_SUCCESS != code) { metaError("%s, ttlMgr update ctime failed to update cache since %s", pTtlMgr->logPrefix, tstrerror(code)); goto _out; @@ -359,13 +366,15 @@ int32_t ttlMgrUpdateChangeTime(STtlManger *pTtlMgr, const STtlUpdCtimeCtx *pUpdC code = taosHashPut(pTtlMgr->pDirtyUids, &pUpdCtimeCtx->uid, sizeof(pUpdCtimeCtx->uid), &dirtryEntry, sizeof(dirtryEntry)); if (TSDB_CODE_SUCCESS != code) { - metaError("%s, ttlMgr update ctime failed to update dirty uids since %s", 
pTtlMgr->logPrefix, - tstrerror(code)); + metaError("%s, ttlMgr update ctime failed to update dirty uids since %s", pTtlMgr->logPrefix, tstrerror(code)); goto _out; } if (ttlMgrNeedFlush(pTtlMgr)) { - (void)ttlMgrFlush(pTtlMgr, pUpdCtimeCtx->pTxn); + int32_t ret = ttlMgrFlush(pTtlMgr, pUpdCtimeCtx->pTxn); + if (ret < 0) { + metaError("%s, ttlMgr update ctime failed to flush since %s", pTtlMgr->logPrefix, tstrerror(ret)); + } } code = TSDB_CODE_SUCCESS; @@ -420,7 +429,7 @@ int32_t ttlMgrFlush(STtlManger *pTtlMgr, TXN *pTxn) { STtlCacheEntry *cacheEntry = taosHashGet(pTtlMgr->pTtlCache, pUid, sizeof(*pUid)); if (cacheEntry == NULL) { metaError("%s, ttlMgr flush failed to get ttl cache, uid: %" PRId64 ", type: %d", pTtlMgr->logPrefix, *pUid, - pEntry->type); + pEntry->type); continue; } diff --git a/source/dnode/vnode/src/sma/smaOpen.c b/source/dnode/vnode/src/sma/smaOpen.c index 19df6edead..7bc9237965 100644 --- a/source/dnode/vnode/src/sma/smaOpen.c +++ b/source/dnode/vnode/src/sma/smaOpen.c @@ -171,7 +171,7 @@ _exit: TAOS_RETURN(code); } -int32_t smaClose(SSma *pSma) { +void smaClose(SSma *pSma) { if (pSma) { TAOS_UNUSED(smaPreClose(pSma)); (void)taosThreadMutexDestroy(&pSma->mutex); @@ -182,7 +182,7 @@ int32_t smaClose(SSma *pSma) { if SMA_RSMA_TSDB2 (pSma) tsdbClose(&SMA_RSMA_TSDB2(pSma)); taosMemoryFreeClear(pSma); } - return 0; + return; } /** diff --git a/source/dnode/vnode/src/sma/smaSnapshot.c b/source/dnode/vnode/src/sma/smaSnapshot.c index 348d378081..5836b69b5f 100644 --- a/source/dnode/vnode/src/sma/smaSnapshot.c +++ b/source/dnode/vnode/src/sma/smaSnapshot.c @@ -93,30 +93,26 @@ int32_t rsmaSnapRead(SRSmaSnapReader* pReader, uint8_t** ppData) { _exit: if (code) { smaError("vgId:%d, %s failed at line %d since %s", SMA_VID(pReader->pSma), __func__, lino, tstrerror(code)); - TAOS_UNUSED(rsmaSnapReaderClose(&pReader)); + rsmaSnapReaderClose(&pReader); } else { smaInfo("vgId:%d, vnode snapshot rsma read succeed", SMA_VID(pReader->pSma)); } TAOS_RETURN(code); 
} -int32_t rsmaSnapReaderClose(SRSmaSnapReader** ppReader) { - int32_t code = 0; +void rsmaSnapReaderClose(SRSmaSnapReader** ppReader) { SRSmaSnapReader* pReader = *ppReader; for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) { if (pReader->pDataReader[i]) { - if ((code = tsdbSnapReaderClose(&pReader->pDataReader[i])) < 0) { - smaError("vgId:%d, vnode snapshot rsma , failed to close tsdbSnapReader since %s", SMA_VID(pReader->pSma), - tstrerror(code)); - } + tsdbSnapReaderClose(&pReader->pDataReader[i]); } } smaInfo("vgId:%d, vnode snapshot rsma reader closed", SMA_VID(pReader->pSma)); taosMemoryFreeClear(*ppReader); - TAOS_RETURN(code); + return; } // SRSmaSnapWriter ======================================== diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c index b7c842d05c..89350e761f 100644 --- a/source/dnode/vnode/src/tq/tqMeta.c +++ b/source/dnode/vnode/src/tq/tqMeta.c @@ -127,7 +127,6 @@ int32_t tqMetaSaveOffset(STQ* pTq, STqOffset* pOffset) { goto END; } - buf = taosMemoryCalloc(1, vlen); if (buf == NULL) { code = terrno; @@ -152,7 +151,8 @@ int32_t tqMetaSaveInfo(STQ* pTq, TTB* ttb, const void* key, int32_t kLen, const int32_t code = TDB_CODE_SUCCESS; TXN* txn = NULL; - TQ_ERR_GO_TO_END(tdbBegin(pTq->pMetaDB, &txn, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED)); + TQ_ERR_GO_TO_END( + tdbBegin(pTq->pMetaDB, &txn, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED)); TQ_ERR_GO_TO_END(tdbTbUpsert(ttb, key, kLen, value, vLen, txn)); TQ_ERR_GO_TO_END(tdbCommit(pTq->pMetaDB, txn)); TQ_ERR_GO_TO_END(tdbPostCommit(pTq->pMetaDB, txn)); @@ -168,7 +168,8 @@ int32_t tqMetaDeleteInfo(STQ* pTq, TTB* ttb, const void* key, int32_t kLen) { int32_t code = TDB_CODE_SUCCESS; TXN* txn = NULL; - TQ_ERR_GO_TO_END(tdbBegin(pTq->pMetaDB, &txn, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED)); + TQ_ERR_GO_TO_END( + tdbBegin(pTq->pMetaDB, &txn, 
tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED)); TQ_ERR_GO_TO_END(tdbTbDelete(ttb, key, kLen, txn)); TQ_ERR_GO_TO_END(tdbCommit(pTq->pMetaDB, txn)); TQ_ERR_GO_TO_END(tdbPostCommit(pTq->pMetaDB, txn)); @@ -180,7 +181,7 @@ END: return code; } -int32_t tqMetaGetOffset(STQ* pTq, const char* subkey, STqOffset** pOffset){ +int32_t tqMetaGetOffset(STQ* pTq, const char* subkey, STqOffset** pOffset) { void* data = taosHashGet(pTq->pOffset, subkey, strlen(subkey)); if (data == NULL) { int vLen = 0; @@ -203,7 +204,7 @@ int32_t tqMetaGetOffset(STQ* pTq, const char* subkey, STqOffset** pOffset){ tdbFree(data); *pOffset = taosHashGet(pTq->pOffset, subkey, strlen(subkey)); - if(*pOffset == NULL){ + if (*pOffset == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } } else { @@ -266,8 +267,8 @@ static int tqMetaInitHandle(STQ* pTq, STqHandle* handle) { initStorageAPI(&reader.api); if (handle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { - handle->execHandle.task = - qCreateQueueExecTaskInfo(handle->execHandle.execCol.qmsg, &reader, vgId, &handle->execHandle.numOfCols, handle->consumerId); + handle->execHandle.task = qCreateQueueExecTaskInfo(handle->execHandle.execCol.qmsg, &reader, vgId, + &handle->execHandle.numOfCols, handle->consumerId); TQ_NULL_GO_TO_END(handle->execHandle.task); void* scanner = NULL; qExtractStreamScanner(handle->execHandle.task, &scanner); @@ -280,20 +281,21 @@ static int tqMetaInitHandle(STQ* pTq, STqHandle* handle) { handle->execHandle.pTqReader = tqReaderOpen(pVnode); TQ_NULL_GO_TO_END(handle->execHandle.pTqReader); TQ_ERR_GO_TO_END(buildSnapContext(reader.vnode, reader.version, 0, handle->execHandle.subType, handle->fetchMeta, - (SSnapContext**)(&reader.sContext))); + (SSnapContext**)(&reader.sContext))); handle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, vgId, NULL, handle->consumerId); TQ_NULL_GO_TO_END(handle->execHandle.task); } else if (handle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) { 
handle->pWalReader = walOpenReader(pVnode->pWal, NULL, 0); TQ_NULL_GO_TO_END(handle->pWalReader); - if(handle->execHandle.execTb.qmsg != NULL && strcmp(handle->execHandle.execTb.qmsg, "") != 0) { + if (handle->execHandle.execTb.qmsg != NULL && strcmp(handle->execHandle.execTb.qmsg, "") != 0) { if (nodesStringToNode(handle->execHandle.execTb.qmsg, &handle->execHandle.execTb.node) != 0) { tqError("nodesStringToNode error in sub stable, since %s", terrstr()); return TSDB_CODE_SCH_INTERNAL_ERROR; } } - TQ_ERR_GO_TO_END(buildSnapContext(reader.vnode, reader.version, handle->execHandle.execTb.suid, handle->execHandle.subType, - handle->fetchMeta, (SSnapContext**)(&reader.sContext))); + TQ_ERR_GO_TO_END(buildSnapContext(reader.vnode, reader.version, handle->execHandle.execTb.suid, + handle->execHandle.subType, handle->fetchMeta, + (SSnapContext**)(&reader.sContext))); handle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, vgId, NULL, handle->consumerId); TQ_NULL_GO_TO_END(handle->execHandle.task); SArray* tbUidList = NULL; @@ -341,7 +343,7 @@ int32_t tqMetaCreateHandle(STQ* pTq, SMqRebVgReq* req, STqHandle* handle) { handle->execHandle.subType = req->subType; handle->fetchMeta = req->withMeta; if (req->subType == TOPIC_SUB_TYPE__COLUMN) { - void *tmp = taosStrdup(req->qmsg); + void* tmp = taosStrdup(req->qmsg); if (tmp == NULL) { return terrno; } @@ -349,12 +351,12 @@ int32_t tqMetaCreateHandle(STQ* pTq, SMqRebVgReq* req, STqHandle* handle) { } else if (req->subType == TOPIC_SUB_TYPE__DB) { handle->execHandle.execDb.pFilterOutTbUid = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK); - if(handle->execHandle.execDb.pFilterOutTbUid == NULL){ + if (handle->execHandle.execDb.pFilterOutTbUid == NULL) { return terrno; } - }else if(req->subType == TOPIC_SUB_TYPE__TABLE){ + } else if (req->subType == TOPIC_SUB_TYPE__TABLE) { handle->execHandle.execTb.suid = req->suid; - void *tmp = taosStrdup(req->qmsg); + void* tmp = 
taosStrdup(req->qmsg); if (tmp == NULL) { return terrno; } @@ -364,7 +366,7 @@ int32_t tqMetaCreateHandle(STQ* pTq, SMqRebVgReq* req, STqHandle* handle) { handle->snapshotVer = walGetCommittedVer(pTq->pVnode->pWal); int32_t code = tqMetaInitHandle(pTq, handle); - if (code != 0){ + if (code != 0) { return code; } tqInfo("tqMetaCreateHandle %s consumer 0x%" PRIx64 " vgId:%d, snapshotVer:%" PRId64, handle->subKey, @@ -437,10 +439,10 @@ END: return code; } -static int32_t replaceTqPath(char** path){ - char* tpath = NULL; +static int32_t replaceTqPath(char** path) { + char* tpath = NULL; int32_t code = tqBuildFName(&tpath, *path, TQ_SUBSCRIBE_NAME); - if (code != 0){ + if (code != 0) { return code; } taosMemoryFree(*path); @@ -475,7 +477,7 @@ END: } int32_t tqMetaOpen(STQ* pTq) { - char* maindb = NULL; + char* maindb = NULL; char* offsetNew = NULL; int32_t code = TDB_CODE_SUCCESS; TQ_ERR_GO_TO_END(tqBuildFName(&maindb, pTq->path, TDB_MAINDB_NAME)); @@ -488,7 +490,7 @@ int32_t tqMetaOpen(STQ* pTq) { } TQ_ERR_GO_TO_END(tqBuildFName(&offsetNew, pTq->path, TQ_OFFSET_NAME)); - if(taosCheckExistFile(offsetNew)){ + if (taosCheckExistFile(offsetNew)) { TQ_ERR_GO_TO_END(tqOffsetRestoreFromFile(pTq, offsetNew)); TQ_ERR_GO_TO_END(taosRemoveFile(offsetNew)); } @@ -522,7 +524,7 @@ int32_t tqMetaTransform(STQ* pTq) { TQ_ERR_GO_TO_END(tqBuildFName(&offsetNew, pTq->path, TQ_OFFSET_NAME)); - if(taosCheckExistFile(offset)) { + if (taosCheckExistFile(offset)) { if (taosCopyFile(offset, offsetNew) < 0) { tqError("copy offset file error"); } else { @@ -534,44 +536,22 @@ END: taosMemoryFree(offset); taosMemoryFree(offsetNew); - int32_t ret = tdbTbClose(pExecStore); - if (ret != 0) { - tqError("failed to close tb, ret:%d", ret); - } - ret = tdbTbClose(pCheckStore); - if (ret != 0) { - tqError("failed to close tb, ret:%d", ret); - } - ret = tdbClose(pMetaDB); - if (ret != 0) { - tqError("failed to close tdb, ret:%d", ret); - } - + tdbTbClose(pExecStore); + tdbTbClose(pCheckStore); + 
tdbClose(pMetaDB); return code; } void tqMetaClose(STQ* pTq) { int32_t ret = 0; if (pTq->pExecStore) { - ret = tdbTbClose(pTq->pExecStore); - if (ret != 0) { - tqError("failed to close tb, ret:%d", ret); - } + tdbTbClose(pTq->pExecStore); } if (pTq->pCheckStore) { - ret = tdbTbClose(pTq->pCheckStore); - if (ret != 0) { - tqError("failed to close tb, ret:%d", ret); - } + tdbTbClose(pTq->pCheckStore); } if (pTq->pOffsetStore) { - ret = tdbTbClose(pTq->pOffsetStore); - if (ret != 0) { - tqError("failed to close tb, ret:%d", ret); - } - } - ret = tdbClose(pTq->pMetaDB); - if (ret != 0) { - tqError("failed to close tdb, ret:%d", ret); + tdbTbClose(pTq->pOffsetStore); } + tdbClose(pTq->pMetaDB); } diff --git a/source/dnode/vnode/src/tq/tqSnapshot.c b/source/dnode/vnode/src/tq/tqSnapshot.c index 8861f1f49f..cfa97def74 100644 --- a/source/dnode/vnode/src/tq/tqSnapshot.c +++ b/source/dnode/vnode/src/tq/tqSnapshot.c @@ -42,7 +42,7 @@ int32_t tqSnapReaderOpen(STQ* pTq, int64_t sver, int64_t ever, int8_t type, STqS pReader->type = type; // impl - TTB *pTb = NULL; + TTB* pTb = NULL; if (type == SNAP_DATA_TQ_CHECKINFO) { pTb = pTq->pCheckStore; } else if (type == SNAP_DATA_TQ_HANDLE) { @@ -132,7 +132,8 @@ int32_t tqSnapWriterOpen(STQ* pTq, int64_t sver, int64_t ever, STqSnapWriter** p // alloc pWriter = (STqSnapWriter*)taosMemoryCalloc(1, sizeof(*pWriter)); if (pWriter == NULL) { - code = TAOS_GET_TERRNO(TSDB_CODE_OUT_OF_MEMORY);; + code = TAOS_GET_TERRNO(TSDB_CODE_OUT_OF_MEMORY); + ; goto _err; } pWriter->pTq = pTq; @@ -160,7 +161,7 @@ int32_t tqSnapWriterClose(STqSnapWriter** ppWriter, int8_t rollback) { STQ* pTq = pWriter->pTq; if (rollback) { - (void)tdbAbort(pWriter->pTq->pMetaDB, pWriter->txn); + tdbAbort(pWriter->pTq->pMetaDB, pWriter->txn); } else { code = tdbCommit(pWriter->pTq->pMetaDB, pWriter->txn); if (code) goto _err; @@ -189,7 +190,8 @@ int32_t tqSnapHandleWrite(STqSnapWriter* pWriter, uint8_t* pData, uint32_t nData code = tDecodeSTqHandle(pDecoder, &handle); if 
(code) goto end; taosWLockLatch(&pTq->lock); - code = tqMetaSaveInfo(pTq, pTq->pExecStore, handle.subKey, (int)strlen(handle.subKey), pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr)); + code = tqMetaSaveInfo(pTq, pTq->pExecStore, handle.subKey, (int)strlen(handle.subKey), pData + sizeof(SSnapDataHdr), + nData - sizeof(SSnapDataHdr)); taosWUnLockLatch(&pTq->lock); end: @@ -200,15 +202,16 @@ end: } int32_t tqSnapCheckInfoWrite(STqSnapWriter* pWriter, uint8_t* pData, uint32_t nData) { - int32_t code = 0; - STQ* pTq = pWriter->pTq; + int32_t code = 0; + STQ* pTq = pWriter->pTq; STqCheckInfo info = {0}; code = tqMetaDecodeCheckInfo(&info, pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr)); - if(code != 0){ + if (code != 0) { goto _err; } - code = tqMetaSaveInfo(pTq, pTq->pCheckStore, &info.topic, strlen(info.topic), pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr)); + code = tqMetaSaveInfo(pTq, pTq->pCheckStore, &info.topic, strlen(info.topic), pData + sizeof(SSnapDataHdr), + nData - sizeof(SSnapDataHdr)); tDeleteSTqCheckInfo(&info); if (code) goto _err; @@ -220,22 +223,23 @@ _err: } int32_t tqSnapOffsetWrite(STqSnapWriter* pWriter, uint8_t* pData, uint32_t nData) { - int32_t code = 0; - STQ* pTq = pWriter->pTq; + int32_t code = 0; + STQ* pTq = pWriter->pTq; STqOffset info = {0}; code = tqMetaDecodeOffsetInfo(&info, pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr)); - if(code != 0){ + if (code != 0) { goto _err; } - code = tqMetaSaveInfo(pTq, pTq->pOffsetStore, info.subKey, strlen(info.subKey), pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr)); + code = tqMetaSaveInfo(pTq, pTq->pOffsetStore, info.subKey, strlen(info.subKey), pData + sizeof(SSnapDataHdr), + nData - sizeof(SSnapDataHdr)); tDeleteSTqOffset(&info); if (code) goto _err; return code; - _err: +_err: tqError("vgId:%d, vnode check info tq write failed since %s", TD_VID(pTq->pVnode), tstrerror(code)); return code; } diff --git 
a/source/dnode/vnode/src/tq/tqStreamTaskSnap.c b/source/dnode/vnode/src/tq/tqStreamTaskSnap.c index 2605d70c37..7c05b405ce 100644 --- a/source/dnode/vnode/src/tq/tqStreamTaskSnap.c +++ b/source/dnode/vnode/src/tq/tqStreamTaskSnap.c @@ -210,7 +210,7 @@ int32_t streamTaskSnapWriterClose(SStreamTaskWriter* pWriter, int8_t rollback, i streamMetaWLock(pTq->pStreamMeta); tqDebug("vgId:%d, vnode stream-task snapshot writer closed", TD_VID(pTq->pVnode)); if (rollback) { - (void)tdbAbort(pTq->pStreamMeta->db, pTq->pStreamMeta->txn); + tdbAbort(pTq->pStreamMeta->db, pTq->pStreamMeta->txn); } else { code = tdbCommit(pTq->pStreamMeta->db, pTq->pStreamMeta->txn); if (code) goto _err; diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index 3983b6eded..482b194a1f 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -577,7 +577,10 @@ static void tsdbCacheDeleter(const void *key, size_t klen, void *value, void *ud SLastCol *pLastCol = (SLastCol *)value; if (pLastCol->dirty) { - (void)tsdbCacheFlushDirty(key, klen, pLastCol, ud); + if (tsdbCacheFlushDirty(key, klen, pLastCol, ud) != 0) { + STsdb *pTsdb = (STsdb *)ud; + tsdbError("tsdb/cache: vgId:%d, flush cache %s failed at line %d.", TD_VID(pTsdb->pVnode), __func__, __LINE__); + } } for (uint8_t i = 0; i < pLastCol->rowKey.numOfPKs; ++i) { @@ -719,14 +722,22 @@ static int32_t tsdbCacheDropTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid, rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch; { SLastCol *pLastCol = NULL; - (void)tsdbCacheDeserialize(values_list[0], values_list_sizes[0], &pLastCol); + code = tsdbCacheDeserialize(values_list[0], values_list_sizes[0], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbWarn("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + } if (NULL != pLastCol) { rocksdb_writebatch_delete(wb, keys_list[0], klen); } 
taosMemoryFreeClear(pLastCol); pLastCol = NULL; - (void)tsdbCacheDeserialize(values_list[1], values_list_sizes[1], &pLastCol); + code = tsdbCacheDeserialize(values_list[1], values_list_sizes[1], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbWarn("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + } if (NULL != pLastCol) { rocksdb_writebatch_delete(wb, keys_list[1], klen); } @@ -738,7 +749,9 @@ static int32_t tsdbCacheDropTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid, for (int i = 0; i < 2; i++) { LRUHandle *h = taosLRUCacheLookup(pTsdb->lruCache, keys_list[i], klen); if (h) { - (void)taosLRUCacheRelease(pTsdb->lruCache, h, true); + if (taosLRUCacheRelease(pTsdb->lruCache, h, true)) { + tsdbInfo("vgId:%d, %s release lru cache failed at line %d.", TD_VID(pTsdb->pVnode), __func__, __LINE__); + } taosLRUCacheErase(pTsdb->lruCache, keys_list[i], klen); } } @@ -765,8 +778,20 @@ int32_t tsdbCacheNewTable(STsdb *pTsdb, tb_uid_t uid, tb_uid_t suid, SSchemaWrap int16_t cid = pSchemaRow->pSchema[i].colId; int8_t col_type = pSchemaRow->pSchema[i].type; - (void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, LFLAG_LAST_ROW); - (void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, LFLAG_LAST); + code = tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, LFLAG_LAST_ROW); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + (void)taosThreadMutexUnlock(&pTsdb->lruMutex); + TAOS_RETURN(code); + } + code = tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, LFLAG_LAST); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + (void)taosThreadMutexUnlock(&pTsdb->lruMutex); + TAOS_RETURN(code); + } } } else { STSchema *pTSchema = NULL; @@ -781,8 +806,20 @@ int32_t 
tsdbCacheNewTable(STsdb *pTsdb, tb_uid_t uid, tb_uid_t suid, SSchemaWrap int16_t cid = pTSchema->columns[i].colId; int8_t col_type = pTSchema->columns[i].type; - (void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, LFLAG_LAST_ROW); - (void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, LFLAG_LAST); + code = tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, LFLAG_LAST_ROW); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + (void)taosThreadMutexUnlock(&pTsdb->lruMutex); + TAOS_RETURN(code); + } + code = tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, LFLAG_LAST); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + (void)taosThreadMutexUnlock(&pTsdb->lruMutex); + TAOS_RETURN(code); + } } taosMemoryFree(pTSchema); @@ -798,7 +835,13 @@ int32_t tsdbCacheDropTable(STsdb *pTsdb, tb_uid_t uid, tb_uid_t suid, SSchemaWra (void)taosThreadMutexLock(&pTsdb->lruMutex); - (void)tsdbCacheCommitNoLock(pTsdb); + code = tsdbCacheCommitNoLock(pTsdb); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s commit with no lock failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + (void)taosThreadMutexUnlock(&pTsdb->lruMutex); + TAOS_RETURN(code); + } if (pSchemaRow != NULL) { bool hasPrimayKey = false; @@ -810,7 +853,13 @@ int32_t tsdbCacheDropTable(STsdb *pTsdb, tb_uid_t uid, tb_uid_t suid, SSchemaWra int16_t cid = pSchemaRow->pSchema[i].colId; int8_t col_type = pSchemaRow->pSchema[i].type; - (void)tsdbCacheDropTableColumn(pTsdb, uid, cid, hasPrimayKey); + code = tsdbCacheDropTableColumn(pTsdb, uid, cid, hasPrimayKey); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s drop table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + 
(void)taosThreadMutexUnlock(&pTsdb->lruMutex); + TAOS_RETURN(code); + } } } else { STSchema *pTSchema = NULL; @@ -830,7 +879,13 @@ int32_t tsdbCacheDropTable(STsdb *pTsdb, tb_uid_t uid, tb_uid_t suid, SSchemaWra int16_t cid = pTSchema->columns[i].colId; int8_t col_type = pTSchema->columns[i].type; - (void)tsdbCacheDropTableColumn(pTsdb, uid, cid, hasPrimayKey); + code = tsdbCacheDropTableColumn(pTsdb, uid, cid, hasPrimayKey); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s drop table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + (void)taosThreadMutexUnlock(&pTsdb->lruMutex); + TAOS_RETURN(code); + } } taosMemoryFree(pTSchema); @@ -848,7 +903,13 @@ int32_t tsdbCacheDropSubTables(STsdb *pTsdb, SArray *uids, tb_uid_t suid) { (void)taosThreadMutexLock(&pTsdb->lruMutex); - (void)tsdbCacheCommitNoLock(pTsdb); + code = tsdbCacheCommitNoLock(pTsdb); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s commit with no lock failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + (void)taosThreadMutexUnlock(&pTsdb->lruMutex); + TAOS_RETURN(code); + } STSchema *pTSchema = NULL; code = metaGetTbTSchemaEx(pTsdb->pVnode->pMeta, suid, suid, -1, &pTSchema); @@ -871,7 +932,14 @@ int32_t tsdbCacheDropSubTables(STsdb *pTsdb, SArray *uids, tb_uid_t suid) { int16_t cid = pTSchema->columns[i].colId; int8_t col_type = pTSchema->columns[i].type; - (void)tsdbCacheDropTableColumn(pTsdb, uid, cid, hasPrimayKey); + code = tsdbCacheDropTableColumn(pTsdb, uid, cid, hasPrimayKey); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s drop table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + (void)taosThreadMutexUnlock(&pTsdb->lruMutex); + taosMemoryFree(pTSchema); + TAOS_RETURN(code); + } } } @@ -889,12 +957,22 @@ int32_t tsdbCacheNewNTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid, int8_t 
(void)taosThreadMutexLock(&pTsdb->lruMutex); - (void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 0); - (void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 1); - + code = tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 0); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + (void)taosThreadMutexUnlock(&pTsdb->lruMutex); + TAOS_RETURN(code); + } + code = tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 1); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + (void)taosThreadMutexUnlock(&pTsdb->lruMutex); + TAOS_RETURN(code); + } // rocksMayWrite(pTsdb, true, false, false); (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - //(void)tsdbCacheCommit(pTsdb); TAOS_RETURN(code); } @@ -904,9 +982,21 @@ int32_t tsdbCacheDropNTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid, bool h (void)taosThreadMutexLock(&pTsdb->lruMutex); - (void)tsdbCacheCommitNoLock(pTsdb); + code = tsdbCacheCommitNoLock(pTsdb); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s commit with no lock failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + (void)taosThreadMutexUnlock(&pTsdb->lruMutex); + TAOS_RETURN(code); + } - (void)tsdbCacheDropTableColumn(pTsdb, uid, cid, hasPrimayKey); + code = tsdbCacheDropTableColumn(pTsdb, uid, cid, hasPrimayKey); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s drop table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + (void)taosThreadMutexUnlock(&pTsdb->lruMutex); + TAOS_RETURN(code); + } rocksMayWrite(pTsdb, false); @@ -923,14 +1013,24 @@ int32_t tsdbCacheNewSTableColumn(STsdb *pTsdb, SArray *uids, int16_t cid, int8_t for (int i = 0; i < TARRAY_SIZE(uids); ++i) { tb_uid_t uid = ((tb_uid_t 
*)TARRAY_DATA(uids))[i]; - (void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 0); - (void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 1); + code = tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 0); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + (void)taosThreadMutexUnlock(&pTsdb->lruMutex); + TAOS_RETURN(code); + } + code = tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 1); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + (void)taosThreadMutexUnlock(&pTsdb->lruMutex); + TAOS_RETURN(code); + } } // rocksMayWrite(pTsdb, true, false, false); (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - //(void)tsdbCacheCommit(pTsdb); - TAOS_RETURN(code); } @@ -939,12 +1039,24 @@ int32_t tsdbCacheDropSTableColumn(STsdb *pTsdb, SArray *uids, int16_t cid, bool (void)taosThreadMutexLock(&pTsdb->lruMutex); - (void)tsdbCacheCommitNoLock(pTsdb); + code = tsdbCacheCommitNoLock(pTsdb); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s commit with no lock failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + (void)taosThreadMutexUnlock(&pTsdb->lruMutex); + TAOS_RETURN(code); + } for (int i = 0; i < TARRAY_SIZE(uids); ++i) { int64_t uid = ((tb_uid_t *)TARRAY_DATA(uids))[i]; - (void)tsdbCacheDropTableColumn(pTsdb, uid, cid, hasPrimayKey); + code = tsdbCacheDropTableColumn(pTsdb, uid, cid, hasPrimayKey); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s drop table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + (void)taosThreadMutexUnlock(&pTsdb->lruMutex); + TAOS_RETURN(code); + } } rocksMayWrite(pTsdb, false); @@ -1109,7 +1221,9 @@ static int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SArray } } - 
(void)taosLRUCacheRelease(pCache, h, false); + if (!taosLRUCacheRelease(pCache, h, false)) { + tsdbInfo("vgId:%d, %s release lru cache failed at line %d", TD_VID(pTsdb->pVnode), __func__, __LINE__); + } TAOS_CHECK_EXIT(code); } else { if (!remainCols) { @@ -1151,7 +1265,7 @@ static int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SArray keys_list_sizes[i] = ROCKS_KEY_LEN; } - rocksMayWrite(pTsdb, true); // flush writebatch cache + rocksMayWrite(pTsdb, true); // flush writebatch cache code = tsdbCacheGetValuesFromRocks(pTsdb, num_keys, (const char *const *)keys_list, keys_list_sizes, &values_list, &values_list_sizes); @@ -1169,7 +1283,11 @@ static int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SArray SColVal *pColVal = &updCtx->colVal; SLastCol *pLastCol = NULL; - (void)tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); + code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbWarn("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + } /* if (code) { tsdbError("tsdb/cache: vgId:%d, deserialize failed since %s.", TD_VID(pTsdb->pVnode), tstrerror(code)); @@ -1237,7 +1355,8 @@ _exit: taosArrayDestroy(remainCols); if (code) { - tsdbError("tsdb/cache: vgId:%d, update failed at line %d since %s.", TD_VID(pTsdb->pVnode), lino, tstrerror(code)); + tsdbError("tsdb/cache: vgId:%d, update failed at line %d since %s.", TD_VID(pTsdb->pVnode), __LINE__, + tstrerror(code)); } TAOS_RETURN(code); @@ -1268,7 +1387,12 @@ int32_t tsdbCacheRowFormatUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, int6 tsdbRowGetKey(&lRow, &tsdbRowKey); STSDBRowIter iter = {0}; - (void)tsdbRowIterOpen(&iter, &lRow, pTSchema); + code = tsdbRowIterOpen(&iter, &lRow, pTSchema); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s tsdbRowIterOpen failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, 
+ tstrerror(code)); + TAOS_CHECK_GOTO(code, &lino, _exit); + } int32_t iCol = 0; for (SColVal *pColVal = tsdbRowIterNext(&iter); pColVal && iCol < nCol; pColVal = tsdbRowIterNext(&iter), iCol++) { SLastUpdateCtx updateCtx = {.lflag = LFLAG_LAST_ROW, .tsdbRowKey = tsdbRowKey, .colVal = *pColVal}; @@ -1312,13 +1436,23 @@ int32_t tsdbCacheRowFormatUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, int6 if (!taosArrayPush(ctxArray, &updateCtx)) { TAOS_CHECK_GOTO(terrno, &lino, _exit); } - (void)tSimpleHashIterateRemove(iColHash, &iCol, sizeof(iCol), &pIte, &iter); + code = tSimpleHashIterateRemove(iColHash, &iCol, sizeof(iCol), &pIte, &iter); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s tSimpleHashIterateRemove failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, + __LINE__, tstrerror(code)); + TAOS_CHECK_GOTO(code, &lino, _exit); + } } } } // 3. do update - (void)tsdbCacheUpdate(pTsdb, suid, uid, ctxArray); + code = tsdbCacheUpdate(pTsdb, suid, uid, ctxArray); + if (code < TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s tsdbCacheUpdate failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + TAOS_CHECK_GOTO(code, &lino, _exit); + } _exit: taosMemoryFreeClear(pTSchema); @@ -1384,7 +1518,12 @@ int32_t tsdbCacheColFormatUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SBlo // 2. 
prepare last row STSDBRowIter iter = {0}; - (void)tsdbRowIterOpen(&iter, &lRow, pTSchema); + code = tsdbRowIterOpen(&iter, &lRow, pTSchema); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s tsdbRowIterOpen failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + TAOS_CHECK_GOTO(code, &lino, _exit); + } for (SColVal *pColVal = tsdbRowIterNext(&iter); pColVal; pColVal = tsdbRowIterNext(&iter)) { SLastUpdateCtx updateCtx = {.lflag = LFLAG_LAST_ROW, .tsdbRowKey = tsdbRowKey, .colVal = *pColVal}; if (!taosArrayPush(ctxArray, &updateCtx)) { @@ -1394,7 +1533,12 @@ int32_t tsdbCacheColFormatUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SBlo tsdbRowClose(&iter); // 3. do update - (void)tsdbCacheUpdate(pTsdb, suid, uid, ctxArray); + code = tsdbCacheUpdate(pTsdb, suid, uid, ctxArray); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s tsdbCacheUpdate failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + TAOS_CHECK_GOTO(code, &lino, _exit); + } _exit: taosMemoryFreeClear(pTSchema); @@ -1604,7 +1748,7 @@ static int32_t tsdbCacheLoadFromRocks(STsdb *pTsdb, tb_uid_t uid, SArray *pLastA keys_list_sizes[i] = ROCKS_KEY_LEN; } - rocksMayWrite(pTsdb, true); // flush writebatch cache + rocksMayWrite(pTsdb, true); // flush writebatch cache code = tsdbCacheGetValuesFromRocks(pTsdb, num_keys, (const char *const *)keys_list, keys_list_sizes, &values_list, &values_list_sizes); @@ -1624,7 +1768,11 @@ static int32_t tsdbCacheLoadFromRocks(STsdb *pTsdb, tb_uid_t uid, SArray *pLastA continue; } - (void)tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); + code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbWarn("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + } SLastCol *pToFree = pLastCol; SIdxKey *idxKey = &((SIdxKey 
*)TARRAY_DATA(remainCols))[j]; if (pLastCol && pLastCol->cacheStatus != TSDB_LAST_CACHE_NO_CACHE) { @@ -1757,7 +1905,11 @@ int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCache } if (h) { - (void)taosLRUCacheRelease(pCache, h, false); + code = taosLRUCacheRelease(pCache, h, false); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s release lru cache failed at line %d.", TD_VID(pTsdb->pVnode), __func__, __LINE__); + goto _exit; + } } } @@ -1786,7 +1938,11 @@ int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCache } if (h) { - (void)taosLRUCacheRelease(pCache, h, false); + code = taosLRUCacheRelease(pCache, h, false); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s release lru cache failed at line %d.", TD_VID(pTsdb->pVnode), __func__, __LINE__); + goto _exit; + } } } @@ -1820,7 +1976,13 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE int numKeys = 0; SArray *remainCols = NULL; - (void)tsdbCacheCommit(pTsdb); + code = tsdbCacheCommit(pTsdb); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s commit failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + (void)taosThreadMutexUnlock(&pTsdb->lruMutex); + TAOS_RETURN(code); + } (void)taosThreadMutexLock(&pTsdb->lruMutex); @@ -1837,7 +1999,9 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE .cacheStatus = TSDB_LAST_CACHE_NO_CACHE}; code = tsdbCachePutToLRU(pTsdb, &lastKey, &noneCol); } - (void)taosLRUCacheRelease(pTsdb->lruCache, h, false); + if (taosLRUCacheRelease(pTsdb->lruCache, h, false) != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s release lru cache failed at line %d.", TD_VID(pTsdb->pVnode), __func__, __LINE__); + } TAOS_CHECK_EXIT(code); } else { if (!remainCols) { @@ -1871,7 +2035,7 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE code = terrno; goto _exit; } - SIdxKey* idxKey = 
taosArrayGet(remainCols, i); + SIdxKey *idxKey = taosArrayGet(remainCols, i); ((SLastKey *)key)[0] = idxKey->key; @@ -1879,7 +2043,7 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE keys_list_sizes[i] = klen; } - rocksMayWrite(pTsdb, true); // flush writebatch cache + rocksMayWrite(pTsdb, true); // flush writebatch cache TAOS_CHECK_GOTO(tsdbCacheGetValuesFromRocks(pTsdb, numKeys, (const char *const *)keys_list, keys_list_sizes, &values_list, &values_list_sizes), @@ -1888,8 +2052,12 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch; for (int i = 0; i < numKeys; ++i) { SLastCol *pLastCol = NULL; - (void)tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); - SIdxKey* idxKey = taosArrayGet(remainCols, i); + code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbWarn("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + } + SIdxKey *idxKey = taosArrayGet(remainCols, i); SLastKey *pLastKey = &idxKey->key; if (NULL != pLastCol && (pLastCol->rowKey.ts <= eKey && pLastCol->rowKey.ts >= sKey)) { SLastCol noCacheCol = {.rowKey.ts = TSKEY_MIN, @@ -2389,7 +2557,12 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie state->pr->pCurFileSet = state->pFileSet; - (void)loadDataTomb(state->pr, state->pr->pFileReader); + code = loadDataTomb(state->pr, state->pr->pFileReader); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s load tomb failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + TAOS_CHECK_GOTO(code, &lino, _err); + } TAOS_CHECK_GOTO(tsdbDataFileReadBrinBlk(state->pr->pFileReader, &state->pr->pBlkArray), &lino, _err); } @@ -2467,7 +2640,12 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie if 
(!state->pLastRow) { if (state->pLastIter) { - (void)lastIterClose(&state->pLastIter); + code = lastIterClose(&state->pLastIter); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s close last iter failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + TAOS_RETURN(code); + } } clearLastFileSet(state); @@ -2575,7 +2753,12 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie if (!state->pLastRow) { if (state->pLastIter) { - (void)lastIterClose(&state->pLastIter); + code = lastIterClose(&state->pLastIter); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s close last iter failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + TAOS_RETURN(code); + } } *ppRow = &state->row; @@ -2599,7 +2782,12 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie } else { // TODO: merge rows and *ppRow = mergedRow SRowMerger *pMerger = &state->rowMerger; - (void)tsdbRowMergerInit(pMerger, state->pTSchema); + code = tsdbRowMergerInit(pMerger, state->pTSchema); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s init row merger failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + TAOS_RETURN(code); + } TAOS_CHECK_GOTO(tsdbRowMergerAdd(pMerger, &state->row, state->pTSchema), &lino, _err); TAOS_CHECK_GOTO(tsdbRowMergerAdd(pMerger, state->pLastRow, state->pTSchema), &lino, _err); @@ -2765,7 +2953,11 @@ int32_t clearNextRowFromFS(void *iter) { } if (state->pLastIter) { - (void)lastIterClose(&state->pLastIter); + code = lastIterClose(&state->pLastIter); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("%s close last iter failed at line %d since %s", __func__, __LINE__, tstrerror(code)); + TAOS_RETURN(code); + } } if (state->pBlockData) { @@ -2798,7 +2990,11 @@ int32_t clearNextRowFromFS(void *iter) { static void clearLastFileSet(SFSNextRowIter *state) { if (state->pLastIter) { - 
(void)lastIterClose(&state->pLastIter); + int code = lastIterClose(&state->pLastIter); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("%s close last iter failed at line %d since %s", __func__, __LINE__, tstrerror(code)); + return; + } } if (state->pBlockData) { @@ -3363,7 +3559,11 @@ _err: TAOS_RETURN(code); } -void tsdbCacheRelease(SLRUCache *pCache, LRUHandle *h) { (void)taosLRUCacheRelease(pCache, h, false); } +void tsdbCacheRelease(SLRUCache *pCache, LRUHandle *h) { + if (taosLRUCacheRelease(pCache, h, false)) { + tsdbError("%s release lru cache failed at line %d.", __func__, __LINE__); + } +} void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity) { taosLRUCacheSetCapacity(pVnode->pTsdb->lruCache, capacity); diff --git a/source/dnode/vnode/src/tsdb/tsdbOpen.c b/source/dnode/vnode/src/tsdb/tsdbOpen.c index 1923c8bafc..c1f8f45d7e 100644 --- a/source/dnode/vnode/src/tsdb/tsdbOpen.c +++ b/source/dnode/vnode/src/tsdb/tsdbOpen.c @@ -102,7 +102,7 @@ _exit: return code; } -int32_t tsdbClose(STsdb **pTsdb) { +void tsdbClose(STsdb **pTsdb) { if (*pTsdb) { STsdb *pdb = *pTsdb; tsdbDebug("vgId:%d, tsdb is close at %s, days:%d, keep:%d,%d,%d, keepTimeOffset:%d", TD_VID(pdb->pVnode), pdb->path, @@ -121,5 +121,5 @@ int32_t tsdbClose(STsdb **pTsdb) { (void)taosThreadMutexDestroy(&(*pTsdb)->mutex); taosMemoryFreeClear(*pTsdb); } - return 0; + return; } diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 1fc67ec66f..0e958b155b 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -82,7 +82,7 @@ static bool hasDataInFileBlock(const SBlockData* pBlockData, const SFil static bool hasDataInSttBlock(STableBlockScanInfo* pInfo); static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter); static int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order); -static void resetTableListIndex(SReaderStatus* pStatus); +static int32_t 
resetTableListIndex(SReaderStatus* pStatus, const char* id); static void getMemTableTimeRange(STsdbReader* pReader, int64_t* pMaxKey, int64_t* pMinKey); static void updateComposedBlockInfo(STsdbReader* pReader, double el, STableBlockScanInfo* pBlockScanInfo); static int32_t buildFromPreFilesetBuffer(STsdbReader* pReader); @@ -257,9 +257,8 @@ static int32_t initFilesetIterator(SFilesetIter* pIter, TFileSetArray* pFileSetA if (pIter->pSttBlockReader == NULL) { pIter->pSttBlockReader = taosMemoryCalloc(1, sizeof(struct SSttBlockReader)); if (pIter->pSttBlockReader == NULL) { - code = terrno; - tsdbError("failed to prepare the last block iterator, since:%s %s", tstrerror(code), pReader->idStr); - return code; + tsdbError("failed to prepare the last block iterator, since:%s %s", tstrerror(terrno), pReader->idStr); + return terrno; } } @@ -381,13 +380,15 @@ _err: bool shouldFreePkBuf(SBlockLoadSuppInfo* pSupp) { return (pSupp->numOfPks > 0) && IS_VAR_DATA_TYPE(pSupp->pk.type); } -int32_t resetDataBlockIterator(SDataBlockIter* pIter, int32_t order, bool needFree) { +int32_t resetDataBlockIterator(SDataBlockIter* pIter, int32_t order, bool needFree, const char* id) { pIter->order = order; pIter->index = -1; pIter->numOfBlocks = 0; + if (pIter->blockList == NULL) { pIter->blockList = taosArrayInit(4, sizeof(SFileDataBlockInfo)); if (pIter->blockList == NULL) { + tsdbError("%s failed to reset block iter, func:%s at line:%d code:%s", id, __func__, __LINE__, tstrerror(terrno)); return terrno; } } else { @@ -584,7 +585,6 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, void code = tBlockDataCreate(&pReader->status.fileBlockData); if (code != TSDB_CODE_SUCCESS) { - terrno = code; goto _end; } @@ -3072,12 +3072,18 @@ static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum, SAr } // pTableIter can be NULL, no need to handle the return value -static void resetTableListIndex(SReaderStatus* pStatus) { +static int32_t 
resetTableListIndex(SReaderStatus* pStatus, const char* id) { STableUidList* pList = &pStatus->uidList; pList->currentIndex = 0; uint64_t uid = pList->tableUidList[0]; pStatus->pTableIter = tSimpleHashGet(pStatus->pTableMap, &uid, sizeof(uid)); + if (pStatus->pTableIter == NULL) { + tsdbError("%s failed to load tableBlockScanInfo for uid:%"PRId64", code: internal error", id, uid); + return TSDB_CODE_INTERNAL_ERROR; + } + + return 0; } static void resetPreFilesetMemTableListIndex(SReaderStatus* pStatus) { @@ -3209,8 +3215,8 @@ static int32_t doLoadSttBlockSequentially(STsdbReader* pReader) { } // load the last data block of current table - STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)pStatus->pTableIter; - if (pScanInfo == NULL) { + STableBlockScanInfo* pScanInfo = NULL; + if (pStatus->pTableIter == NULL) { tsdbError("table Iter is null, invalid pScanInfo, try next table %s", pReader->idStr); bool hasNexTable = moveToNextTable(pUidList, pStatus); if (!hasNexTable) { @@ -3218,6 +3224,8 @@ static int32_t doLoadSttBlockSequentially(STsdbReader* pReader) { } continue; + } else { + pScanInfo = *(STableBlockScanInfo**) pStatus->pTableIter; } if (pReader->pIgnoreTables && taosHashGet(*pReader->pIgnoreTables, &pScanInfo->uid, sizeof(pScanInfo->uid))) { @@ -3562,14 +3570,21 @@ static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBl code = initBlockIterator(pReader, pBlockIter, num.numOfBlocks, pTableList); } else { // no block data, only last block exists tBlockDataReset(&pReader->status.fileBlockData); - code = resetDataBlockIterator(pBlockIter, pReader->info.order, shouldFreePkBuf(&pReader->suppInfo)); - resetTableListIndex(&pReader->status); + code = resetDataBlockIterator(pBlockIter, pReader->info.order, shouldFreePkBuf(&pReader->suppInfo), pReader->idStr); + if (code) { + return code; + } + + code = resetTableListIndex(&pReader->status, pReader->idStr); + if (code) { + return code; + } } - if (code == TSDB_CODE_SUCCESS) { - // set 
the correct start position according to the query time window + if (code == TSDB_CODE_SUCCESS) { // set the correct start position according to the query time window initBlockDumpInfo(pReader, pBlockIter); } + taosArrayDestroy(pTableList); return code; } @@ -3584,38 +3599,40 @@ typedef enum { TSDB_READ_CONTINUE = 0x2, } ERetrieveType; -static ERetrieveType doReadDataFromSttFiles(STsdbReader* pReader) { +static int32_t doReadDataFromSttFiles(STsdbReader* pReader, ERetrieveType* pReturnType) { int32_t code = TSDB_CODE_SUCCESS; SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock; SDataBlockIter* pBlockIter = &pReader->status.blockIter; + *pReturnType = TSDB_READ_RETURN; + tsdbDebug("seq load data blocks from stt files %s", pReader->idStr); while (1) { - terrno = 0; - code = doLoadSttBlockSequentially(pReader); if (code != TSDB_CODE_SUCCESS) { - terrno = code; - return TSDB_READ_RETURN; + *pReturnType = TSDB_READ_RETURN; + return code; } if (pResBlock->info.rows > 0) { - return TSDB_READ_RETURN; + *pReturnType = TSDB_READ_RETURN; + return code; } // all data blocks are checked in this stt file, now let's try the next file set if (pReader->status.pTableIter != NULL) { - terrno = TSDB_CODE_INTERNAL_ERROR; - tsdbError("tsdb reader failed at: %s:%d", __func__, __LINE__); - return TSDB_READ_RETURN; + code = TSDB_CODE_INTERNAL_ERROR; + tsdbError("tsdb reader failed at: %s:%d, code:%s", __func__, __LINE__, tstrerror(code)); + return code; } + code = initForFirstBlockInFile(pReader, pBlockIter); // error happens or all the data files are completely checked if ((code != TSDB_CODE_SUCCESS) || (pReader->status.loadFromFile == false)) { - terrno = code; - return TSDB_READ_RETURN; + *pReturnType = TSDB_READ_RETURN; + return code; } if (pReader->status.bProcMemPreFileset) { @@ -3625,14 +3642,19 @@ static ERetrieveType doReadDataFromSttFiles(STsdbReader* pReader) { } if (pResBlock->info.rows > 0) { pReader->status.processingMemPreFileSet = true; - return TSDB_READ_RETURN; + 
*pReturnType = TSDB_READ_RETURN; + return code; } } if (pBlockIter->numOfBlocks > 0) { // there are data blocks existed. - return TSDB_READ_CONTINUE; - } else { // all blocks in data file are checked, let's check the data in last files - resetTableListIndex(&pReader->status); + *pReturnType = TSDB_READ_CONTINUE; + return code; + } else { // all blocks in data file are checked, let's check the data in stt-files + code = resetTableListIndex(&pReader->status, pReader->idStr); + if (code) { + return code; + } } } } @@ -3641,15 +3663,18 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { int32_t code = TSDB_CODE_SUCCESS; bool asc = ASCENDING_TRAVERSE(pReader->info.order); - SDataBlockIter* pBlockIter = &pReader->status.blockIter; - SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock; + SDataBlockIter* pBlockIter = &pReader->status.blockIter; + SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock; + SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; + SBlockData* pBlockData = &pReader->status.fileBlockData; + const char* id = pReader->idStr; if (pBlockIter->numOfBlocks == 0) { // let's try to extract data from stt files. 
- terrno = 0; - ERetrieveType type = doReadDataFromSttFiles(pReader); - if (type == TSDB_READ_RETURN) { - return terrno; + ERetrieveType type = 0; + code = doReadDataFromSttFiles(pReader, &type); + if (code != 0 || type == TSDB_READ_RETURN) { + return code; } code = doBuildDataBlock(pReader); @@ -3658,9 +3683,6 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { } } - SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; - SBlockData* pBlockData = &pReader->status.fileBlockData; - while (1) { if (fileBlockPartiallyRead(pDumpInfo, asc)) { // file data block is partially loaded code = buildComposedDataBlock(pReader); @@ -3679,15 +3701,20 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { } tBlockDataReset(pBlockData); - code = resetDataBlockIterator(pBlockIter, pReader->info.order, shouldFreePkBuf(&pReader->suppInfo)); + code = resetDataBlockIterator(pBlockIter, pReader->info.order, shouldFreePkBuf(&pReader->suppInfo), id); if (code != TSDB_CODE_SUCCESS) { return code; } - resetTableListIndex(&pReader->status); - ERetrieveType type = doReadDataFromSttFiles(pReader); - if (type == TSDB_READ_RETURN) { - return terrno; + code = resetTableListIndex(&pReader->status, id); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + ERetrieveType type = 0; + code = doReadDataFromSttFiles(pReader, &type); + if (code != 0 || type == TSDB_READ_RETURN) { + return code; } } } @@ -4649,7 +4676,7 @@ uint64_t tsdbGetReaderMaxVersion2(STsdbReader* pReader) { return pReader->info.v static int32_t doOpenReaderImpl(STsdbReader* pReader) { SReaderStatus* pStatus = &pReader->status; SDataBlockIter* pBlockIter = &pStatus->blockIter; - int32_t code = TSDB_CODE_SUCCESS; + int32_t code = TSDB_CODE_SUCCESS; if (pReader->bFilesetDelimited) { getMemTableTimeRange(pReader, &pReader->status.memTableMaxKey, &pReader->status.memTableMinKey); @@ -4661,7 +4688,8 @@ static int32_t doOpenReaderImpl(STsdbReader* pReader) { return code; } - code = 
resetDataBlockIterator(&pStatus->blockIter, pReader->info.order, shouldFreePkBuf(&pReader->suppInfo)); + code = resetDataBlockIterator(&pStatus->blockIter, pReader->info.order, shouldFreePkBuf(&pReader->suppInfo), + pReader->idStr); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -4673,7 +4701,7 @@ static int32_t doOpenReaderImpl(STsdbReader* pReader) { } if (!pStatus->loadFromFile) { - resetTableListIndex(pStatus); + code = resetTableListIndex(pStatus, pReader->idStr); } return code; @@ -5146,7 +5174,11 @@ static int32_t doTsdbNextDataBlockFilesetDelimited(STsdbReader* pReader) { } if (pBlock->info.rows <= 0) { - resetTableListIndex(&pReader->status); + code = resetTableListIndex(&pReader->status, pReader->idStr); + if (code) { + return code; + } + int64_t endKey = (ASCENDING_TRAVERSE(pReader->info.order)) ? INT64_MAX : INT64_MIN; code = buildBlockFromBufferSequentially(pReader, endKey); } @@ -5169,7 +5201,11 @@ static int32_t doTsdbNextDataBlockFilesFirst(STsdbReader* pReader) { } if (pBlock->info.rows <= 0) { - resetTableListIndex(&pReader->status); + code = resetTableListIndex(&pReader->status, pReader->idStr); + if (code) { + return code; + } + int64_t endKey = (ASCENDING_TRAVERSE(pReader->info.order)) ? 
INT64_MAX : INT64_MIN; code = buildBlockFromBufferSequentially(pReader, endKey); } @@ -5587,13 +5623,17 @@ int32_t tsdbReaderReset2(STsdbReader* pReader, SQueryTableDataCond* pCond) { return code; } - code = resetDataBlockIterator(pBlockIter, pReader->info.order, shouldFreePkBuf(&pReader->suppInfo)); + code = resetDataBlockIterator(pBlockIter, pReader->info.order, shouldFreePkBuf(&pReader->suppInfo), pReader->idStr); if (code != TSDB_CODE_SUCCESS) { (void) tsdbReleaseReader(pReader); return code; } - resetTableListIndex(&pReader->status); + code = resetTableListIndex(&pReader->status, pReader->idStr); + if (code != TSDB_CODE_SUCCESS) { + (void) tsdbReleaseReader(pReader); + return code; + } bool asc = ASCENDING_TRAVERSE(pReader->info.order); int32_t step = asc ? 1 : -1; @@ -5608,7 +5648,11 @@ int32_t tsdbReaderReset2(STsdbReader* pReader, SQueryTableDataCond* pCond) { // no data in files, let's try buffer in memory if (pStatus->fileIter.numOfFiles == 0) { pStatus->loadFromFile = false; - resetTableListIndex(pStatus); + code = resetTableListIndex(pStatus, pReader->idStr); + if (code != TSDB_CODE_SUCCESS) { + (void) tsdbReleaseReader(pReader); + return code; + } } else { code = initForFirstBlockInFile(pReader, pBlockIter); if (code != TSDB_CODE_SUCCESS) { @@ -5845,9 +5889,9 @@ int32_t tsdbGetTableSchema(SMeta* pMeta, int64_t uid, STSchema** pSchema, int64_ metaReaderDoInit(&mr, pMeta, META_READER_LOCK); int32_t code = metaReaderGetTableEntryByUidCache(&mr, uid); if (code != TSDB_CODE_SUCCESS) { - terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; + code = TSDB_CODE_TDB_INVALID_TABLE_ID; metaReaderClear(&mr); - return terrno; + return code; } *suid = 0; @@ -5858,15 +5902,15 @@ int32_t tsdbGetTableSchema(SMeta* pMeta, int64_t uid, STSchema** pSchema, int64_ *suid = mr.me.ctbEntry.suid; code = metaReaderGetTableEntryByUidCache(&mr, *suid); if (code != TSDB_CODE_SUCCESS) { - terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; + code = TSDB_CODE_TDB_INVALID_TABLE_ID; metaReaderClear(&mr); - 
return terrno; + return code; } } else if (mr.me.type == TSDB_NORMAL_TABLE) { // do nothing } else { - terrno = TSDB_CODE_INVALID_PARA; + code = TSDB_CODE_INVALID_PARA; metaReaderClear(&mr); - return terrno; + return code; } metaReaderClear(&mr); diff --git a/source/dnode/vnode/src/tsdb/tsdbReadUtil.h b/source/dnode/vnode/src/tsdb/tsdbReadUtil.h index 7c7bee8260..6ec1f99577 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReadUtil.h +++ b/source/dnode/vnode/src/tsdb/tsdbReadUtil.h @@ -357,7 +357,7 @@ int32_t initRowKey(SRowKey* pKey, int64_t ts, int32_t numOfPks, int32_t type, in void clearRowKey(SRowKey* pKey); bool shouldFreePkBuf(SBlockLoadSuppInfo* pSupp); -int32_t resetDataBlockIterator(SDataBlockIter* pIter, int32_t order, bool hasPk); +int32_t resetDataBlockIterator(SDataBlockIter* pIter, int32_t order, bool hasPk, const char* id); void clearDataBlockIterator(SDataBlockIter* pIter, bool needFree); void cleanupDataBlockIterator(SDataBlockIter* pIter, bool hasPk); diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c index da49b40d18..bd22beb52d 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c +++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c @@ -440,9 +440,9 @@ _exit: return code; } -int32_t tsdbSnapReaderClose(STsdbSnapReader** reader) { +void tsdbSnapReaderClose(STsdbSnapReader** reader) { if (reader[0] == NULL) { - return 0; + return; } int32_t code = 0; @@ -469,7 +469,7 @@ int32_t tsdbSnapReaderClose(STsdbSnapReader** reader) { taosMemoryFree(reader[0]); reader[0] = NULL; - return code; + return; } int32_t tsdbSnapRead(STsdbSnapReader* reader, uint8_t** data) { diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshotRAW.c b/source/dnode/vnode/src/tsdb/tsdbSnapshotRAW.c index 609e2a80e9..a41003d9ab 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSnapshotRAW.c +++ b/source/dnode/vnode/src/tsdb/tsdbSnapshotRAW.c @@ -75,8 +75,8 @@ _exit: return code; } -int32_t tsdbSnapRAWReaderClose(STsdbSnapRAWReader** reader) { - 
if (reader[0] == NULL) return 0; +void tsdbSnapRAWReaderClose(STsdbSnapRAWReader** reader) { + if (reader[0] == NULL) return; int32_t code = 0; int32_t lino = 0; @@ -87,14 +87,7 @@ int32_t tsdbSnapRAWReaderClose(STsdbSnapRAWReader** reader) { tsdbFSDestroyRefSnapshot(&reader[0]->fsetArr); taosMemoryFree(reader[0]); reader[0] = NULL; - -_exit: - if (code) { - TSDB_ERROR_LOG(TD_VID(tsdb->pVnode), lino, code); - } else { - tsdbDebug("vgId:%d %s done", TD_VID(tsdb->pVnode), __func__); - } - return code; + return; } static int32_t tsdbSnapRAWReadFileSetOpenReader(STsdbSnapRAWReader* reader) { diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil2.c b/source/dnode/vnode/src/tsdb/tsdbUtil2.c index 4bccf7df13..cc9f8ce3ad 100644 --- a/source/dnode/vnode/src/tsdb/tsdbUtil2.c +++ b/source/dnode/vnode/src/tsdb/tsdbUtil2.c @@ -108,8 +108,8 @@ int32_t tStatisBlockClear(STbStatisBlock *statisBlock) { TAOS_UNUSED(tBufferClear(&statisBlock->buffers[i])); } for (int32_t i = 0; i < TD_MAX_PK_COLS; ++i) { - TAOS_UNUSED(tValueColumnClear(&statisBlock->firstKeyPKs[i])); - TAOS_UNUSED(tValueColumnClear(&statisBlock->lastKeyPKs[i])); + tValueColumnClear(&statisBlock->firstKeyPKs[i]); + tValueColumnClear(&statisBlock->lastKeyPKs[i]); } return 0; } @@ -267,8 +267,8 @@ void tBrinBlockClear(SBrinBlock *brinBlock) { TAOS_UNUSED(tBufferClear(&brinBlock->buffers[i])); } for (int32_t i = 0; i < TD_MAX_PK_COLS; ++i) { - TAOS_UNUSED(tValueColumnClear(&brinBlock->firstKeyPKs[i])); - TAOS_UNUSED(tValueColumnClear(&brinBlock->lastKeyPKs[i])); + tValueColumnClear(&brinBlock->firstKeyPKs[i]); + tValueColumnClear(&brinBlock->lastKeyPKs[i]); } } diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index 0bb16261dd..5aea6c5a63 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -91,7 +91,9 @@ static int32_t vnodeGetBufPoolToUse(SVnode *pVnode) { struct timeval tv; struct timespec ts; - (void)taosGetTimeOfDay(&tv); + 
if (taosGetTimeOfDay(&tv) != 0) { + continue; + } ts.tv_nsec = tv.tv_usec * 1000 + WAIT_TIME_MILI_SEC * 1000000; if (ts.tv_nsec > 999999999l) { ts.tv_sec = tv.tv_sec + 1; diff --git a/source/dnode/vnode/src/vnd/vnodeHash.c b/source/dnode/vnode/src/vnd/vnodeHash.c index 878f226309..e0faa05c78 100644 --- a/source/dnode/vnode/src/vnd/vnodeHash.c +++ b/source/dnode/vnode/src/vnd/vnodeHash.c @@ -24,10 +24,10 @@ struct SVHashEntry { void* obj; }; -static int32_t vHashRehash(SVHashTable* ht, uint32_t newNumBuckets) { +static void vHashRehash(SVHashTable* ht, uint32_t newNumBuckets) { SVHashEntry** newBuckets = (SVHashEntry**)taosMemoryCalloc(newNumBuckets, sizeof(SVHashEntry*)); if (newBuckets == NULL) { - return terrno; + return; } for (int32_t i = 0; i < ht->numBuckets; i++) { @@ -45,7 +45,7 @@ static int32_t vHashRehash(SVHashTable* ht, uint32_t newNumBuckets) { ht->buckets = newBuckets; ht->numBuckets = newNumBuckets; - return 0; + return; } int32_t vHashInit(SVHashTable** ht, uint32_t (*hash)(const void*), int32_t (*compare)(const void*, const void*)) { @@ -96,7 +96,7 @@ int32_t vHashPut(SVHashTable* ht, void* obj) { } if (ht->numEntries >= ht->numBuckets) { - (void)vHashRehash(ht, ht->numBuckets * 2); + vHashRehash(ht, ht->numBuckets * 2); bucketIndex = ht->hash(obj) % ht->numBuckets; } @@ -142,7 +142,7 @@ int32_t vHashDrop(SVHashTable* ht, const void* obj) { taosMemoryFree(tmp); ht->numEntries--; if (ht->numBuckets > VNODE_HASH_DEFAULT_NUM_BUCKETS && ht->numEntries < ht->numBuckets / 4) { - (void)vHashRehash(ht, ht->numBuckets / 2); + vHashRehash(ht, ht->numBuckets / 2); } return 0; } diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index 1f54fea27c..9fba8df90d 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -108,7 +108,7 @@ int32_t vnodeAlterReplica(const char *path, SAlterVnodeReplicaReq *pReq, int32_t pNode->nodePort = pReq->replicas[i].port; tstrncpy(pNode->nodeFqdn, 
pReq->replicas[i].fqdn, sizeof(pNode->nodeFqdn)); pNode->nodeRole = TAOS_SYNC_ROLE_VOTER; - (void)tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); + bool ret = tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); vInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d", pReq->vgId, i, pNode->nodeFqdn, pNode->nodePort, pNode->nodeId); pCfg->replicaNum++; } @@ -121,7 +121,7 @@ int32_t vnodeAlterReplica(const char *path, SAlterVnodeReplicaReq *pReq, int32_t pNode->nodePort = pReq->learnerReplicas[pCfg->totalReplicaNum].port; pNode->nodeRole = TAOS_SYNC_ROLE_LEARNER; tstrncpy(pNode->nodeFqdn, pReq->learnerReplicas[pCfg->totalReplicaNum].fqdn, sizeof(pNode->nodeFqdn)); - (void)tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); + bool ret = tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); vInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d", pReq->vgId, i, pNode->nodeFqdn, pNode->nodePort, pNode->nodeId); pCfg->totalReplicaNum++; } @@ -176,8 +176,10 @@ int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t sr int32_t prefixLen = strlen(tsdbFilePrefix); STfsDir *tsdbDir = NULL; - (void)tfsOpendir(pTfs, tsdbPath, &tsdbDir); - if (tsdbDir == NULL) return 0; + int32_t tret = tfsOpendir(pTfs, tsdbPath, &tsdbDir); + if (tsdbDir == NULL) { + return 0; + } while (1) { const STfsFile *tsdbFile = tfsReaddir(tsdbDir); @@ -248,7 +250,7 @@ int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnod SNodeInfo *pNode = &pCfg->nodeInfo[0]; pNode->nodePort = tsServerPort; tstrncpy(pNode->nodeFqdn, tsLocalFqdn, TSDB_FQDN_LEN); - (void)tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); + bool ret1 = tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); vInfo("vgId:%d, ep:%s:%u dnode:%d", pReq->srcVgId, pNode->nodeFqdn, 
pNode->nodePort, pNode->nodeId); info.config.syncCfg = *pCfg; @@ -317,7 +319,9 @@ int32_t vnodeRestoreVgroupId(const char *srcPath, const char *dstPath, int32_t s void vnodeDestroy(int32_t vgId, const char *path, STfs *pTfs, int32_t nodeId) { vInfo("path:%s is removed while destroy vnode", path); - (void)tfsRmdir(pTfs, path); + if (tfsRmdir(pTfs, path) < 0) { + vError("failed to remove path:%s since %s", path, tstrerror(terrno)); + } // int32_t nlevel = tfsGetLevel(pTfs); if (nodeId > 0 && vgId > 0 /*&& nlevel > 1*/ && tsS3Enabled) { @@ -378,8 +382,13 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC } if (updated) { vInfo("vgId:%d, save vnode info since dnode info changed", info.config.vgId); - (void)vnodeSaveInfo(dir, &info); - (void)vnodeCommitInfo(dir); + if (vnodeSaveInfo(dir, &info) < 0) { + vError("vgId:%d, failed to save vnode info since %s", info.config.vgId, tstrerror(terrno)); + } + + if (vnodeCommitInfo(dir) < 0) { + vError("vgId:%d, failed to commit vnode info since %s", info.config.vgId, tstrerror(terrno)); + } } // create handle @@ -499,9 +508,9 @@ _err: if (pVnode->pQuery) vnodeQueryClose(pVnode); if (pVnode->pTq) tqClose(pVnode->pTq); if (pVnode->pWal) walClose(pVnode->pWal); - if (pVnode->pTsdb) (void)tsdbClose(&pVnode->pTsdb); - if (pVnode->pSma) (void)smaClose(pVnode->pSma); - if (pVnode->pMeta) (void)metaClose(&pVnode->pMeta); + if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb); + if (pVnode->pSma) smaClose(pVnode->pSma); + if (pVnode->pMeta) metaClose(&pVnode->pMeta); if (pVnode->freeList) vnodeCloseBufPool(pVnode); taosMemoryFree(pVnode); @@ -518,13 +527,16 @@ void vnodePostClose(SVnode *pVnode) { vnodeSyncPostClose(pVnode); } void vnodeClose(SVnode *pVnode) { if (pVnode) { vnodeAWait(&pVnode->commitTask); - (void)vnodeAChannelDestroy(&pVnode->commitChannel, true); + if (vnodeAChannelDestroy(&pVnode->commitChannel, true) != 0) { + vError("vgId:%d, failed to destroy commit channel", TD_VID(pVnode)); + } + 
vnodeSyncClose(pVnode); vnodeQueryClose(pVnode); tqClose(pVnode->pTq); walClose(pVnode->pWal); if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb); - (void)smaClose(pVnode->pSma); + smaClose(pVnode->pSma); if (pVnode->pMeta) metaClose(&pVnode->pMeta); vnodeCloseBufPool(pVnode); diff --git a/source/dnode/vnode/src/vnd/vnodeSnapshot.c b/source/dnode/vnode/src/vnd/vnodeSnapshot.c index 74b893d256..f732596d3b 100644 --- a/source/dnode/vnode/src/vnd/vnodeSnapshot.c +++ b/source/dnode/vnode/src/vnd/vnodeSnapshot.c @@ -219,15 +219,15 @@ void vnodeSnapReaderClose(SVSnapReader *pReader) { vnodeSnapReaderDestroyTsdbRanges(pReader); if (pReader->pRsmaReader) { - (void)rsmaSnapReaderClose(&pReader->pRsmaReader); + rsmaSnapReaderClose(&pReader->pRsmaReader); } if (pReader->pTsdbReader) { - (void)tsdbSnapReaderClose(&pReader->pTsdbReader); + tsdbSnapReaderClose(&pReader->pTsdbReader); } if (pReader->pTsdbRAWReader) { - (void)tsdbSnapRAWReaderClose(&pReader->pTsdbRAWReader); + tsdbSnapRAWReaderClose(&pReader->pTsdbRAWReader); } if (pReader->pMetaReader) { @@ -333,8 +333,7 @@ int32_t vnodeSnapRead(SVSnapReader *pReader, uint8_t **ppData, uint32_t *nData) goto _exit; } else { pReader->tsdbDone = 1; - code = tsdbSnapReaderClose(&pReader->pTsdbReader); - TSDB_CHECK_CODE(code, lino, _exit); + tsdbSnapReaderClose(&pReader->pTsdbReader); } } @@ -351,8 +350,7 @@ int32_t vnodeSnapRead(SVSnapReader *pReader, uint8_t **ppData, uint32_t *nData) goto _exit; } else { pReader->tsdbRAWDone = 1; - code = tsdbSnapRAWReaderClose(&pReader->pTsdbRAWReader); - TSDB_CHECK_CODE(code, lino, _exit); + tsdbSnapRAWReaderClose(&pReader->pTsdbRAWReader); } } @@ -463,8 +461,7 @@ int32_t vnodeSnapRead(SVSnapReader *pReader, uint8_t **ppData, uint32_t *nData) goto _exit; } else { pReader->rsmaDone = 1; - code = rsmaSnapReaderClose(&pReader->pRsmaReader); - TSDB_CHECK_CODE(code, lino, _exit); + rsmaSnapReaderClose(&pReader->pRsmaReader); } } @@ -590,15 +587,15 @@ extern int32_t tsdbDisableAndCancelAllBgTask(STsdb 
*pTsdb); extern void tsdbEnableBgTask(STsdb *pTsdb); static int32_t vnodeCancelAndDisableAllBgTask(SVnode *pVnode) { - (void)tsdbDisableAndCancelAllBgTask(pVnode->pTsdb); - (void)vnodeSyncCommit(pVnode); - (void)vnodeAChannelDestroy(&pVnode->commitChannel, true); + TAOS_CHECK_RETURN(tsdbDisableAndCancelAllBgTask(pVnode->pTsdb)); + TAOS_CHECK_RETURN(vnodeSyncCommit(pVnode)); + TAOS_CHECK_RETURN(vnodeAChannelDestroy(&pVnode->commitChannel, true)); return 0; } static int32_t vnodeEnableBgTask(SVnode *pVnode) { tsdbEnableBgTask(pVnode->pTsdb); - (void)vnodeAChannelInit(1, &pVnode->commitChannel); + TAOS_CHECK_RETURN(vnodeAChannelInit(1, &pVnode->commitChannel)); return 0; } @@ -613,7 +610,9 @@ int32_t vnodeSnapWriterOpen(SVnode *pVnode, SSnapshotParam *pParam, SVSnapWriter (void)taosThreadMutexLock(&pVnode->mutex); pVnode->disableWrite = true; (void)taosThreadMutexUnlock(&pVnode->mutex); - (void)vnodeCancelAndDisableAllBgTask(pVnode); + + code = vnodeCancelAndDisableAllBgTask(pVnode); + TSDB_CHECK_CODE(code, lino, _exit); // alloc pWriter = (SVSnapWriter *)taosMemoryCalloc(1, sizeof(*pWriter)); @@ -661,15 +660,18 @@ int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot * // prepare if (pWriter->pTsdbSnapWriter) { - (void)tsdbSnapWriterPrepareClose(pWriter->pTsdbSnapWriter, rollback); + code = tsdbSnapWriterPrepareClose(pWriter->pTsdbSnapWriter, rollback); + if (code) goto _exit; } if (pWriter->pTsdbSnapRAWWriter) { - (void)tsdbSnapRAWWriterPrepareClose(pWriter->pTsdbSnapRAWWriter); + code = tsdbSnapRAWWriterPrepareClose(pWriter->pTsdbSnapRAWWriter); + if (code) goto _exit; } if (pWriter->pRsmaSnapWriter) { - (void)rsmaSnapWriterPrepareClose(pWriter->pRsmaSnapWriter, rollback); + code = rsmaSnapWriterPrepareClose(pWriter->pRsmaSnapWriter, rollback); + if (code) goto _exit; } // commit json @@ -743,7 +745,9 @@ int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot * if (code) goto _exit; } - (void)vnodeBegin(pVnode); + 
code = vnodeBegin(pVnode); + if (code) goto _exit; + (void)taosThreadMutexLock(&pVnode->mutex); pVnode->disableWrite = false; (void)taosThreadMutexUnlock(&pVnode->mutex); @@ -755,7 +759,9 @@ _exit: vInfo("vgId:%d, vnode snapshot writer closed, rollback:%d", TD_VID(pVnode), rollback); taosMemoryFree(pWriter); } - (void)vnodeEnableBgTask(pVnode); + if (vnodeEnableBgTask(pVnode) != 0) { + tsdbError("vgId:%d, failed to enable bg task", TD_VID(pVnode)); + } return code; } diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 9c0c3136ee..c5699796c1 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -466,7 +466,11 @@ static int32_t vnodePreProcessArbCheckSyncMsg(SVnode *pVnode, SRpcMsg *pMsg) { return TSDB_CODE_INVALID_MSG; } - (void)vnodePreCheckAssignedLogSyncd(pVnode, syncReq.member0Token, syncReq.member1Token); + int32_t ret = vnodePreCheckAssignedLogSyncd(pVnode, syncReq.member0Token, syncReq.member1Token); + if (ret != 0) { + vError("vgId:%d, failed to preprocess arb check sync request since %s", TD_VID(pVnode), tstrerror(ret)); + } + int32_t code = terrno; tFreeSVArbCheckSyncReq(&syncReq); @@ -710,7 +714,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg vTrace("vgId:%d, process %s request, code:0x%x index:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), pRsp->code, ver); - (void)walApplyVer(pVnode->pWal, ver); + walApplyVer(pVnode->pWal, ver); code = tqPushMsg(pVnode->pTq, pMsg->msgType); if (code) { @@ -881,7 +885,10 @@ int32_t vnodeProcessStreamMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) } void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) { - (void)tdProcessTSmaInsert(((SVnode *)pVnode)->pSma, smaId, (const char *)data); + int32_t code = tdProcessTSmaInsert(((SVnode *)pVnode)->pSma, smaId, (const char *)data); + if (code) { + vError("failed to process sma result since %s", tstrerror(code)); + } } void 
vnodeUpdateMetaRsp(SVnode *pVnode, STableMetaRsp *pMetaRsp) { @@ -957,7 +964,10 @@ static int32_t vnodeProcessDropTtlTbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t code = metaDropTables(pVnode->pMeta, ttlReq.pTbUids); if (code) return code; - (void)tqUpdateTbUidList(pVnode->pTq, ttlReq.pTbUids, false); + code = tqUpdateTbUidList(pVnode->pTq, ttlReq.pTbUids, false); + if (code) { + vError("vgId:%d, failed to update tbUid list since %s", TD_VID(pVnode), tstrerror(code)); + } } end: @@ -1160,7 +1170,9 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t ver, void *pReq, } } else { cRsp.code = TSDB_CODE_SUCCESS; - (void)tdFetchTbUidList(pVnode->pSma, &pStore, pCreateReq->ctb.suid, pCreateReq->uid); + if (tdFetchTbUidList(pVnode->pSma, &pStore, pCreateReq->ctb.suid, pCreateReq->uid) < 0) { + vError("vgId:%d, failed to fetch tbUid list", TD_VID(pVnode)); + } if (taosArrayPush(tbUids, &pCreateReq->uid) == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; rcode = -1; @@ -1177,11 +1189,13 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t ver, void *pReq, } vDebug("vgId:%d, add %d new created tables into query table list", TD_VID(pVnode), (int32_t)taosArrayGetSize(tbUids)); - (void)tqUpdateTbUidList(pVnode->pTq, tbUids, true); + if (tqUpdateTbUidList(pVnode->pTq, tbUids, true) < 0) { + vError("vgId:%d, failed to update tbUid list since %s", TD_VID(pVnode), tstrerror(terrno)); + } if (tdUpdateTbUidList(pVnode->pSma, pStore, true) < 0) { goto _exit; } - (void)tdUidStoreFree(pStore); + pStore = tdUidStoreFree(pStore); // prepare rsp int32_t ret = 0; @@ -1193,7 +1207,9 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t ver, void *pReq, goto _exit; } tEncoderInit(&encoder, pRsp->pCont, pRsp->contLen); - (void)tEncodeSVCreateTbBatchRsp(&encoder, &rsp); + if (tEncodeSVCreateTbBatchRsp(&encoder, &rsp) != 0) { + vError("vgId:%d, failed to encode create table batch response", TD_VID(pVnode)); + } if (tsEnableAudit && 
tsEnableAuditCreateTable) { int64_t clusterId = pVnode->config.syncCfg.nodeInfo[0].clusterId; @@ -1347,7 +1363,9 @@ _exit: tEncodeSize(tEncodeSVAlterTbRsp, &vAlterTbRsp, pRsp->contLen, ret); pRsp->pCont = rpcMallocCont(pRsp->contLen); tEncoderInit(&ec, pRsp->pCont, pRsp->contLen); - (void)tEncodeSVAlterTbRsp(&ec, &vAlterTbRsp); + if (tEncodeSVAlterTbRsp(&ec, &vAlterTbRsp) != 0) { + vError("vgId:%d, failed to encode alter table response", TD_VID(pVnode)); + } tEncoderClear(&ec); if (vMetaRsp.pSchemas) { taosMemoryFree(vMetaRsp.pSchemas); @@ -1402,7 +1420,11 @@ static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t ver, void *pReq, in } } else { dropTbRsp.code = TSDB_CODE_SUCCESS; - if (tbUid > 0) (void)tdFetchTbUidList(pVnode->pSma, &pStore, pDropTbReq->suid, tbUid); + if (tbUid > 0) { + if (tdFetchTbUidList(pVnode->pSma, &pStore, pDropTbReq->suid, tbUid) < 0) { + vError("vgId:%d, failed to fetch tbUid list", TD_VID(pVnode)); + } + } } if (taosArrayPush(rsp.pArray, &dropTbRsp) == NULL) { @@ -1426,14 +1448,21 @@ static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t ver, void *pReq, in } } - (void)tqUpdateTbUidList(pVnode->pTq, tbUids, false); - (void)tdUpdateTbUidList(pVnode->pSma, pStore, false); + if (tqUpdateTbUidList(pVnode->pTq, tbUids, false) < 0) { + vError("vgId:%d, failed to update tbUid list since %s", TD_VID(pVnode), tstrerror(terrno)); + } + + if (tdUpdateTbUidList(pVnode->pSma, pStore, false) < 0) { + goto _exit; + } if (tsEnableAuditCreateTable) { int64_t clusterId = pVnode->config.syncCfg.nodeInfo[0].clusterId; SName name = {0}; - (void)tNameFromString(&name, pVnode->config.dbname, T_NAME_ACCT | T_NAME_DB); + if (tNameFromString(&name, pVnode->config.dbname, T_NAME_ACCT | T_NAME_DB) != 0) { + vError("vgId:%d, failed to get name from string", TD_VID(pVnode)); + } SStringBuilder sb = {0}; for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { @@ -1457,12 +1486,14 @@ static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t ver, void *pReq, in 
_exit: taosArrayDestroy(tbUids); - (void)tdUidStoreFree(pStore); + pStore = tdUidStoreFree(pStore); tDecoderClear(&decoder); tEncodeSize(tEncodeSVDropTbBatchRsp, &rsp, pRsp->contLen, ret); pRsp->pCont = rpcMallocCont(pRsp->contLen); tEncoderInit(&encoder, pRsp->pCont, pRsp->contLen); - (void)tEncodeSVDropTbBatchRsp(&encoder, &rsp); + if (tEncodeSVDropTbBatchRsp(&encoder, &rsp) != 0) { + vError("vgId:%d, failed to encode drop table batch response", TD_VID(pVnode)); + } tEncoderClear(&encoder); taosArrayDestroy(rsp.pArray); taosArrayDestroy(tbNames); @@ -1898,7 +1929,9 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t ver, void *pReq, in if (taosArrayGetSize(newTbUids) > 0) { vDebug("vgId:%d, add %d table into query table list in handling submit", TD_VID(pVnode), (int32_t)taosArrayGetSize(newTbUids)); - (void)tqUpdateTbUidList(pVnode->pTq, newTbUids, true); + if (tqUpdateTbUidList(pVnode->pTq, newTbUids, true) != 0) { + vError("vgId:%d, failed to update tbUid list", TD_VID(pVnode)); + } } _exit: @@ -1924,7 +1957,7 @@ _exit: pVnode->monitor.strVgId, pOriginalMsg->info.conn.user, "Success"}; - (void)taos_counter_add(tsInsertCounter, pSubmitRsp->affectedRows, sample_labels); + int tv = taos_counter_add(tsInsertCounter, pSubmitRsp->affectedRows, sample_labels); } if (code == 0) { @@ -2149,7 +2182,12 @@ static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t ver, void *pRe pVnode->config.sttTrigger = req.sttTrigger; } else { vnodeAWait(&pVnode->commitTask); - (void)tsdbDisableAndCancelAllBgTask(pVnode->pTsdb); + + int32_t ret = tsdbDisableAndCancelAllBgTask(pVnode->pTsdb); + if (ret != 0) { + vError("vgId:%d, failed to disable bg task since %s", TD_VID(pVnode), tstrerror(errno)); + } + pVnode->config.sttTrigger = req.sttTrigger; tsdbEnableBgTask(pVnode->pTsdb); } @@ -2167,7 +2205,9 @@ static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t ver, void *pRe } if (walChanged) { - (void)walAlter(pVnode->pWal, &pVnode->config.walCfg); + if 
(walAlter(pVnode->pWal, &pVnode->config.walCfg) != 0) { + vError("vgId:%d, failed to alter wal config since %s", TD_VID(pVnode), tstrerror(errno)); + } } if (tsdbChanged) { @@ -2351,7 +2391,9 @@ static int32_t vnodeProcessCompactVnodeReq(SVnode *pVnode, int64_t ver, void *pR } static int32_t vnodeProcessConfigChangeReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) { - (void)syncCheckMember(pVnode->sync); + if (syncCheckMember(pVnode->sync) != 0) { + vError("vgId:%d, failed to check member", TD_VID(pVnode)); + } pRsp->msgType = TDMT_SYNC_CONFIG_CHANGE_RSP; pRsp->code = TSDB_CODE_SUCCESS; @@ -2411,7 +2453,9 @@ static int32_t vnodeProcessArbCheckSyncReq(SVnode *pVnode, void *pReq, int32_t l syncRsp.member1Token = syncReq.member1Token; syncRsp.vgId = TD_VID(pVnode); - (void)vnodeCheckAssignedLogSyncd(pVnode, syncReq.member0Token, syncReq.member1Token); + if (vnodeCheckAssignedLogSyncd(pVnode, syncReq.member0Token, syncReq.member1Token) != 0) { + vError("vgId:%d, failed to check assigned log syncd", TD_VID(pVnode)); + } syncRsp.errCode = terrno; if (vnodeUpdateArbTerm(pVnode, syncReq.arbTerm) != 0) { diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index f725fb3809..bf317d257d 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -69,7 +69,9 @@ void vnodeRedirectRpcMsg(SVnode *pVnode, SRpcMsg *pMsg, int32_t code) { if (rsp.pCont == NULL) { pMsg->code = TSDB_CODE_OUT_OF_MEMORY; } else { - (void)tSerializeSEpSet(rsp.pCont, contLen, &newEpSet); + if (tSerializeSEpSet(rsp.pCont, contLen, &newEpSet) != 0) { + vError("vgId:%d, failed to serialize ep set", pVnode->config.vgId); + } rsp.contLen = contLen; } @@ -163,7 +165,9 @@ void vnodeProposeCommitOnNeed(SVnode *pVnode, bool atExit) { rpcFreeCont(rpcMsg.pCont); rpcMsg.pCont = NULL; } else { - (void)tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &rpcMsg); + if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &rpcMsg) < 0) 
{ + vTrace("vgId:%d, failed to put vnode commit to queue since %s", pVnode->config.vgId, terrstr()); + } } } @@ -560,7 +564,7 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx) } } while (true); - (void)walApplyVer(pVnode->pWal, commitIdx); + walApplyVer(pVnode->pWal, commitIdx); pVnode->restored = true; SStreamMeta *pMeta = pVnode->pTq->pStreamMeta; @@ -615,7 +619,9 @@ static void vnodeBecomeFollower(const SSyncFSM *pFsm) { if (pVnode->pTq) { tqUpdateNodeStage(pVnode->pTq, false); - (void)tqStopStreamTasksAsync(pVnode->pTq); + if (tqStopStreamTasksAsync(pVnode->pTq) != 0) { + vError("vgId:%d, failed to stop stream tasks", pVnode->config.vgId); + } } } @@ -750,7 +756,10 @@ int32_t vnodeSyncStart(SVnode *pVnode) { void vnodeSyncPreClose(SVnode *pVnode) { vInfo("vgId:%d, sync pre close", pVnode->config.vgId); - (void)syncLeaderTransfer(pVnode->sync); + int32_t code = syncLeaderTransfer(pVnode->sync); + if (code) { + vError("vgId:%d, failed to transfer leader since %s", pVnode->config.vgId, tstrerror(code)); + } syncPreStop(pVnode->sync); (void)taosThreadMutexLock(&pVnode->lock); diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index 48adb22927..e391d274e3 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -553,6 +553,7 @@ typedef struct SIntervalAggOperatorInfo { EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model] STimeWindowAggSupp twAggSup; SArray* pPrevValues; // SArray used to keep the previous not null value for interpolation. 
+ struct SOperatorInfo* pOperator; // for limit optimization bool limited; int64_t limit; @@ -621,6 +622,7 @@ typedef struct SStreamIntervalOperatorInfo { int32_t midDelIndex; SSHashObj* pDeletedMap; bool destHasPrimaryKey; + struct SOperatorInfo* pOperator; } SStreamIntervalOperatorInfo; typedef struct SDataGroupInfo { @@ -676,6 +678,7 @@ typedef struct SStreamSessionAggOperatorInfo { bool recvGetAll; bool destHasPrimaryKey; SSHashObj* pPkDeleted; + struct SOperatorInfo* pOperator; } SStreamSessionAggOperatorInfo; typedef struct SStreamStateAggOperatorInfo { @@ -703,6 +706,7 @@ typedef struct SStreamStateAggOperatorInfo { bool recvGetAll; SSHashObj* pPkDeleted; bool destHasPrimaryKey; + struct SOperatorInfo* pOperator; } SStreamStateAggOperatorInfo; typedef struct SStreamEventAggOperatorInfo { @@ -732,6 +736,7 @@ typedef struct SStreamEventAggOperatorInfo { SFilterInfo* pEndCondInfo; SSHashObj* pPkDeleted; bool destHasPrimaryKey; + struct SOperatorInfo* pOperator; } SStreamEventAggOperatorInfo; typedef struct SStreamCountAggOperatorInfo { @@ -756,6 +761,7 @@ typedef struct SStreamCountAggOperatorInfo { SSDataBlock* pCheckpointRes; SSHashObj* pPkDeleted; bool destHasPrimaryKey; + struct SOperatorInfo* pOperator; } SStreamCountAggOperatorInfo; typedef struct SStreamPartitionOperatorInfo { @@ -823,6 +829,10 @@ void cleanupBasicInfo(SOptrBasicInfo* pInfo); int32_t initExprSupp(SExprSupp* pSup, SExprInfo* pExprInfo, int32_t numOfExpr, SFunctionStateStore* pStore); void cleanupExprSupp(SExprSupp* pSup); +void cleanupResultInfoInStream(SExecTaskInfo* pTaskInfo, void* pState, SExprSupp* pSup, + SGroupResInfo* pGroupResInfo); +void cleanupResultInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf, + SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap); int32_t initAggSup(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInfo, int32_t numOfCols, size_t keyBufSize, const char* pkey, void* pState, SFunctionStateStore* pStore); void 
cleanupAggSup(SAggSupporter* pAggSup); @@ -844,8 +854,8 @@ void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimit void resetLimitInfoForNextGroup(SLimitInfo* pLimitInfo); bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo); -void applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfoData* pTimeWindowData, - int32_t offset, int32_t forwardStep, int32_t numOfTotal, int32_t numOfOutput); +int32_t applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfoData* pTimeWindowData, + int32_t offset, int32_t forwardStep, int32_t numOfTotal, int32_t numOfOutput); int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, SArray* pColList, char** pNextStart); void updateLoadRemoteInfo(SLoadRemoteDataInfo* pInfo, int64_t numOfRows, int32_t dataLen, int64_t startTs, @@ -916,8 +926,8 @@ SSDataBlock* buildCreateTableBlock(SExprSupp* tbName, SExprSupp* tag); SExprInfo* createExpr(SNodeList* pNodeList, int32_t* numOfExprs); void destroyExprInfo(SExprInfo* pExpr, int32_t numOfExprs); -void copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResultRow* pRow, SqlFunctionCtx* pCtx, - SSDataBlock* pBlock, const int32_t* rowEntryOffset, SExecTaskInfo* pTaskInfo); +int32_t copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResultRow* pRow, SqlFunctionCtx* pCtx, + SSDataBlock* pBlock, const int32_t* rowEntryOffset, SExecTaskInfo* pTaskInfo); void doUpdateNumOfRows(SqlFunctionCtx* pCtx, SResultRow* pRow, int32_t numOfExprs, const int32_t* rowEntryOffset); void doClearBufferedBlocks(SStreamScanInfo* pInfo); diff --git a/source/libs/executor/src/aggregateoperator.c b/source/libs/executor/src/aggregateoperator.c index 4605d19464..863ce01256 100644 --- a/source/libs/executor/src/aggregateoperator.c +++ b/source/libs/executor/src/aggregateoperator.c @@ -48,6 +48,7 @@ typedef struct SAggOperatorInfo { bool hasValidBlock; 
SSDataBlock* pNewGroupBlock; bool hasCountFunc; + SOperatorInfo* pOperator; } SAggOperatorInfo; static void destroyAggOperatorInfo(void* param); @@ -119,6 +120,7 @@ int32_t createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* pA pInfo->binfo.inputTsOrder = pAggNode->node.inputTsOrder; pInfo->binfo.outputTsOrder = pAggNode->node.outputTsOrder; pInfo->hasCountFunc = pAggNode->hasCountLikeFunc; + pInfo->pOperator = pOperator; setOperatorInfo(pOperator, "TableAggregate", QUERY_NODE_PHYSICAL_PLAN_HASH_AGG, !pAggNode->node.forceCreateNonBlockingOptr, OP_NOT_OPENED, pInfo, pTaskInfo); @@ -153,6 +155,9 @@ void destroyAggOperatorInfo(void* param) { SAggOperatorInfo* pInfo = (SAggOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); + cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, + &pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable); + pInfo->pOperator = NULL; cleanupAggSup(&pInfo->aggSup); cleanupExprSupp(&pInfo->scalarExprSup); cleanupGroupResInfo(&pInfo->groupResInfo); @@ -581,6 +586,80 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n return code; } +void cleanupResultInfoInStream(SExecTaskInfo* pTaskInfo, void* pState, SExprSupp* pSup, SGroupResInfo* pGroupResInfo) { + int32_t code = TSDB_CODE_SUCCESS; + SStorageAPI* pAPI = &pTaskInfo->storageAPI; + int32_t numOfExprs = pSup->numOfExprs; + int32_t* rowEntryOffset = pSup->rowEntryInfoOffset; + SqlFunctionCtx* pCtx = pSup->pCtx; + int32_t numOfRows = getNumOfTotalRes(pGroupResInfo); + bool needCleanup = false; + + for (int32_t j = 0; j < numOfExprs; ++j) { + needCleanup |= pCtx[j].needCleanup; + } + if (!needCleanup) { + return; + } + + for (int32_t i = pGroupResInfo->index; i < numOfRows; i += 1) { + SResultWindowInfo* pWinInfo = taosArrayGet(pGroupResInfo->pRows, i); + SRowBuffPos* pPos = pWinInfo->pStatePos; + SResultRow* pRow = NULL; + + code = pAPI->stateStore.streamStateGetByPos(pState, pPos, 
(void**)&pRow); + if (TSDB_CODE_SUCCESS != code) { + qError("failed to get state by pos, code:%s, %s", tstrerror(code), GET_TASKID(pTaskInfo)); + continue; + } + + for (int32_t j = 0; j < numOfExprs; ++j) { + pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowEntryOffset); + if (pCtx[j].fpSet.cleanup) { + pCtx[j].fpSet.cleanup(&pCtx[j]); + } + } + } +} + +void cleanupResultInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf, + SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap) { + int32_t numOfExprs = pSup->numOfExprs; + int32_t* rowEntryOffset = pSup->rowEntryInfoOffset; + SqlFunctionCtx* pCtx = pSup->pCtx; + bool needCleanup = false; + for (int32_t j = 0; j < numOfExprs; ++j) { + needCleanup |= pCtx[j].needCleanup; + } + if (!needCleanup) { + return; + } + + // begin from last iter + void* pData = pGroupResInfo->dataPos; + int32_t iter = pGroupResInfo->iter; + while ((pData = tSimpleHashIterate(pHashmap, pData, &iter)) != NULL) { + SResultRowPosition* pos = pData; + + SFilePage* page = getBufPage(pBuf, pos->pageId); + if (page == NULL) { + qError("failed to get buffer, code:%s, %s", tstrerror(terrno), GET_TASKID(pTaskInfo)); + continue; + } + + SResultRow* pRow = (SResultRow*)((char*)page + pos->offset); + + for (int32_t j = 0; j < numOfExprs; ++j) { + pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowEntryOffset); + if (pCtx[j].fpSet.cleanup) { + pCtx[j].fpSet.cleanup(&pCtx[j]); + } + } + + releaseBufPage(pBuf, page); + } +} + void cleanupAggSup(SAggSupporter* pAggSup) { taosMemoryFreeClear(pAggSup->keyBuf); tSimpleHashCleanup(pAggSup->pResultRowHashTable); @@ -613,8 +692,9 @@ int32_t initAggSup(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInfo return TSDB_CODE_SUCCESS; } -void applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfoData* pTimeWindowData, - int32_t offset, int32_t forwardStep, int32_t numOfTotal, int32_t numOfOutput) { +int32_t applyAggFunctionOnPartialTuples(SExecTaskInfo* 
taskInfo, SqlFunctionCtx* pCtx, SColumnInfoData* pTimeWindowData, + int32_t offset, int32_t forwardStep, int32_t numOfTotal, int32_t numOfOutput) { + int32_t code = TSDB_CODE_SUCCESS; for (int32_t k = 0; k < numOfOutput; ++k) { // keep it temporarily SFunctionCtxStatus status = {0}; @@ -641,15 +721,14 @@ void applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pC SScalarParam out = {.columnData = &idata}; SScalarParam tw = {.numOfRows = 5, .columnData = pTimeWindowData}; - int32_t code = pCtx[k].sfp.process(&tw, 1, &out); + code = pCtx[k].sfp.process(&tw, 1, &out); if (code != TSDB_CODE_SUCCESS) { qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); taskInfo->code = code; - T_LONG_JMP(taskInfo->env, code); + return code; } pEntryInfo->numOfRes = 1; } else { - int32_t code = TSDB_CODE_SUCCESS; if (functionNeedToExecute(&pCtx[k]) && pCtx[k].fpSet.process != NULL) { if ((&pCtx[k])->input.pData[0] == NULL) { code = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; @@ -664,7 +743,7 @@ void applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pC } qError("%s apply functions error, code: %s", GET_TASKID(taskInfo), tstrerror(code)); taskInfo->code = code; - T_LONG_JMP(taskInfo->env, code); + return code; } } @@ -672,6 +751,7 @@ void applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pC functionCtxRestore(&pCtx[k], &status); } } + return code; } void functionCtxSave(SqlFunctionCtx* pCtx, SFunctionCtxStatus* pStatus) { diff --git a/source/libs/executor/src/countwindowoperator.c b/source/libs/executor/src/countwindowoperator.c index 2233d58ef8..542a7c89a9 100644 --- a/source/libs/executor/src/countwindowoperator.c +++ b/source/libs/executor/src/countwindowoperator.c @@ -149,8 +149,9 @@ void doCountWindowAggImpl(SOperatorInfo* pOperator, SSDataBlock* pBlock) { pInfo->pRow->win.ekey = tsCols[num + i - 1]; updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pInfo->pRow->win, 0); - 
applyAggFunctionOnPartialTuples(pTaskInfo, pExprSup->pCtx, &pInfo->twAggSup.timeWindowData, i, num, - pBlock->info.rows, pExprSup->numOfExprs); + code = applyAggFunctionOnPartialTuples(pTaskInfo, pExprSup->pCtx, &pInfo->twAggSup.timeWindowData, i, num, + pBlock->info.rows, pExprSup->numOfExprs); + QUERY_CHECK_CODE(code, lino, _end); if (pInfo->windowCount != pInfo->windowSliding) { if (prevRows <= pInfo->windowSliding) { if (pBuffInfo->winRows > pInfo->windowSliding) { @@ -164,8 +165,9 @@ void doCountWindowAggImpl(SOperatorInfo* pOperator, SSDataBlock* pBlock) { } if (pBuffInfo->winRows == pInfo->windowCount) { doUpdateNumOfRows(pExprSup->pCtx, pInfo->pRow, pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset); - copyResultrowToDataBlock(pExprSup->pExprInfo, pExprSup->numOfExprs, pInfo->pRow, pExprSup->pCtx, pRes, - pExprSup->rowEntryInfoOffset, pTaskInfo); + code = copyResultrowToDataBlock(pExprSup->pExprInfo, pExprSup->numOfExprs, pInfo->pRow, pExprSup->pCtx, pRes, + pExprSup->rowEntryInfoOffset, pTaskInfo); + QUERY_CHECK_CODE(code, lino, _end); pRes->info.rows += pInfo->pRow->numOfRows; clearWinStateBuff(pBuffInfo); pInfo->preStateIndex = pInfo->countSup.curStateIndex; @@ -205,8 +207,9 @@ static void buildCountResult(SExprSupp* pExprSup, SCountWindowSupp* pCountSup, S continue; } doUpdateNumOfRows(pExprSup->pCtx, pResultRow, pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset); - copyResultrowToDataBlock(pExprSup->pExprInfo, pExprSup->numOfExprs, pResultRow, pExprSup->pCtx, pBlock, - pExprSup->rowEntryInfoOffset, pTaskInfo); + code = copyResultrowToDataBlock(pExprSup->pExprInfo, pExprSup->numOfExprs, pResultRow, pExprSup->pCtx, pBlock, + pExprSup->rowEntryInfoOffset, pTaskInfo); + QUERY_CHECK_CODE(code, lino, _end); pBlock->info.rows += pResultRow->numOfRows; clearWinStateBuff(pBuff); clearResultRowInitFlag(pExprSup->pCtx, pExprSup->numOfExprs); diff --git a/source/libs/executor/src/eventwindowoperator.c b/source/libs/executor/src/eventwindowoperator.c index 
0f3a08c14b..79e7494518 100644 --- a/source/libs/executor/src/eventwindowoperator.c +++ b/source/libs/executor/src/eventwindowoperator.c @@ -37,6 +37,7 @@ typedef struct SEventWindowOperatorInfo { bool inWindow; SResultRow* pRow; SSDataBlock* pPreDataBlock; + SOperatorInfo* pOperator; } SEventWindowOperatorInfo; static int32_t eventWindowAggregateNext(SOperatorInfo* pOperator, SSDataBlock** pRes); @@ -128,6 +129,7 @@ int32_t createEventwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* phy pInfo->tsSlotId = tsSlotId; pInfo->pPreDataBlock = NULL; + pInfo->pOperator = pOperator; setOperatorInfo(pOperator, "EventWindowOperator", QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE, true, OP_NOT_OPENED, pInfo, pTaskInfo); @@ -152,6 +154,19 @@ _error: return code; } +void cleanupResultInfoInEventWindow(SOperatorInfo* pOperator, SEventWindowOperatorInfo* pInfo) { + if (pInfo == NULL || pInfo->pRow == NULL) { + return; + } + SExprSupp* pSup = &pOperator->exprSupp; + for (int32_t j = 0; j < pSup->numOfExprs; ++j) { + pSup->pCtx[j].resultInfo = getResultEntryInfo(pInfo->pRow, j, pSup->rowEntryInfoOffset); + if (pSup->pCtx[j].fpSet.cleanup) { + pSup->pCtx[j].fpSet.cleanup(&pSup->pCtx[j]); + } + } +} + void destroyEWindowOperatorInfo(void* param) { SEventWindowOperatorInfo* pInfo = (SEventWindowOperatorInfo*)param; if (pInfo == NULL) { @@ -175,6 +190,8 @@ void destroyEWindowOperatorInfo(void* param) { cleanupBasicInfo(&pInfo->binfo); colDataDestroy(&pInfo->twAggSup.timeWindowData); + cleanupResultInfoInEventWindow(pInfo->pOperator, pInfo); + pInfo->pOperator = NULL; cleanupAggSup(&pInfo->aggSup); cleanupExprSupp(&pInfo->scalarSup); taosMemoryFreeClear(param); @@ -260,8 +277,10 @@ static int32_t setSingleOutputTupleBufv1(SResultRowInfo* pResultRowInfo, STimeWi return setResultRowInitCtx(*pResult, pExprSup->pCtx, pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset); } -static void doEventWindowAggImpl(SEventWindowOperatorInfo* pInfo, SExprSupp* pSup, int32_t startIndex, int32_t endIndex, 
- const SSDataBlock* pBlock, int64_t* tsList, SExecTaskInfo* pTaskInfo) { +static int32_t doEventWindowAggImpl(SEventWindowOperatorInfo* pInfo, SExprSupp* pSup, int32_t startIndex, + int32_t endIndex, const SSDataBlock* pBlock, int64_t* tsList, + SExecTaskInfo* pTaskInfo) { + int32_t code = TSDB_CODE_SUCCESS; SWindowRowsSup* pRowSup = &pInfo->winSup; int32_t numOfOutput = pSup->numOfExprs; @@ -269,15 +288,16 @@ static void doEventWindowAggImpl(SEventWindowOperatorInfo* pInfo, SExprSupp* pSu doKeepTuple(pRowSup, tsList[endIndex], pBlock->info.id.groupId); - int32_t ret = - setSingleOutputTupleBufv1(&pInfo->binfo.resultRowInfo, &pRowSup->win, &pInfo->pRow, pSup, &pInfo->aggSup); - if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code - T_LONG_JMP(pTaskInfo->env, TSDB_CODE_APP_ERROR); + code = setSingleOutputTupleBufv1(&pInfo->binfo.resultRowInfo, &pRowSup->win, &pInfo->pRow, pSup, &pInfo->aggSup); + if (code != TSDB_CODE_SUCCESS) { // null data, too many state code + qError("failed to set single output tuple buffer, code:%d", code); + return code; } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, 0); - applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startIndex, numOfRows, - pBlock->info.rows, numOfOutput); + code = applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startIndex, numOfRows, + pBlock->info.rows, numOfOutput); + return code; } int32_t eventWindowAggImpl(SOperatorInfo* pOperator, SEventWindowOperatorInfo* pInfo, SSDataBlock* pBlock) { @@ -287,11 +307,11 @@ int32_t eventWindowAggImpl(SOperatorInfo* pOperator, SEventWindowOperatorInfo* p SExprSupp* pSup = &pOperator->exprSupp; SSDataBlock* pRes = pInfo->binfo.pRes; int64_t gid = pBlock->info.id.groupId; + SColumnInfoData *ps = NULL, *pe = NULL; SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->tsSlotId); QUERY_CHECK_NULL(pColInfoData, code, lino, _return, terrno); TSKEY* tsList 
= (TSKEY*)pColInfoData->pData; SWindowRowsSup* pRowSup = &pInfo->winSup; - SColumnInfoData *ps = NULL, *pe = NULL; int32_t rowIndex = 0; pRowSup->numOfRows = 0; @@ -333,7 +353,8 @@ int32_t eventWindowAggImpl(SOperatorInfo* pOperator, SEventWindowOperatorInfo* p } if (rowIndex < pBlock->info.rows) { - doEventWindowAggImpl(pInfo, pSup, startIndex, rowIndex, pBlock, tsList, pTaskInfo); + code = doEventWindowAggImpl(pInfo, pSup, startIndex, rowIndex, pBlock, tsList, pTaskInfo); + QUERY_CHECK_CODE(code, lino, _return); doUpdateNumOfRows(pSup->pCtx, pInfo->pRow, pSup->numOfExprs, pSup->rowEntryInfoOffset); // check buffer size @@ -343,8 +364,9 @@ int32_t eventWindowAggImpl(SOperatorInfo* pOperator, SEventWindowOperatorInfo* p QUERY_CHECK_CODE(code, lino, _return); } - copyResultrowToDataBlock(pSup->pExprInfo, pSup->numOfExprs, pInfo->pRow, pSup->pCtx, pRes, - pSup->rowEntryInfoOffset, pTaskInfo); + code = copyResultrowToDataBlock(pSup->pExprInfo, pSup->numOfExprs, pInfo->pRow, pSup->pCtx, pRes, + pSup->rowEntryInfoOffset, pTaskInfo); + QUERY_CHECK_CODE(code, lino, _return); pRes->info.rows += pInfo->pRow->numOfRows; pInfo->pRow->numOfRows = 0; @@ -352,7 +374,8 @@ int32_t eventWindowAggImpl(SOperatorInfo* pOperator, SEventWindowOperatorInfo* p pInfo->inWindow = false; rowIndex += 1; } else { - doEventWindowAggImpl(pInfo, pSup, startIndex, pBlock->info.rows - 1, pBlock, tsList, pTaskInfo); + code = doEventWindowAggImpl(pInfo, pSup, startIndex, pBlock->info.rows - 1, pBlock, tsList, pTaskInfo); + QUERY_CHECK_CODE(code, lino, _return); } } else { // find the first start value that is fulfill for the start condition for (; rowIndex < pBlock->info.rows; ++rowIndex) { diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 117a30ade2..e935b43c00 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -2111,6 +2111,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, 
pCtx->saveHandle.currentPage = -1; pCtx->pStore = pStore; pCtx->hasWindowOrGroup = false; + pCtx->needCleanup = false; } for (int32_t i = 1; i < numOfOutput; ++i) { diff --git a/source/libs/executor/src/executorInt.c b/source/libs/executor/src/executorInt.c index b5c05c8558..5fc483087a 100644 --- a/source/libs/executor/src/executorInt.c +++ b/source/libs/executor/src/executorInt.c @@ -666,8 +666,8 @@ void doUpdateNumOfRows(SqlFunctionCtx* pCtx, SResultRow* pRow, int32_t numOfExpr } } -void copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResultRow* pRow, SqlFunctionCtx* pCtx, - SSDataBlock* pBlock, const int32_t* rowEntryOffset, SExecTaskInfo* pTaskInfo) { +int32_t copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResultRow* pRow, SqlFunctionCtx* pCtx, + SSDataBlock* pBlock, const int32_t* rowEntryOffset, SExecTaskInfo* pTaskInfo) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; for (int32_t j = 0; j < numOfExprs; ++j) { @@ -690,7 +690,7 @@ void copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResultR int32_t winCode = pCtx[j].fpSet.finalize(&pCtx[j], pBlock); if (TAOS_FAILED(winCode)) { qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(winCode)); - T_LONG_JMP(pTaskInfo->env, winCode); + QUERY_CHECK_CODE(winCode, lino, _end); } } else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) { // do nothing @@ -710,8 +710,8 @@ void copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResultR _end: if (code != TSDB_CODE_SUCCESS) { qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); - T_LONG_JMP(pTaskInfo->env, code); } + return code; } // todo refactor. 
SResultRow has direct pointer in miainfo @@ -747,7 +747,12 @@ void finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPositi T_LONG_JMP(pTaskInfo->env, code); } - copyResultrowToDataBlock(pExprInfo, pSup->numOfExprs, pRow, pCtx, pBlock, rowEntryOffset, pTaskInfo); + code = copyResultrowToDataBlock(pExprInfo, pSup->numOfExprs, pRow, pCtx, pBlock, rowEntryOffset, pTaskInfo); + if (TAOS_FAILED(code)) { + releaseBufPage(pBuf, page); + qError("%s copy result row to datablock failed, code %s", GET_TASKID(pTaskInfo), tstrerror(code)); + T_LONG_JMP(pTaskInfo->env, code); + } releaseBufPage(pBuf, page); pBlock->info.rows += pRow->numOfRows; @@ -818,9 +823,9 @@ void doCopyToSDataBlockByHash(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SEx pGroupResInfo->iter = iter; pGroupResInfo->dataPos = pData; - copyResultrowToDataBlock(pExprInfo, numOfExprs, pRow, pCtx, pBlock, rowEntryOffset, pTaskInfo); - + code = copyResultrowToDataBlock(pExprInfo, numOfExprs, pRow, pCtx, pBlock, rowEntryOffset, pTaskInfo); releaseBufPage(pBuf, page); + QUERY_CHECK_CODE(code, lino, _end); pBlock->info.rows += pRow->numOfRows; if (pBlock->info.rows >= threshold) { break; @@ -892,9 +897,10 @@ void doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprSupp } pGroupResInfo->index += 1; - copyResultrowToDataBlock(pExprInfo, numOfExprs, pRow, pCtx, pBlock, rowEntryOffset, pTaskInfo); - + code = copyResultrowToDataBlock(pExprInfo, numOfExprs, pRow, pCtx, pBlock, rowEntryOffset, pTaskInfo); releaseBufPage(pBuf, page); + QUERY_CHECK_CODE(code, lino, _end); + pBlock->info.rows += pRow->numOfRows; if (pBlock->info.rows >= threshold) { break; @@ -1014,10 +1020,6 @@ static void destroySqlFunctionCtx(SqlFunctionCtx* pCtx, SExprInfo* pExpr, int32_ } for (int32_t i = 0; i < numOfOutput; ++i) { - if (pCtx[i].fpSet.cleanup != NULL) { - pCtx[i].fpSet.cleanup(&pCtx[i]); - } - if (pExpr != NULL) { SExprInfo* pExprInfo = &pExpr[i]; for (int32_t j = 0; j < pExprInfo->base.numOfParams; 
++j) { diff --git a/source/libs/executor/src/groupcacheoperator.c b/source/libs/executor/src/groupcacheoperator.c index 06706b57ee..e4f6d73b7b 100644 --- a/source/libs/executor/src/groupcacheoperator.c +++ b/source/libs/executor/src/groupcacheoperator.c @@ -373,8 +373,16 @@ _return: return code; } + +void freeGcBlkBufInfo(void* ptr) { + SGcBlkBufInfo* pBlk = (SGcBlkBufInfo*)ptr; + taosMemoryFreeClear(pBlk->pBuf); +} + + static int32_t addBlkToDirtyBufList(SGroupCacheOperatorInfo* pGCache, SGcDownstreamCtx* pCtx, SGcBlkCacheInfo* pCache, SGcBlkBufInfo* pBufInfo) { if (0 != taosHashPut(pCache->pDirtyBlk, &pBufInfo->basic.blkId, sizeof(pBufInfo->basic.blkId), pBufInfo, sizeof(*pBufInfo))) { + freeGcBlkBufInfo(pBufInfo); return TSDB_CODE_OUT_OF_MEMORY; } pBufInfo = taosHashGet(pCache->pDirtyBlk, &pBufInfo->basic.blkId, sizeof(pBufInfo->basic.blkId)); @@ -1239,10 +1247,6 @@ _return: return code; } -void freeGcBlkBufInfo(void* ptr) { - SGcBlkBufInfo* pBlk = (SGcBlkBufInfo*)ptr; - taosMemoryFree(pBlk->pBuf); -} static int32_t initGroupCacheBlockCache(SGroupCacheOperatorInfo* pInfo) { SGcBlkCacheInfo* pCache = &pInfo->blkCache; diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index 83a579615c..3ce20dbbd9 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -39,6 +39,7 @@ typedef struct SGroupbyOperatorInfo { int32_t groupKeyLen; // total group by column width SGroupResInfo groupResInfo; SExprSupp scalarSup; + SOperatorInfo *pOperator; } SGroupbyOperatorInfo; // The sort in partition may be needed later. 
@@ -85,9 +86,11 @@ static void destroyGroupOperatorInfo(void* param) { taosArrayDestroy(pInfo->pGroupCols); taosArrayDestroyEx(pInfo->pGroupColVals, freeGroupKey); cleanupExprSupp(&pInfo->scalarSup); - + cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, + &pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable); cleanupGroupResInfo(&pInfo->groupResInfo); cleanupAggSup(&pInfo->aggSup); + pInfo->pOperator = NULL; taosMemoryFreeClear(param); } @@ -329,8 +332,11 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) { } int32_t rowIndex = j - num; - applyAggFunctionOnPartialTuples(pTaskInfo, pCtx, NULL, rowIndex, num, pBlock->info.rows, - pOperator->exprSupp.numOfExprs); + ret = applyAggFunctionOnPartialTuples(pTaskInfo, pCtx, NULL, rowIndex, num, pBlock->info.rows, + pOperator->exprSupp.numOfExprs); + if (ret != TSDB_CODE_SUCCESS) { + T_LONG_JMP(pTaskInfo->env, ret); + } // assign the group keys or user input constant values if required doAssignGroupKeys(pCtx, pOperator->exprSupp.numOfExprs, pBlock->info.rows, rowIndex); @@ -347,8 +353,11 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) { } int32_t rowIndex = pBlock->info.rows - num; - applyAggFunctionOnPartialTuples(pTaskInfo, pCtx, NULL, rowIndex, num, pBlock->info.rows, - pOperator->exprSupp.numOfExprs); + ret = applyAggFunctionOnPartialTuples(pTaskInfo, pCtx, NULL, rowIndex, num, pBlock->info.rows, + pOperator->exprSupp.numOfExprs); + if (ret != TSDB_CODE_SUCCESS) { + T_LONG_JMP(pTaskInfo->env, ret); + } doAssignGroupKeys(pCtx, pOperator->exprSupp.numOfExprs, pBlock->info.rows, rowIndex); } } @@ -563,6 +572,8 @@ int32_t createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* pAggNo pInfo->binfo.inputTsOrder = pAggNode->node.inputTsOrder; pInfo->binfo.outputTsOrder = pAggNode->node.outputTsOrder; + pInfo->pOperator = pOperator; + pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, 
hashGroupbyAggregateNext, NULL, destroyGroupOperatorInfo, optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); code = appendDownstream(pOperator, &downstream, 1); diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index fe2f3f8dfe..bbd6ce39d1 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -667,13 +667,11 @@ void destroyOperator(SOperatorInfo* pOperator) { pOperator->numOfDownstream = 0; } - cleanupExprSupp(&pOperator->exprSupp); - - // close operator after cleanup exprSupp, since we need to call cleanup of sqlFunctionCtx first to avoid mem leak. if (pOperator->fpSet.closeFn != NULL && pOperator->info != NULL) { pOperator->fpSet.closeFn(pOperator->info); } + cleanupExprSupp(&pOperator->exprSupp); taosMemoryFreeClear(pOperator); } diff --git a/source/libs/executor/src/streamcountwindowoperator.c b/source/libs/executor/src/streamcountwindowoperator.c index adf764a8c5..577af29bf7 100644 --- a/source/libs/executor/src/streamcountwindowoperator.c +++ b/source/libs/executor/src/streamcountwindowoperator.c @@ -48,6 +48,9 @@ typedef struct SBuffInfo { void destroyStreamCountAggOperatorInfo(void* param) { SStreamCountAggOperatorInfo* pInfo = (SStreamCountAggOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); + cleanupResultInfoInStream(pInfo->pOperator->pTaskInfo, pInfo->streamAggSup.pState, &pInfo->pOperator->exprSupp, + &pInfo->groupResInfo); + pInfo->pOperator = NULL; destroyStreamAggSupporter(&pInfo->streamAggSup); cleanupExprSupp(&pInfo->scalarSupp); clearGroupResInfo(&pInfo->groupResInfo); @@ -906,6 +909,7 @@ int32_t createStreamCountAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* QUERY_CHECK_CODE(code, lino, _error); taosMemoryFree(buff); } + pInfo->pOperator = pOperator; pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doStreamCountAggNext, NULL, destroyStreamCountAggOperatorInfo, optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); 
setOperatorStreamStateFn(pOperator, streamCountReleaseState, streamCountReloadState); diff --git a/source/libs/executor/src/streameventwindowoperator.c b/source/libs/executor/src/streameventwindowoperator.c index 6b094e7264..4b527db0ae 100644 --- a/source/libs/executor/src/streameventwindowoperator.c +++ b/source/libs/executor/src/streameventwindowoperator.c @@ -48,6 +48,9 @@ void destroyStreamEventOperatorInfo(void* param) { } SStreamEventAggOperatorInfo* pInfo = (SStreamEventAggOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); + cleanupResultInfoInStream(pInfo->pOperator->pTaskInfo, pInfo->streamAggSup.pState, &pInfo->pOperator->exprSupp, + &pInfo->groupResInfo); + pInfo->pOperator = NULL; destroyStreamAggSupporter(&pInfo->streamAggSup); clearGroupResInfo(&pInfo->groupResInfo); taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos); @@ -951,6 +954,7 @@ int32_t createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* QUERY_CHECK_NULL(pInfo->pPkDeleted, code, lino, _error, terrno); pInfo->destHasPrimaryKey = pEventNode->window.destHasPrimayKey; + pInfo->pOperator = pOperator; setOperatorInfo(pOperator, "StreamEventAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT, true, OP_NOT_OPENED, pInfo, pTaskInfo); // for stream diff --git a/source/libs/executor/src/streamtimewindowoperator.c b/source/libs/executor/src/streamtimewindowoperator.c index 09bf73c1ee..22e462abab 100644 --- a/source/libs/executor/src/streamtimewindowoperator.c +++ b/source/libs/executor/src/streamtimewindowoperator.c @@ -473,6 +473,9 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) { } SStreamIntervalOperatorInfo* pInfo = (SStreamIntervalOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); + cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, + &pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable); + pInfo->pOperator = NULL; cleanupAggSup(&pInfo->aggSup); clearGroupResInfo(&pInfo->groupResInfo); 
taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos); @@ -1172,8 +1175,9 @@ static int32_t doStreamIntervalAggImpl(SOperatorInfo* pOperator, SSDataBlock* pS } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, 1); - applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, - pSDataBlock->info.rows, numOfOutput); + code = applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, + forwardRows, pSDataBlock->info.rows, numOfOutput); + QUERY_CHECK_CODE(code, lino, _end); key.ts = nextWin.skey; if (pInfo->delKey.ts > key.ts) { @@ -2023,6 +2027,7 @@ int32_t createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiN pInfo->pDeletedMap = tSimpleHashInit(4096, hashFn); QUERY_CHECK_NULL(pInfo->pDeletedMap, code, lino, _error, terrno); pInfo->destHasPrimaryKey = pIntervalPhyNode->window.destHasPrimayKey; + pInfo->pOperator = pOperator; pOperator->operatorType = pPhyNode->type; if (!IS_FINAL_INTERVAL_OP(pOperator) || numOfChild == 0) { @@ -2087,6 +2092,9 @@ void destroyStreamSessionAggOperatorInfo(void* param) { } SStreamSessionAggOperatorInfo* pInfo = (SStreamSessionAggOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); + cleanupResultInfoInStream(pInfo->pOperator->pTaskInfo, pInfo->streamAggSup.pState, &pInfo->pOperator->exprSupp, + &pInfo->groupResInfo); + pInfo->pOperator = NULL; destroyStreamAggSupporter(&pInfo->streamAggSup); cleanupExprSupp(&pInfo->scalarSupp); clearGroupResInfo(&pInfo->groupResInfo); @@ -2442,7 +2450,7 @@ int32_t doOneWindowAggImpl(SColumnInfoData* pTimeWindowData, SResultWindowInfo* QUERY_CHECK_CODE(code, lino, _end); updateTimeWindowInfo(pTimeWindowData, &pCurWin->sessionWin.win, winDelta); - applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, pTimeWindowData, startIndex, winRows, rows, numOutput); + code = applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, pTimeWindowData, startIndex, winRows, rows, numOutput); _end: if 
(code != TSDB_CODE_SUCCESS) { @@ -3854,6 +3862,7 @@ int32_t createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode pInfo->destHasPrimaryKey = pSessionNode->window.destHasPrimayKey; pInfo->pPkDeleted = tSimpleHashInit(64, hashFn); QUERY_CHECK_NULL(pInfo->pPkDeleted, code, lino, _error, terrno); + pInfo->pOperator = pOperator; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION; setOperatorInfo(pOperator, getStreamOpName(pOperator->operatorType), QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION, true, @@ -4102,6 +4111,7 @@ int32_t createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream, SPhys SStorageAPI* pAPI = &pTaskInfo->storageAPI; SStreamSessionAggOperatorInfo* pInfo = pOperator->info; pOperator->operatorType = pPhyNode->type; + pInfo->pOperator = pOperator; if (pPhyNode->type != QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION) { pOperator->fpSet = @@ -4172,6 +4182,9 @@ void destroyStreamStateOperatorInfo(void* param) { } SStreamStateAggOperatorInfo* pInfo = (SStreamStateAggOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); + cleanupResultInfoInStream(pInfo->pOperator->pTaskInfo, pInfo->streamAggSup.pState, &pInfo->pOperator->exprSupp, + &pInfo->groupResInfo); + pInfo->pOperator = NULL; destroyStreamAggSupporter(&pInfo->streamAggSup); clearGroupResInfo(&pInfo->groupResInfo); taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos); @@ -5027,6 +5040,7 @@ int32_t createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pInfo->pPkDeleted = tSimpleHashInit(64, hashFn); QUERY_CHECK_NULL(pInfo->pPkDeleted, code, lino, _error, terrno); pInfo->destHasPrimaryKey = pStateNode->window.destHasPrimayKey; + pInfo->pOperator = pOperator; setOperatorInfo(pOperator, "StreamStateAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE, true, OP_NOT_OPENED, pInfo, pTaskInfo); @@ -5360,6 +5374,7 @@ int32_t createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pInfo->twAggSup.deleteMark, GET_TASKID(pTaskInfo), 
pHandle->checkpointId, STREAM_STATE_BUFF_HASH, &pInfo->pState->pFileState); QUERY_CHECK_CODE(code, lino, _error); + pInfo->pOperator = pOperator; setOperatorInfo(pOperator, "StreamIntervalOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, true, OP_NOT_OPENED, pInfo, pTaskInfo); pOperator->fpSet = @@ -5474,8 +5489,9 @@ static void doStreamMidIntervalAggImpl(SOperatorInfo* pOperator, SSDataBlock* pS } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, 1); - applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, - pSDataBlock->info.rows, numOfOutput); + code = applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, + forwardRows, pSDataBlock->info.rows, numOfOutput); + QUERY_CHECK_CODE(code, lino, _end); key.ts = nextWin.skey; if (pInfo->delKey.ts > key.ts) { diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 0cd506d15a..6ac24ad313 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -37,6 +37,7 @@ typedef struct SSessionAggOperatorInfo { int64_t gap; // session window gap int32_t tsSlotId; // primary timestamp slot id STimeWindowAggSupp twAggSup; + SOperatorInfo* pOperator; } SSessionAggOperatorInfo; typedef struct SStateWindowOperatorInfo { @@ -50,6 +51,7 @@ typedef struct SStateWindowOperatorInfo { SStateKeys stateKey; int32_t tsSlotId; // primary timestamp column slot id STimeWindowAggSupp twAggSup; + SOperatorInfo* pOperator; } SStateWindowOperatorInfo; typedef enum SResultTsInterpType { @@ -653,8 +655,11 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num setNotInterpoWindowKey(pSup->pCtx, numOfExprs, RESULT_ROW_START_INTERP); updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &w, 1); - applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, 0, - 
pBlock->info.rows, numOfExprs); + ret = applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, 0, + pBlock->info.rows, numOfExprs); + if (ret != TSDB_CODE_SUCCESS) { + T_LONG_JMP(pTaskInfo->env, ret); + } if (isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)) { closeResultRow(pr); @@ -799,8 +804,11 @@ static bool hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, 1); - applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, + ret = applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows, numOfOutput); + if (ret != TSDB_CODE_SUCCESS) { + T_LONG_JMP(pTaskInfo->env, ret); + } doCloseWindow(pResultRowInfo, pInfo, pResult); @@ -838,8 +846,11 @@ static bool hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul } #endif updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, 1); - applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, - pBlock->info.rows, numOfOutput); + ret = applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, + pBlock->info.rows, numOfOutput); + if (ret != TSDB_CODE_SUCCESS) { + T_LONG_JMP(pTaskInfo->env, ret); + } doCloseWindow(pResultRowInfo, pInfo, pResult); } @@ -1031,8 +1042,11 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &window, 0); - applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, - pRowSup->numOfRows, pBlock->info.rows, numOfOutput); + ret = applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, + pRowSup->startRowIndex, pRowSup->numOfRows, pBlock->info.rows, 
numOfOutput); + if (ret != TSDB_CODE_SUCCESS) { + T_LONG_JMP(pTaskInfo->env, ret); + } // here we start a new session window doKeepNewWindowStartInfo(pRowSup, tsList, j, gid); @@ -1056,8 +1070,11 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, 0); - applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, - pRowSup->numOfRows, pBlock->info.rows, numOfOutput); + ret = applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, + pRowSup->numOfRows, pBlock->info.rows, numOfOutput); + if (ret != TSDB_CODE_SUCCESS) { + T_LONG_JMP(pTaskInfo->env, ret); + } } static int32_t openStateWindowAggOptr(SOperatorInfo* pOperator) { @@ -1209,6 +1226,9 @@ static void destroyStateWindowOperatorInfo(void* param) { SStateWindowOperatorInfo* pInfo = (SStateWindowOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); taosMemoryFreeClear(pInfo->stateKey.pData); + cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, + &pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable); + pInfo->pOperator = NULL; cleanupExprSupp(&pInfo->scalarSup); colDataDestroy(&pInfo->twAggSup.timeWindowData); cleanupAggSup(&pInfo->aggSup); @@ -1228,6 +1248,9 @@ void destroyIntervalOperatorInfo(void* param) { } SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); + cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, + &pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable); + pInfo->pOperator = NULL; cleanupAggSup(&pInfo->aggSup); cleanupExprSupp(&pInfo->scalarSupp); @@ -1415,6 +1438,7 @@ int32_t createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPhysiNode } } + pInfo->pOperator = pOperator; initResultRowInfo(&pInfo->binfo.resultRowInfo); 
setOperatorInfo(pOperator, "TimeIntervalAggOperator", QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL, true, OP_NOT_OPENED, pInfo, pTaskInfo); @@ -1492,8 +1516,11 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator // pInfo->numOfRows data belong to the current session window updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &window, 0); - applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, - pRowSup->numOfRows, pBlock->info.rows, numOfOutput); + ret = applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, + pRowSup->numOfRows, pBlock->info.rows, numOfOutput); + if (ret != TSDB_CODE_SUCCESS) { + T_LONG_JMP(pTaskInfo->env, ret); + } } // here we start a new session window @@ -1511,8 +1538,11 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, 0); - applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, - pRowSup->numOfRows, pBlock->info.rows, numOfOutput); + ret = applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, + pRowSup->numOfRows, pBlock->info.rows, numOfOutput); + if (ret != TSDB_CODE_SUCCESS) { + T_LONG_JMP(pTaskInfo->env, ret); + } } static int32_t doSessionWindowAggNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { @@ -1685,7 +1715,7 @@ int32_t createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWinodwPhy QUERY_CHECK_CODE(code, lino, _error); pInfo->tsSlotId = tsSlotId; - + pInfo->pOperator = pOperator; setOperatorInfo(pOperator, "StateWindowOperator", QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE, true, OP_NOT_OPENED, pInfo, pTaskInfo); pOperator->fpSet = createOperatorFpSet(openStateWindowAggOptr, doStateWindowAggNext, NULL, destroyStateWindowOperatorInfo, @@ -1717,7 +1747,9 @@ void 
destroySWindowOperatorInfo(void* param) { cleanupBasicInfo(&pInfo->binfo); colDataDestroy(&pInfo->twAggSup.timeWindowData); - + cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, + &pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable); + pInfo->pOperator = NULL; cleanupAggSup(&pInfo->aggSup); cleanupExprSupp(&pInfo->scalarSupp); @@ -1784,6 +1816,7 @@ int32_t createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionWinodwPh code = filterInitFromNode((SNode*)pSessionNode->window.node.pConditions, &pOperator->exprSupp.pFilterInfo, 0); QUERY_CHECK_CODE(code, lino, _error); + pInfo->pOperator = pOperator; setOperatorInfo(pOperator, "SessionWindowAggOperator", QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION, true, OP_NOT_OPENED, pInfo, pTaskInfo); pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doSessionWindowAggNext, NULL, destroySWindowOperatorInfo, @@ -1874,8 +1907,11 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR } updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, 1); - applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, - currPos - startPos, pBlock->info.rows, pSup->numOfExprs); + ret = applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, + currPos - startPos, pBlock->info.rows, pSup->numOfExprs); + if (ret != TSDB_CODE_SUCCESS) { + T_LONG_JMP(pTaskInfo->env, ret); + } finalizeResultRows(iaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pResultBlock, pTaskInfo); resetResultRow(miaInfo->pResultRow, iaInfo->aggSup.resultRowSize - sizeof(SResultRow)); @@ -1894,8 +1930,11 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR } updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, 1); - applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos, - pBlock->info.rows, 
pSup->numOfExprs); + ret = applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, + currPos - startPos, pBlock->info.rows, pSup->numOfExprs); + if (ret != TSDB_CODE_SUCCESS) { + T_LONG_JMP(pTaskInfo->env, ret); + } } static void cleanupAfterGroupResultGen(SMergeAlignedIntervalAggOperatorInfo* pMiaInfo, SSDataBlock* pRes) { @@ -2094,6 +2133,7 @@ int32_t createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SMerge initResultRowInfo(&iaInfo->binfo.resultRowInfo); code = blockDataEnsureCapacity(iaInfo->binfo.pRes, pOperator->resultInfo.capacity); QUERY_CHECK_CODE(code, lino, _error); + iaInfo->pOperator = pOperator; setOperatorInfo(pOperator, "TimeMergeAlignedIntervalAggOperator", QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL, false, OP_NOT_OPENED, miaInfo, pTaskInfo); @@ -2223,8 +2263,11 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* } updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &win, 1); - applyAggFunctionOnPartialTuples(pTaskInfo, pExprSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, forwardRows, + ret = applyAggFunctionOnPartialTuples(pTaskInfo, pExprSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows, numOfOutput); + if (ret != TSDB_CODE_SUCCESS) { + T_LONG_JMP(pTaskInfo->env, ret); + } doCloseWindow(pResultRowInfo, iaInfo, pResult); // output previous interval results after this interval (&win) is closed @@ -2262,8 +2305,11 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* } updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &nextWin, 1); - applyAggFunctionOnPartialTuples(pTaskInfo, pExprSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, forwardRows, - pBlock->info.rows, numOfOutput); + code = applyAggFunctionOnPartialTuples(pTaskInfo, pExprSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, + forwardRows, pBlock->info.rows, numOfOutput); + if (code != TSDB_CODE_SUCCESS) { + 
T_LONG_JMP(pTaskInfo->env, code); + } doCloseWindow(pResultRowInfo, iaInfo, pResult); // output previous interval results after this interval (&nextWin) is closed @@ -2427,6 +2473,7 @@ int32_t createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMergeInterva } } + pIntervalInfo->pOperator = pOperator; initResultRowInfo(&pIntervalInfo->binfo.resultRowInfo); setOperatorInfo(pOperator, "TimeMergeIntervalAggOperator", QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL, false, OP_NOT_OPENED, pMergeIntervalInfo, pTaskInfo); diff --git a/source/libs/function/inc/builtinsimpl.h b/source/libs/function/inc/builtinsimpl.h index 41e2cadace..0b2fb70eba 100644 --- a/source/libs/function/inc/builtinsimpl.h +++ b/source/libs/function/inc/builtinsimpl.h @@ -122,6 +122,7 @@ bool getPercentileFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); int32_t percentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo); int32_t percentileFunction(SqlFunctionCtx* pCtx); int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); +void percentileFunctionCleanupExt(SqlFunctionCtx* pCtx); bool getApercentileFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); int32_t apercentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo); diff --git a/source/libs/function/inc/tpercentile.h b/source/libs/function/inc/tpercentile.h index 1b80c2b1da..09df42d3a3 100644 --- a/source/libs/function/inc/tpercentile.h +++ b/source/libs/function/inc/tpercentile.h @@ -69,7 +69,7 @@ typedef struct tMemBucket { int32_t tMemBucketCreate(int32_t nElemSize, int16_t dataType, double minval, double maxval, bool hasWindowOrGroup, tMemBucket **pBucket); -void tMemBucketDestroy(tMemBucket *pBucket); +void tMemBucketDestroy(tMemBucket **pBucket); int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size); diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 976d15e7d8..a43ae835ff 100644 --- 
a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -3115,6 +3115,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .processFunc = percentileFunction, .sprocessFunc = percentileScalarFunction, .finalizeFunc = percentileFinalize, + .cleanupFunc = percentileFunctionCleanupExt, #ifdef BUILD_NO_CALL .invertFunc = NULL, #endif diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 7761ec8aa5..194b68830b 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -2009,6 +2009,17 @@ int32_t percentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResu return TSDB_CODE_SUCCESS; } +void percentileFunctionCleanupExt(SqlFunctionCtx* pCtx) { + if (pCtx == NULL || GET_RES_INFO(pCtx) == NULL || GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)) == NULL) { + return; + } + SPercentileInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + if (pInfo->pMemBucket != NULL) { + tMemBucketDestroy(&(pInfo->pMemBucket)); + pInfo->pMemBucket = NULL; + } +} + int32_t percentileFunction(SqlFunctionCtx* pCtx) { int32_t code = TSDB_CODE_SUCCESS; int32_t numOfElems = 0; @@ -2095,7 +2106,7 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) { numOfElems += 1; code = tMemBucketPut(pInfo->pMemBucket, data, 1); if (code != TSDB_CODE_SUCCESS) { - tMemBucketDestroy(pInfo->pMemBucket); + tMemBucketDestroy(&(pInfo->pMemBucket)); return code; } } @@ -2103,6 +2114,7 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) { SET_VAL(pResInfo, numOfElems, 1); } + pCtx->needCleanup = true; return TSDB_CODE_SUCCESS; } @@ -2113,8 +2125,8 @@ int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { int32_t code = 0; double v = 0; - tMemBucket* pMemBucket = ppInfo->pMemBucket; - if (pMemBucket != NULL && pMemBucket->total > 0) { // check for null + tMemBucket** pMemBucket = &ppInfo->pMemBucket; + if ((*pMemBucket) != NULL && (*pMemBucket)->total > 0) { // check for null if 
(pCtx->numOfParams > 2) { char buf[3200] = {0}; // max length of double num is 317, e.g. use %.6lf to print -1.0e+308, consider the comma and bracket, 3200 is enough. @@ -2126,7 +2138,7 @@ int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { GET_TYPED_DATA(v, double, pVal->nType, &pVal->i); - code = getPercentile(pMemBucket, v, &ppInfo->result); + code = getPercentile((*pMemBucket), v, &ppInfo->result); if (code != TSDB_CODE_SUCCESS) { goto _fin_error; } @@ -2158,7 +2170,7 @@ int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { GET_TYPED_DATA(v, double, pVal->nType, &pVal->i); - code = getPercentile(pMemBucket, v, &ppInfo->result); + code = getPercentile((*pMemBucket), v, &ppInfo->result); if (code != TSDB_CODE_SUCCESS) { goto _fin_error; } @@ -6067,7 +6079,7 @@ int32_t modeFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) { pInfo->pHash = NULL; return terrno; } - + pCtx->needCleanup = true; return TSDB_CODE_SUCCESS; } diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index 92a7c0d669..29c48460c0 100644 --- a/source/libs/function/src/tpercentile.c +++ b/source/libs/function/src/tpercentile.c @@ -291,12 +291,12 @@ int32_t tMemBucketCreate(int32_t nElemSize, int16_t dataType, double minval, dou (*pBucket)->maxCapacity = 200000; (*pBucket)->groupPagesMap = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK); if ((*pBucket)->groupPagesMap == NULL) { - tMemBucketDestroy(*pBucket); + tMemBucketDestroy(pBucket); return terrno; } if (setBoundingBox(&(*pBucket)->range, (*pBucket)->type, minval, maxval) != 0) { // qError("MemBucket:%p, invalid value range: %f-%f", pBucket, minval, maxval); - tMemBucketDestroy(*pBucket); + tMemBucketDestroy(pBucket); return TSDB_CODE_FUNC_INVALID_VALUE_RANGE; } @@ -306,13 +306,13 @@ int32_t tMemBucketCreate(int32_t nElemSize, int16_t dataType, double minval, dou (*pBucket)->hashFunc = 
getHashFunc((*pBucket)->type); if ((*pBucket)->hashFunc == NULL) { // qError("MemBucket:%p, not support data type %d, failed", pBucket, pBucket->type); - tMemBucketDestroy(*pBucket); + tMemBucketDestroy(pBucket); return TSDB_CODE_FUNC_FUNTION_PARA_TYPE; } (*pBucket)->pSlots = (tMemBucketSlot *)taosMemoryCalloc((*pBucket)->numOfSlots, sizeof(tMemBucketSlot)); if ((*pBucket)->pSlots == NULL) { - tMemBucketDestroy(*pBucket); + tMemBucketDestroy(pBucket); return terrno; } @@ -320,13 +320,13 @@ int32_t tMemBucketCreate(int32_t nElemSize, int16_t dataType, double minval, dou if (!osTempSpaceAvailable()) { // qError("MemBucket create disk based Buf failed since %s", terrstr(terrno)); - tMemBucketDestroy(*pBucket); + tMemBucketDestroy(pBucket); return TSDB_CODE_NO_DISKSPACE; } int32_t ret = createDiskbasedBuf(&(*pBucket)->pBuffer, (*pBucket)->bufPageSize, (*pBucket)->bufPageSize * DEFAULT_NUM_OF_SLOT * 4, "1", tsTempDir); if (ret != 0) { - tMemBucketDestroy(*pBucket); + tMemBucketDestroy(pBucket); return ret; } @@ -334,22 +334,22 @@ int32_t tMemBucketCreate(int32_t nElemSize, int16_t dataType, double minval, dou return TSDB_CODE_SUCCESS; } -void tMemBucketDestroy(tMemBucket *pBucket) { - if (pBucket == NULL) { +void tMemBucketDestroy(tMemBucket **pBucket) { + if (*pBucket == NULL) { return; } - void *p = taosHashIterate(pBucket->groupPagesMap, NULL); + void *p = taosHashIterate((*pBucket)->groupPagesMap, NULL); while (p) { SArray **p1 = p; - p = taosHashIterate(pBucket->groupPagesMap, p); + p = taosHashIterate((*pBucket)->groupPagesMap, p); taosArrayDestroy(*p1); } - destroyDiskbasedBuf(pBucket->pBuffer); - taosMemoryFreeClear(pBucket->pSlots); - taosHashCleanup(pBucket->groupPagesMap); - taosMemoryFreeClear(pBucket); + destroyDiskbasedBuf((*pBucket)->pBuffer); + taosMemoryFreeClear((*pBucket)->pSlots); + taosHashCleanup((*pBucket)->groupPagesMap); + taosMemoryFreeClear(*pBucket); } int32_t tMemBucketUpdateBoundingBox(MinMaxEntry *r, const char *data, int32_t dataType) { 
diff --git a/source/libs/monitorfw/inc/taos_metric_formatter_i.h b/source/libs/monitorfw/inc/taos_metric_formatter_i.h index 54e683fa91..2c891dae95 100644 --- a/source/libs/monitorfw/inc/taos_metric_formatter_i.h +++ b/source/libs/monitorfw/inc/taos_metric_formatter_i.h @@ -30,7 +30,7 @@ taos_metric_formatter_t *taos_metric_formatter_new(); /** * @brief API PRIVATE taos_metric_formatter destructor */ -int taos_metric_formatter_destroy(taos_metric_formatter_t *self); +void taos_metric_formatter_destroy(taos_metric_formatter_t *self); /** * @brief API PRIVATE Loads the help text diff --git a/source/libs/monitorfw/inc/taos_string_builder_i.h b/source/libs/monitorfw/inc/taos_string_builder_i.h index 933d778691..02ca0a0900 100644 --- a/source/libs/monitorfw/inc/taos_string_builder_i.h +++ b/source/libs/monitorfw/inc/taos_string_builder_i.h @@ -31,7 +31,7 @@ taos_string_builder_t *taos_string_builder_new(void); * API PRIVATE * @brief Destroys a taos_string_builder* */ -int taos_string_builder_destroy(taos_string_builder_t *self); +void taos_string_builder_destroy(taos_string_builder_t *self); /** * API PRIVATE diff --git a/source/libs/monitorfw/inc/taos_test.h b/source/libs/monitorfw/inc/taos_test.h index b881b2cee2..11e145a7fa 100644 --- a/source/libs/monitorfw/inc/taos_test.h +++ b/source/libs/monitorfw/inc/taos_test.h @@ -23,6 +23,11 @@ if (!(i)) return 1; #define TAOS_TEST_PARA_NULL(i) \ if (!(i)) return NULL; +#define TAOS_TEST_PARA_VOID(i) \ + if (!(i)) { \ + TAOS_LOG("parameter is NULL"); \ + return; \ + } #endif // TAOS_TEST #endif // TAOS_TEST_H diff --git a/source/libs/monitorfw/src/taos_collector.c b/source/libs/monitorfw/src/taos_collector.c index 21ebb3f737..a7ea9f0f61 100644 --- a/source/libs/monitorfw/src/taos_collector.c +++ b/source/libs/monitorfw/src/taos_collector.c @@ -39,18 +39,24 @@ taos_collector_t *taos_collector_new(const char *name) { self->name = taos_strdup(name); self->metrics = taos_map_new(); if (self->metrics == NULL) { - 
(void)taos_collector_destroy(self); + if (taos_collector_destroy(self) != 0) { + TAOS_LOG("taos_collector_destroy failed"); + } return NULL; } r = taos_map_set_free_value_fn(self->metrics, &taos_metric_free_generic); if (r) { - (void)taos_collector_destroy(self); + if (taos_collector_destroy(self) != 0) { + TAOS_LOG("taos_collector_destroy failed"); + } return NULL; } self->collect_fn = &taos_collector_default_collect; self->string_builder = taos_string_builder_new(); if (self->string_builder == NULL) { - (void)taos_collector_destroy(self); + if (taos_collector_destroy(self) != 0) { + TAOS_LOG("taos_collector_destroy failed"); + } return NULL; } self->proc_limits_file_path = NULL; @@ -70,8 +76,7 @@ int taos_collector_destroy(taos_collector_t *self) { self->metrics = NULL; if(self->string_builder != NULL){ - r = taos_string_builder_destroy(self->string_builder); - if (r) ret = r; + taos_string_builder_destroy(self->string_builder); self->string_builder = NULL; } @@ -93,7 +98,9 @@ int taos_collector_destroy_generic(void *gen) { void taos_collector_free_generic(void *gen) { taos_collector_t *self = (taos_collector_t *)gen; - (void)taos_collector_destroy(self); + if (taos_collector_destroy(self) != 0) { + TAOS_LOG("taos_collector_destroy failed"); + } } int taos_collector_set_collect_fn(taos_collector_t *self, taos_collect_fn *fn) { diff --git a/source/libs/monitorfw/src/taos_collector_registry.c b/source/libs/monitorfw/src/taos_collector_registry.c index 94295bf9c0..bfdbf92156 100644 --- a/source/libs/monitorfw/src/taos_collector_registry.c +++ b/source/libs/monitorfw/src/taos_collector_registry.c @@ -50,7 +50,7 @@ taos_collector_registry_t *taos_collector_registry_new(const char *name) { self->name = taos_strdup(name); self->collectors = taos_map_new(); - (void)taos_map_set_free_value_fn(self->collectors, &taos_collector_free_generic); + if (taos_map_set_free_value_fn(self->collectors, &taos_collector_free_generic) != 0) return NULL; if 
(taos_map_set(self->collectors, "default", taos_collector_new("default")) != 0) return NULL; self->metric_formatter = taos_metric_formatter_new(); @@ -86,17 +86,14 @@ int taos_collector_registry_destroy(taos_collector_registry_t *self) { self->collectors = NULL; if (r) ret = r; - r = taos_metric_formatter_destroy(self->metric_formatter); + taos_metric_formatter_destroy(self->metric_formatter); self->metric_formatter = NULL; - if (r) ret = r; - r = taos_string_builder_destroy(self->string_builder); + taos_string_builder_destroy(self->string_builder); self->string_builder = NULL; - if (r) ret = r; - r = taos_string_builder_destroy(self->string_builder_batch); + taos_string_builder_destroy(self->string_builder_batch); self->string_builder_batch = NULL; - if (r) ret = r; r = pthread_rwlock_destroy(self->lock); taos_free(self->lock); @@ -241,13 +238,25 @@ const char *taos_collector_registry_bridge_new(taos_collector_registry_t *self, SJson* pJson = tjsonCreateArray(); SJson* item = tjsonCreateObject(); - (void)tjsonAddItemToArray(pJson, item); - (void)tjsonAddStringToObject(item, "ts", ts); - (void)tjsonAddDoubleToObject(item, "protocol", 2); - SJson* array = tjsonCreateArray(); - (void)tjsonAddItemToObject(item, "tables", array); + if (tjsonAddItemToArray(pJson, item) != 0) { + tjsonDelete(pJson); + return NULL; + } + if (tjsonAddStringToObject(item, "ts", ts) != 0) { + tjsonDelete(pJson); + return NULL; + } + if (tjsonAddDoubleToObject(item, "protocol", 2) != 0) { + tjsonDelete(pJson); + return NULL; + } + SJson *array = tjsonCreateArray(); + if (tjsonAddItemToObject(item, "tables", array) != 0) { + tjsonDelete(pJson); + return NULL; + } - if(taos_metric_formatter_load_metrics_new(self->metric_formatter, self->collectors, ts, format, array) != 0){ + if (taos_metric_formatter_load_metrics_new(self->metric_formatter, self->collectors, ts, format, array) != 0) { TAOS_LOG("failed to load metrics"); tjsonDelete(pJson); return NULL; @@ -294,9 +303,8 @@ const char 
*taos_collector_registry_bridge_new(taos_collector_registry_t *self, r = taos_string_builder_clear(tmp_builder); if (r) goto _OVER;; - r = taos_string_builder_destroy(tmp_builder); + taos_string_builder_destroy(tmp_builder); tmp_builder = NULL; - if (r) goto _OVER;; tjsonDelete(pJson); return data; @@ -304,7 +312,7 @@ const char *taos_collector_registry_bridge_new(taos_collector_registry_t *self, _OVER: tjsonDelete(pJson); if(tmp_builder != NULL){ - (void)taos_string_builder_destroy(tmp_builder); + taos_string_builder_destroy(tmp_builder); } return NULL; diff --git a/source/libs/monitorfw/src/taos_map.c b/source/libs/monitorfw/src/taos_map.c index 2f5bf566c2..bf1b85c534 100644 --- a/source/libs/monitorfw/src/taos_map.c +++ b/source/libs/monitorfw/src/taos_map.c @@ -45,21 +45,19 @@ taos_map_node_t *taos_map_node_new(const char *key, void *value, taos_map_node_f return self; } -int taos_map_node_destroy(taos_map_node_t *self) { - TAOS_TEST_PARA(self != NULL); - if (self == NULL) return 0; +void taos_map_node_destroy(taos_map_node_t *self) { + TAOS_TEST_PARA_VOID(self != NULL); taos_free((void *)self->key); self->key = NULL; if (self->value != NULL) (*self->free_value_fn)(self->value); self->value = NULL; taos_free(self); self = NULL; - return 0; } void taos_map_node_free(void *item) { taos_map_node_t *map_node = (taos_map_node_t *)item; - (void)taos_map_node_destroy(map_node); + taos_map_node_destroy(map_node); } taos_linked_list_compare_t taos_map_node_compare(void *item_a, void *item_b) { @@ -87,7 +85,9 @@ taos_map_t *taos_map_new() { // we will only have to deallocate each key once. That will happen on taos_map_node_destroy. 
r = taos_linked_list_set_free_fn(self->keys, taos_linked_list_no_op_free); if (r) { - (void)taos_map_destroy(self); + if (taos_map_destroy(self) != 0) { + TAOS_LOG("TAOS_MAP_DESTROY_ERROR"); + } return NULL; } @@ -98,12 +98,16 @@ taos_map_t *taos_map_new() { self->addrs[i] = taos_linked_list_new(); r = taos_linked_list_set_free_fn(self->addrs[i], taos_map_node_free); if (r) { - (void)taos_map_destroy(self); + if (taos_map_destroy(self) != 0) { + TAOS_LOG("TAOS_MAP_DESTROY_ERROR"); + } return NULL; } r = taos_linked_list_set_compare_fn(self->addrs[i], taos_map_node_compare); if (r) { - (void)taos_map_destroy(self); + if (taos_map_destroy(self) != 0) { + TAOS_LOG("TAOS_MAP_DESTROY_ERROR"); + } return NULL; } } @@ -112,7 +116,9 @@ taos_map_t *taos_map_new() { r = pthread_rwlock_init(self->rwlock, NULL); if (r) { TAOS_LOG(TAOS_PTHREAD_RWLOCK_INIT_ERROR); - (void)taos_map_destroy(self); + if (taos_map_destroy(self) != 0) { + TAOS_LOG("TAOS_MAP_DESTROY_ERROR"); + } return NULL; } @@ -188,12 +194,12 @@ static void *taos_map_get_internal(const char *key, size_t *size, size_t *max_si taos_map_node_t *current_map_node = (taos_map_node_t *)current_node->item; taos_linked_list_compare_t result = taos_linked_list_compare(list, current_map_node, temp_map_node); if (result == TAOS_EQUAL) { - (void)taos_map_node_destroy(temp_map_node); + taos_map_node_destroy(temp_map_node); temp_map_node = NULL; return current_map_node->value; } } - (void)taos_map_node_destroy(temp_map_node); + taos_map_node_destroy(temp_map_node); temp_map_node = NULL; return NULL; } @@ -388,7 +394,7 @@ static int taos_map_delete_internal(const char *key, size_t *size, size_t *max_s break; } } - r = taos_map_node_destroy(temp_map_node); + taos_map_node_destroy(temp_map_node); temp_map_node = NULL; return r; } diff --git a/source/libs/monitorfw/src/taos_metric.c b/source/libs/monitorfw/src/taos_metric.c index 42564437d0..9d55680c4d 100644 --- a/source/libs/monitorfw/src/taos_metric.c +++ 
b/source/libs/monitorfw/src/taos_metric.c @@ -49,12 +49,12 @@ taos_metric_t *taos_metric_new(taos_metric_type_t metric_type, const char *name, for (int i = 0; i < label_key_count; i++) { if (strcmp(label_keys[i], "le") == 0) { TAOS_LOG(TAOS_METRIC_INVALID_LABEL_NAME); - (void)taos_metric_destroy(self); + if (taos_metric_destroy(self) != 0) return NULL; return NULL; } if (strcmp(label_keys[i], "quantile") == 0) { TAOS_LOG(TAOS_METRIC_INVALID_LABEL_NAME); - (void)taos_metric_destroy(self); + if (taos_metric_destroy(self) != 0) return NULL; return NULL; } k[i] = taos_strdup(label_keys[i]); @@ -68,14 +68,14 @@ taos_metric_t *taos_metric_new(taos_metric_type_t metric_type, const char *name, } else { r = taos_map_set_free_value_fn(self->samples, &taos_metric_sample_free_generic); if (r) { - (void)taos_metric_destroy(self); + if (taos_metric_destroy(self) != 0) return NULL; return NULL; } } self->formatter = taos_metric_formatter_new(); if (self->formatter == NULL) { - (void)taos_metric_destroy(self); + if (taos_metric_destroy(self) != 0) return NULL; return NULL; } self->rwlock = (pthread_rwlock_t *)taos_malloc(sizeof(pthread_rwlock_t)); @@ -101,9 +101,8 @@ int taos_metric_destroy(taos_metric_t *self) { if (r) ret = r; } - r = taos_metric_formatter_destroy(self->formatter); + taos_metric_formatter_destroy(self->formatter); self->formatter = NULL; - if (r) ret = r; r = pthread_rwlock_destroy(self->rwlock); if (r) { @@ -140,7 +139,9 @@ int taos_metric_destroy_generic(void *item) { void taos_metric_free_generic(void *item) { taos_metric_t *self = (taos_metric_t *)item; - (void)taos_metric_destroy(self); + if (taos_metric_destroy(self) != 0) { + TAOS_LOG("taos_metric_destroy failed"); + } } taos_metric_sample_t *taos_metric_sample_from_labels(taos_metric_t *self, const char **label_values) { diff --git a/source/libs/monitorfw/src/taos_metric_formatter.c b/source/libs/monitorfw/src/taos_metric_formatter.c index cb1edd30b6..31796c20df 100644 --- 
a/source/libs/monitorfw/src/taos_metric_formatter.c +++ b/source/libs/monitorfw/src/taos_metric_formatter.c @@ -22,6 +22,7 @@ // Private #include "taos_collector_t.h" #include "taos_linked_list_t.h" +#include "taos_log.h" #include "taos_map_i.h" #include "taos_metric_formatter_i.h" #include "taos_metric_sample_t.h" @@ -33,35 +34,28 @@ taos_metric_formatter_t *taos_metric_formatter_new() { taos_metric_formatter_t *self = (taos_metric_formatter_t *)taos_malloc(sizeof(taos_metric_formatter_t)); self->string_builder = taos_string_builder_new(); if (self->string_builder == NULL) { - (void)taos_metric_formatter_destroy(self); + taos_metric_formatter_destroy(self); return NULL; } self->err_builder = taos_string_builder_new(); if (self->err_builder == NULL) { - (void)taos_metric_formatter_destroy(self); + taos_metric_formatter_destroy(self); return NULL; } return self; } -int taos_metric_formatter_destroy(taos_metric_formatter_t *self) { - TAOS_TEST_PARA(self != NULL); - if (self == NULL) return 0; +void taos_metric_formatter_destroy(taos_metric_formatter_t *self) { + TAOS_TEST_PARA_VOID(self != NULL); - int r = 0; - int ret = 0; - - r = taos_string_builder_destroy(self->string_builder); + taos_string_builder_destroy(self->string_builder); self->string_builder = NULL; - if (r) ret = r; - r = taos_string_builder_destroy(self->err_builder); + taos_string_builder_destroy(self->err_builder); self->err_builder = NULL; - if (r) ret = r; taos_free(self); self = NULL; - return ret; } /* int taos_metric_formatter_load_help(taos_metric_formatter_t *self, const char *name, const char *help) { diff --git a/source/libs/monitorfw/src/taos_metric_sample.c b/source/libs/monitorfw/src/taos_metric_sample.c index e4b41d5475..ca41cf1a83 100644 --- a/source/libs/monitorfw/src/taos_metric_sample.c +++ b/source/libs/monitorfw/src/taos_metric_sample.c @@ -44,7 +44,6 @@ taos_metric_sample_t *taos_metric_sample_new(taos_metric_type_t type, const char int 
taos_metric_sample_destroy(taos_metric_sample_t *self) { TAOS_TEST_PARA(self != NULL); - if (self == NULL) return 0; taos_free((void *)self->l_value); self->l_value = NULL; taos_free((void *)self); @@ -63,7 +62,9 @@ int taos_metric_sample_destroy_generic(void *gen) { void taos_metric_sample_free_generic(void *gen) { taos_metric_sample_t *self = (taos_metric_sample_t *)gen; - (void)taos_metric_sample_destroy(self); + if(taos_metric_sample_destroy(self) != 0) { + TAOS_LOG(TAOS_METRIC_SAMPLE_DESTROY_ERROR); + } } int taos_metric_sample_add(taos_metric_sample_t *self, double r_value) { diff --git a/source/libs/monitorfw/src/taos_monitor_util.c b/source/libs/monitorfw/src/taos_monitor_util.c index 2285ed9e71..06ae4993c5 100644 --- a/source/libs/monitorfw/src/taos_monitor_util.c +++ b/source/libs/monitorfw/src/taos_monitor_util.c @@ -84,10 +84,10 @@ bool taos_monitor_is_match(const SJson* tags, char** pairs, int32_t count) { SJson* item = tjsonGetArrayItem(tags, i); char item_name[MONITOR_TAG_NAME_LEN] = {0}; - (void)tjsonGetStringValue(item, "name", item_name); + if (tjsonGetStringValue(item, "name", item_name) != 0) return false; char item_value[MONITOR_TAG_VALUE_LEN] = {0}; - (void)tjsonGetStringValue(item, "value", item_value); + if (tjsonGetStringValue(item, "value", item_value) != 0) return false; bool isfound = false; for(int32_t j = 0; j < count; j++){ diff --git a/source/libs/monitorfw/src/taos_string_builder.c b/source/libs/monitorfw/src/taos_string_builder.c index 6e3fe1d2e3..b1a5a098bc 100644 --- a/source/libs/monitorfw/src/taos_string_builder.c +++ b/source/libs/monitorfw/src/taos_string_builder.c @@ -20,6 +20,7 @@ #include "taos_alloc.h" // Private +#include "taos_log.h" #include "taos_string_builder_i.h" #include "taos_string_builder_t.h" #include "taos_test.h" @@ -44,7 +45,7 @@ taos_string_builder_t *taos_string_builder_new(void) { self->init_size = TAOS_STRING_BUILDER_INIT_SIZE; r = taos_string_builder_init(self); if (r) { - 
(void)taos_string_builder_destroy(self); + taos_string_builder_destroy(self); return NULL; } @@ -61,14 +62,12 @@ int taos_string_builder_init(taos_string_builder_t *self) { return 0; } -int taos_string_builder_destroy(taos_string_builder_t *self) { - TAOS_TEST_PARA(self != NULL); - if (self == NULL) return 0; +void taos_string_builder_destroy(taos_string_builder_t *self) { + TAOS_TEST_PARA_VOID(self != NULL); taos_free(self->str); self->str = NULL; taos_free(self); self = NULL; - return 0; } /** diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 0a871967cf..fe3e9b10da 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -10599,6 +10599,10 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm } } + if (NULL != pSelect->pGroupByList) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported Group by"); + } + return TSDB_CODE_SUCCESS; } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 4c51026640..44c9e76906 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -476,23 +476,14 @@ _err: if (pMeta->pTasksMap) taosHashCleanup(pMeta->pTasksMap); if (pMeta->pTaskList) taosArrayDestroy(pMeta->pTaskList); if (pMeta->pTaskDb) { - int32_t ret = tdbTbClose(pMeta->pTaskDb); - if (ret) { - stError("vgId:%d tdb failed close task db, code:%s", pMeta->vgId, tstrerror(ret)); - } + tdbTbClose(pMeta->pTaskDb); pMeta->pTaskDb = NULL; } if (pMeta->pCheckpointDb) { - int32_t ret = tdbTbClose(pMeta->pCheckpointDb); - if (ret) { - stError("vgId:%d tdb failed close task checkpointDb, code:%s", pMeta->vgId, tstrerror(ret)); - } + tdbTbClose(pMeta->pCheckpointDb); } if (pMeta->db) { - int32_t ret = tdbClose(pMeta->db); - if (ret) { - stError("vgId:%d tdb failed close meta db, code:%s", pMeta->vgId, tstrerror(ret)); - } + tdbClose(pMeta->db); } if 
(pMeta->pHbInfo) taosMemoryFreeClear(pMeta->pHbInfo); @@ -597,22 +588,10 @@ void streamMetaCloseImpl(void* arg) { streamMetaWUnLock(pMeta); // already log the error, ignore here - code = tdbAbort(pMeta->db, pMeta->txn); - if (code) { - stError("vgId:%d failed to jump of trans for tdb, code:%s", vgId, tstrerror(code)); - } - code = tdbTbClose(pMeta->pTaskDb); - if (code) { - stError("vgId:%d failed to close taskDb, code:%s", vgId, tstrerror(code)); - } - code = tdbTbClose(pMeta->pCheckpointDb); - if (code) { - stError("vgId:%d failed to close checkpointDb, code:%s", vgId, tstrerror(code)); - } - code = tdbClose(pMeta->db); - if (code) { - stError("vgId:%d failed to close db, code:%s", vgId, tstrerror(code)); - } + tdbAbort(pMeta->db, pMeta->txn); + tdbTbClose(pMeta->pTaskDb); + tdbTbClose(pMeta->pCheckpointDb); + tdbClose(pMeta->db); taosArrayDestroy(pMeta->pTaskList); taosArrayDestroy(pMeta->chkpSaved); @@ -895,7 +874,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t stError("vgId:%d failed to remove task:0x%" PRIx64 ", code:%s", pMeta->vgId, id.taskId, tstrerror(code)); } - int32_t size = (int32_t) taosHashGetSize(pMeta->pTasksMap); + int32_t size = (int32_t)taosHashGetSize(pMeta->pTasksMap); int32_t sizeInList = taosArrayGetSize(pMeta->pTaskList); if (sizeInList != size) { stError("vgId:%d tasks number not consistent in list:%d and map:%d, ", vgId, sizeInList, size); @@ -1077,7 +1056,7 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta) { tFreeStreamTask(pTask); STaskId id = streamTaskGetTaskId(pTask); - void* px = taosArrayPush(pRecycleList, &id); + void* px = taosArrayPush(pRecycleList, &id); if (px == NULL) { stError("s-task:0x%x failed record the task into recycle list due to out of memory", taskId); } diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index 536ff3e075..71a2ed3e4a 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -547,7 +547,7 @@ 
int32_t streamTaskGetNumOfUpstream(const SStreamTask* pTask) { return taosArrayG int32_t streamTaskSetUpstreamInfo(SStreamTask* pTask, const SStreamTask* pUpstreamTask) { SStreamUpstreamEpInfo* pEpInfo = createStreamTaskEpInfo(pUpstreamTask); if (pEpInfo == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } if (pTask->upstreamInfo.pList == NULL) { diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index f5f6679c5e..e3f94c1c9a 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -175,7 +175,8 @@ _SEND_RESPONSE: if (accepted && matched) { pReply->success = true; // update commit index only after matching - (void)syncNodeUpdateCommitIndex(ths, TMIN(pMsg->commitIndex, pReply->lastSendIndex)); + SyncIndex returnIndex = syncNodeUpdateCommitIndex(ths, TMIN(pMsg->commitIndex, pReply->lastSendIndex)); + sTrace("vgId:%d, update commit return index %" PRId64 "", ths->vgId, returnIndex); } // ack, i.e. send response diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c index 5054339e8e..b3560f9283 100644 --- a/source/libs/sync/src/syncCommit.c +++ b/source/libs/sync/src/syncCommit.c @@ -85,10 +85,9 @@ int64_t syncNodeCheckCommitIndex(SSyncNode* ths, SyncIndex indexLikely) { int32_t code = 0; if (indexLikely > ths->commitIndex && syncNodeAgreedUpon(ths, indexLikely)) { SyncIndex commitIndex = indexLikely; - // TODO add return when error - (void)syncNodeUpdateCommitIndex(ths, commitIndex); - sTrace("vgId:%d, agreed upon. role:%d, term:%" PRId64 ", index:%" PRId64 "", ths->vgId, ths->state, - raftStoreGetTerm(ths), commitIndex); + SyncIndex returnIndex = syncNodeUpdateCommitIndex(ths, commitIndex); + sTrace("vgId:%d, agreed upon. 
role:%d, term:%" PRId64 ", index:%" PRId64 ", return:%" PRId64, ths->vgId, ths->state, + raftStoreGetTerm(ths), commitIndex, returnIndex); } return ths->commitIndex; } diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index eb7501981b..1a90eb8ab0 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -201,13 +201,13 @@ int32_t syncReconfig(int64_t rid, SSyncCfg* pNewCfg) { if (pSyncNode->state == TAOS_SYNC_STATE_LEADER || pSyncNode->state == TAOS_SYNC_STATE_ASSIGNED_LEADER) { // TODO check return value - (void)syncNodeStopHeartbeatTimer(pSyncNode); + TAOS_CHECK_RETURN(syncNodeStopHeartbeatTimer(pSyncNode)); for (int32_t i = 0; i < TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA; ++i) { - (void)syncHbTimerInit(pSyncNode, &pSyncNode->peerHeartbeatTimerArr[i], pSyncNode->replicasId[i]); + TAOS_CHECK_RETURN(syncHbTimerInit(pSyncNode, &pSyncNode->peerHeartbeatTimerArr[i], pSyncNode->replicasId[i])); } - (void)syncNodeStartHeartbeatTimer(pSyncNode); + TAOS_CHECK_RETURN(syncNodeStartHeartbeatTimer(pSyncNode)); // syncNodeReplicate(pSyncNode); } @@ -410,9 +410,8 @@ int32_t syncSendTimeoutRsp(int64_t rid, int64_t seq) { syncNodeRelease(pNode); if (ret == 1) { sInfo("send timeout response, seq:%" PRId64 " handle:%p ahandle:%p", seq, rpcMsg.info.handle, rpcMsg.info.ahandle); - // TODO check return value - (void)rpcSendResponse(&rpcMsg); - return 0; + code = rpcSendResponse(&rpcMsg); + return code; } else { sError("no message handle to send timeout response, seq:%" PRId64, seq); return TSDB_CODE_SYN_INTERNAL_ERROR; @@ -933,7 +932,7 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak, int64_ int32_t code = syncBuildClientRequest(&rpcMsg, pMsg, seqNum, isWeak, pSyncNode->vgId); if (code != 0) { sError("vgId:%d, failed to propose msg while serialize since %s", pSyncNode->vgId, terrstr()); - (void)syncRespMgrDel(pSyncNode->pSyncRespMgr, seqNum); + code = syncRespMgrDel(pSyncNode->pSyncRespMgr, seqNum); 
TAOS_RETURN(code); } @@ -941,7 +940,7 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak, int64_ code = (*pSyncNode->syncEqMsg)(pSyncNode->msgcb, &rpcMsg); if (code != 0) { sWarn("vgId:%d, failed to propose msg while enqueue since %s", pSyncNode->vgId, terrstr()); - (void)syncRespMgrDel(pSyncNode->pSyncRespMgr, seqNum); + TAOS_CHECK_RETURN(syncRespMgrDel(pSyncNode->pSyncRespMgr, seqNum)); } if (seq != NULL) *seq = seqNum; @@ -961,7 +960,7 @@ static int32_t syncHbTimerInit(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer, SRa } static int32_t syncHbTimerStart(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer) { - int32_t ret = 0; + int32_t code = 0; int64_t tsNow = taosGetTimestampMs(); if (syncIsInit()) { SSyncHbTimerData* pData = syncHbTimerDataAcquire(pSyncTimer->hbDataRid); @@ -980,21 +979,20 @@ static int32_t syncHbTimerStart(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer) { sTrace("vgId:%d, start hb timer, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, pData->rid, pData->destId.addr); - (void)taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS / HEARTBEAT_TICK_NUM, (void*)(pData->rid), - syncEnv()->pTimerManager, &pSyncTimer->pTimer); + TAOS_CHECK_RETURN(taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS / HEARTBEAT_TICK_NUM, (void*)(pData->rid), + syncEnv()->pTimerManager, &pSyncTimer->pTimer)); } else { - ret = TSDB_CODE_SYN_INTERNAL_ERROR; + code = TSDB_CODE_SYN_INTERNAL_ERROR; sError("vgId:%d, start ctrl hb timer error, sync env is stop", pSyncNode->vgId); } - return ret; + return code; } static int32_t syncHbTimerStop(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer) { int32_t ret = 0; (void)atomic_add_fetch_64(&pSyncTimer->logicClock, 1); - if (!taosTmrStop(pSyncTimer->pTimer)) { - return TSDB_CODE_SYN_INTERNAL_ERROR; - } + bool stop = taosTmrStop(pSyncTimer->pTimer); + sDebug("vgId:%d, stop hb timer stop:%d", pSyncNode->vgId, stop); pSyncTimer->pTimer = NULL; syncHbTimerDataRemove(pSyncTimer->hbDataRid); pSyncTimer->hbDataRid = -1; @@ 
-1141,8 +1139,8 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo, int32_t vnodeVersion) { pSyncNode->replicaNum = pSyncNode->raftCfg.cfg.replicaNum; pSyncNode->totalReplicaNum = pSyncNode->raftCfg.cfg.totalReplicaNum; for (int32_t i = 0; i < pSyncNode->raftCfg.cfg.totalReplicaNum; ++i) { - if (!syncUtilNodeInfo2RaftId(&pSyncNode->raftCfg.cfg.nodeInfo[i], pSyncNode->vgId, &pSyncNode->replicasId[i])) { - terrno = TSDB_CODE_SYN_INTERNAL_ERROR; + if (syncUtilNodeInfo2RaftId(&pSyncNode->raftCfg.cfg.nodeInfo[i], pSyncNode->vgId, &pSyncNode->replicasId[i]) == + false) { sError("vgId:%d, failed to determine raft member id, replica:%d", pSyncNode->vgId, i); goto _error; } @@ -1308,7 +1306,10 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo, int32_t vnodeVersion) { } // tools - (void)syncRespMgrCreate(pSyncNode, SYNC_RESP_TTL_MS, &pSyncNode->pSyncRespMgr); // TODO: check return value + if ((code = syncRespMgrCreate(pSyncNode, SYNC_RESP_TTL_MS, &pSyncNode->pSyncRespMgr)) != 0) { + sError("vgId:%d, failed to create SyncRespMgr", pSyncNode->vgId); + goto _error; + } if (pSyncNode->pSyncRespMgr == NULL) { sError("vgId:%d, failed to create SyncRespMgr", pSyncNode->vgId); goto _error; @@ -1471,29 +1472,31 @@ int32_t syncNodeStart(SSyncNode* pSyncNode) { #ifdef BUILD_NO_CALL int32_t syncNodeStartStandBy(SSyncNode* pSyncNode) { // state change + int32_t code = 0; pSyncNode->state = TAOS_SYNC_STATE_FOLLOWER; pSyncNode->roleTimeMs = taosGetTimestampMs(); // TODO check return value - (void)syncNodeStopHeartbeatTimer(pSyncNode); + TAOS_CHECK_RETURN(syncNodeStopHeartbeatTimer(pSyncNode)); // reset elect timer, long enough int32_t electMS = TIMER_MAX_MS; - int32_t ret = syncNodeRestartElectTimer(pSyncNode, electMS); - if (ret < 0) { + code = syncNodeRestartElectTimer(pSyncNode, electMS); + if (code < 0) { sError("vgId:%d, failed to restart elect timer since %s", pSyncNode->vgId, terrstr()); return -1; } - ret = syncNodeStartPingTimer(pSyncNode); - if (ret < 0) { + code = 
syncNodeStartPingTimer(pSyncNode); + if (code < 0) { sError("vgId:%d, failed to start ping timer since %s", pSyncNode->vgId, terrstr()); return -1; } - return ret; + return code; } #endif void syncNodePreClose(SSyncNode* pSyncNode) { + int32_t code = 0; if (pSyncNode == NULL) { sError("failed to pre close sync node since sync node is null"); return; @@ -1508,13 +1511,22 @@ void syncNodePreClose(SSyncNode* pSyncNode) { } // stop elect timer - (void)syncNodeStopElectTimer(pSyncNode); + if ((code = syncNodeStopElectTimer(pSyncNode)) != 0) { + sError("vgId:%d, failed to stop elect timer since %s", pSyncNode->vgId, tstrerror(code)); + return; + } // stop heartbeat timer - (void)syncNodeStopHeartbeatTimer(pSyncNode); + if ((code = syncNodeStopHeartbeatTimer(pSyncNode)) != 0) { + sError("vgId:%d, failed to stop heartbeat timer since %s", pSyncNode->vgId, tstrerror(code)); + return; + } // stop ping timer - (void)syncNodeStopPingTimer(pSyncNode); + if ((code = syncNodeStopPingTimer(pSyncNode)) != 0) { + sError("vgId:%d, failed to stop ping timer since %s", pSyncNode->vgId, tstrerror(code)); + return; + } // clean rsp syncRespCleanRsp(pSyncNode->pSyncRespMgr); @@ -1536,14 +1548,24 @@ void syncNodePostClose(SSyncNode* pSyncNode) { void syncHbTimerDataFree(SSyncHbTimerData* pData) { taosMemoryFree(pData); } void syncNodeClose(SSyncNode* pSyncNode) { + int32_t code = 0; if (pSyncNode == NULL) return; sNInfo(pSyncNode, "sync close, node:%p", pSyncNode); syncRespCleanRsp(pSyncNode->pSyncRespMgr); - (void)syncNodeStopPingTimer(pSyncNode); - (void)syncNodeStopElectTimer(pSyncNode); - (void)syncNodeStopHeartbeatTimer(pSyncNode); + if ((code = syncNodeStopPingTimer(pSyncNode)) != 0) { + sError("vgId:%d, failed to stop ping timer since %s", pSyncNode->vgId, tstrerror(code)); + return; + } + if ((code = syncNodeStopElectTimer(pSyncNode)) != 0) { + sError("vgId:%d, failed to stop elect timer since %s", pSyncNode->vgId, tstrerror(code)); + return; + } + if ((code = 
syncNodeStopHeartbeatTimer(pSyncNode)) != 0) { + sError("vgId:%d, failed to stop heartbeat timer since %s", pSyncNode->vgId, tstrerror(code)); + return; + } syncNodeLogReplDestroy(pSyncNode); syncRespMgrDestroy(pSyncNode->pSyncRespMgr); @@ -1599,28 +1621,28 @@ ESyncStrategy syncNodeStrategy(SSyncNode* pSyncNode) { return pSyncNode->raftCfg // timer control -------------- int32_t syncNodeStartPingTimer(SSyncNode* pSyncNode) { - int32_t ret = 0; + int32_t code = 0; if (syncIsInit()) { - (void)taosTmrReset(pSyncNode->FpPingTimerCB, pSyncNode->pingTimerMS, (void*)pSyncNode->rid, - syncEnv()->pTimerManager, &pSyncNode->pPingTimer); + TAOS_CHECK_RETURN(taosTmrReset(pSyncNode->FpPingTimerCB, pSyncNode->pingTimerMS, (void*)pSyncNode->rid, + syncEnv()->pTimerManager, &pSyncNode->pPingTimer)); atomic_store_64(&pSyncNode->pingTimerLogicClock, pSyncNode->pingTimerLogicClockUser); } else { sError("vgId:%d, start ping timer error, sync env is stop", pSyncNode->vgId); } - return ret; + return code; } int32_t syncNodeStopPingTimer(SSyncNode* pSyncNode) { - int32_t ret = 0; + int32_t code = 0; (void)atomic_add_fetch_64(&pSyncNode->pingTimerLogicClockUser, 1); - // TODO check return value - (void)taosTmrStop(pSyncNode->pPingTimer); + bool stop = taosTmrStop(pSyncNode->pPingTimer); + sDebug("vgId:%d, stop ping timer, stop:%d", pSyncNode->vgId, stop); pSyncNode->pPingTimer = NULL; - return ret; + return code; } int32_t syncNodeStartElectTimer(SSyncNode* pSyncNode, int32_t ms) { - int32_t ret = 0; + int32_t code = 0; if (syncIsInit()) { pSyncNode->electTimerMS = ms; @@ -1630,22 +1652,22 @@ int32_t syncNodeStartElectTimer(SSyncNode* pSyncNode, int32_t ms) { pSyncNode->electTimerParam.pSyncNode = pSyncNode; pSyncNode->electTimerParam.pData = NULL; - (void)taosTmrReset(pSyncNode->FpElectTimerCB, pSyncNode->electTimerMS, (void*)(pSyncNode->rid), - syncEnv()->pTimerManager, &pSyncNode->pElectTimer); + TAOS_CHECK_RETURN(taosTmrReset(pSyncNode->FpElectTimerCB, pSyncNode->electTimerMS, 
(void*)(pSyncNode->rid), + syncEnv()->pTimerManager, &pSyncNode->pElectTimer)); } else { sError("vgId:%d, start elect timer error, sync env is stop", pSyncNode->vgId); } - return ret; + return code; } int32_t syncNodeStopElectTimer(SSyncNode* pSyncNode) { - int32_t ret = 0; + int32_t code = 0; (void)atomic_add_fetch_64(&pSyncNode->electTimerLogicClock, 1); - // TODO check return value - (void)taosTmrStop(pSyncNode->pElectTimer); + bool stop = taosTmrStop(pSyncNode->pElectTimer); + sDebug("vgId:%d, stop elect timer, stop:%d", pSyncNode->vgId, stop); pSyncNode->pElectTimer = NULL; - return ret; + return code; } int32_t syncNodeRestartElectTimer(SSyncNode* pSyncNode, int32_t ms) { @@ -1666,7 +1688,10 @@ void syncNodeResetElectTimer(SSyncNode* pSyncNode) { } // TODO check return value - (void)syncNodeRestartElectTimer(pSyncNode, electMS); + if ((code = syncNodeRestartElectTimer(pSyncNode, electMS)) != 0) { + sError("vgId:%d, failed to restart elect timer since %s", pSyncNode->vgId, terrstr()); + return; + }; sNTrace(pSyncNode, "reset elect timer, min:%d, max:%d, ms:%d", pSyncNode->electBaseLine, 2 * pSyncNode->electBaseLine, electMS); @@ -1674,17 +1699,17 @@ void syncNodeResetElectTimer(SSyncNode* pSyncNode) { #ifdef BUILD_NO_CALL static int32_t syncNodeDoStartHeartbeatTimer(SSyncNode* pSyncNode) { - int32_t ret = 0; + int32_t code = 0; if (syncIsInit()) { - (void)taosTmrReset(pSyncNode->FpHeartbeatTimerCB, pSyncNode->heartbeatTimerMS, (void*)pSyncNode->rid, - syncEnv()->pTimerManager, &pSyncNode->pHeartbeatTimer); + TAOS_CHECK_RETURN(taosTmrReset(pSyncNode->FpHeartbeatTimerCB, pSyncNode->heartbeatTimerMS, (void*)pSyncNode->rid, + syncEnv()->pTimerManager, &pSyncNode->pHeartbeatTimer)); atomic_store_64(&pSyncNode->heartbeatTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser); } else { sError("vgId:%d, start heartbeat timer error, sync env is stop", pSyncNode->vgId); } sNTrace(pSyncNode, "start heartbeat timer, ms:%d", pSyncNode->heartbeatTimerMS); - return ret; + 
return code; } #endif @@ -1707,12 +1732,12 @@ int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode) { } int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode) { - int32_t ret = 0; + int32_t code = 0; #if 0 - //TODO check return value - (void)atomic_add_fetch_64(&pSyncNode->heartbeatTimerLogicClockUser, 1); - (void)taosTmrStop(pSyncNode->pHeartbeatTimer); + TAOS_CHECK_RETURN(atomic_add_fetch_64(&pSyncNode->heartbeatTimerLogicClockUser, 1)); + bool stop = taosTmrStop(pSyncNode->pHeartbeatTimer); + sDebug("vgId:%d, stop heartbeat timer, stop:%d", pSyncNode->vgId, stop); pSyncNode->pHeartbeatTimer = NULL; #endif @@ -1723,14 +1748,15 @@ int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode) { } } - return ret; + return code; } #ifdef BUILD_NO_CALL int32_t syncNodeRestartHeartbeatTimer(SSyncNode* pSyncNode) { // TODO check return value - (void)syncNodeStopHeartbeatTimer(pSyncNode); - (void)syncNodeStartHeartbeatTimer(pSyncNode); + int32_t code = 0; + TAOS_CHECK_RETURN(syncNodeStopHeartbeatTimer(pSyncNode)); + TAOS_CHECK_RETURN(syncNodeStartHeartbeatTimer(pSyncNode)); return 0; } #endif @@ -1806,6 +1832,7 @@ static bool syncIsConfigChanged(const SSyncCfg* pOldCfg, const SSyncCfg* pNewCfg } int32_t syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncIndex lastConfigChangeIndex) { + int32_t code = 0; SSyncCfg oldConfig = pSyncNode->raftCfg.cfg; if (!syncIsConfigChanged(&oldConfig, pNewConfig)) { sInfo("vgId:1, sync not reconfig since not changed"); @@ -1873,7 +1900,7 @@ int32_t syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncI // init internal pSyncNode->myNodeInfo = pSyncNode->raftCfg.cfg.nodeInfo[pSyncNode->raftCfg.cfg.myIndex]; - (void)syncUtilNodeInfo2RaftId(&pSyncNode->myNodeInfo, pSyncNode->vgId, &pSyncNode->myRaftId); + if (syncUtilNodeInfo2RaftId(&pSyncNode->myNodeInfo, pSyncNode->vgId, &pSyncNode->myRaftId) == false) return terrno; // init peersNum, peers, peersId pSyncNode->peersNum = 
pSyncNode->raftCfg.cfg.totalReplicaNum - 1; @@ -1886,14 +1913,17 @@ int32_t syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncI } } for (int32_t i = 0; i < pSyncNode->peersNum; ++i) { - (void)syncUtilNodeInfo2RaftId(&pSyncNode->peersNodeInfo[i], pSyncNode->vgId, &pSyncNode->peersId[i]); + if (syncUtilNodeInfo2RaftId(&pSyncNode->peersNodeInfo[i], pSyncNode->vgId, &pSyncNode->peersId[i]) == false) + return terrno; } // init replicaNum, replicasId pSyncNode->replicaNum = pSyncNode->raftCfg.cfg.replicaNum; pSyncNode->totalReplicaNum = pSyncNode->raftCfg.cfg.totalReplicaNum; for (int32_t i = 0; i < pSyncNode->raftCfg.cfg.totalReplicaNum; ++i) { - (void)syncUtilNodeInfo2RaftId(&pSyncNode->raftCfg.cfg.nodeInfo[i], pSyncNode->vgId, &pSyncNode->replicasId[i]); + if (syncUtilNodeInfo2RaftId(&pSyncNode->raftCfg.cfg.nodeInfo[i], pSyncNode->vgId, &pSyncNode->replicasId[i]) == + false) + return terrno; } // update quorum first @@ -1939,7 +1969,7 @@ int32_t syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncI // create new for (int32_t i = 0; i < TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA; ++i) { if (pSyncNode->senders[i] == NULL) { - (void)snapshotSenderCreate(pSyncNode, i, &pSyncNode->senders[i]); + TAOS_CHECK_RETURN(snapshotSenderCreate(pSyncNode, i, &pSyncNode->senders[i])); if (pSyncNode->senders[i] == NULL) { // will be created later while send snapshot sSError(pSyncNode->senders[i], "snapshot sender create failed while reconfig"); @@ -1961,10 +1991,10 @@ int32_t syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncI } // persist cfg - (void)syncWriteCfgFile(pSyncNode); + TAOS_CHECK_RETURN(syncWriteCfgFile(pSyncNode)); } else { // persist cfg - (void)syncWriteCfgFile(pSyncNode); + TAOS_CHECK_RETURN(syncWriteCfgFile(pSyncNode)); sNInfo(pSyncNode, "do not config change from %d to %d", oldConfig.totalReplicaNum, pNewConfig->totalReplicaNum); } @@ -2015,7 +2045,7 @@ void syncNodeStepDown(SSyncNode* pSyncNode, SyncTerm 
newTerm) { void syncNodeLeaderChangeRsp(SSyncNode* pSyncNode) { syncRespCleanRsp(pSyncNode->pSyncRespMgr); } void syncNodeBecomeFollower(SSyncNode* pSyncNode, const char* debugStr) { - // maybe clear leader cache + int32_t code = 0; // maybe clear leader cache if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) { pSyncNode->leaderCache = EMPTY_RAFT_ID; } @@ -2025,7 +2055,10 @@ void syncNodeBecomeFollower(SSyncNode* pSyncNode, const char* debugStr) { // state change pSyncNode->state = TAOS_SYNC_STATE_FOLLOWER; pSyncNode->roleTimeMs = taosGetTimestampMs(); - (void)syncNodeStopHeartbeatTimer(pSyncNode); + if ((code = syncNodeStopHeartbeatTimer(pSyncNode)) != 0) { + sError("vgId:%d, failed to stop heartbeat timer since %s", pSyncNode->vgId, tstrerror(code)); + return; + } // trace log sNTrace(pSyncNode, "become follower %s", debugStr); @@ -2042,7 +2075,10 @@ void syncNodeBecomeFollower(SSyncNode* pSyncNode, const char* debugStr) { pSyncNode->minMatchIndex = SYNC_INDEX_INVALID; // reset log buffer - (void)syncLogBufferReset(pSyncNode->pLogBuf, pSyncNode); + if ((code = syncLogBufferReset(pSyncNode->pLogBuf, pSyncNode)) != 0) { + sError("vgId:%d, failed to reset log buffer since %s", pSyncNode->vgId, tstrerror(code)); + return; + } // reset elect timer syncNodeResetElectTimer(pSyncNode); @@ -2069,7 +2105,11 @@ void syncNodeBecomeLearner(SSyncNode* pSyncNode, const char* debugStr) { pSyncNode->minMatchIndex = SYNC_INDEX_INVALID; // reset log buffer - (void)syncLogBufferReset(pSyncNode->pLogBuf, pSyncNode); + int32_t code = 0; + if ((code = syncLogBufferReset(pSyncNode->pLogBuf, pSyncNode)) != 0) { + sError("vgId:%d, failed to reset log buffer since %s", pSyncNode->vgId, tstrerror(code)); + return; + }; } // TLA+ Spec @@ -2091,6 +2131,7 @@ void syncNodeBecomeLearner(SSyncNode* pSyncNode, const char* debugStr) { // /\ UNCHANGED <> // void syncNodeBecomeLeader(SSyncNode* pSyncNode, const char* debugStr) { + int32_t code = 0; pSyncNode->becomeLeaderNum++; pSyncNode->hbrSlowNum = 
0; @@ -2122,7 +2163,10 @@ void syncNodeBecomeLeader(SSyncNode* pSyncNode, const char* debugStr) { } // init peer mgr - (void)syncNodePeerStateInit(pSyncNode); + if ((code = syncNodePeerStateInit(pSyncNode)) != 0) { + sError("vgId:%d, failed to init peer state since %s", pSyncNode->vgId, tstrerror(code)); + return; + } #if 0 // update sender private term @@ -2143,13 +2187,22 @@ void syncNodeBecomeLeader(SSyncNode* pSyncNode, const char* debugStr) { } // stop elect timer - (void)syncNodeStopElectTimer(pSyncNode); + if ((code = syncNodeStopElectTimer(pSyncNode)) != 0) { + sError("vgId:%d, failed to stop elect timer since %s", pSyncNode->vgId, tstrerror(code)); + return; + } // start heartbeat timer - (void)syncNodeStartHeartbeatTimer(pSyncNode); + if ((code = syncNodeStartHeartbeatTimer(pSyncNode)) != 0) { + sError("vgId:%d, failed to start heartbeat timer since %s", pSyncNode->vgId, tstrerror(code)); + return; + } // send heartbeat right now - (void)syncNodeHeartbeatPeers(pSyncNode); + if ((code = syncNodeHeartbeatPeers(pSyncNode)) != 0) { + sError("vgId:%d, failed to send heartbeat to peers since %s", pSyncNode->vgId, tstrerror(code)); + return; + } // call back if (pSyncNode->pFsm != NULL && pSyncNode->pFsm->FpBecomeLeaderCb != NULL) { @@ -2160,13 +2213,17 @@ void syncNodeBecomeLeader(SSyncNode* pSyncNode, const char* debugStr) { pSyncNode->minMatchIndex = SYNC_INDEX_INVALID; // reset log buffer - (void)syncLogBufferReset(pSyncNode->pLogBuf, pSyncNode); + if ((code = syncLogBufferReset(pSyncNode->pLogBuf, pSyncNode)) != 0) { + sError("vgId:%d, failed to reset log buffer since %s", pSyncNode->vgId, tstrerror(code)); + return; + } // trace log sNInfo(pSyncNode, "become leader %s", debugStr); } void syncNodeBecomeAssignedLeader(SSyncNode* pSyncNode) { + int32_t code = 0; pSyncNode->becomeAssignedLeaderNum++; pSyncNode->hbrSlowNum = 0; @@ -2198,7 +2255,10 @@ void syncNodeBecomeAssignedLeader(SSyncNode* pSyncNode) { } // init peer mgr - 
(void)syncNodePeerStateInit(pSyncNode); + if ((code = syncNodePeerStateInit(pSyncNode)) != 0) { + sError("vgId:%d, failed to init peer state since %s", pSyncNode->vgId, tstrerror(code)); + return; + } // close receiver if (snapshotReceiverIsStart(pSyncNode->pNewNodeReceiver)) { @@ -2206,13 +2266,22 @@ void syncNodeBecomeAssignedLeader(SSyncNode* pSyncNode) { } // stop elect timer - (void)syncNodeStopElectTimer(pSyncNode); + if ((code = syncNodeStopElectTimer(pSyncNode)) != 0) { + sError("vgId:%d, failed to stop elect timer since %s", pSyncNode->vgId, tstrerror(code)); + return; + } // start heartbeat timer - (void)syncNodeStartHeartbeatTimer(pSyncNode); + if ((code = syncNodeStartHeartbeatTimer(pSyncNode)) != 0) { + sError("vgId:%d, failed to start heartbeat timer since %s", pSyncNode->vgId, tstrerror(code)); + return; + } // send heartbeat right now - (void)syncNodeHeartbeatPeers(pSyncNode); + if ((code = syncNodeHeartbeatPeers(pSyncNode)) != 0) { + sError("vgId:%d, failed to send heartbeat to peers since %s", pSyncNode->vgId, tstrerror(code)); + return; + } // call back if (pSyncNode->pFsm != NULL && pSyncNode->pFsm->FpBecomeAssignedLeaderCb != NULL) { @@ -2223,7 +2292,10 @@ void syncNodeBecomeAssignedLeader(SSyncNode* pSyncNode) { pSyncNode->minMatchIndex = SYNC_INDEX_INVALID; // reset log buffer - (void)syncLogBufferReset(pSyncNode->pLogBuf, pSyncNode); + if ((code = syncLogBufferReset(pSyncNode->pLogBuf, pSyncNode)) != 0) { + sError("vgId:%d, failed to reset log buffer since %s", pSyncNode->vgId, tstrerror(code)); + return; + } // trace log sNInfo(pSyncNode, "become assigned leader"); @@ -2513,8 +2585,10 @@ static void syncNodeEqPingTimer(void* param, void* tmrId) { } _out: - (void)taosTmrReset(syncNodeEqPingTimer, pNode->pingTimerMS, (void*)pNode->rid, syncEnv()->pTimerManager, - &pNode->pPingTimer); + if ((code = taosTmrReset(syncNodeEqPingTimer, pNode->pingTimerMS, (void*)pNode->rid, syncEnv()->pTimerManager, + &pNode->pPingTimer)) != 0) { + sError("failed 
to reset ping timer since %s", tstrerror(code)); + }; } syncNodeRelease(pNode); } @@ -2591,8 +2665,9 @@ static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) { } _out: - (void)taosTmrReset(syncNodeEqHeartbeatTimer, pNode->heartbeatTimerMS, (void*)pNode->rid, syncEnv()->pTimerManager, - &pNode->pHeartbeatTimer); + if (taosTmrReset(syncNodeEqHeartbeatTimer, pNode->heartbeatTimerMS, (void*)pNode->rid, syncEnv()->pTimerManager, + &pNode->pHeartbeatTimer) != 0) + return; } else { sTrace("==syncNodeEqHeartbeatTimer== heartbeatTimerLogicClock:%" PRId64 ", heartbeatTimerLogicClockUser:%" PRId64, @@ -2603,6 +2678,7 @@ static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) { #endif static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) { + int32_t code = 0; int64_t hbDataRid = (int64_t)param; int64_t tsNow = taosGetTimestampMs(); @@ -2646,7 +2722,12 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) { pData->execTime += pSyncTimer->timerMS; SRpcMsg rpcMsg = {0}; - (void)syncBuildHeartbeat(&rpcMsg, pSyncNode->vgId); + if ((code = syncBuildHeartbeat(&rpcMsg, pSyncNode->vgId)) != 0) { + sError("vgId:%d, failed to build heartbeat msg since %s", pSyncNode->vgId, tstrerror(code)); + syncNodeRelease(pSyncNode); + syncHbTimerDataRelease(pData); + return; + } pSyncNode->minMatchIndex = syncMinMatchIndex(pSyncNode); @@ -2668,14 +2749,22 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) { STraceId* trace = &(rpcMsg.info.traceId); sGTrace("vgId:%d, send sync-heartbeat to dnode:%d", pSyncNode->vgId, DID(&(pSyncMsg->destId))); syncLogSendHeartbeat(pSyncNode, pSyncMsg, false, timerElapsed, pData->execTime); - (void)syncNodeSendHeartbeat(pSyncNode, &pSyncMsg->destId, &rpcMsg); + int ret = syncNodeSendHeartbeat(pSyncNode, &pSyncMsg->destId, &rpcMsg); + if (ret != 0) { + sError("vgId:%d, failed to send heartbeat since %s", pSyncNode->vgId, tstrerror(ret)); + } } else { } if (syncIsInit()) { // sTrace("vgId:%d, reset peer hb 
timer", pSyncNode->vgId); - (void)taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS / HEARTBEAT_TICK_NUM, (void*)hbDataRid, - syncEnv()->pTimerManager, &pSyncTimer->pTimer); + if ((code = taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS / HEARTBEAT_TICK_NUM, + (void*)hbDataRid, syncEnv()->pTimerManager, &pSyncTimer->pTimer)) != 0) { + sError("vgId:%d, reset peer hb timer error, %s", pSyncNode->vgId, tstrerror(code)); + syncNodeRelease(pSyncNode); + syncHbTimerDataRelease(pData); + return; + } } else { sError("sync env is stop, reset peer hb timer error"); } @@ -2715,6 +2804,7 @@ int32_t syncCacheEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, LRUHand void syncBuildConfigFromReq(SAlterVnodeReplicaReq* pReq, SSyncCfg* cfg) { // TODO SAlterVnodeReplicaReq name is proper? cfg->replicaNum = 0; cfg->totalReplicaNum = 0; + int32_t code = 0; for (int i = 0; i < pReq->replica; ++i) { SNodeInfo* pNode = &cfg->nodeInfo[i]; @@ -2722,9 +2812,9 @@ void syncBuildConfigFromReq(SAlterVnodeReplicaReq* pReq, SSyncCfg* cfg) { // TO pNode->nodePort = pReq->replicas[i].port; tstrncpy(pNode->nodeFqdn, pReq->replicas[i].fqdn, sizeof(pNode->nodeFqdn)); pNode->nodeRole = TAOS_SYNC_ROLE_VOTER; - (void)tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); - sInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d nodeRole:%d", pReq->vgId, i, pNode->nodeFqdn, pNode->nodePort, - pNode->nodeId, pNode->nodeRole); + bool update = tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); + sInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d nodeRole:%d, update:%d", pReq->vgId, i, pNode->nodeFqdn, + pNode->nodePort, pNode->nodeId, pNode->nodeRole, update); cfg->replicaNum++; } if (pReq->selfIndex != -1) { @@ -2736,9 +2826,9 @@ void syncBuildConfigFromReq(SAlterVnodeReplicaReq* pReq, SSyncCfg* cfg) { // TO pNode->nodePort = pReq->learnerReplicas[cfg->totalReplicaNum].port; pNode->nodeRole = TAOS_SYNC_ROLE_LEARNER; 
tstrncpy(pNode->nodeFqdn, pReq->learnerReplicas[cfg->totalReplicaNum].fqdn, sizeof(pNode->nodeFqdn)); - (void)tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); - sInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d nodeRole:%d", pReq->vgId, i, pNode->nodeFqdn, pNode->nodePort, - pNode->nodeId, pNode->nodeRole); + bool update = tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); + sInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d nodeRole:%d, update:%d", pReq->vgId, i, pNode->nodeFqdn, + pNode->nodePort, pNode->nodeId, pNode->nodeRole, update); cfg->totalReplicaNum++; } cfg->totalReplicaNum += pReq->replica; @@ -2849,9 +2939,9 @@ int32_t syncNodeRebuildPeerAndCfg(SSyncNode* ths, SSyncCfg* cfg) { syncUtilNodeInfo2EpSet(&ths->peersNodeInfo[i], &ths->peersEpset[i]); - if (!syncUtilNodeInfo2RaftId(&ths->peersNodeInfo[i], ths->vgId, &ths->peersId[i])) { + if (syncUtilNodeInfo2RaftId(&ths->peersNodeInfo[i], ths->vgId, &ths->peersId[i]) == false) { sError("vgId:%d, failed to determine raft member id, peer:%d", ths->vgId, i); - return -1; + return terrno; } i++; @@ -2919,8 +3009,7 @@ int32_t syncNodeRebuildAndCopyIfExist(SSyncNode* ths, int32_t oldtotalReplicaNum ths->replicaNum = ths->raftCfg.cfg.replicaNum; ths->totalReplicaNum = ths->raftCfg.cfg.totalReplicaNum; for (int32_t i = 0; i < ths->raftCfg.cfg.totalReplicaNum; ++i) { - if (!syncUtilNodeInfo2RaftId(&ths->raftCfg.cfg.nodeInfo[i], ths->vgId, &ths->replicasId[i])) - return TSDB_CODE_SYN_INTERNAL_ERROR; + if (syncUtilNodeInfo2RaftId(&ths->raftCfg.cfg.nodeInfo[i], ths->vgId, &ths->replicasId[i]) == false) return terrno; } // 2.rebuild MatchIndex, remove deleted one @@ -3287,7 +3376,10 @@ int32_t syncNodeAppend(SSyncNode* ths, SSyncRaftEntry* pEntry) { // append to log buffer if ((code = syncLogBufferAppend(ths->pLogBuf, ths, pEntry)) < 0) { sError("vgId:%d, failed to enqueue sync log buffer, index:%" PRId64, ths->vgId, pEntry->index); - 
(void)syncFsmExecute(ths, ths->pFsm, ths->state, raftStoreGetTerm(ths), pEntry, terrno, false); + int32_t ret = 0; + if ((ret = syncFsmExecute(ths, ths->pFsm, ths->state, raftStoreGetTerm(ths), pEntry, terrno, false)) != 0) { + sError("vgId:%d, failed to execute fsm, since %s", ths->vgId, tstrerror(ret)); + } syncEntryDestroy(pEntry); pEntry = NULL; goto _out; @@ -3305,7 +3397,7 @@ _out:; ths->pLogBuf->matchIndex, ths->pLogBuf->endIndex); if (code == 0 && ths->state == TAOS_SYNC_STATE_ASSIGNED_LEADER) { - (void)syncNodeUpdateAssignedCommitIndex(ths, matchIndex); + TAOS_CHECK_RETURN(syncNodeUpdateAssignedCommitIndex(ths, matchIndex)); if (ths->fsmState != SYNC_FSM_STATE_INCOMPLETE && syncLogBufferCommit(ths->pLogBuf, ths, ths->assignedCommitIndex) < 0) { @@ -3320,7 +3412,8 @@ _out:; } // single replica - (void)syncNodeUpdateCommitIndex(ths, matchIndex); + SyncIndex returnIndex = syncNodeUpdateCommitIndex(ths, matchIndex); + sTrace("vgId:%d, update commit return index %" PRId64 "", ths->vgId, returnIndex); if (ths->fsmState != SYNC_FSM_STATE_INCOMPLETE && (code = syncLogBufferCommit(ths->pLogBuf, ths, ths->commitIndex)) < 0) { @@ -3442,7 +3535,7 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) { } SRpcMsg rpcMsg = {0}; - (void)syncBuildHeartbeatReply(&rpcMsg, ths->vgId); + TAOS_CHECK_RETURN(syncBuildHeartbeatReply(&rpcMsg, ths->vgId)); SyncTerm currentTerm = raftStoreGetTerm(ths); SyncHeartbeatReply* pMsgReply = rpcMsg.pCont; @@ -3470,7 +3563,7 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) { if (ths->state == TAOS_SYNC_STATE_FOLLOWER || ths->state == TAOS_SYNC_STATE_LEARNER) { SRpcMsg rpcMsgLocalCmd = {0}; - (void)syncBuildLocalCmd(&rpcMsgLocalCmd, ths->vgId); + TAOS_CHECK_RETURN(syncBuildLocalCmd(&rpcMsgLocalCmd, ths->vgId)); SyncLocalCmd* pSyncMsg = rpcMsgLocalCmd.pCont; pSyncMsg->cmd = @@ -3494,7 +3587,7 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) { if (pMsg->term >= currentTerm && 
(ths->state == TAOS_SYNC_STATE_LEADER || ths->state == TAOS_SYNC_STATE_ASSIGNED_LEADER)) { SRpcMsg rpcMsgLocalCmd = {0}; - (void)syncBuildLocalCmd(&rpcMsgLocalCmd, ths->vgId); + TAOS_CHECK_RETURN(syncBuildLocalCmd(&rpcMsgLocalCmd, ths->vgId)); SyncLocalCmd* pSyncMsg = rpcMsgLocalCmd.pCont; pSyncMsg->cmd = SYNC_LOCAL_CMD_STEP_DOWN; @@ -3577,7 +3670,8 @@ int32_t syncNodeOnLocalCmd(SSyncNode* ths, const SRpcMsg* pRpcMsg) { return TSDB_CODE_SYN_INTERNAL_ERROR; } if (pMsg->currentTerm == matchTerm) { - (void)syncNodeUpdateCommitIndex(ths, pMsg->commitIndex); + SyncIndex returnIndex = syncNodeUpdateCommitIndex(ths, pMsg->commitIndex); + sTrace("vgId:%d, update commit return index %" PRId64 "", ths->vgId, returnIndex); } if (ths->fsmState != SYNC_FSM_STATE_INCOMPLETE && syncLogBufferCommit(ths->pLogBuf, ths, ths->commitIndex) < 0) { sError("vgId:%d, failed to commit raft log since %s. commit index:%" PRId64 "", ths->vgId, terrstr(), diff --git a/source/libs/sync/src/syncRaftCfg.c b/source/libs/sync/src/syncRaftCfg.c index bccd76b243..5d938afa67 100644 --- a/source/libs/sync/src/syncRaftCfg.c +++ b/source/libs/sync/src/syncRaftCfg.c @@ -163,7 +163,7 @@ int32_t syncWriteCfgFile(SSyncNode *pNode) { TAOS_CHECK_EXIT(TAOS_SYSTEM_ERROR(errno)); } - (void)taosCloseFile(&pFile); + TAOS_CHECK_EXIT(taosCloseFile(&pFile)); TAOS_CHECK_EXIT(taosRenameFile(file, realfile)); sInfo("vgId:%d, succeed to write sync cfg file:%s, len:%d, lastConfigIndex:%" PRId64 ", changeVersion:%d", diff --git a/source/libs/sync/src/syncRaftStore.c b/source/libs/sync/src/syncRaftStore.c index 60cef7b5c0..c61be4356c 100644 --- a/source/libs/sync/src/syncRaftStore.c +++ b/source/libs/sync/src/syncRaftStore.c @@ -134,7 +134,7 @@ int32_t raftStoreWriteFile(SSyncNode *pNode) { if (taosFsyncFile(pFile) < 0) TAOS_CHECK_GOTO(terrno, &lino, _OVER); - (void)taosCloseFile(&pFile); + TAOS_CHECK_GOTO(taosCloseFile(&pFile), &lino, _OVER); if (taosRenameFile(file, realfile) != 0) TAOS_CHECK_GOTO(terrno, &lino, _OVER); 
code = 0; @@ -168,21 +168,30 @@ bool raftStoreHasVoted(SSyncNode *pNode) { void raftStoreVote(SSyncNode *pNode, SRaftId *pRaftId) { (void)taosThreadMutexLock(&pNode->raftStore.mutex); pNode->raftStore.voteFor = *pRaftId; - (void)raftStoreWriteFile(pNode); + int32_t code = 0; + if ((code = raftStoreWriteFile(pNode)) != 0) { + sError("vgId:%d, failed to write raft store file since %s", pNode->vgId, tstrerror(code)); + } (void)taosThreadMutexUnlock(&pNode->raftStore.mutex); } void raftStoreClearVote(SSyncNode *pNode) { (void)taosThreadMutexLock(&pNode->raftStore.mutex); pNode->raftStore.voteFor = EMPTY_RAFT_ID; - (void)raftStoreWriteFile(pNode); + int32_t code = 0; + if ((code = raftStoreWriteFile(pNode)) != 0) { + sError("vgId:%d, failed to write raft store file since %s", pNode->vgId, tstrerror(code)); + } (void)taosThreadMutexUnlock(&pNode->raftStore.mutex); } void raftStoreNextTerm(SSyncNode *pNode) { (void)taosThreadMutexLock(&pNode->raftStore.mutex); pNode->raftStore.currentTerm++; - (void)raftStoreWriteFile(pNode); + int32_t code = 0; + if ((code = raftStoreWriteFile(pNode)) != 0) { + sError("vgId:%d, failed to write raft store file since %s", pNode->vgId, tstrerror(code)); + } (void)taosThreadMutexUnlock(&pNode->raftStore.mutex); } @@ -190,7 +199,10 @@ void raftStoreSetTerm(SSyncNode *pNode, SyncTerm term) { (void)taosThreadMutexLock(&pNode->raftStore.mutex); if (pNode->raftStore.currentTerm < term) { pNode->raftStore.currentTerm = term; - (void)raftStoreWriteFile(pNode); + int32_t code = 0; + if ((code = raftStoreWriteFile(pNode)) != 0) { + sError("vgId:%d, failed to write raft store file since %s", pNode->vgId, tstrerror(code)); + } } (void)taosThreadMutexUnlock(&pNode->raftStore.mutex); } diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c index 0cda5d1ea9..247b5624c3 100644 --- a/source/libs/sync/src/syncReplication.c +++ b/source/libs/sync/src/syncReplication.c @@ -118,8 +118,9 @@ int32_t 
syncNodeHeartbeatPeers(SSyncNode* pSyncNode) { STraceId* trace = &(rpcMsg.info.traceId); sGTrace("vgId:%d, send sync-heartbeat to dnode:%d", pSyncNode->vgId, DID(&(pSyncMsg->destId))); syncLogSendHeartbeat(pSyncNode, pSyncMsg, true, 0, 0); - if (syncNodeSendHeartbeat(pSyncNode, &pSyncMsg->destId, &rpcMsg) != 0) { - sError("vgId:%d, failed to send sync-heartbeat to dnode:%d", pSyncNode->vgId, DID(&(pSyncMsg->destId))); + int32_t ret = syncNodeSendHeartbeat(pSyncNode, &pSyncMsg->destId, &rpcMsg); + if (ret != 0) { + sError("vgId:%d, failed to send sync-heartbeat since %s", pSyncNode->vgId, tstrerror(ret)); } } diff --git a/source/libs/sync/src/syncRequestVote.c b/source/libs/sync/src/syncRequestVote.c index c8e81b13df..fe5b3eb7ad 100644 --- a/source/libs/sync/src/syncRequestVote.c +++ b/source/libs/sync/src/syncRequestVote.c @@ -137,7 +137,7 @@ int32_t syncNodeOnRequestVote(SSyncNode* ths, const SRpcMsg* pRpcMsg) { // trace log syncLogRecvRequestVote(ths, pMsg, pReply->voteGranted, "", "proceed"); syncLogSendRequestVoteReply(ths, pReply, ""); - (void)syncNodeSendMsgById(&pReply->destId, ths, &rpcMsg); + TAOS_CHECK_RETURN(syncNodeSendMsgById(&pReply->destId, ths, &rpcMsg)); if (resetElect) syncNodeResetElectTimer(ths); diff --git a/source/libs/tdb/inc/tdb.h b/source/libs/tdb/inc/tdb.h index 374c3e0dc3..52ff749191 100644 --- a/source/libs/tdb/inc/tdb.h +++ b/source/libs/tdb/inc/tdb.h @@ -34,19 +34,19 @@ typedef struct STxn TXN; // TDB int32_t tdbOpen(const char *dbname, int szPage, int pages, TDB **ppDb, int8_t rollback, int32_t encryptAlgorithm, char *encryptKey); -int32_t tdbClose(TDB *pDb); +void tdbClose(TDB *pDb); int32_t tdbBegin(TDB *pDb, TXN **pTxn, void *(*xMalloc)(void *, size_t), void (*xFree)(void *, void *), void *xArg, int flags); int32_t tdbCommit(TDB *pDb, TXN *pTxn); int32_t tdbPostCommit(TDB *pDb, TXN *pTxn); int32_t tdbPrepareAsyncCommit(TDB *pDb, TXN *pTxn); -int32_t tdbAbort(TDB *pDb, TXN *pTxn); +void tdbAbort(TDB *pDb, TXN *pTxn); int32_t 
tdbAlter(TDB *pDb, int pages); // TTB int32_t tdbTbOpen(const char *tbname, int keyLen, int valLen, tdb_cmpr_fn_t keyCmprFn, TDB *pEnv, TTB **ppTb, int8_t rollback); -int32_t tdbTbClose(TTB *pTb); +void tdbTbClose(TTB *pTb); bool tdbTbExist(const char *tbname, TDB *pEnv); int tdbTbDropByName(const char *tbname, TDB *pEnv, TXN *pTxn); int32_t tdbTbDrop(TTB *pTb); @@ -79,11 +79,11 @@ int32_t tdbTbcUpsert(TBC *pTbc, const void *pKey, int nKey, const void *pData, i int32_t tdbTxnOpen(TXN *pTxn, int64_t txnid, void *(*xMalloc)(void *, size_t), void (*xFree)(void *, void *), void *xArg, int flags); -int32_t tdbTxnCloseImpl(TXN *pTxn); -#define tdbTxnClose(pTxn) \ - do { \ - (void)tdbTxnCloseImpl(pTxn); \ - (pTxn) = NULL; \ +void tdbTxnCloseImpl(TXN *pTxn); +#define tdbTxnClose(pTxn) \ + do { \ + tdbTxnCloseImpl(pTxn); \ + (pTxn) = NULL; \ } while (0) // other diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c index a7bce24aa4..c688a6cc6a 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ b/source/libs/tdb/src/db/tdbBtree.c @@ -118,7 +118,7 @@ int tdbBtreeOpen(int keyLen, int valLen, SPager *pPager, char const *tbname, SPg zArg.pBt = pBt; ret = tdbPagerFetchPage(pPager, &pgno, &pPage, tdbBtreeInitPage, &zArg, txn); if (ret < 0) { - (void)tdbAbort(pEnv, txn); + tdbAbort(pEnv, txn); tdbOsFree(pBt); return ret; } @@ -126,7 +126,7 @@ int tdbBtreeOpen(int keyLen, int valLen, SPager *pPager, char const *tbname, SPg ret = tdbPagerWrite(pPager, pPage); if (ret < 0) { tdbError("failed to write page since %s", terrstr()); - (void)tdbAbort(pEnv, txn); + tdbAbort(pEnv, txn); tdbOsFree(pBt); return ret; } @@ -139,7 +139,7 @@ int tdbBtreeOpen(int keyLen, int valLen, SPager *pPager, char const *tbname, SPg ret = tdbTbInsert(pPager->pEnv->pMainDb, tbname, strlen(tbname) + 1, &pBt->info, sizeof(pBt->info), txn); if (ret < 0) { - (void)tdbAbort(pEnv, txn); + tdbAbort(pEnv, txn); tdbOsFree(pBt); return ret; } @@ -513,7 +513,10 @@ static int 
tdbBtreeBalanceDeeper(SBTree *pBt, SPage *pRoot, SPage **ppChild, TXN } // Copy the root page content to the child page - (void)tdbPageCopy(pRoot, pChild, 0); + ret = tdbPageCopy(pRoot, pChild, 0); + if (ret < 0) { + return ret; + } // Reinitialize the root page zArg.flags = TDB_BTREE_ROOT; @@ -633,14 +636,22 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx } } - (void)tdbPageDropCell(pParent, sIdx, pTxn, pBt); + ret = tdbPageDropCell(pParent, sIdx, pTxn, pBt); + if (ret < 0) { + tdbError("tdb/btree-balance: drop cell failed with ret: %d.", ret); + return TSDB_CODE_FAILED; + } if (!childNotLeaf) { SArray *ofps = pParent->pPager->ofps; if (ofps) { for (int i = 0; i < TARRAY_SIZE(ofps); ++i) { SPage *ofp = *(SPage **)taosArrayGet(ofps, i); - (void)tdbPagerInsertFreePage(pParent->pPager, ofp, pTxn); + ret = tdbPagerInsertFreePage(pParent->pPager, ofp, pTxn); + if (ret < 0) { + tdbError("tdb/btree-balance: insert free page failed with ret: %d.", ret); + return TSDB_CODE_FAILED; + } } if (destroyOfps) { @@ -853,7 +864,11 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx if (iNew == nNews - 1 && pIntHdr->pgno == 0) { pIntHdr->pgno = TDB_PAGE_PGNO(pNews[iNew]); } else { - (void)tdbBtreeDecodeCell(pPage, pCell, &cd, pTxn, pBt); + ret = tdbBtreeDecodeCell(pPage, pCell, &cd, pTxn, pBt); + if (ret < 0) { + tdbError("tdb/btree-balance: decode cell failed with ret: %d.", ret); + return TSDB_CODE_FAILED; + } // TODO: pCell here may be inserted as an overflow cell, handle it SCell *pNewCell = tdbOsMalloc(cd.kLen + 9); @@ -863,8 +878,12 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx int szNewCell; SPgno pgno; pgno = TDB_PAGE_PGNO(pNews[iNew]); - (void)tdbBtreeEncodeCell(pParent, cd.pKey, cd.kLen, (void *)&pgno, sizeof(SPgno), pNewCell, &szNewCell, + ret = tdbBtreeEncodeCell(pParent, cd.pKey, cd.kLen, (void *)&pgno, sizeof(SPgno), pNewCell, &szNewCell, pTxn, pBt); + if (ret < 
0) { + tdbError("tdb/btree-balance: encode cell failed with ret: %d.", ret); + return TSDB_CODE_FAILED; + } ret = tdbPageInsertCell(pParent, sIdx++, pNewCell, szNewCell, 0); if (ret) { tdbError("tdb/btree-balance: insert cell failed with ret: %d.", ret); @@ -979,7 +998,10 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx for (pageIdx = 0; pageIdx < nOlds; ++pageIdx) { if (pageIdx >= nNews) { - (void)tdbPagerInsertFreePage(pBt->pPager, pOlds[pageIdx], pTxn); + ret = tdbPagerInsertFreePage(pBt->pPager, pOlds[pageIdx], pTxn); + if (ret < 0) { + return ret; + } } tdbPagerReturnPage(pBt->pPager, pOlds[pageIdx], pTxn); } @@ -2189,7 +2211,11 @@ int tdbBtcGet(SBTC *pBtc, const void **ppKey, int *kLen, const void **ppVal, int } pCell = tdbPageGetCell(pBtc->pPage, pBtc->idx); - (void)tdbBtreeDecodeCell(pBtc->pPage, pCell, &pBtc->coder, pBtc->pTxn, pBtc->pBt); + int32_t ret = tdbBtreeDecodeCell(pBtc->pPage, pCell, &pBtc->coder, pBtc->pTxn, pBtc->pBt); + if (ret < 0) { + tdbError("tdb/btc-get: decode cell failed with ret: %d.", ret); + return ret; + } if (ppKey) { *ppKey = (void *)pBtc->coder.pKey; @@ -2238,13 +2264,19 @@ int tdbBtcDelete(SBTC *pBtc) { destroyOfps = true; } - (void)tdbPageDropCell(pBtc->pPage, idx, pBtc->pTxn, pBtc->pBt); + ret = tdbPageDropCell(pBtc->pPage, idx, pBtc->pTxn, pBtc->pBt); + if (ret < 0) { + tdbError("tdb/btc-delete: page drop cell failed with ret: %d.", ret); + } SArray *ofps = pBtc->pPage->pPager->ofps; if (ofps) { for (int i = 0; i < TARRAY_SIZE(ofps); ++i) { SPage *ofp = *(SPage **)taosArrayGet(ofps, i); - (void)tdbPagerInsertFreePage(pBtc->pPage->pPager, ofp, pBtc->pTxn); + ret = tdbPagerInsertFreePage(pBtc->pPage->pPager, ofp, pBtc->pTxn); + if (ret < 0) { + tdbError("tdb/btc-delete: insert free page failed with ret: %d.", ret); + } } if (destroyOfps) { @@ -2282,7 +2314,10 @@ int tdbBtcDelete(SBTC *pBtc) { tdbError("tdb/btc-delete: malloc failed."); return terrno; } - (void)tdbBtreeEncodeCell(pPage, pKey, 
nKey, &pgno, sizeof(pgno), pCell, &szCell, pBtc->pTxn, pBtc->pBt); + ret = tdbBtreeEncodeCell(pPage, pKey, nKey, &pgno, sizeof(pgno), pCell, &szCell, pBtc->pTxn, pBtc->pBt); + if (ret < 0) { + tdbError("tdb/btc-delete: btree encode cell failed with ret: %d.", ret); + } ret = tdbPageUpdateCell(pPage, idx, pCell, szCell, pBtc->pTxn, pBtc->pBt); if (ret < 0) { diff --git a/source/libs/tdb/src/db/tdbDb.c b/source/libs/tdb/src/db/tdbDb.c index 825a6e2b94..02ab997f69 100644 --- a/source/libs/tdb/src/db/tdbDb.c +++ b/source/libs/tdb/src/db/tdbDb.c @@ -90,7 +90,7 @@ int32_t tdbOpen(const char *dbname, int32_t szPage, int32_t pages, TDB **ppDb, i return 0; } -int tdbClose(TDB *pDb) { +void tdbClose(TDB *pDb) { SPager *pPager; if (pDb) { @@ -101,15 +101,15 @@ int tdbClose(TDB *pDb) { for (pPager = pDb->pgrList; pPager; pPager = pDb->pgrList) { pDb->pgrList = pPager->pNext; - (void)tdbPagerClose(pPager); + tdbPagerClose(pPager); } - (void)tdbPCacheClose(pDb->pCache); + tdbPCacheClose(pDb->pCache); tdbOsFree(pDb->pgrHash); tdbOsFree(pDb); } - return 0; + return; } int32_t tdbAlter(TDB *pDb, int pages) { return tdbPCacheAlter(pDb->pCache, pages); } @@ -199,7 +199,7 @@ int32_t tdbPrepareAsyncCommit(TDB *pDb, TXN *pTxn) { return 0; } -int32_t tdbAbort(TDB *pDb, TXN *pTxn) { +void tdbAbort(TDB *pDb, TXN *pTxn) { SPager *pPager; int ret; @@ -208,13 +208,12 @@ int32_t tdbAbort(TDB *pDb, TXN *pTxn) { if (ret < 0) { tdbError("failed to abort pager since %s. 
dbName:%s, txnId:%" PRId64, tstrerror(terrno), pDb->dbName, pTxn->txnId); - return ret; } } tdbTxnClose(pTxn); - return 0; + return; } SPager *tdbEnvGetPager(TDB *pDb, const char *fname) { diff --git a/source/libs/tdb/src/db/tdbPCache.c b/source/libs/tdb/src/db/tdbPCache.c index 6fcc8deb12..b0bcbd1a4c 100644 --- a/source/libs/tdb/src/db/tdbPCache.c +++ b/source/libs/tdb/src/db/tdbPCache.c @@ -42,12 +42,31 @@ static void tdbPCachePinPage(SPCache *pCache, SPage *pPage); static void tdbPCacheRemovePageFromHash(SPCache *pCache, SPage *pPage); static void tdbPCacheAddPageToHash(SPCache *pCache, SPage *pPage); static void tdbPCacheUnpinPage(SPCache *pCache, SPage *pPage); -static int tdbPCacheCloseImpl(SPCache *pCache); +static void tdbPCacheCloseImpl(SPCache *pCache); -static void tdbPCacheInitLock(SPCache *pCache) { (void)tdbMutexInit(&(pCache->mutex), NULL); } -static void tdbPCacheDestroyLock(SPCache *pCache) { (void)tdbMutexDestroy(&(pCache->mutex)); } -static void tdbPCacheLock(SPCache *pCache) { (void)tdbMutexLock(&(pCache->mutex)); } -static void tdbPCacheUnlock(SPCache *pCache) { (void)tdbMutexUnlock(&(pCache->mutex)); } +static void tdbPCacheInitLock(SPCache *pCache) { + if (tdbMutexInit(&(pCache->mutex), NULL) != 0) { + tdbError("tdb/pcache: mutex init failed."); + } +} + +static void tdbPCacheDestroyLock(SPCache *pCache) { + if (tdbMutexDestroy(&(pCache->mutex)) != 0) { + tdbError("tdb/pcache: mutex destroy failed."); + } +} + +static void tdbPCacheLock(SPCache *pCache) { + if (tdbMutexLock(&(pCache->mutex)) != 0) { + tdbError("tdb/pcache: mutex lock failed."); + } +} + +static void tdbPCacheUnlock(SPCache *pCache) { + if (tdbMutexUnlock(&(pCache->mutex)) != 0) { + tdbError("tdb/pcache: mutex unlock failed."); + } +} int tdbPCacheOpen(int pageSize, int cacheSize, SPCache **ppCache) { int32_t code = 0; @@ -74,7 +93,7 @@ int tdbPCacheOpen(int pageSize, int cacheSize, SPCache **ppCache) { _exit: if (code) { tdbError("%s failed at %s:%d since %s", __func__, 
__FILE__, __LINE__, tstrerror(code)); - (void)tdbPCacheClose(pCache); + tdbPCacheClose(pCache); *ppCache = NULL; } else { *ppCache = pCache; @@ -82,13 +101,13 @@ _exit: return code; } -int tdbPCacheClose(SPCache *pCache) { +void tdbPCacheClose(SPCache *pCache) { if (pCache) { - (void)tdbPCacheCloseImpl(pCache); + tdbPCacheCloseImpl(pCache); tdbOsFree(pCache->aPage); tdbOsFree(pCache); } - return 0; + return; } // TODO: @@ -514,7 +533,7 @@ static int tdbPCacheOpenImpl(SPCache *pCache) { return 0; } -static int tdbPCacheCloseImpl(SPCache *pCache) { +static void tdbPCacheCloseImpl(SPCache *pCache) { // free free page for (SPage *pPage = pCache->pFree; pPage;) { SPage *pPageT = pPage->pFreeNext; @@ -532,5 +551,5 @@ static int tdbPCacheCloseImpl(SPCache *pCache) { tdbOsFree(pCache->pgHash); tdbPCacheDestroyLock(pCache); - return 0; + return ; } diff --git a/source/libs/tdb/src/db/tdbPage.c b/source/libs/tdb/src/db/tdbPage.c index abc7fb5858..be391a75f1 100644 --- a/source/libs/tdb/src/db/tdbPage.c +++ b/source/libs/tdb/src/db/tdbPage.c @@ -64,7 +64,10 @@ int tdbPageCreate(int pageSize, SPage **ppPage, void *(*xMalloc)(void *, size_t) memset(ptr, 0, size); pPage = (SPage *)(ptr + pageSize); - (void)TDB_INIT_PAGE_LOCK(pPage); + int32_t code = TDB_INIT_PAGE_LOCK(pPage); + if (code) { + tdbError("tdb/page-create: init page lock failed."); + } pPage->pageSize = pageSize; pPage->pData = ptr; if (pageSize < 65536) { diff --git a/source/libs/tdb/src/db/tdbPager.c b/source/libs/tdb/src/db/tdbPager.c index 7d9e70cfaa..2753fe30d6 100644 --- a/source/libs/tdb/src/db/tdbPager.c +++ b/source/libs/tdb/src/db/tdbPager.c @@ -95,7 +95,7 @@ static int hashset_add(hashset_t set, void *item) { set->nitems = 0; for (size_t i = 0; i < old_capacity; ++i) { - (void)hashset_add_member(set, (void *)old_items[i]); + int nt = hashset_add_member(set, (void *)old_items[i]); } tdbOsFree(old_items); } @@ -209,12 +209,15 @@ int tdbPagerOpen(SPCache *pCache, const char *fileName, SPager **ppPager) { 
return 0; } -int tdbPagerClose(SPager *pPager) { +void tdbPagerClose(SPager *pPager) { if (pPager) { - (void)tdbOsClose(pPager->fd); + int32_t code = tdbOsClose(pPager->fd); + if (code) { + tdbWarn("failed to close file since %s", tstrerror(code)); + } tdbOsFree(pPager); } - return 0; + return; } int tdbPagerWrite(SPager *pPager, SPage *pPage) { @@ -224,14 +227,14 @@ int tdbPagerWrite(SPager *pPager, SPage *pPage) { if (pPage->isDirty) return 0; // ref page one more time so the page will not be release - (void)tdbRefPage(pPage); - tdbTrace("pager/mdirty page %p/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id); + int32_t nRef = tdbRefPage(pPage); + tdbTrace("pager/mdirty page %p/%d/%d, ref:%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id, nRef); // Set page as dirty pPage->isDirty = 1; tdbTrace("tdb/pager-write: put page: %p %d to dirty tree: %p", pPage, TDB_PAGE_PGNO(pPage), &pPager->rbt); - (void)tRBTreePut(&pPager->rbt, (SRBTreeNode *)pPage); + SRBTreeNode *tnode = tRBTreePut(&pPager->rbt, (SRBTreeNode *)pPage); // Write page to journal if neccessary if (TDB_PAGE_PGNO(pPage) <= pPager->dbOrigSize && @@ -244,7 +247,7 @@ int tdbPagerWrite(SPager *pPager, SPage *pPage) { } if (pPager->pActiveTxn->jPageSet) { - (void)hashset_add(pPager->pActiveTxn->jPageSet, (void *)((long)TDB_PAGE_PGNO(pPage))); + int32_t nt = hashset_add(pPager->pActiveTxn->jPageSet, (void *)((long)TDB_PAGE_PGNO(pPage))); } } @@ -340,7 +343,7 @@ int tdbPagerCommit(SPager *pPager, TXN *pTxn) { tRBTreeDrop(&pPager->rbt, (SRBTreeNode *)pPage); if (pTxn->jPageSet) { - (void)hashset_remove(pTxn->jPageSet, (void *)((long)TDB_PAGE_PGNO(pPage))); + int32_t nt = hashset_remove(pTxn->jPageSet, (void *)((long)TDB_PAGE_PGNO(pPage))); } tdbTrace("tdb/pager-commit: remove page: %p %d from dirty tree: %p", pPage, TDB_PAGE_PGNO(pPage), &pPager->rbt); @@ -577,7 +580,7 @@ int tdbPagerAbort(SPager *pPager, TXN *pTxn) { pPage->isDirty = 0; tRBTreeDrop(&pPager->rbt, (SRBTreeNode *)pPage); - 
(void)hashset_remove(pTxn->jPageSet, (void *)((long)TDB_PAGE_PGNO(pPage))); + int32_t nt = hashset_remove(pTxn->jPageSet, (void *)((long)TDB_PAGE_PGNO(pPage))); tdbPCacheMarkFree(pPager->pCache, pPage); tdbPCacheRelease(pPager->pCache, pPage, pTxn); } @@ -699,7 +702,11 @@ int tdbPagerFetchPage(SPager *pPager, SPgno *ppgno, SPage **ppPage, int (*initPa memcpy(&pgid, pPager->fid, TDB_FILE_ID_LEN); pgid.pgno = pgno; while ((pPage = tdbPCacheFetch(pPager->pCache, &pgid, pTxn)) == NULL) { - (void)tdbPagerFlushPage(pPager, pTxn); + int32_t code = tdbPagerFlushPage(pPager, pTxn); + if (code) { + tdbError("tdb/pager: %p, pPage: %p, flush page failed.", pPager, pPage); + return code; + } } tdbTrace("tdbttl fetch pager:%p", pPage->pPager); @@ -879,7 +886,9 @@ static int tdbPagerInitPage(SPager *pPager, SPage *pPage, int (*initPage)(SPage lcode = TDB_TRY_LOCK_PAGE(pPage); if (lcode == P_LOCK_SUCC) { if (TDB_PAGE_INITIALIZED(pPage)) { - (void)TDB_UNLOCK_PAGE(pPage); + if (TDB_UNLOCK_PAGE(pPage) != 0) { + tdbError("tdb/pager:%p, pgno:%d, unlock page failed.", pPager, pgno); + } return 0; } @@ -893,7 +902,10 @@ static int tdbPagerInitPage(SPager *pPager, SPage *pPage, int (*initPage)(SPage tdbTrace("tdb/pager:%p, pgno:%d, nRead:%" PRId64, pPager, pgno, nRead); if (nRead < pPage->pageSize) { tdbError("tdb/pager:%p, pgno:%d, nRead:%" PRId64 "pgSize:%" PRId32, pPager, pgno, nRead, pPage->pageSize); - (void)TDB_UNLOCK_PAGE(pPage); + if (TDB_UNLOCK_PAGE(pPage) < 0) { + tdbError("tdb/pager:%p, pgno:%d, nRead:%" PRId64 "pgSize:%" PRId32 " unlock page failed.", pPager, pgno, + nRead, pPage->pageSize); + } return TAOS_SYSTEM_ERROR(errno); } @@ -939,7 +951,10 @@ static int tdbPagerInitPage(SPager *pPager, SPage *pPage, int (*initPage)(SPage if (ret < 0) { tdbError("tdb/pager:%p, pgno:%d, nRead:%" PRId64 "pgSize:%" PRId32 " init page failed.", pPager, pgno, nRead, pPage->pageSize); - (void)TDB_UNLOCK_PAGE(pPage); + if (TDB_UNLOCK_PAGE(pPage) != 0) { + tdbError("tdb/pager:%p, pgno:%d, 
nRead:%" PRId64 "pgSize:%" PRId32 " unlock page failed.", pPager, pgno, nRead, + pPage->pageSize); + } return ret; } @@ -947,7 +962,10 @@ static int tdbPagerInitPage(SPager *pPager, SPage *pPage, int (*initPage)(SPage pPage->pPager = pPager; - (void)TDB_UNLOCK_PAGE(pPage); + if (TDB_UNLOCK_PAGE(pPage) != 0) { + tdbError("tdb/pager:%p, pgno:%d, nRead:%" PRId64 "pgSize:%" PRId32 " unlock page failed.", pPager, pgno, nRead, + pPage->pageSize); + } } else if (lcode == P_LOCK_BUSY) { nLoops = 0; for (;;) { diff --git a/source/libs/tdb/src/db/tdbTable.c b/source/libs/tdb/src/db/tdbTable.c index e7a25c7fee..6dc6aa0940 100644 --- a/source/libs/tdb/src/db/tdbTable.c +++ b/source/libs/tdb/src/db/tdbTable.c @@ -112,7 +112,11 @@ int tdbTbOpen(const char *tbname, int keyLen, int valLen, tdb_cmpr_fn_t keyCmprF return ret; } } else { - (void)tdbPagerRollback(pPager); + ret = tdbPagerRollback(pPager); + if (ret < 0) { + tdbOsFree(pTb); + return ret; + } } // pTb->pBt @@ -126,12 +130,12 @@ int tdbTbOpen(const char *tbname, int keyLen, int valLen, tdb_cmpr_fn_t keyCmprF return 0; } -int tdbTbClose(TTB *pTb) { +void tdbTbClose(TTB *pTb) { if (pTb) { tdbBtreeClose(pTb->pBt); tdbOsFree(pTb); } - return 0; + return; } bool tdbTbExist(const char *tbname, TDB *pEnv) { @@ -202,7 +206,7 @@ int tdbTbInsert(TTB *pTb, const void *pKey, int keyLen, const void *pVal, int va int tdbTbDelete(TTB *pTb, const void *pKey, int kLen, TXN *pTxn) { return tdbBtreeDelete(pTb->pBt, pKey, kLen, pTxn); } int tdbTbUpsert(TTB *pTb, const void *pKey, int kLen, const void *pVal, int vLen, TXN *pTxn) { - (void)tdbTbDelete(pTb, pKey, kLen, pTxn); + TAOS_UNUSED(tdbTbDelete(pTb, pKey, kLen, pTxn)); return tdbTbInsert(pTb, pKey, kLen, pVal, vLen, pTxn); } @@ -241,7 +245,11 @@ int32_t tdbTbTraversal(TTB *pTb, void *data, return ret; } - (void)tdbTbcMoveToFirst(pCur); + ret = tdbTbcMoveToFirst(pCur); + if (ret < 0) { + tdbTbcClose(pCur); + return ret; + } void *pKey = NULL; int kLen = 0; diff --git 
a/source/libs/tdb/src/db/tdbTxn.c b/source/libs/tdb/src/db/tdbTxn.c index 71560e3e85..fff0fc0261 100644 --- a/source/libs/tdb/src/db/tdbTxn.c +++ b/source/libs/tdb/src/db/tdbTxn.c @@ -31,7 +31,7 @@ int tdbTxnOpen(TXN *pTxn, int64_t txnid, void *(*xMalloc)(void *, size_t), void return 0; } -int tdbTxnCloseImpl(TXN *pTxn) { +void tdbTxnCloseImpl(TXN *pTxn) { if (pTxn) { if (pTxn->jPageSet) { hashset_destroy(pTxn->jPageSet); @@ -39,11 +39,14 @@ int tdbTxnCloseImpl(TXN *pTxn) { } if (pTxn->jfd) { - TAOS_UNUSED(tdbOsClose(pTxn->jfd)); + int32_t code = tdbOsClose(pTxn->jfd); + if (code) { + tdbError("tdb/txn: close journal file failed, code:%d", code); + } } tdbOsFree(pTxn); } - return 0; + return; } diff --git a/source/libs/tdb/src/inc/tdbInt.h b/source/libs/tdb/src/inc/tdbInt.h index a4498ee1ea..3e12483309 100644 --- a/source/libs/tdb/src/inc/tdbInt.h +++ b/source/libs/tdb/src/inc/tdbInt.h @@ -179,7 +179,7 @@ int tdbBtcUpsert(SBTC *pBtc, const void *pKey, int kLen, const void *pData, int // tdbPager.c ==================================== int tdbPagerOpen(SPCache *pCache, const char *fileName, SPager **ppPager); -int tdbPagerClose(SPager *pPager); +void tdbPagerClose(SPager *pPager); int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate, SBTree *pBt); int tdbPagerWrite(SPager *pPager, SPage *pPage); int tdbPagerBegin(SPager *pPager, TXN *pTxn); @@ -214,7 +214,7 @@ int tdbPagerRollback(SPager *pPager); // For page ref int tdbPCacheOpen(int pageSize, int cacheSize, SPCache **ppCache); -int tdbPCacheClose(SPCache *pCache); +void tdbPCacheClose(SPCache *pCache); int tdbPCacheAlter(SPCache *pCache, int32_t nPage); SPage *tdbPCacheFetch(SPCache *pCache, const SPgid *pPgid, TXN *pTxn); void tdbPCacheRelease(SPCache *pCache, SPage *pPage, TXN *pTxn); diff --git a/source/libs/tdb/test/tdbExOVFLTest.cpp b/source/libs/tdb/test/tdbExOVFLTest.cpp index a2deba4696..388a812ff5 100644 --- a/source/libs/tdb/test/tdbExOVFLTest.cpp +++ b/source/libs/tdb/test/tdbExOVFLTest.cpp @@ 
-197,8 +197,7 @@ static void insertOfp(void) { tdbTbClose(pDb); // Close Env - ret = tdbClose(pEnv); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); } // TEST(TdbOVFLPagesTest, DISABLED_TbInsertTest) { @@ -247,8 +246,7 @@ TEST(TdbOVFLPagesTest, TbGetTest) { tdbTbClose(pDb); // Close Env - ret = tdbClose(pEnv); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); } // TEST(TdbOVFLPagesTest, DISABLED_TbDeleteTest) { @@ -357,8 +355,7 @@ tdbBegin(pEnv, &txn); tdbTbClose(pDb); // Close Env - ret = tdbClose(pEnv); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); } // TEST(tdb_test, DISABLED_simple_insert1) { @@ -492,6 +489,5 @@ TEST(tdb_test, simple_insert1) { tdbTbClose(pDb); // Close Env - ret = tdbClose(pEnv); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); } diff --git a/source/libs/tdb/test/tdbPageDefragmentTest.cpp b/source/libs/tdb/test/tdbPageDefragmentTest.cpp index b64517c787..85b5c6e6b3 100644 --- a/source/libs/tdb/test/tdbPageDefragmentTest.cpp +++ b/source/libs/tdb/test/tdbPageDefragmentTest.cpp @@ -468,8 +468,7 @@ TEST(TdbPageDefragmentTest, DISABLED_simple_insert1) { tdbTbClose(pDb); // Close Env - ret = tdbClose(pEnv); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); } // TEST(TdbPageDefragmentTest, DISABLED_seq_insert) { @@ -551,8 +550,7 @@ TEST(TdbPageDefragmentTest, seq_insert) { tdbTbClose(pDb); // Close Env - ret = tdbClose(pEnv); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); } // TEST(TdbPageDefragmentTest, DISABLED_seq_delete) { @@ -635,8 +633,7 @@ TEST(TdbPageDefragmentTest, seq_delete) { tdbTbClose(pDb); // Close Env - ret = tdbClose(pEnv); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); } // TEST(TdbPageDefragmentTest, DISABLED_defragment_insert) { @@ -717,6 +714,5 @@ TEST(TdbPageDefragmentTest, defragment_insert) { tdbTbClose(pDb); // Close Env - ret = tdbClose(pEnv); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); } diff --git a/source/libs/tdb/test/tdbPageRecycleTest.cpp b/source/libs/tdb/test/tdbPageRecycleTest.cpp index bbbd90e73d..30e1081d0c 100644 --- 
a/source/libs/tdb/test/tdbPageRecycleTest.cpp +++ b/source/libs/tdb/test/tdbPageRecycleTest.cpp @@ -123,7 +123,7 @@ static int tDefaultKeyCmpr(const void *pKey1, int keyLen1, const void *pKey2, in static TDB *openEnv(char const *envName, int const pageSize, int const pageNum) { TDB *pEnv = NULL; - int ret = tdbOpen(envName, pageSize, pageNum, &pEnv, 0 , 0, NULL); + int ret = tdbOpen(envName, pageSize, pageNum, &pEnv, 0, 0, NULL); if (ret) { pEnv = NULL; } @@ -187,8 +187,7 @@ static void insertOfp(void) { tdbTbClose(pDb); // Close Env - ret = tdbClose(pEnv); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); } static void clearDb(char const *db) { taosRemoveDir(db); } @@ -471,8 +470,7 @@ TEST(TdbPageRecycleTest, DISABLED_simple_insert1) { tdbTbClose(pDb); // Close Env - ret = tdbClose(pEnv); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); } static void insertDb(int nData) { @@ -537,8 +535,7 @@ static void insertDb(int nData) { tdbTbClose(pDb); // Close Env - ret = tdbClose(pEnv); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); system("ls -l ./tdb"); } @@ -607,8 +604,7 @@ static void deleteDb(int nData) { tdbTbClose(pDb); // Close Env - ret = tdbClose(pEnv); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); system("ls -l ./tdb"); } @@ -675,8 +671,7 @@ static void deleteOfp(void) { tdbTbClose(pDb); // Close Env - ret = tdbClose(pEnv); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); } // TEST(TdbPageRecycleTest, DISABLED_seq_delete_ofp) { @@ -761,8 +756,7 @@ TEST(TdbPageRecycleTest, recycly_seq_insert_ofp_nocommit) { tdbTbClose(pDb); // Close Env - ret = tdbClose(pEnv); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); system("ls -l ./tdb"); } @@ -828,8 +822,7 @@ TEST(TdbPageRecycleTest, recycly_delete_interior_ofp_nocommit) { tdbTbClose(pDb); // Close Env - ret = tdbClose(pEnv); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); system("ls -l ./tdb"); } diff --git a/source/libs/tdb/test/tdbTest.cpp b/source/libs/tdb/test/tdbTest.cpp index e358ac0197..a8fdce2c38 100644 --- 
a/source/libs/tdb/test/tdbTest.cpp +++ b/source/libs/tdb/test/tdbTest.cpp @@ -231,8 +231,7 @@ TEST(tdb_test, DISABLED_simple_insert1) { tdbTbClose(pDb); // Close Env - ret = tdbClose(pEnv); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); } TEST(tdb_test, DISABLED_simple_insert2) { @@ -315,8 +314,7 @@ TEST(tdb_test, DISABLED_simple_insert2) { tdbTbClose(pDb); // Close Env - ret = tdbClose(pEnv); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); } TEST(tdb_test, DISABLED_simple_delete1) { @@ -620,8 +618,7 @@ TEST(tdb_test, multi_thread_query) { tdbTbClose(pDb); // Close Env - ret = tdbClose(pEnv); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); } TEST(tdb_test, DISABLED_multi_thread1) { @@ -745,7 +742,6 @@ TEST(tdb_test, DISABLED_multi_thread1) { tdbTbClose(pTb); // Close Env - ret = tdbClose(pDb); - GTEST_ASSERT_EQ(ret, 0); + tdbClose(pEnv); #endif } diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c index d4c973926e..573b59e30c 100644 --- a/source/libs/transport/src/thttp.c +++ b/source/libs/transport/src/thttp.c @@ -154,7 +154,7 @@ static int32_t taosCompressHttpRport(char* pSrc, int32_t srcLen) { void* pDest = taosMemoryMalloc(destLen); if (pDest == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; + code = terrno; goto _OVER; } @@ -258,7 +258,7 @@ static int32_t httpCreateMsg(const char* server, const char* uri, uint16_t port, SHttpMsg* msg = taosMemoryMalloc(sizeof(SHttpMsg)); if (msg == NULL) { *httpMsg = NULL; - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } msg->seq = seqNum; @@ -273,7 +273,7 @@ static int32_t httpCreateMsg(const char* server, const char* uri, uint16_t port, if (msg->server == NULL || msg->uri == NULL || msg->cont == NULL) { httpDestroyMsg(msg); *httpMsg = NULL; - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } memcpy(msg->cont, pCont, contLen); @@ -585,7 +585,7 @@ static void httpHandleReq(SHttpMsg* msg) { int32_t cap = 2048; header = taosMemoryCalloc(1, cap); if (header == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; + 
code = terrno; goto END; } @@ -597,7 +597,7 @@ static void httpHandleReq(SHttpMsg* msg) { uv_buf_t* wb = taosMemoryCalloc(2, sizeof(uv_buf_t)); if (wb == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; + code = terrno; goto END; } @@ -607,7 +607,7 @@ static void httpHandleReq(SHttpMsg* msg) { SHttpClient* cli = taosMemoryCalloc(1, sizeof(SHttpClient)); if (cli == NULL) { taosMemoryFree(wb); - code = TSDB_CODE_OUT_OF_MEMORY; + code = terrno; goto END; } cli->seq = msg->seq; @@ -784,13 +784,13 @@ int64_t transInitHttpChanImpl() { http->connStatusTable = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK); if (http->connStatusTable == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; + code = terrno; goto _ERROR; } http->loop = taosMemoryMalloc(sizeof(uv_loop_t)); if (http->loop == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; + code = terrno; goto _ERROR; } diff --git a/source/libs/transport/src/tmsgcb.c b/source/libs/transport/src/tmsgcb.c index 619592c82c..5685ac55ae 100644 --- a/source/libs/transport/src/tmsgcb.c +++ b/source/libs/transport/src/tmsgcb.c @@ -81,6 +81,6 @@ bool tmsgUpdateDnodeInfo(int32_t* dnodeId, int64_t* clusterId, char* fqdn, uint1 void tmsgUpdateDnodeEpSet(SEpSet* epset) { for (int32_t i = 0; i < epset->numOfEps; ++i) { - (void)tmsgUpdateDnodeInfo(NULL, NULL, epset->eps[i].fqdn, &epset->eps[i].port); + bool ret = tmsgUpdateDnodeInfo(NULL, NULL, epset->eps[i].fqdn, &epset->eps[i].port); } } diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c index df7b181a82..9ba1c3d677 100644 --- a/source/libs/transport/src/trans.c +++ b/source/libs/transport/src/trans.c @@ -40,7 +40,7 @@ void* rpcOpen(const SRpcInit* pInit) { SRpcInfo* pRpc = taosMemoryCalloc(1, sizeof(SRpcInfo)); if (pRpc == NULL) { - TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, NULL, _end); + TAOS_CHECK_GOTO(terrno, NULL, _end); } if (pInit->label) { int len = strlen(pInit->label) > sizeof(pRpc->label) ? 
sizeof(pRpc->label) : strlen(pInit->label); diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 087e82d0ec..1dc4f7bfcf 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -984,7 +984,7 @@ static int32_t cliCreateConn(SCliThrd* pThrd, SCliConn** pCliConn) { // read/write stream handle conn->stream = (uv_stream_t*)taosMemoryMalloc(sizeof(uv_tcp_t)); if (conn->stream == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; + code = terrno; TAOS_CHECK_GOTO(code, NULL, _failed); } @@ -1167,7 +1167,7 @@ void cliSendBatch(SCliConn* pConn) { uv_buf_t* wb = taosMemoryCalloc(wLen, sizeof(uv_buf_t)); if (wb == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; + code = terrno; tError("%s conn %p failed to send batch msg since:%s", CONN_GET_INST_LABEL(pConn), pConn, tstrerror(code)); goto _exception; } @@ -1221,7 +1221,7 @@ void cliSendBatch(SCliConn* pConn) { uv_write_t* req = taosMemoryCalloc(1, sizeof(uv_write_t)); if (req == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; + code = terrno; tError("%s conn %p failed to send batch msg since:%s", CONN_GET_INST_LABEL(pConn), pConn, tstrerror(code)); goto _exception; } @@ -1971,7 +1971,7 @@ static int32_t createBatchList(SCliBatchList** ppBatchList, char* key, char* ip, taosMemoryFree(pBatchList->dst); taosMemoryFree(pBatchList); tError("failed to create batch list, reason:%s", tstrerror(TSDB_CODE_OUT_OF_MEMORY)); - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } *ppBatchList = pBatchList; return 0; @@ -2205,7 +2205,7 @@ void* transInitClient(uint32_t ip, uint32_t port, char* label, int numOfThreads, int32_t code = 0; SCliObj* cli = taosMemoryCalloc(1, sizeof(SCliObj)); if (cli == NULL) { - TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, NULL, _err); + TAOS_CHECK_GOTO(terrno, NULL, _err); } STrans* pTransInst = shandle; @@ -2214,7 +2214,7 @@ void* transInitClient(uint32_t ip, uint32_t port, char* label, int numOfThreads, cli->pThreadObj = 
(SCliThrd**)taosMemoryCalloc(cli->numOfThreads, sizeof(SCliThrd*)); if (cli->pThreadObj == NULL) { - TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, NULL, _err); + TAOS_CHECK_GOTO(terrno, NULL, _err); } for (int i = 0; i < cli->numOfThreads; i++) { @@ -2342,29 +2342,29 @@ static int32_t createThrdObj(void* trans, SCliThrd** ppThrd) { pThrd->destroyAhandleFp = pTransInst->destroyFp; pThrd->fqdn2ipCache = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); if (pThrd->fqdn2ipCache == NULL) { - TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, NULL, _end); + TAOS_CHECK_GOTO(terrno, NULL, _end); } pThrd->failFastCache = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); if (pThrd->failFastCache == NULL) { - TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, NULL, _end); + TAOS_CHECK_GOTO(terrno, NULL, _end); } pThrd->batchCache = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); if (pThrd->batchCache == NULL) { - TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, NULL, _end); + TAOS_CHECK_GOTO(terrno, NULL, _end); } int32_t timerSize = 64; pThrd->timerList = taosArrayInit(timerSize, sizeof(void*)); if (pThrd->timerList == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, NULL, _end); + code = terrno; + TAOS_CHECK_GOTO(terrno, NULL, _end); } for (int i = 0; i < timerSize; i++) { uv_timer_t* timer = taosMemoryCalloc(1, sizeof(uv_timer_t)); if (timer == NULL) { - TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, NULL, _end); + TAOS_CHECK_GOTO(terrno, NULL, _end); } (void)uv_timer_init(pThrd->loop, timer); if (taosArrayPush(pThrd->timerList, &timer) == NULL) { diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c index 05244dbce2..75d118a66b 100644 --- a/source/libs/transport/src/transComm.c +++ b/source/libs/transport/src/transComm.c @@ -128,7 +128,7 @@ int32_t transClearBuffer(SConnBuffer* buf) { p->cap = BUFFER_CAP; p->buf = 
taosMemoryRealloc(p->buf, BUFFER_CAP); if (p->buf == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } } p->left = -1; @@ -179,7 +179,7 @@ int32_t transResetBuffer(SConnBuffer* connBuf, int8_t resetBuf) { p->cap = BUFFER_CAP; p->buf = taosMemoryRealloc(p->buf, p->cap); if (p->buf == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } } } @@ -209,7 +209,7 @@ int32_t transAllocBuffer(SConnBuffer* connBuf, uv_buf_t* uvBuf) { if (p->buf == NULL) { uvBuf->base = NULL; uvBuf->len = 0; - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } uvBuf->base = p->buf + p->len; uvBuf->len = p->left; @@ -443,7 +443,7 @@ void transReqQueueClear(queue* q) { int32_t transQueueInit(STransQueue* queue, void (*freeFunc)(const void* arg)) { queue->q = taosArrayInit(2, sizeof(void*)); if (queue->q == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } queue->freeFunc = (void (*)(const void*))freeFunc; @@ -565,12 +565,12 @@ int32_t transDQCreate(uv_loop_t* loop, SDelayQueue** queue) { heap = heapCreate(timeCompare); if (heap == NULL) { - TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, NULL, _return1); + TAOS_CHECK_GOTO(terrno, NULL, _return1); } q = taosMemoryCalloc(1, sizeof(SDelayQueue)); if (q == NULL) { - TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, NULL, _return1); + TAOS_CHECK_GOTO(terrno, NULL, _return1); } q->heap = heap; q->timer = timer; diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index 4c70ff5212..3ef7de6c1d 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -636,7 +636,7 @@ static int uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) { if (pMsg->pCont == 0) { pMsg->pCont = (void*)rpcMallocCont(0); if (pMsg->pCont == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } pMsg->contLen = 0; @@ -1027,7 +1027,7 @@ static int32_t addHandleToWorkloop(SWorkThrd* pThrd, char* pipeName) { int32_t code = 0; pThrd->loop = (uv_loop_t*)taosMemoryMalloc(sizeof(uv_loop_t)); if 
(pThrd->loop == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } if ((code = uv_loop_init(pThrd->loop)) != 0) { @@ -1135,7 +1135,7 @@ static FORCE_INLINE SSvrConn* createConn(void* hThrd) { SSvrConn* pConn = (SSvrConn*)taosMemoryCalloc(1, sizeof(SSvrConn)); if (pConn == NULL) { - TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, NULL, _end); + TAOS_CHECK_GOTO(terrno, NULL, _end); } transReqQueueInit(&pConn->wreqQueue); @@ -1156,7 +1156,7 @@ static FORCE_INLINE SSvrConn* createConn(void* hThrd) { SExHandle* exh = taosMemoryMalloc(sizeof(SExHandle)); if (exh == NULL) { - TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, NULL, _end); + TAOS_CHECK_GOTO(terrno, NULL, _end); } exh->handle = pConn; @@ -1187,7 +1187,7 @@ static FORCE_INLINE SSvrConn* createConn(void* hThrd) { // init client handle pConn->pTcp = (uv_tcp_t*)taosMemoryMalloc(sizeof(uv_tcp_t)); if (pConn->pTcp == NULL) { - TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, NULL, _end); + TAOS_CHECK_GOTO(terrno, NULL, _end); } code = uv_tcp_init(pThrd->loop, pConn->pTcp); @@ -1236,7 +1236,7 @@ static int32_t reallocConnRef(SSvrConn* conn) { // avoid app continue to send msg on invalid handle SExHandle* exh = taosMemoryMalloc(sizeof(SExHandle)); if (exh == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } exh->handle = conn; @@ -1335,7 +1335,7 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, SServerObj* srv = taosMemoryCalloc(1, sizeof(SServerObj)); if (srv == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; + code = terrno; tError("failed to init server since: %s", tstrerror(code)); return NULL; } @@ -1349,7 +1349,7 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, srv->pThreadObj = (SWorkThrd**)taosMemoryCalloc(srv->numOfThreads, sizeof(SWorkThrd*)); srv->pipe = (uv_pipe_t**)taosMemoryCalloc(srv->numOfThreads, sizeof(uv_pipe_t*)); if (srv->loop == NULL || srv->pThreadObj == NULL || srv->pipe == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; + code = terrno; 
goto End; } @@ -1400,14 +1400,14 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, thrd->pWhiteList = uvWhiteListCreate(); if (thrd->pWhiteList == NULL) { destroyWorkThrdObj(thrd); - code = TSDB_CODE_OUT_OF_MEMORY; + code = terrno; goto End; } srv->pipe[i] = (uv_pipe_t*)taosMemoryCalloc(2, sizeof(uv_pipe_t)); if (srv->pipe[i] == NULL) { destroyWorkThrdObj(thrd); - code = TSDB_CODE_OUT_OF_MEMORY; + code = terrno; goto End; } @@ -1433,7 +1433,7 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, for (int i = 0; i < srv->numOfThreads; i++) { SWorkThrd* thrd = (SWorkThrd*)taosMemoryCalloc(1, sizeof(SWorkThrd)); if (thrd == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; + code = terrno; goto End; } srv->pThreadObj[i] = thrd; @@ -1444,13 +1444,13 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, thrd->pWhiteList = uvWhiteListCreate(); if (thrd->pWhiteList == NULL) { destroyWorkThrdObj(thrd); - code = TSDB_CODE_OUT_OF_MEMORY; + code = terrno; goto End; } srv->pipe[i] = (uv_pipe_t*)taosMemoryCalloc(2, sizeof(uv_pipe_t)); if (srv->pipe[i] == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; + code = terrno; goto End; } @@ -1599,7 +1599,7 @@ void uvHandleUpdate(SSvrMsg* msg, SWorkThrd* thrd) { SIpWhiteList* pList = taosMemoryCalloc(1, sz + sizeof(SIpWhiteList)); if (pList == NULL) { tError("failed to create ip-white-list since %s", tstrerror(code)); - code = TSDB_CODE_OUT_OF_MEMORY; + code = terrno; break; } pList->num = pUser->numOfRange; diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 0dd55aacdb..7ea98d648d 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -57,7 +57,11 @@ static FORCE_INLINE int32_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx, in walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); int64_t fileSize = 0; - (void)taosStatFile(fnameStr, &fileSize, NULL, NULL); + if (taosStatFile(fnameStr, &fileSize, 
NULL, NULL) != 0) { + wError("vgId:%d, failed to stat file due to %s. file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); + code = terrno; + goto _err; + } TdFilePtr pFile = taosOpenFile(fnameStr, TD_FILE_READ | TD_FILE_WRITE); if (pFile == NULL) { @@ -99,7 +103,7 @@ static FORCE_INLINE int32_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx, in ptr = taosMemoryRealloc(buf, capacity); if (ptr == NULL) { - TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, &lino, _err); + TAOS_CHECK_GOTO(terrno, &lino, _err); } buf = ptr; @@ -163,7 +167,7 @@ static FORCE_INLINE int32_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx, in capacity += extraSize; void* ptr = taosMemoryRealloc(buf, capacity); if (ptr == NULL) { - TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, &lino, _err); + TAOS_CHECK_GOTO(terrno, &lino, _err); } buf = ptr; } @@ -270,7 +274,7 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList) for (int i = 0; i < actualFileNum; i++) { SWalFileInfo* pFileInfo = taosArrayGet(actualLogList, i); if (NULL == taosArrayPush(metaLogList, pFileInfo)) { - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } } @@ -356,6 +360,7 @@ static int32_t walLogEntriesComplete(const SWal* pWal) { } static int32_t walTrimIdxFile(SWal* pWal, int32_t fileIdx) { + int32_t code = 0; SWalFileInfo* pFileInfo = taosArrayGet(pWal->fileInfoSet, fileIdx); if (!pFileInfo) { TAOS_RETURN(TSDB_CODE_FAILED); @@ -365,7 +370,11 @@ static int32_t walTrimIdxFile(SWal* pWal, int32_t fileIdx) { walBuildIdxName(pWal, pFileInfo->firstVer, fnameStr); int64_t fileSize = 0; - (void)taosStatFile(fnameStr, &fileSize, NULL, NULL); + if (taosStatFile(fnameStr, &fileSize, NULL, NULL) != 0) { + wError("vgId:%d, failed to stat file due to %s. 
file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); + code = terrno; + TAOS_RETURN(code); + } int64_t records = TMAX(0, pFileInfo->lastVer - pFileInfo->firstVer + 1); int64_t lastEndOffset = records * sizeof(SWalIdxEntry); @@ -381,7 +390,11 @@ static int32_t walTrimIdxFile(SWal* pWal, int32_t fileIdx) { wInfo("vgId:%d, trim idx file. file: %s, size: %" PRId64 ", offset: %" PRId64, pWal->cfg.vgId, fnameStr, fileSize, lastEndOffset); - (void)taosFtruncateFile(pFile, lastEndOffset); + code = taosFtruncateFile(pFile, lastEndOffset); + if (code < 0) { + wError("vgId:%d, failed to truncate file due to %s. file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); + TAOS_RETURN(code); + } (void)taosCloseFile(&pFile); TAOS_RETURN(TSDB_CODE_SUCCESS); @@ -395,8 +408,14 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { regex_t logRegPattern; regex_t idxRegPattern; - (void)regcomp(&logRegPattern, logPattern, REG_EXTENDED); - (void)regcomp(&idxRegPattern, idxPattern, REG_EXTENDED); + if (regcomp(&logRegPattern, logPattern, REG_EXTENDED) != 0) { + wError("failed to compile log pattern, error:%s", tstrerror(terrno)); + return terrno; + } + if (regcomp(&idxRegPattern, idxPattern, REG_EXTENDED) != 0) { + wError("failed to compile idx pattern"); + return terrno; + } TdDirPtr pDir = taosOpenDir(pWal->path); if (pDir == NULL) { @@ -420,14 +439,22 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { if (!taosArrayPush(actualLog, &fileInfo)) { regfree(&logRegPattern); regfree(&idxRegPattern); - (void)taosCloseDir(&pDir); + int32_t ret = taosCloseDir(&pDir); + if (ret != 0) { + wError("failed to close dir, ret:%s", tstrerror(ret)); + return terrno; + } return terrno; } } } - (void)taosCloseDir(&pDir); + int32_t ret = taosCloseDir(&pDir); + if (ret != 0) { + wError("failed to close dir, ret:%s", tstrerror(ret)); + return terrno; + } regfree(&logRegPattern); regfree(&idxRegPattern); @@ -684,7 +711,9 @@ _err: int64_t walGetVerRetention(SWal* pWal, int64_t bytes) { int64_t ver = -1; int64_t totSize = 0; 
- (void)taosThreadRwlockRdlock(&pWal->mutex); + if (taosThreadRwlockRdlock(&pWal->mutex) != 0) { + wError("vgId:%d failed to lock %p", pWal->cfg.vgId, &pWal->mutex); + } int32_t fileIdx = taosArrayGetSize(pWal->fileInfoSet); while (--fileIdx) { SWalFileInfo* pInfo = taosArrayGet(pWal->fileInfoSet, fileIdx); @@ -694,7 +723,9 @@ int64_t walGetVerRetention(SWal* pWal, int64_t bytes) { } totSize += pInfo->fileSize; } - (void)taosThreadRwlockUnlock(&pWal->mutex); + if (taosThreadRwlockUnlock(&pWal->mutex) != 0) { + wError("vgId:%d failed to lock %p", pWal->cfg.vgId, &pWal->mutex); + } return ver + 1; } @@ -728,7 +759,7 @@ int32_t walRollFileInfo(SWal* pWal) { // TODO: change to emplace back SWalFileInfo* pNewInfo = taosMemoryMalloc(sizeof(SWalFileInfo)); if (pNewInfo == NULL) { - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } pNewInfo->firstVer = pWal->vers.lastVer + 1; pNewInfo->lastVer = -1; @@ -738,7 +769,7 @@ int32_t walRollFileInfo(SWal* pWal) { pNewInfo->syncedOffset = 0; if (!taosArrayPush(pArray, pNewInfo)) { taosMemoryFree(pNewInfo); - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } taosMemoryFree(pNewInfo); @@ -765,21 +796,35 @@ int32_t walMetaSerialize(SWal* pWal, char** serialized) { TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); } - (void)cJSON_AddItemToObject(pRoot, "meta", pMeta); + if (cJSON_AddItemToObject(pRoot, "meta", pMeta) != 0) { + wInfo("vgId:%d, failed to add meta to root", pWal->cfg.vgId); + } (void)sprintf(buf, "%" PRId64, pWal->vers.firstVer); - (void)cJSON_AddStringToObject(pMeta, "firstVer", buf); + if (cJSON_AddStringToObject(pMeta, "firstVer", buf) == NULL) { + wInfo("vgId:%d, failed to add firstVer to meta", pWal->cfg.vgId); + } (void)sprintf(buf, "%" PRId64, pWal->vers.snapshotVer); - (void)cJSON_AddStringToObject(pMeta, "snapshotVer", buf); + if (cJSON_AddStringToObject(pMeta, "snapshotVer", buf) == NULL) { + wInfo("vgId:%d, failed to add snapshotVer to meta", pWal->cfg.vgId); + } (void)sprintf(buf, "%" PRId64, 
pWal->vers.commitVer); - (void)cJSON_AddStringToObject(pMeta, "commitVer", buf); + if (cJSON_AddStringToObject(pMeta, "commitVer", buf) == NULL) { + wInfo("vgId:%d, failed to add commitVer to meta", pWal->cfg.vgId); + } (void)sprintf(buf, "%" PRId64, pWal->vers.lastVer); - (void)cJSON_AddStringToObject(pMeta, "lastVer", buf); + if (cJSON_AddStringToObject(pMeta, "lastVer", buf) == NULL) { + wInfo("vgId:%d, failed to add lastVer to meta", pWal->cfg.vgId); + } - (void)cJSON_AddItemToObject(pRoot, "files", pFiles); + if (cJSON_AddItemToObject(pRoot, "files", pFiles) != 0) { + wInfo("vgId:%d, failed to add files to root", pWal->cfg.vgId); + } SWalFileInfo* pData = pWal->fileInfoSet->pData; for (int i = 0; i < sz; i++) { SWalFileInfo* pInfo = &pData[i]; - (void)cJSON_AddItemToArray(pFiles, pField = cJSON_CreateObject()); + if (cJSON_AddItemToArray(pFiles, pField = cJSON_CreateObject()) != 0) { + wInfo("vgId:%d, failed to add field to files", pWal->cfg.vgId); + } if (pField == NULL) { cJSON_Delete(pRoot); @@ -788,15 +833,25 @@ int32_t walMetaSerialize(SWal* pWal, char** serialized) { // cjson only support int32_t or double // string are used to prohibit the loss of precision (void)sprintf(buf, "%" PRId64, pInfo->firstVer); - (void)cJSON_AddStringToObject(pField, "firstVer", buf); + if (cJSON_AddStringToObject(pField, "firstVer", buf) == NULL) { + wInfo("vgId:%d, failed to add firstVer to field", pWal->cfg.vgId); + } (void)sprintf(buf, "%" PRId64, pInfo->lastVer); - (void)cJSON_AddStringToObject(pField, "lastVer", buf); + if (cJSON_AddStringToObject(pField, "lastVer", buf) == NULL) { + wInfo("vgId:%d, failed to add lastVer to field", pWal->cfg.vgId); + } (void)sprintf(buf, "%" PRId64, pInfo->createTs); - (void)cJSON_AddStringToObject(pField, "createTs", buf); + if (cJSON_AddStringToObject(pField, "createTs", buf) == NULL) { + wInfo("vgId:%d, failed to add createTs to field", pWal->cfg.vgId); + } (void)sprintf(buf, "%" PRId64, pInfo->closeTs); - 
(void)cJSON_AddStringToObject(pField, "closeTs", buf); + if (cJSON_AddStringToObject(pField, "closeTs", buf) == NULL) { + wInfo("vgId:%d, failed to add closeTs to field", pWal->cfg.vgId); + } (void)sprintf(buf, "%" PRId64, pInfo->fileSize); - (void)cJSON_AddStringToObject(pField, "fileSize", buf); + if (cJSON_AddStringToObject(pField, "fileSize", buf) == NULL) { + wInfo("vgId:%d, failed to add fileSize to field", pWal->cfg.vgId); + } } char* pSerialized = cJSON_Print(pRoot); cJSON_Delete(pRoot); @@ -874,7 +929,10 @@ _err: static int walFindCurMetaVer(SWal* pWal) { const char* pattern = "^meta-ver[0-9]+$"; regex_t walMetaRegexPattern; - (void)regcomp(&walMetaRegexPattern, pattern, REG_EXTENDED); + if (regcomp(&walMetaRegexPattern, pattern, REG_EXTENDED) != 0) { + wError("failed to compile wal meta pattern, error %s", tstrerror(terrno)); + return terrno; + } TdDirPtr pDir = taosOpenDir(pWal->path); if (pDir == NULL) { @@ -896,7 +954,10 @@ static int walFindCurMetaVer(SWal* pWal) { } wDebug("vgId:%d, wal find current meta: %s is not meta file", pWal->cfg.vgId, name); } - (void)taosCloseDir(&pDir); + if (taosCloseDir(&pDir) != 0) { + wError("failed to close dir, ret:%s", tstrerror(terrno)); + return terrno; + } regfree(&walMetaRegexPattern); return metaVer; } @@ -979,21 +1040,31 @@ int32_t walSaveMeta(SWal* pWal) { // delete old file if (metaVer > -1) { - (void)walBuildMetaName(pWal, metaVer, fnameStr); - (void)taosRemoveFile(fnameStr); + n = walBuildMetaName(pWal, metaVer, fnameStr); + if (n >= sizeof(fnameStr)) { + TAOS_RETURN(TAOS_SYSTEM_ERROR(errno)); + } + code = taosRemoveFile(fnameStr); + if (code) { + wError("vgId:%d, failed to remove file due to %s. 
file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); + } else { + wInfo("vgId:%d, remove old meta file: %s", pWal->cfg.vgId, fnameStr); + } } taosMemoryFree(serialized); return code; _err: - taosCloseFile(&pMetaFile); + wError("vgId:%d, %s failed at line %d since %s", pWal->cfg.vgId, __func__, lino, tstrerror(code)); + (void)taosCloseFile(&pMetaFile); taosMemoryFree(serialized); return code; } int32_t walLoadMeta(SWal* pWal) { int32_t code = 0; + int n = 0; // find existing meta file int metaVer = walFindCurMetaVer(pWal); if (metaVer == -1) { @@ -1002,12 +1073,24 @@ int32_t walLoadMeta(SWal* pWal) { TAOS_RETURN(TSDB_CODE_FAILED); } char fnameStr[WAL_FILE_LEN]; - (void)walBuildMetaName(pWal, metaVer, fnameStr); + n = walBuildMetaName(pWal, metaVer, fnameStr); + if (n >= sizeof(fnameStr)) { + TAOS_RETURN(TAOS_SYSTEM_ERROR(errno)); + } // read metafile int64_t fileSize = 0; - (void)taosStatFile(fnameStr, &fileSize, NULL, NULL); + if (taosStatFile(fnameStr, &fileSize, NULL, NULL) != 0) { + wError("vgId:%d, failed to stat file due to %s. file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); + code = terrno; + TAOS_RETURN(code); + } if (fileSize == 0) { - (void)taosRemoveFile(fnameStr); + code = taosRemoveFile(fnameStr); + if (code) { + wError("vgId:%d, failed to remove file due to %s. 
file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); + } else { + wInfo("vgId:%d, remove old meta file: %s", pWal->cfg.vgId, fnameStr); + } wDebug("vgId:%d, wal find empty meta ver %d", pWal->cfg.vgId, metaVer); TAOS_RETURN(TSDB_CODE_FAILED); @@ -1015,7 +1098,7 @@ int32_t walLoadMeta(SWal* pWal) { int size = (int)fileSize; char* buf = taosMemoryMalloc(size + 5); if (buf == NULL) { - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } (void)memset(buf, 0, size + 5); TdFilePtr pFile = taosOpenFile(fnameStr, TD_FILE_READ); @@ -1046,6 +1129,9 @@ int32_t walRemoveMeta(SWal* pWal) { int metaVer = walFindCurMetaVer(pWal); if (metaVer == -1) return 0; char fnameStr[WAL_FILE_LEN]; - (void)walBuildMetaName(pWal, metaVer, fnameStr); + int n = walBuildMetaName(pWal, metaVer, fnameStr); + if (n >= sizeof(fnameStr)) { + TAOS_RETURN(TAOS_SYSTEM_ERROR(errno)); + } return taosRemoveFile(fnameStr); } diff --git a/source/libs/wal/src/walMgmt.c b/source/libs/wal/src/walMgmt.c index 3bb4b9d747..81d31f9ecd 100644 --- a/source/libs/wal/src/walMgmt.c +++ b/source/libs/wal/src/walMgmt.c @@ -160,7 +160,9 @@ SWal *walOpen(const char *path, SWalCfg *pCfg) { pWal->writeHead.magic = WAL_MAGIC; // load meta - (void)walLoadMeta(pWal); + if (walLoadMeta(pWal) < 0) { + wInfo("vgId:%d, failed to load meta since %s", pWal->cfg.vgId, tstrerror(terrno)); + } if (walCheckAndRepairMeta(pWal) < 0) { wError("vgId:%d, cannot open wal since repair meta file failed", pWal->cfg.vgId); @@ -233,7 +235,9 @@ int32_t walPersist(SWal *pWal) { void walClose(SWal *pWal) { TAOS_UNUSED(taosThreadRwlockWrlock(&pWal->mutex)); - (void)walSaveMeta(pWal); + if (walSaveMeta(pWal) < 0) { + wError("vgId:%d, failed to save meta since %s", pWal->cfg.vgId, tstrerror(terrno)); + } TAOS_UNUSED(taosCloseFile(&pWal->pLogFile)); pWal->pLogFile = NULL; (void)taosCloseFile(&pWal->pIdxFile); @@ -257,10 +261,14 @@ void walClose(SWal *pWal) { if (pWal->cfg.level == TAOS_WAL_SKIP) { wInfo("vgId:%d, remove all wals, path:%s", 
pWal->cfg.vgId, pWal->path); taosRemoveDir(pWal->path); - (void)taosMkDir(pWal->path); + if (taosMkDir(pWal->path) != 0) { + wError("vgId:%d, path:%s, failed to create directory since %s", pWal->cfg.vgId, pWal->path, tstrerror(terrno)); + } } - (void)taosRemoveRef(tsWal.refSetId, pWal->refId); + if (taosRemoveRef(tsWal.refSetId, pWal->refId) < 0) { + wError("vgId:%d, failed to remove ref for Wal since %s", pWal->cfg.vgId, tstrerror(terrno)); + } } static void walFreeObj(void *wal) { @@ -285,7 +293,9 @@ static bool walNeedFsync(SWal *pWal) { static void walUpdateSeq() { taosMsleep(WAL_REFRESH_MS); - (void)atomic_add_fetch_32((volatile int32_t *)&tsWal.seq, 1); + if (atomic_add_fetch_32((volatile int32_t *)&tsWal.seq, 1) < 0) { + wError("failed to update wal seq since %s", strerror(errno)); + } } static void walFsyncAll() { diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index 321a47d678..9cf5bcbf09 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -212,7 +212,7 @@ static int32_t walReadSeekVerImpl(SWalReader *pReader, int64_t ver) { if (pRet == NULL) { wError("failed to allocate memory for localRet"); TAOS_UNUSED(taosThreadRwlockUnlock(&pWal->mutex)); - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } TAOS_MEMCPY(pRet, gloablPRet, sizeof(SWalFileInfo)); TAOS_UNUSED(taosThreadRwlockUnlock(&pWal->mutex)); @@ -341,7 +341,7 @@ int32_t walFetchBody(SWalReader *pRead) { if (pRead->capacity < cryptedBodyLen) { SWalCkHead *ptr = (SWalCkHead *)taosMemoryRealloc(pRead->pHead, sizeof(SWalCkHead) + cryptedBodyLen); if (ptr == NULL) { - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } pRead->pHead = ptr; pReadHead = &pRead->pHead->head; @@ -463,7 +463,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) { if (ptr == NULL) { TAOS_UNUSED(taosThreadMutexUnlock(&pReader->mutex)); - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } pReader->pHead = ptr; pReader->capacity = 
cryptedBodyLen; @@ -523,7 +523,7 @@ int32_t decryptBody(SWalCfg *cfg, SWalCkHead *pHead, int32_t plainBodyLen, const int32_t cryptedBodyLen = ENCRYPTED_LEN(plainBodyLen); char *newBody = taosMemoryMalloc(cryptedBodyLen); if (!newBody) { - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } SCryptOpts opts; diff --git a/source/libs/wal/src/walRef.c b/source/libs/wal/src/walRef.c index bf24ed89fb..6b8b063b2b 100644 --- a/source/libs/wal/src/walRef.c +++ b/source/libs/wal/src/walRef.c @@ -52,8 +52,11 @@ void walCloseRef(SWal *pWal, int64_t refId) { } else { wDebug("vgId:%d, wal close ref null, refId %" PRId64, pWal->cfg.vgId, refId); } - - (void)taosHashRemove(pWal->pRefHash, &refId, sizeof(int64_t)); + int32_t code = 0; + code = taosHashRemove(pWal->pRefHash, &refId, sizeof(int64_t)); + if (code) { + wError("vgId:%d, wal remove ref failed, refId %" PRId64 ", error:%s", pWal->cfg.vgId, refId, tstrerror(code)); + } } } diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index a5105fc107..c98714df19 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -86,11 +86,9 @@ int32_t walRestoreFromSnapshot(SWal *pWal, int64_t ver) { TAOS_RETURN(TSDB_CODE_SUCCESS); } -int32_t walApplyVer(SWal *pWal, int64_t ver) { +void walApplyVer(SWal *pWal, int64_t ver) { // TODO: error check pWal->vers.appliedVer = ver; - - TAOS_RETURN(TSDB_CODE_SUCCESS); } int32_t walCommit(SWal *pWal, int64_t ver) { diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index afbd5304ce..eec89617c9 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -1766,12 +1766,12 @@ int32_t tsDecompressBigint2(void *pIn, int32_t nIn, int32_t nEle, void *pOut, in FUNC_COMPRESS_IMPL(pIn, nIn, nEle, pOut, nOut, cmprAlg, pBuf, nBuf, TSDB_DATA_TYPE_BIGINT, 0); } -int32_t tcompressDebug(uint32_t cmprAlg, uint8_t *l1Alg, uint8_t *l2Alg, uint8_t *level) { +void tcompressDebug(uint32_t cmprAlg, uint8_t *l1Alg, 
uint8_t *l2Alg, uint8_t *level) { DEFINE_VAR(cmprAlg) *l1Alg = l1; *l2Alg = l2; *level = lvl; - return 0; + return; } int8_t tUpdateCompress(uint32_t oldCmpr, uint32_t newCmpr, uint8_t l2Disabled, uint8_t lvlDiabled, uint8_t lvlDefault, diff --git a/source/util/src/tconfig.c b/source/util/src/tconfig.c index 6868e6d0eb..ee55243415 100644 --- a/source/util/src/tconfig.c +++ b/source/util/src/tconfig.c @@ -51,7 +51,7 @@ int32_t cfgInit(SConfig **ppCfg) { pCfg->array = taosArrayInit(32, sizeof(SConfigItem)); if (pCfg->array == NULL) { taosMemoryFree(pCfg); - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } TAOS_CHECK_RETURN(taosThreadMutexInit(&pCfg->lock, NULL)); @@ -127,7 +127,7 @@ static int32_t cfgCheckAndSetConf(SConfigItem *pItem, const char *conf) { pItem->str = taosStrdup(conf); if (pItem->str == NULL) { - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } TAOS_RETURN(TSDB_CODE_SUCCESS); @@ -144,7 +144,7 @@ static int32_t cfgCheckAndSetDir(SConfigItem *pItem, const char *inputDir) { taosMemoryFreeClear(pItem->str); pItem->str = taosStrdup(fullDir); if (pItem->str == NULL) { - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } TAOS_RETURN(TSDB_CODE_SUCCESS); @@ -211,7 +211,7 @@ static int32_t cfgSetString(SConfigItem *pItem, const char *value, ECfgSrcType s if (tmp == NULL) { uError("cfg:%s, type:%s src:%s value:%s failed to dup since %s", pItem->name, cfgDtypeStr(pItem->dtype), cfgStypeStr(stype), value, tstrerror(TSDB_CODE_OUT_OF_MEMORY)); - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } taosMemoryFreeClear(pItem->str); @@ -272,7 +272,7 @@ static int32_t cfgSetTfsItem(SConfig *pCfg, const char *name, const char *value, if (pItem->array == NULL) { (void)taosThreadMutexUnlock(&pCfg->lock); - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } } @@ -285,7 +285,7 @@ static int32_t cfgSetTfsItem(SConfig *pCfg, const char *name, const char *value, if (ret == NULL) { 
(void)taosThreadMutexUnlock(&pCfg->lock); - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } pItem->stype = stype; @@ -304,7 +304,7 @@ static int32_t cfgUpdateDebugFlagItem(SConfig *pCfg, const char *name, bool rese if (pDebugFlagItem->array == NULL) { pDebugFlagItem->array = taosArrayInit(16, sizeof(SLogVar)); if (pDebugFlagItem->array == NULL) { - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } } taosArrayClear(pDebugFlagItem->array); @@ -317,7 +317,7 @@ static int32_t cfgUpdateDebugFlagItem(SConfig *pCfg, const char *name, bool rese SLogVar logVar = {0}; (void)strncpy(logVar.name, name, TSDB_LOG_VAR_LEN - 1); if (NULL == taosArrayPush(pDebugFlagItem->array, &logVar)) { - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } } TAOS_RETURN(TSDB_CODE_SUCCESS); @@ -505,7 +505,7 @@ static int32_t cfgAddItem(SConfig *pCfg, SConfigItem *pItem, const char *name) { pItem->stype = CFG_STYPE_DEFAULT; pItem->name = taosStrdup(name); if (pItem->name == NULL) { - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } int32_t size = taosArrayGetSize(pCfg->array); @@ -527,7 +527,7 @@ static int32_t cfgAddItem(SConfig *pCfg, SConfigItem *pItem, const char *name) { } taosMemoryFree(pItem->name); - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } TAOS_RETURN(TSDB_CODE_SUCCESS); @@ -587,7 +587,7 @@ int32_t cfgAddString(SConfig *pCfg, const char *name, const char *defaultVal, in SConfigItem item = {.dtype = CFG_DTYPE_STRING, .scope = scope, .dynScope = dynScope}; item.str = taosStrdup(defaultVal); if (item.str == NULL) { - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_RETURN(terrno); } return cfgAddItem(pCfg, &item, name); } @@ -1309,7 +1309,7 @@ int32_t cfgLoadFromApollUrl(SConfig *pConfig, const char *url) { size_t itemValueStringLen = strlen(itemValueString); void *px = taosMemoryRealloc(cfgLineBuf, itemNameLen + itemValueStringLen + 3); if (NULL == px) { - TAOS_CHECK_EXIT(TSDB_CODE_OUT_OF_MEMORY); + 
TAOS_CHECK_EXIT(terrno); } cfgLineBuf = px; diff --git a/source/util/src/tencode.c b/source/util/src/tencode.c index 99b0b2bded..e962edaa27 100644 --- a/source/util/src/tencode.c +++ b/source/util/src/tencode.c @@ -112,7 +112,7 @@ void tEndEncode(SEncoder* pCoder) { pCoder->size = pNode->size; pCoder->pos = pNode->pos; - (void)tEncodeI32(pCoder, len); + int32_t ret = tEncodeI32(pCoder, len); pCoder->pos += len; } diff --git a/source/util/src/thash.c b/source/util/src/thash.c index 758e283bc3..ab88bef1a0 100644 --- a/source/util/src/thash.c +++ b/source/util/src/thash.c @@ -352,7 +352,7 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, const vo // no data in hash table with the specified key, add it into hash table SHashNode *pNewNode = doCreateHashNode(key, keyLen, data, size, hashVal); if (pNewNode == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; + // terrno = TSDB_CODE_OUT_OF_MEMORY; code = terrno; goto _exit; } @@ -364,7 +364,7 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, const vo if (pHashObj->enableUpdate) { SHashNode *pNewNode = doCreateHashNode(key, keyLen, data, size, hashVal); if (pNewNode == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; + // terrno = TSDB_CODE_OUT_OF_MEMORY; code = terrno; goto _exit; } @@ -391,14 +391,13 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) { int32_t taosHashGetDup(SHashObj *pHashObj, const void *key, size_t keyLen, void *destBuf) { terrno = 0; - (void)taosHashGetImpl(pHashObj, key, keyLen, &destBuf, 0, false); + void *data = taosHashGetImpl(pHashObj, key, keyLen, &destBuf, 0, false); return terrno; } int32_t taosHashGetDup_m(SHashObj *pHashObj, const void *key, size_t keyLen, void **destBuf, int32_t *size) { terrno = 0; - - (void)taosHashGetImpl(pHashObj, key, keyLen, destBuf, size, false); + void *data = taosHashGetImpl(pHashObj, key, keyLen, destBuf, size, false); return terrno; } diff --git a/source/util/src/theap.c b/source/util/src/theap.c index 
7ee49ff56d..e906d1f55b 100644 --- a/source/util/src/theap.c +++ b/source/util/src/theap.c @@ -257,10 +257,11 @@ static PriorityQueueNode* pqHeapify(PriorityQueue* pq, size_t from, size_t last) static void pqBuildHeap(PriorityQueue* pq) { if (pqContainerSize(pq) > 1) { + PriorityQueueNode* node; for (size_t i = pqContainerSize(pq) - 1; i > 0; --i) { - (void)pqHeapify(pq, i, pqContainerSize(pq)); + node = pqHeapify(pq, i, pqContainerSize(pq)); } - (void)pqHeapify(pq, 0, pqContainerSize(pq)); + node = pqHeapify(pq, 0, pqContainerSize(pq)); } } @@ -274,23 +275,24 @@ static PriorityQueueNode* pqReverseHeapify(PriorityQueue* pq, size_t i) { } static void pqUpdate(PriorityQueue* pq, size_t i) { + PriorityQueueNode* node; if (i == 0 || pq->fn(pqContainerGetEle(pq, i)->data, pqContainerGetEle(pq, pqParent(i))->data, pq->param)) { // if value in pos i is smaller than parent, heapify down from i to the end - (void)pqHeapify(pq, i, pqContainerSize(pq)); + node = pqHeapify(pq, i, pqContainerSize(pq)); } else { // if value in pos i is big than parent, heapify up from i - (void)pqReverseHeapify(pq, i); + node = pqReverseHeapify(pq, i); } } static void pqRemove(PriorityQueue* pq, size_t i) { if (i == pqContainerSize(pq) - 1) { - (void)taosArrayPop(pq->container); + void* tmp = taosArrayPop(pq->container); return; } taosArraySet(pq->container, i, taosArrayGet(pq->container, pqContainerSize(pq) - 1)); - (void)taosArrayPop(pq->container); + void* tmp = taosArrayPop(pq->container); pqUpdate(pq, i); } diff --git a/source/util/src/tjson.c b/source/util/src/tjson.c index 4cf5917f7c..0f2504ff5e 100644 --- a/source/util/src/tjson.c +++ b/source/util/src/tjson.c @@ -131,7 +131,7 @@ int32_t tjsonAddArray(SJson* pJson, const char* pName, FToJson func, const void* if (num > 0) { SJson* pJsonArray = tjsonAddArrayToObject(pJson, pName); if (NULL == pJsonArray) { - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } for (size_t i = 0; i < num; ++i) { int32_t code = tjsonAddItem(pJsonArray, func, 
(const char*)pArray + itemSize * i); @@ -148,7 +148,7 @@ int32_t tjsonAddTArray(SJson* pJson, const char* pName, FToJson func, const SArr if (num > 0) { SJson* pJsonArray = tjsonAddArrayToObject(pJson, pName); if (NULL == pJsonArray) { - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } for (int32_t i = 0; i < num; ++i) { int32_t code = tjsonAddItem(pJsonArray, func, taosArrayGet(pArray, i)); @@ -343,7 +343,7 @@ int32_t tjsonToTArray(const SJson* pJson, const char* pName, FToObject func, SAr if (size > 0) { *pArray = taosArrayInit_s(itemSize, size); if (NULL == *pArray) { - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } for (int32_t i = 0; i < size; ++i) { int32_t code = func(tjsonGetArrayItem(jArray, i), taosArrayGet(*pArray, i)); diff --git a/source/util/src/tlrucache.c b/source/util/src/tlrucache.c index cfbd875890..fbd17dd023 100644 --- a/source/util/src/tlrucache.c +++ b/source/util/src/tlrucache.c @@ -14,12 +14,12 @@ */ #define _DEFAULT_SOURCE -#include "tlrucache.h" #include "os.h" #include "taoserror.h" #include "tarray.h" #include "tdef.h" #include "tlog.h" +#include "tlrucache.h" #include "tutil.h" typedef struct SLRUEntry SLRUEntry; @@ -305,8 +305,7 @@ static void taosLRUCacheShardEvictLRU(SLRUCacheShard *shard, size_t charge, SArr SLRUEntry *old = shard->lru.next; taosLRUCacheShardLRURemove(shard, old); - (void)taosLRUEntryTableRemove(&shard->table, old->keyData, old->keyLength, old->hash); - + SLRUEntry *tentry = taosLRUEntryTableRemove(&shard->table, old->keyData, old->keyLength, old->hash); TAOS_LRU_ENTRY_SET_IN_CACHE(old, false); shard->usage -= old->totalCharge; @@ -529,7 +528,7 @@ static void taosLRUCacheShardEraseUnrefEntries(SLRUCacheShard *shard) { while (shard->lru.next != &shard->lru) { SLRUEntry *old = shard->lru.next; taosLRUCacheShardLRURemove(shard, old); - (void)taosLRUEntryTableRemove(&shard->table, old->keyData, old->keyLength, old->hash); + SLRUEntry *tentry = taosLRUEntryTableRemove(&shard->table, old->keyData, old->keyLength, 
old->hash); TAOS_LRU_ENTRY_SET_IN_CACHE(old, false); shard->usage -= old->totalCharge; @@ -574,7 +573,7 @@ static bool taosLRUCacheShardRelease(SLRUCacheShard *shard, LRUHandle *handle, b lastReference = taosLRUEntryUnref(e); if (lastReference && TAOS_LRU_ENTRY_IN_CACHE(e)) { if (shard->usage > shard->capacity || eraseIfLastRef) { - (void)taosLRUEntryTableRemove(&shard->table, e->keyData, e->keyLength, e->hash); + SLRUEntry *tentry = taosLRUEntryTableRemove(&shard->table, e->keyData, e->keyLength, e->hash); TAOS_LRU_ENTRY_SET_IN_CACHE(e, false); } else { taosLRUCacheShardLRUInsert(shard, e); @@ -870,4 +869,4 @@ bool taosLRUCacheIsStrictCapacity(SLRUCache *cache) { (void)taosThreadMutexUnlock(&cache->shardedCache.capacityMutex); return strict; -} +} \ No newline at end of file diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c index 46f2fdc647..25e10a17df 100644 --- a/source/util/src/tpagedbuf.c +++ b/source/util/src/tpagedbuf.c @@ -57,7 +57,7 @@ static int32_t createDiskFile(SDiskbasedBuf* pBuf) { taosGetTmpfilePath(pBuf->prefix, "paged-buf", path); pBuf->path = taosStrdup(path); if (pBuf->path == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } } diff --git a/source/util/src/tskiplist.c b/source/util/src/tskiplist.c index ae01292e08..95680686cf 100644 --- a/source/util/src/tskiplist.c +++ b/source/util/src/tskiplist.c @@ -32,9 +32,9 @@ static SSkipListNode *tSkipListNewNode(uint8_t level); static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipListNode **direction, bool isForward, bool hasDup); -static FORCE_INLINE int32_t tSkipListWLock(SSkipList *pSkipList); -static FORCE_INLINE int32_t tSkipListRLock(SSkipList *pSkipList); -static FORCE_INLINE int32_t tSkipListUnlock(SSkipList *pSkipList); +static FORCE_INLINE void tSkipListWLock(SSkipList *pSkipList); +static FORCE_INLINE void tSkipListRLock(SSkipList *pSkipList); +static FORCE_INLINE void tSkipListUnlock(SSkipList *pSkipList); static FORCE_INLINE int32_t 
getSkipListRandLevel(SSkipList *pSkipList); SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, __compar_fn_t comparFn, uint8_t flags, @@ -103,7 +103,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _ void tSkipListDestroy(SSkipList *pSkipList) { if (pSkipList == NULL) return; - (void)tSkipListWLock(pSkipList); + tSkipListWLock(pSkipList); SSkipListNode *pNode = SL_NODE_GET_FORWARD_POINTER(pSkipList->pHead, 0); @@ -113,7 +113,7 @@ void tSkipListDestroy(SSkipList *pSkipList) { tSkipListFreeNode(pTemp); } - (void)tSkipListUnlock(pSkipList); + tSkipListUnlock(pSkipList); if (pSkipList->lock != NULL) { (void)taosThreadRwlockDestroy(pSkipList->lock); taosMemoryFreeClear(pSkipList->lock); @@ -130,12 +130,12 @@ SSkipListNode *tSkipListPut(SSkipList *pSkipList, void *pData) { SSkipListNode *backward[MAX_SKIP_LIST_LEVEL] = {0}; SSkipListNode *pNode = NULL; - (void)tSkipListWLock(pSkipList); + tSkipListWLock(pSkipList); bool hasDup = tSkipListGetPosToPut(pSkipList, backward, pData); pNode = tSkipListPutImpl(pSkipList, pData, backward, false, hasDup); - (void)tSkipListUnlock(pSkipList); + tSkipListUnlock(pSkipList); return pNode; } @@ -293,11 +293,11 @@ SSkipListIterator *tSkipListCreateIterFromVal(SSkipList *pSkipList, const char * return iter; } - (void)tSkipListRLock(pSkipList); + tSkipListRLock(pSkipList); iter->cur = getPriorNode(pSkipList, val, order, &(iter->next)); - (void)tSkipListUnlock(pSkipList); + tSkipListUnlock(pSkipList); return iter; } @@ -307,13 +307,13 @@ bool tSkipListIterNext(SSkipListIterator *iter) { SSkipList *pSkipList = iter->pSkipList; - (void)tSkipListRLock(pSkipList); + tSkipListRLock(pSkipList); if (iter->order == TSDB_ORDER_ASC) { // no data in the skip list if (iter->cur == pSkipList->pTail || iter->next == NULL) { iter->cur = pSkipList->pTail; - (void)tSkipListUnlock(pSkipList); + tSkipListUnlock(pSkipList); return false; } @@ -329,7 +329,7 @@ bool 
tSkipListIterNext(SSkipListIterator *iter) { } else { if (iter->cur == pSkipList->pHead) { iter->cur = pSkipList->pHead; - (void)tSkipListUnlock(pSkipList); + tSkipListUnlock(pSkipList); return false; } @@ -344,7 +344,7 @@ bool tSkipListIterNext(SSkipListIterator *iter) { iter->step++; } - (void)tSkipListUnlock(pSkipList); + tSkipListUnlock(pSkipList); return (iter->order == TSDB_ORDER_ASC) ? (iter->cur != pSkipList->pTail) : (iter->cur != pSkipList->pHead); } @@ -413,25 +413,31 @@ static SSkipListIterator *doCreateSkipListIterator(SSkipList *pSkipList, int32_t return iter; } -static FORCE_INLINE int32_t tSkipListWLock(SSkipList *pSkipList) { +static FORCE_INLINE void tSkipListWLock(SSkipList *pSkipList) { if (pSkipList->lock) { - return taosThreadRwlockWrlock(pSkipList->lock); + if (taosThreadRwlockWrlock(pSkipList->lock) != 0) { + uError("failed to lock skip list"); + } } - return 0; + return; } -static FORCE_INLINE int32_t tSkipListRLock(SSkipList *pSkipList) { +static FORCE_INLINE void tSkipListRLock(SSkipList *pSkipList) { if (pSkipList->lock) { - return taosThreadRwlockRdlock(pSkipList->lock); + if (taosThreadRwlockRdlock(pSkipList->lock) != 0) { + uError("failed to lock skip list"); + } } - return 0; + return; } -static FORCE_INLINE int32_t tSkipListUnlock(SSkipList *pSkipList) { +static FORCE_INLINE void tSkipListUnlock(SSkipList *pSkipList) { if (pSkipList->lock) { - return taosThreadRwlockUnlock(pSkipList->lock); + if (taosThreadRwlockUnlock(pSkipList->lock) != 0) { + uError("failed to unlock skip list"); + } } - return 0; + return; } static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward, void *pData) { diff --git a/source/util/src/tworker.c b/source/util/src/tworker.c index c2757dcabc..767fb5ce3c 100644 --- a/source/util/src/tworker.c +++ b/source/util/src/tworker.c @@ -59,7 +59,7 @@ void tQWorkerCleanup(SQWorkerPool *pool) { if (taosCheckPthreadValid(worker->thread)) { uInfo("worker:%s:%d is stopping", pool->name, worker->id); 
(void)taosThreadJoin(worker->thread, NULL); - (void)taosThreadClear(&worker->thread); + taosThreadClear(&worker->thread); uInfo("worker:%s:%d is stopped", pool->name, worker->id); } } @@ -77,7 +77,11 @@ static void *tQWorkerThreadFp(SQueueWorker *worker) { void *msg = NULL; int32_t code = 0; - (void)taosBlockSIGPIPE(); + int32_t ret = taosBlockSIGPIPE(); + if (ret < 0) { + uError("worker:%s:%d failed to block SIGPIPE", pool->name, worker->id); + } + setThreadName(pool->name); worker->pid = taosGetSelfPthreadId(); uInfo("worker:%s:%d is running, thread:%08" PRId64, pool->name, worker->id, worker->pid); @@ -122,7 +126,13 @@ STaosQueue *tQWorkerAllocQueue(SQWorkerPool *pool, void *ahandle, FItem fp) { (void)taosThreadMutexLock(&pool->mutex); taosSetQueueFp(queue, fp, NULL); - (void)taosAddIntoQset(pool->qset, queue, ahandle); + code = taosAddIntoQset(pool->qset, queue, ahandle); + if (code) { + taosCloseQueue(queue); + (void)taosThreadMutexUnlock(&pool->mutex); + terrno = code; + return NULL; + } // spawn a thread to process queue if (pool->num < pool->max) { @@ -191,7 +201,7 @@ void tAutoQWorkerCleanup(SAutoQWorkerPool *pool) { if (taosCheckPthreadValid(worker->thread)) { uInfo("worker:%s:%d is stopping", pool->name, worker->id); (void)taosThreadJoin(worker->thread, NULL); - (void)taosThreadClear(&worker->thread); + taosThreadClear(&worker->thread); uInfo("worker:%s:%d is stopped", pool->name, worker->id); } taosMemoryFree(worker); @@ -210,7 +220,11 @@ static void *tAutoQWorkerThreadFp(SQueueWorker *worker) { void *msg = NULL; int32_t code = 0; - (void)taosBlockSIGPIPE(); + int32_t ret = taosBlockSIGPIPE(); + if (ret < 0) { + uError("worker:%s:%d failed to block SIGPIPE", pool->name, worker->id); + } + setThreadName(pool->name); worker->pid = taosGetSelfPthreadId(); uInfo("worker:%s:%d is running, thread:%08" PRId64, pool->name, worker->id, worker->pid); @@ -254,7 +268,14 @@ STaosQueue *tAutoQWorkerAllocQueue(SAutoQWorkerPool *pool, void *ahandle, FItem 
(void)taosThreadMutexLock(&pool->mutex); taosSetQueueFp(queue, fp, NULL); - (void)taosAddIntoQset(pool->qset, queue, ahandle); + + code = taosAddIntoQset(pool->qset, queue, ahandle); + if (code) { + taosCloseQueue(queue); + (void)taosThreadMutexUnlock(&pool->mutex); + terrno = code; + return NULL; + } int32_t queueNum = taosGetQueueNumber(pool->qset); int32_t curWorkerNum = taosArrayGetSize(pool->workers); @@ -281,7 +302,7 @@ STaosQueue *tAutoQWorkerAllocQueue(SAutoQWorkerPool *pool, void *ahandle, FItem if (taosThreadCreate(&worker->thread, &thAttr, (ThreadFp)tAutoQWorkerThreadFp, worker) != 0) { uError("worker:%s:%d failed to create thread, total:%d", pool->name, worker->id, curWorkerNum); - (void)taosArrayPop(pool->workers); + void *tmp = taosArrayPop(pool->workers); taosMemoryFree(worker); taosCloseQueue(queue); terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -342,7 +363,7 @@ void tWWorkerCleanup(SWWorkerPool *pool) { if (taosCheckPthreadValid(worker->thread)) { uInfo("worker:%s:%d is stopping", pool->name, worker->id); (void)taosThreadJoin(worker->thread, NULL); - (void)taosThreadClear(&worker->thread); + taosThreadClear(&worker->thread); taosFreeQall(worker->qall); taosCloseQset(worker->qset); uInfo("worker:%s:%d is stopped", pool->name, worker->id); @@ -362,7 +383,11 @@ static void *tWWorkerThreadFp(SWWorker *worker) { int32_t code = 0; int32_t numOfMsgs = 0; - (void)taosBlockSIGPIPE(); + int32_t ret = taosBlockSIGPIPE(); + if (ret < 0) { + uError("worker:%s:%d failed to block SIGPIPE", pool->name, worker->id); + } + setThreadName(pool->name); worker->pid = taosGetSelfPthreadId(); uInfo("worker:%s:%d is running, thread:%08" PRId64, pool->name, worker->id, worker->pid); @@ -407,7 +432,8 @@ STaosQueue *tWWorkerAllocQueue(SWWorkerPool *pool, void *ahandle, FItems fp) { code = taosOpenQset(&worker->qset); if (code) goto _OVER; - (void)taosAddIntoQset(worker->qset, queue, ahandle); + code = taosAddIntoQset(worker->qset, queue, ahandle); + if (code) goto _OVER; code = 
taosAllocateQall(&worker->qall); if (code) goto _OVER; @@ -423,7 +449,8 @@ STaosQueue *tWWorkerAllocQueue(SWWorkerPool *pool, void *ahandle, FItems fp) { pool->num++; if (pool->num > pool->max) pool->num = pool->max; } else { - (void)taosAddIntoQset(worker->qset, queue, ahandle); + code = taosAddIntoQset(worker->qset, queue, ahandle); + if (code) goto _OVER; pool->nextId = (pool->nextId + 1) % pool->max; } @@ -551,7 +578,7 @@ void tMultiWorkerCleanup(SMultiWorker *pWorker) { static int32_t tQueryAutoQWorkerAddWorker(SQueryAutoQWorkerPool *pool); static int32_t tQueryAutoQWorkerBeforeBlocking(void *p); static int32_t tQueryAutoQWorkerRecoverFromBlocking(void *p); -static int32_t tQueryAutoQWorkerWaitingCheck(SQueryAutoQWorkerPool *pPool); +static void tQueryAutoQWorkerWaitingCheck(SQueryAutoQWorkerPool *pPool); static bool tQueryAutoQWorkerTryRecycleWorker(SQueryAutoQWorkerPool *pPool, SQueryAutoQWorker *pWorker); #define GET_ACTIVE_N(int64_val) (int32_t)((int64_val) >> 32) @@ -629,7 +656,11 @@ static void *tQueryAutoQWorkerThreadFp(SQueryAutoQWorker *worker) { void *msg = NULL; int32_t code = 0; - (void)taosBlockSIGPIPE(); + int32_t ret = taosBlockSIGPIPE(); + if (ret < 0) { + uError("worker:%s:%d failed to block SIGPIPE", pool->name, worker->id); + } + setThreadName(pool->name); worker->pid = taosGetSelfPthreadId(); uDebug("worker:%s:%d is running, thread:%08" PRId64, pool->name, worker->id, worker->pid); @@ -648,7 +679,7 @@ static void *tQueryAutoQWorkerThreadFp(SQueryAutoQWorker *worker) { } } - (void)tQueryAutoQWorkerWaitingCheck(pool); + tQueryAutoQWorkerWaitingCheck(pool); if (qinfo.fp != NULL) { qinfo.workerId = worker->id; @@ -717,13 +748,13 @@ static bool tQueryAutoQWorkerTryDecActive(void *p, int32_t minActive) { return false; } -static int32_t tQueryAutoQWorkerWaitingCheck(SQueryAutoQWorkerPool *pPool) { +static void tQueryAutoQWorkerWaitingCheck(SQueryAutoQWorkerPool *pPool) { while (1) { int64_t val64 = pPool->activeRunningN; int32_t running = 
GET_RUNNING_N(val64), active = GET_ACTIVE_N(val64); while (running < pPool->num) { if (atomicCompareExchangeActiveAndRunning(&pPool->activeRunningN, &active, active, &running, running + 1)) { - return TSDB_CODE_SUCCESS; + return; } } if (atomicCompareExchangeActive(&pPool->activeRunningN, &active, active - 1)) { @@ -736,7 +767,7 @@ static int32_t tQueryAutoQWorkerWaitingCheck(SQueryAutoQWorkerPool *pPool) { if (!pPool->exit) (void)taosThreadCondWait(&pPool->waitingBeforeProcessMsgCond, &pPool->waitingBeforeProcessMsgLock); // recovered from waiting (void)taosThreadMutexUnlock(&pPool->waitingBeforeProcessMsgLock); - return TSDB_CODE_SUCCESS; + return; } bool tQueryAutoQWorkerTryRecycleWorker(SQueryAutoQWorkerPool *pPool, SQueryAutoQWorker *pWorker) { @@ -744,7 +775,7 @@ bool tQueryAutoQWorkerTryRecycleWorker(SQueryAutoQWorkerPool *pPool, SQueryAutoQ tQueryAutoQWorkerTryDecActive(pPool, pPool->num)) { (void)taosThreadMutexLock(&pPool->poolLock); SListNode *pNode = listNode(pWorker); - (void)tdListPopNode(pPool->workers, pNode); + SListNode *tNode = tdListPopNode(pPool->workers, pNode); // reclaim some workers if (pWorker->id >= pPool->maxInUse) { while (listNEles(pPool->exitedWorkers) > pPool->maxInUse - pPool->num) { @@ -752,7 +783,7 @@ bool tQueryAutoQWorkerTryRecycleWorker(SQueryAutoQWorkerPool *pPool, SQueryAutoQ SQueryAutoQWorker *pWorker = (SQueryAutoQWorker *)head->data; if (pWorker && taosCheckPthreadValid(pWorker->thread)) { (void)taosThreadJoin(pWorker->thread, NULL); - (void)taosThreadClear(&pWorker->thread); + taosThreadClear(&pWorker->thread); } taosMemoryFree(head); } @@ -777,7 +808,7 @@ bool tQueryAutoQWorkerTryRecycleWorker(SQueryAutoQWorkerPool *pPool, SQueryAutoQ (void)taosThreadMutexUnlock(&pPool->poolLock); return false; } - (void)tdListPopNode(pPool->backupWorkers, pNode); + SListNode *tNode1 = tdListPopNode(pPool->backupWorkers, pNode); tdListAppendNode(pPool->workers, pNode); (void)taosThreadMutexUnlock(&pPool->poolLock); @@ -803,11 +834,11 @@ 
int32_t tQueryAutoQWorkerInit(SQueryAutoQWorkerPool *pool) { code = taosOpenQset(&pool->qset); if (code) return terrno = code; pool->workers = tdListNew(sizeof(SQueryAutoQWorker)); - if (!pool->workers) return TSDB_CODE_OUT_OF_MEMORY; + if (!pool->workers) return terrno; pool->backupWorkers = tdListNew(sizeof(SQueryAutoQWorker)); - if (!pool->backupWorkers) return TSDB_CODE_OUT_OF_MEMORY; + if (!pool->backupWorkers) return terrno; pool->exitedWorkers = tdListNew(sizeof(SQueryAutoQWorker)); - if (!pool->exitedWorkers) return TSDB_CODE_OUT_OF_MEMORY; + if (!pool->exitedWorkers) return terrno; pool->maxInUse = pool->max * 2 + 2; if (!pool->pCb) { @@ -862,7 +893,7 @@ void tQueryAutoQWorkerCleanup(SQueryAutoQWorkerPool *pPool) { (void)taosThreadMutexUnlock(&pPool->poolLock); if (worker && taosCheckPthreadValid(worker->thread)) { (void)taosThreadJoin(worker->thread, NULL); - (void)taosThreadClear(&worker->thread); + taosThreadClear(&worker->thread); } taosMemoryFree(pNode); } @@ -872,7 +903,7 @@ void tQueryAutoQWorkerCleanup(SQueryAutoQWorkerPool *pPool) { worker = (SQueryAutoQWorker *)pNode->data; if (worker && taosCheckPthreadValid(worker->thread)) { (void)taosThreadJoin(worker->thread, NULL); - (void)taosThreadClear(&worker->thread); + taosThreadClear(&worker->thread); } taosMemoryFree(pNode); } @@ -882,7 +913,7 @@ void tQueryAutoQWorkerCleanup(SQueryAutoQWorkerPool *pPool) { worker = (SQueryAutoQWorker *)pNode->data; if (worker && taosCheckPthreadValid(worker->thread)) { (void)taosThreadJoin(worker->thread, NULL); - (void)taosThreadClear(&worker->thread); + taosThreadClear(&worker->thread); } taosMemoryFree(pNode); } @@ -913,7 +944,13 @@ STaosQueue *tQueryAutoQWorkerAllocQueue(SQueryAutoQWorkerPool *pool, void *ahand (void)taosThreadMutexLock(&pool->poolLock); taosSetQueueFp(queue, fp, NULL); - (void)taosAddIntoQset(pool->qset, queue, ahandle); + code = taosAddIntoQset(pool->qset, queue, ahandle); + if (code) { + taosCloseQueue(queue); + queue = NULL; + 
(void)taosThreadMutexUnlock(&pool->poolLock); + return NULL; + } SQueryAutoQWorker worker = {0}; SQueryAutoQWorker *pWorker = NULL; diff --git a/tests/parallel_test/run_case.sh b/tests/parallel_test/run_case.sh index ff84d9e66c..fa8fedbdbe 100755 --- a/tests/parallel_test/run_case.sh +++ b/tests/parallel_test/run_case.sh @@ -76,15 +76,15 @@ ulimit -c unlimited md5sum /usr/lib/libtaos.so.1 md5sum /home/TDinternal/debug/build/lib/libtaos.so -#define taospy 2.7.10 +#define taospy 2.7.16 pip3 list|grep taospy pip3 uninstall taospy -y -pip3 install --default-timeout=120 taospy==2.7.15 +pip3 install --default-timeout=120 taospy==2.7.16 #define taos-ws-py 0.3.1 pip3 list|grep taos-ws-py pip3 uninstall taos-ws-py -y -pip3 install --default-timeout=600 taos-ws-py==0.3.1 +pip3 install --default-timeout=600 taos-ws-py==0.3.3 $TIMEOUT_CMD $cmd RET=$? diff --git a/tests/script/tsim/stream/basic3.sim b/tests/script/tsim/stream/basic3.sim index 010578d4d2..c6c98281c1 100644 --- a/tests/script/tsim/stream/basic3.sim +++ b/tests/script/tsim/stream/basic3.sim @@ -99,6 +99,18 @@ if $rows == 0 then return -1 endi +print ========== step2 + +sql CREATE DATABASE test2 VGROUPS 2; +sql use test2; + +sql CREATE STABLE st (time TIMESTAMP, ca DOUBLE, cb DOUBLE, cc int) TAGS (ta VARCHAR(10) ); + +sql_error create stream stream_t1 trigger at_once ignore update 0 ignore expired 0 into streamtST as select time, count(*) c1, count(1) c2 from st partition by tbname group by ca,time ; + +sql_error create stream stream_t1 trigger at_once ignore update 0 ignore expired 0 into streamtST as select time, count(*) c1, count(1) c2 from st group by ca,time ; + + _OVER: system sh/exec.sh -n dnode1 -s stop -x SIGINT print =============== check