rowDataList = new ArrayList<>();
+ int index = 0;
+ while (resultSet0.next()) {
+ TSDBResultSetRowData rowData = new TSDBResultSetRowData(24);
+ // set TABLE_NAME
+ rowData.setString(2, tableNamePattern);
+ // set COLUMN_NAME
+ rowData.setString(3, resultSet0.getString(1));
+ // set DATA_TYPE
+ String typeName = resultSet0.getString(2);
+ rowData.setInt(4, getDataType(typeName));
+ // set TYPE_NAME
+ rowData.setString(5, typeName);
+ // set COLUMN_SIZE
+ int length = resultSet0.getInt(3);
+ rowData.setInt(6, getColumnSize(typeName, length));
+ // set DECIMAL_DIGITS
+ rowData.setInt(8, getDecimalDigits(typeName));
+ // set NUM_PREC_RADIX
+ rowData.setInt(9, 10);
+ // set NULLABLE
+ rowData.setInt(10, getNullable(index, typeName));
+ rowDataList.add(rowData);
+ index++;
+ }
+ resultSet.setRowDataList(rowDataList);
- public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException {
- return getEmptyResultSet();
- }
+// GetColumnsResultSet getColumnsResultSet = new GetColumnsResultSet(resultSet0, catalog, schemaPattern, tableNamePattern, columnNamePattern);
+// return getColumnsResultSet;
+// DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
+ return resultSet;
+ } else {
+ throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
+ }
- public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException {
- return getEmptyResultSet();
- }
+ /*************************/
- public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable,
- String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException {
- return getEmptyResultSet();
- }
+// return getEmptyResultSet();
+ }
- public ResultSet getTypeInfo() throws SQLException {
- return getEmptyResultSet();
- }
+ private int getNullable(int index, String typeName) {
+ if (index == 0 && "TIMESTAMP".equals(typeName))
+ return DatabaseMetaData.columnNoNulls;
+ return DatabaseMetaData.columnNullable;
+ }
- public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate)
- throws SQLException {
- return getEmptyResultSet();
- }
+ private int getColumnSize(String typeName, int length) {
+ switch (typeName) {
+ case "TIMESTAMP":
+ return 23;
- public boolean supportsResultSetType(int type) throws SQLException {
- return false;
- }
+ default:
+ return 0;
+ }
+ }
- public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException {
- return false;
- }
+ private int getDecimalDigits(String typeName) {
+ switch (typeName) {
+ case "FLOAT":
+ return 5;
+ case "DOUBLE":
+ return 9;
+ default:
+ return 0;
+ }
+ }
- public boolean ownUpdatesAreVisible(int type) throws SQLException {
- return false;
- }
+ private int getDataType(String typeName) {
+ switch (typeName) {
+ case "TIMESTAMP":
+ return Types.TIMESTAMP;
+ case "INT":
+ return Types.INTEGER;
+ case "BIGINT":
+ return Types.BIGINT;
+ case "FLOAT":
+ return Types.FLOAT;
+ case "DOUBLE":
+ return Types.DOUBLE;
+ case "BINARY":
+ return Types.BINARY;
+ case "SMALLINT":
+ return Types.SMALLINT;
+ case "TINYINT":
+ return Types.TINYINT;
+ case "BOOL":
+ return Types.BOOLEAN;
+ case "NCHAR":
+ return Types.NCHAR;
+ default:
+ return Types.NULL;
+ }
+ }
- public boolean ownDeletesAreVisible(int type) throws SQLException {
- return false;
- }
+ public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern)
+ throws SQLException {
+ return getEmptyResultSet();
+ }
- public boolean ownInsertsAreVisible(int type) throws SQLException {
- return false;
- }
+ public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern)
+ throws SQLException {
+ return getEmptyResultSet();
+ }
- public boolean othersUpdatesAreVisible(int type) throws SQLException {
- return false;
- }
+ public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable)
+ throws SQLException {
+ return getEmptyResultSet();
+ }
- public boolean othersDeletesAreVisible(int type) throws SQLException {
- return false;
- }
+ public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException {
+ return getEmptyResultSet();
+ }
- public boolean othersInsertsAreVisible(int type) throws SQLException {
- return false;
- }
+ public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException {
+ return getEmptyResultSet();
+ }
- public boolean updatesAreDetected(int type) throws SQLException {
- return false;
- }
+ public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException {
+ return getEmptyResultSet();
+ }
- public boolean deletesAreDetected(int type) throws SQLException {
- return false;
- }
+ public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException {
+ return getEmptyResultSet();
+ }
- public boolean insertsAreDetected(int type) throws SQLException {
- return false;
- }
+ public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable,
+ String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException {
+ return getEmptyResultSet();
+ }
- public boolean supportsBatchUpdates() throws SQLException {
- return false;
- }
+ public ResultSet getTypeInfo() throws SQLException {
+ return getEmptyResultSet();
+ }
- public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types)
- throws SQLException {
- return getEmptyResultSet();
- }
+ public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate)
+ throws SQLException {
+ return getEmptyResultSet();
+ }
- public Connection getConnection() throws SQLException {
- return null;
- }
+ public boolean supportsResultSetType(int type) throws SQLException {
+ return false;
+ }
- public boolean supportsSavepoints() throws SQLException {
- return false;
- }
+ public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException {
+ return false;
+ }
- public boolean supportsNamedParameters() throws SQLException {
- return false;
- }
+ public boolean ownUpdatesAreVisible(int type) throws SQLException {
+ return false;
+ }
- public boolean supportsMultipleOpenResults() throws SQLException {
- return false;
- }
+ public boolean ownDeletesAreVisible(int type) throws SQLException {
+ return false;
+ }
- public boolean supportsGetGeneratedKeys() throws SQLException {
- return false;
- }
+ public boolean ownInsertsAreVisible(int type) throws SQLException {
+ return false;
+ }
- public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException {
- return getEmptyResultSet();
- }
+ public boolean othersUpdatesAreVisible(int type) throws SQLException {
+ return false;
+ }
- public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException {
- return getEmptyResultSet();
- }
+ public boolean othersDeletesAreVisible(int type) throws SQLException {
+ return false;
+ }
- public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern,
- String attributeNamePattern) throws SQLException {
- return getEmptyResultSet();
- }
+ public boolean othersInsertsAreVisible(int type) throws SQLException {
+ return false;
+ }
- public boolean supportsResultSetHoldability(int holdability) throws SQLException {
- return false;
- }
+ public boolean updatesAreDetected(int type) throws SQLException {
+ return false;
+ }
- public int getResultSetHoldability() throws SQLException {
- return 0;
- }
+ public boolean deletesAreDetected(int type) throws SQLException {
+ return false;
+ }
- public int getDatabaseMajorVersion() throws SQLException {
- return 0;
- }
+ public boolean insertsAreDetected(int type) throws SQLException {
+ return false;
+ }
- public int getDatabaseMinorVersion() throws SQLException {
- return 0;
- }
+ public boolean supportsBatchUpdates() throws SQLException {
+ return false;
+ }
- public int getJDBCMajorVersion() throws SQLException {
- return 0;
- }
+ public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types)
+ throws SQLException {
+ return getEmptyResultSet();
+ }
- public int getJDBCMinorVersion() throws SQLException {
- return 0;
- }
+ public Connection getConnection() throws SQLException {
+ return null;
+ }
- public int getSQLStateType() throws SQLException {
- return 0;
- }
+ public boolean supportsSavepoints() throws SQLException {
+ return false;
+ }
- public boolean locatorsUpdateCopy() throws SQLException {
- return false;
- }
+ public boolean supportsNamedParameters() throws SQLException {
+ return false;
+ }
- public boolean supportsStatementPooling() throws SQLException {
- return false;
- }
+ public boolean supportsMultipleOpenResults() throws SQLException {
+ return false;
+ }
- public RowIdLifetime getRowIdLifetime() throws SQLException {
- return null;
- }
+ public boolean supportsGetGeneratedKeys() throws SQLException {
+ return false;
+ }
- public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException {
- return null;
- }
+ public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException {
+ return getEmptyResultSet();
+ }
- public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException {
- return false;
- }
+ public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException {
+ return getEmptyResultSet();
+ }
- public boolean autoCommitFailureClosesAllResultSets() throws SQLException {
- return false;
- }
+ public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern,
+ String attributeNamePattern) throws SQLException {
+ return getEmptyResultSet();
+ }
- public ResultSet getClientInfoProperties() throws SQLException {
- return getEmptyResultSet();
- }
+ public boolean supportsResultSetHoldability(int holdability) throws SQLException {
+ return false;
+ }
- public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern)
- throws SQLException {
- return getEmptyResultSet();
- }
+ public int getResultSetHoldability() throws SQLException {
+ return 0;
+ }
- public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern,
- String columnNamePattern) throws SQLException {
- return getEmptyResultSet();
- }
+ public int getDatabaseMajorVersion() throws SQLException {
+ return 0;
+ }
- public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern,
- String columnNamePattern) throws SQLException {
- return getEmptyResultSet();
- }
+ public int getDatabaseMinorVersion() throws SQLException {
+ return 0;
+ }
- public boolean generatedKeyAlwaysReturned() throws SQLException {
- return false;
- }
+ public int getJDBCMajorVersion() throws SQLException {
+ return 0;
+ }
- private ResultSet getEmptyResultSet() {
- return new EmptyResultSet();
- }
+ public int getJDBCMinorVersion() throws SQLException {
+ return 0;
+ }
+
+ public int getSQLStateType() throws SQLException {
+ return 0;
+ }
+
+ public boolean locatorsUpdateCopy() throws SQLException {
+ return false;
+ }
+
+ public boolean supportsStatementPooling() throws SQLException {
+ return false;
+ }
+
+ public RowIdLifetime getRowIdLifetime() throws SQLException {
+ return null;
+ }
+
+ public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException {
+ return null;
+ }
+
+ public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException {
+ return false;
+ }
+
+ public boolean autoCommitFailureClosesAllResultSets() throws SQLException {
+ return false;
+ }
+
+ public ResultSet getClientInfoProperties() throws SQLException {
+ return getEmptyResultSet();
+ }
+
+ public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern)
+ throws SQLException {
+ return getEmptyResultSet();
+ }
+
+ public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern,
+ String columnNamePattern) throws SQLException {
+ return getEmptyResultSet();
+ }
+
+ public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern,
+ String columnNamePattern) throws SQLException {
+ return getEmptyResultSet();
+ }
+
+ public boolean generatedKeyAlwaysReturned() throws SQLException {
+ return false;
+ }
+
+ private ResultSet getEmptyResultSet() {
+ return new EmptyResultSet();
+ }
}
\ No newline at end of file
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
index 02d642d643..97d93fb0a1 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
@@ -14,26 +14,29 @@
*****************************************************************************/
package com.taosdata.jdbc;
-import org.apache.commons.lang3.StringUtils;
+
+import java.io.*;
import java.sql.*;
+import java.util.ArrayList;
+import java.util.List;
import java.util.Properties;
import java.util.logging.Logger;
/**
* The Java SQL framework allows for multiple database drivers. Each driver
* should supply a class that implements the Driver interface
- *
+ *
*
* The DriverManager will try to load as many drivers as it can find and then
* for any given connection request, it will ask each driver in turn to try to
* connect to the target URL.
- *
+ *
*
* It is strongly recommended that each Driver class should be small and stand
* alone so that the Driver class can be loaded and queried without bringing in
* vast quantities of supporting code.
- *
+ *
*
* When a Driver class is loaded, it should create an instance of itself and
* register it with the DriverManager. This means that a user can load and
@@ -41,39 +44,41 @@ import java.util.logging.Logger;
*/
public class TSDBDriver implements java.sql.Driver {
- @Deprecated
- private static final String URL_PREFIX1 = "jdbc:TSDB://";
- private static final String URL_PREFIX = "jdbc:TAOS://";
+ @Deprecated
+ private static final String URL_PREFIX1 = "jdbc:TSDB://";
- /**
- * Key used to retrieve the database value from the properties instance passed
- * to the driver.
- */
- public static final String PROPERTY_KEY_DBNAME = "dbname";
+ private static final String URL_PREFIX = "jdbc:TAOS://";
- /**
- * Key used to retrieve the host value from the properties instance passed to
- * the driver.
- */
- public static final String PROPERTY_KEY_HOST = "host";
- /**
- * Key used to retrieve the password value from the properties instance passed
- * to the driver.
- */
- public static final String PROPERTY_KEY_PASSWORD = "password";
+ /**
+ * Key used to retrieve the database value from the properties instance passed
+ * to the driver.
+ */
+ public static final String PROPERTY_KEY_DBNAME = "dbname";
- /**
- * Key used to retrieve the port number value from the properties instance
- * passed to the driver.
- */
- public static final String PROPERTY_KEY_PORT = "port";
+ /**
+ * Key used to retrieve the host value from the properties instance passed to
+ * the driver.
+ */
+ public static final String PROPERTY_KEY_HOST = "host";
+ /**
+ * Key used to retrieve the password value from the properties instance passed
+ * to the driver.
+ */
+ public static final String PROPERTY_KEY_PASSWORD = "password";
+
+ /**
+ * Key used to retrieve the port number value from the properties instance
+ * passed to the driver.
+ */
+ public static final String PROPERTY_KEY_PORT = "port";
+
+ /**
+ * Key used to retrieve the user value from the properties instance passed to
+ * the driver.
+ */
+ public static final String PROPERTY_KEY_USER = "user";
- /**
- * Key used to retrieve the user value from the properties instance passed to
- * the driver.
- */
- public static final String PROPERTY_KEY_USER = "user";
/**
* Key for the configuration file directory of TSDB client in properties instance
@@ -98,277 +103,319 @@ public class TSDBDriver implements java.sql.Driver {
public static final String PROPERTY_KEY_PROTOCOL = "protocol";
- /**
- * Index for port coming out of parseHostPortPair().
- */
- public final static int PORT_NUMBER_INDEX = 1;
- /**
- * Index for host coming out of parseHostPortPair().
- */
- public final static int HOST_NAME_INDEX = 0;
+ /**
+ * Index for port coming out of parseHostPortPair().
+ */
+ public final static int PORT_NUMBER_INDEX = 1;
- private TSDBDatabaseMetaData dbMetaData = null;
+ /**
+ * Index for host coming out of parseHostPortPair().
+ */
+ public final static int HOST_NAME_INDEX = 0;
- static {
- try {
- java.sql.DriverManager.registerDriver(new TSDBDriver());
- } catch (SQLException E) {
- throw new RuntimeException(TSDBConstants.WrapErrMsg("can't register tdengine jdbc driver!"));
- }
- }
+ private TSDBDatabaseMetaData dbMetaData = null;
- public Connection connect(String url, Properties info) throws SQLException {
- if (url == null) {
- throw new SQLException(TSDBConstants.WrapErrMsg("url is not set!"));
- }
+ static {
+ try {
+ java.sql.DriverManager.registerDriver(new TSDBDriver());
+ } catch (SQLException E) {
+ throw new RuntimeException(TSDBConstants.WrapErrMsg("can't register tdengine jdbc driver!"));
+ }
+ }
- Properties props = null;
+ private List loadConfigEndpoints(File cfgFile) {
+ List endpoints = new ArrayList<>();
+ try (BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) {
+ String line = null;
+ while ((line = reader.readLine()) != null) {
+ if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")) {
+ endpoints.add(line.substring(line.indexOf('p') + 1).trim());
+ }
+ if (endpoints.size() > 1)
+ break;
+ }
+ } catch (FileNotFoundException e) {
+ e.printStackTrace();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ return endpoints;
+ }
- if ((props = parseURL(url, info)) == null) {
- return null;
- }
+ /**
+ * @param cfgDirPath
+ * @return return the config dir
+ **/
+ private File loadConfigDir(String cfgDirPath) {
+ if (cfgDirPath == null)
+ return loadDefaultConfigDir();
+ File cfgDir = new File(cfgDirPath);
+ if (!cfgDir.exists())
+ return loadDefaultConfigDir();
+ return cfgDir;
+ }
- try {
- TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE), (String) props.get(PROPERTY_KEY_CHARSET),
- (String) props.get(PROPERTY_KEY_TIME_ZONE));
- Connection newConn = new TSDBConnection(props, this.dbMetaData);
- return newConn;
- } catch (SQLWarning sqlWarning) {
- sqlWarning.printStackTrace();
- Connection newConn = new TSDBConnection(props, this.dbMetaData);
- return newConn;
- } catch (SQLException sqlEx) {
- throw sqlEx;
- } catch (Exception ex) {
- SQLException sqlEx = new SQLException("SQLException:" + ex.toString());
- sqlEx.initCause(ex);
- throw sqlEx;
- }
- }
+ /**
+ * @return search the default config dir, if the config dir is not exist will return null
+ */
+ private File loadDefaultConfigDir() {
+ File cfgDir;
+ File cfgDir_linux = new File("/etc/taos");
+ cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null;
+ File cfgDir_windows = new File("C:\\TDengine\\cfg");
+ cfgDir = (cfgDir == null && cfgDir_windows.exists()) ? cfgDir_windows : cfgDir;
+ return cfgDir;
+ }
- /**
- * Parses hostPortPair in the form of [host][:port] into an array, with the
- * element of index HOST_NAME_INDEX being the host (or null if not specified),
- * and the element of index PORT_NUMBER_INDEX being the port (or null if not
- * specified).
- *
- * @param hostPortPair
- * host and port in form of of [host][:port]
- *
- * @return array containing host and port as Strings
- *
- * @throws SQLException
- * if a parse error occurs
- */
- protected static String[] parseHostPortPair(String hostPortPair) throws SQLException {
- String[] splitValues = new String[2];
+ public Connection connect(String url, Properties info) throws SQLException {
+ if (url == null) {
+ throw new SQLException(TSDBConstants.WrapErrMsg("url is not set!"));
+ }
- int portIndex = hostPortPair.indexOf(":");
+ Properties props = null;
+ if ((props = parseURL(url, info)) == null) {
+ return null;
+ }
- String hostname = null;
+ //load taos.cfg start
+ if (info.getProperty(TSDBDriver.PROPERTY_KEY_HOST) == null && info.getProperty(TSDBDriver.PROPERTY_KEY_PORT) == null) {
+ File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR));
+ File cfgFile = cfgDir.listFiles((dir, name) -> "taos.cfg".equalsIgnoreCase(name))[0];
+ List endpoints = loadConfigEndpoints(cfgFile);
+ if (!endpoints.isEmpty()) {
+ info.setProperty(TSDBDriver.PROPERTY_KEY_HOST, endpoints.get(0).split(":")[0]);
+ info.setProperty(TSDBDriver.PROPERTY_KEY_PORT, endpoints.get(0).split(":")[1]);
+ }
+ }
- if (portIndex != -1) {
- if ((portIndex + 1) < hostPortPair.length()) {
- String portAsString = hostPortPair.substring(portIndex + 1);
- hostname = hostPortPair.substring(0, portIndex);
+ try {
+ TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE), (String) props.get(PROPERTY_KEY_CHARSET),
+ (String) props.get(PROPERTY_KEY_TIME_ZONE));
+ Connection newConn = new TSDBConnection(props, this.dbMetaData);
+ return newConn;
+ } catch (SQLWarning sqlWarning) {
+ sqlWarning.printStackTrace();
+ Connection newConn = new TSDBConnection(props, this.dbMetaData);
+ return newConn;
+ } catch (SQLException sqlEx) {
+ throw sqlEx;
+ } catch (Exception ex) {
+ SQLException sqlEx = new SQLException("SQLException:" + ex.toString());
+ sqlEx.initCause(ex);
+ throw sqlEx;
+ }
+ }
- splitValues[HOST_NAME_INDEX] = hostname;
+ /**
+ * Parses hostPortPair in the form of [host][:port] into an array, with the
+ * element of index HOST_NAME_INDEX being the host (or null if not specified),
+ * and the element of index PORT_NUMBER_INDEX being the port (or null if not
+ * specified).
+ *
+ * @param hostPortPair host and port in form of of [host][:port]
+ * @return array containing host and port as Strings
+ * @throws SQLException if a parse error occurs
+ */
+ protected static String[] parseHostPortPair(String hostPortPair) throws SQLException {
+ String[] splitValues = new String[2];
- splitValues[PORT_NUMBER_INDEX] = portAsString;
- } else {
- throw new SQLException(TSDBConstants.WrapErrMsg("port is not proper!"));
- }
- } else {
- splitValues[HOST_NAME_INDEX] = hostPortPair;
- splitValues[PORT_NUMBER_INDEX] = null;
- }
+ int portIndex = hostPortPair.indexOf(":");
- return splitValues;
- }
+ String hostname = null;
- public boolean acceptsURL(String url) throws SQLException {
- return StringUtils.isNotBlank(url) && url.startsWith(URL_PREFIX);
- }
+ if (portIndex != -1) {
+ if ((portIndex + 1) < hostPortPair.length()) {
+ String portAsString = hostPortPair.substring(portIndex + 1);
+ hostname = hostPortPair.substring(0, portIndex);
- public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException {
- if (info == null) {
- info = new Properties();
- }
+ splitValues[HOST_NAME_INDEX] = hostname;
- if ((url != null) && (url.startsWith(URL_PREFIX) || url.startsWith(URL_PREFIX1))) {
- info = parseURL(url, info);
- }
+ splitValues[PORT_NUMBER_INDEX] = portAsString;
+ } else {
+ throw new SQLException(TSDBConstants.WrapErrMsg("port is not proper!"));
+ }
+ } else {
+ splitValues[HOST_NAME_INDEX] = hostPortPair;
+ splitValues[PORT_NUMBER_INDEX] = null;
+ }
- DriverPropertyInfo hostProp = new DriverPropertyInfo(PROPERTY_KEY_HOST, info.getProperty(PROPERTY_KEY_HOST));
- hostProp.required = true;
+ return splitValues;
+ }
- DriverPropertyInfo portProp = new DriverPropertyInfo(PROPERTY_KEY_PORT,
- info.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT));
- portProp.required = false;
+ public boolean acceptsURL(String url) throws SQLException {
+ return (url != null && url.length() > 0 && url.trim().length() > 0) && url.startsWith(URL_PREFIX);
+ }
- DriverPropertyInfo dbProp = new DriverPropertyInfo(PROPERTY_KEY_DBNAME, info.getProperty(PROPERTY_KEY_DBNAME));
- dbProp.required = false;
- dbProp.description = "Database name";
+ public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException {
+ if (info == null) {
+ info = new Properties();
+ }
- DriverPropertyInfo userProp = new DriverPropertyInfo(PROPERTY_KEY_USER, info.getProperty(PROPERTY_KEY_USER));
- userProp.required = true;
+ if ((url != null) && (url.startsWith(URL_PREFIX) || url.startsWith(URL_PREFIX1))) {
+ info = parseURL(url, info);
+ }
- DriverPropertyInfo passwordProp = new DriverPropertyInfo(PROPERTY_KEY_PASSWORD,
- info.getProperty(PROPERTY_KEY_PASSWORD));
- passwordProp.required = true;
+ DriverPropertyInfo hostProp = new DriverPropertyInfo(PROPERTY_KEY_HOST, info.getProperty(PROPERTY_KEY_HOST));
+ hostProp.required = true;
- DriverPropertyInfo[] propertyInfo = new DriverPropertyInfo[5];
- propertyInfo[0] = hostProp;
- propertyInfo[1] = portProp;
- propertyInfo[2] = dbProp;
- propertyInfo[3] = userProp;
- propertyInfo[4] = passwordProp;
+ DriverPropertyInfo portProp = new DriverPropertyInfo(PROPERTY_KEY_PORT, info.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT));
+ portProp.required = false;
- return propertyInfo;
- }
+ DriverPropertyInfo dbProp = new DriverPropertyInfo(PROPERTY_KEY_DBNAME, info.getProperty(PROPERTY_KEY_DBNAME));
+ dbProp.required = false;
+ dbProp.description = "Database name";
- /**
- * example: jdbc:TSDB://127.0.0.1:0/db?user=root&password=your_password
- */
+ DriverPropertyInfo userProp = new DriverPropertyInfo(PROPERTY_KEY_USER, info.getProperty(PROPERTY_KEY_USER));
+ userProp.required = true;
- public Properties parseURL(String url, Properties defaults) throws java.sql.SQLException {
- Properties urlProps = (defaults != null) ? defaults : new Properties();
- if (url == null) {
- return null;
- }
+ DriverPropertyInfo passwordProp = new DriverPropertyInfo(PROPERTY_KEY_PASSWORD, info.getProperty(PROPERTY_KEY_PASSWORD));
+ passwordProp.required = true;
- if (!StringUtils.startsWithIgnoreCase(url, URL_PREFIX) && !StringUtils.startsWithIgnoreCase(url, URL_PREFIX1)) {
- return null;
- }
+ DriverPropertyInfo[] propertyInfo = new DriverPropertyInfo[5];
+ propertyInfo[0] = hostProp;
+ propertyInfo[1] = portProp;
+ propertyInfo[2] = dbProp;
+ propertyInfo[3] = userProp;
+ propertyInfo[4] = passwordProp;
- String urlForMeta = url;
+ return propertyInfo;
+ }
- String dbProductName = url.substring(url.indexOf(":") + 1);
- dbProductName = dbProductName.substring(0, dbProductName.indexOf(":"));
- int beginningOfSlashes = url.indexOf("//");
- url = url.substring(beginningOfSlashes + 2);
+ /**
+ * example: jdbc:TSDB://127.0.0.1:0/db?user=root&password=your_password
+ */
+ public Properties parseURL(String url, Properties defaults) throws java.sql.SQLException {
+ Properties urlProps = (defaults != null) ? defaults : new Properties();
+ if (url == null) {
+ return null;
+ }
- String host = url.substring(0, url.indexOf(":"));
- url = url.substring(url.indexOf(":") + 1);
- urlProps.setProperty(PROPERTY_KEY_HOST, host);
+ if (!url.startsWith(URL_PREFIX) && !url.startsWith(URL_PREFIX1)) {
+ return null;
+ }
- String port = url.substring(0, url.indexOf("/"));
- urlProps.setProperty(PROPERTY_KEY_PORT, port);
- url = url.substring(url.indexOf("/") + 1);
+ String urlForMeta = url;
- if (url.indexOf("?") != -1) {
- String dbName = url.substring(0, url.indexOf("?"));
- urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName);
- url = url.trim().substring(url.indexOf("?") + 1);
- } else {
- // without user & password so return
- if(!url.trim().isEmpty()) {
- String dbName = url.trim();
- urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName);
- }
- this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, urlProps.getProperty("user"));
- return urlProps;
- }
+ String dbProductName = url.substring(url.indexOf(":") + 1);
+ dbProductName = dbProductName.substring(0, dbProductName.indexOf(":"));
+ int beginningOfSlashes = url.indexOf("//");
+ url = url.substring(beginningOfSlashes + 2);
- String user = "";
+ String host = url.substring(0, url.indexOf(":"));
+ url = url.substring(url.indexOf(":") + 1);
+ urlProps.setProperty(PROPERTY_KEY_HOST, host);
- if (url.indexOf("&") == -1) {
- String[] kvPair = url.trim().split("=");
- if (kvPair.length == 2) {
- setPropertyValue(urlProps, kvPair);
- return urlProps;
- }
- }
+ String port = url.substring(0, url.indexOf("/"));
+ urlProps.setProperty(PROPERTY_KEY_PORT, port);
+ url = url.substring(url.indexOf("/") + 1);
- String[] queryStrings = url.trim().split("&");
- for (String queryStr : queryStrings) {
- String[] kvPair = queryStr.trim().split("=");
- if (kvPair.length < 2){
- continue;
- }
- setPropertyValue(urlProps, kvPair);
- }
+ if (url.indexOf("?") != -1) {
+ String dbName = url.substring(0, url.indexOf("?"));
+ urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName);
+ url = url.trim().substring(url.indexOf("?") + 1);
+ } else {
+ // without user & password so return
+ if (!url.trim().isEmpty()) {
+ String dbName = url.trim();
+ urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName);
+ }
+ this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, urlProps.getProperty("user"));
+ return urlProps;
+ }
- user = urlProps.getProperty(PROPERTY_KEY_USER).toString();
- this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, user);
+ String user = "";
- return urlProps;
- }
+ if (url.indexOf("&") == -1) {
+ String[] kvPair = url.trim().split("=");
+ if (kvPair.length == 2) {
+ setPropertyValue(urlProps, kvPair);
+ return urlProps;
+ }
+ }
- public void setPropertyValue(Properties property, String[] keyValuePair) {
- switch (keyValuePair[0].toLowerCase()) {
- case PROPERTY_KEY_USER:
- property.setProperty(PROPERTY_KEY_USER, keyValuePair[1]);
- break;
- case PROPERTY_KEY_PASSWORD:
- property.setProperty(PROPERTY_KEY_PASSWORD, keyValuePair[1]);
- break;
- case PROPERTY_KEY_TIME_ZONE:
- property.setProperty(PROPERTY_KEY_TIME_ZONE, keyValuePair[1]);
- break;
- case PROPERTY_KEY_LOCALE:
- property.setProperty(PROPERTY_KEY_LOCALE, keyValuePair[1]);
- break;
- case PROPERTY_KEY_CHARSET:
- property.setProperty(PROPERTY_KEY_CHARSET, keyValuePair[1]);
- break;
- case PROPERTY_KEY_CONFIG_DIR:
- property.setProperty(PROPERTY_KEY_CONFIG_DIR, keyValuePair[1]);
- break;
- }
- }
+ String[] queryStrings = url.trim().split("&");
+ for (String queryStr : queryStrings) {
+ String[] kvPair = queryStr.trim().split("=");
+ if (kvPair.length < 2) {
+ continue;
+ }
+ setPropertyValue(urlProps, kvPair);
+ }
+
+ user = urlProps.getProperty(PROPERTY_KEY_USER).toString();
+ this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, user);
+
+ return urlProps;
+ }
+
+ public void setPropertyValue(Properties property, String[] keyValuePair) {
+ switch (keyValuePair[0].toLowerCase()) {
+ case PROPERTY_KEY_USER:
+ property.setProperty(PROPERTY_KEY_USER, keyValuePair[1]);
+ break;
+ case PROPERTY_KEY_PASSWORD:
+ property.setProperty(PROPERTY_KEY_PASSWORD, keyValuePair[1]);
+ break;
+ case PROPERTY_KEY_TIME_ZONE:
+ property.setProperty(PROPERTY_KEY_TIME_ZONE, keyValuePair[1]);
+ break;
+ case PROPERTY_KEY_LOCALE:
+ property.setProperty(PROPERTY_KEY_LOCALE, keyValuePair[1]);
+ break;
+ case PROPERTY_KEY_CHARSET:
+ property.setProperty(PROPERTY_KEY_CHARSET, keyValuePair[1]);
+ break;
+ case PROPERTY_KEY_CONFIG_DIR:
+ property.setProperty(PROPERTY_KEY_CONFIG_DIR, keyValuePair[1]);
+ break;
+ }
+ }
- public int getMajorVersion() {
- return 1;
- }
+ public int getMajorVersion() {
+ return 1;
+ }
- public int getMinorVersion() {
- return 1;
- }
+ public int getMinorVersion() {
+ return 1;
+ }
- public boolean jdbcCompliant() {
- return false;
- }
+ public boolean jdbcCompliant() {
+ return false;
+ }
- public Logger getParentLogger() throws SQLFeatureNotSupportedException {
- return null;
- }
+ public Logger getParentLogger() throws SQLFeatureNotSupportedException {
+ return null;
+ }
- /**
- * Returns the host property
- *
- * @param props
- * the java.util.Properties instance to retrieve the hostname from.
- *
- * @return the host
- */
- public String host(Properties props) {
- return props.getProperty(PROPERTY_KEY_HOST, "localhost");
- }
+ /**
+ * Returns the host property
+ *
+ * @param props the java.util.Properties instance to retrieve the hostname from.
+ * @return the host
+ */
+ public String host(Properties props) {
+ return props.getProperty(PROPERTY_KEY_HOST, "localhost");
+ }
- /**
- * Returns the port number property
- *
- * @param props
- * the properties to get the port number from
- *
- * @return the port number
- */
- public int port(Properties props) {
- return Integer.parseInt(props.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT));
- }
+ /**
+ * Returns the port number property
+ *
+ * @param props the properties to get the port number from
+ * @return the port number
+ */
+ public int port(Properties props) {
+ return Integer.parseInt(props.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT));
+ }
- /**
- * Returns the database property from props
- *
- * @param props
- * the Properties to look for the database property.
- *
- * @return the database name.
- */
- public String database(Properties props) {
- return props.getProperty(PROPERTY_KEY_DBNAME);
- }
+ /**
+ * Returns the database property from props
+ *
+ * @param props the Properties to look for the database property.
+ * @return the database name.
+ */
+ public String database(Properties props) {
+ return props.getProperty(PROPERTY_KEY_DBNAME);
+ }
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java
index a8d6ceb713..5c6b0545e9 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java
@@ -242,7 +242,7 @@ public class TSDBStatement implements Statement {
public void addBatch(String sql) throws SQLException {
if (batchedArgs == null) {
- batchedArgs = new ArrayList();
+ batchedArgs = new ArrayList<>();
}
batchedArgs.add(sql);
}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
index 07b43d1227..0a71c77d1d 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
@@ -10,8 +10,6 @@ import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;
-import static org.junit.Assert.assertTrue;
-
public class SubscribeTest extends BaseTest {
Connection connection = null;
Statement statement = null;
diff --git a/src/cq/CMakeLists.txt b/src/cq/CMakeLists.txt
index db366639ef..e631397348 100644
--- a/src/cq/CMakeLists.txt
+++ b/src/cq/CMakeLists.txt
@@ -2,6 +2,8 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)
+INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
+INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR}/src SRC)
IF (TD_LINUX)
diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c
index 758d620e57..1a99a84b8e 100644
--- a/src/cq/src/cqMain.c
+++ b/src/cq/src/cqMain.c
@@ -21,6 +21,7 @@
#include
#include "taos.h"
+#include "tsclient.h"
#include "taosdef.h"
#include "taosmsg.h"
#include "ttimer.h"
@@ -30,10 +31,12 @@
#include "tlog.h"
#include "twal.h"
-#define cError(...) { if (cqDebugFlag & DEBUG_ERROR) { taosPrintLog("ERROR CQ ", cqDebugFlag, __VA_ARGS__); }}
-#define cWarn(...) { if (cqDebugFlag & DEBUG_WARN) { taosPrintLog("WARN CQ ", cqDebugFlag, __VA_ARGS__); }}
+#define cFatal(...) { if (cqDebugFlag & DEBUG_FATAL) { taosPrintLog("CQ FATAL ", 255, __VA_ARGS__); }}
+#define cError(...) { if (cqDebugFlag & DEBUG_ERROR) { taosPrintLog("CQ ERROR ", 255, __VA_ARGS__); }}
+#define cWarn(...) { if (cqDebugFlag & DEBUG_WARN) { taosPrintLog("CQ WARN ", 255, __VA_ARGS__); }}
+#define cInfo(...) { if (cqDebugFlag & DEBUG_INFO) { taosPrintLog("CQ ", 255, __VA_ARGS__); }}
+#define cDebug(...) { if (cqDebugFlag & DEBUG_DEBUG) { taosPrintLog("CQ ", cqDebugFlag, __VA_ARGS__); }}
#define cTrace(...) { if (cqDebugFlag & DEBUG_TRACE) { taosPrintLog("CQ ", cqDebugFlag, __VA_ARGS__); }}
-#define cPrint(...) { taosPrintLog("CQ ", 255, __VA_ARGS__); }
typedef struct {
int vgId;
@@ -63,8 +66,6 @@ typedef struct SCqObj {
SCqContext * pContext;
} SCqObj;
-int cqDebugFlag = 135;
-
static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row);
static void cqCreateStream(SCqContext *pContext, SCqObj *pObj);
@@ -94,7 +95,7 @@ void *cqOpen(void *ahandle, const SCqCfg *pCfg) {
pthread_mutex_init(&pContext->mutex, NULL);
- cTrace("vgId:%d, CQ is opened", pContext->vgId);
+ cInfo("vgId:%d, CQ is opened", pContext->vgId);
return pContext;
}
@@ -125,7 +126,7 @@ void cqClose(void *handle) {
taosTmrCleanUp(pContext->tmrCtrl);
pContext->tmrCtrl = NULL;
- cTrace("vgId:%d, CQ is closed", pContext->vgId);
+ cInfo("vgId:%d, CQ is closed", pContext->vgId);
free(pContext);
}
@@ -133,7 +134,7 @@ void cqStart(void *handle) {
SCqContext *pContext = handle;
if (pContext->dbConn || pContext->master) return;
- cTrace("vgId:%d, start all CQs", pContext->vgId);
+ cInfo("vgId:%d, start all CQs", pContext->vgId);
pthread_mutex_lock(&pContext->mutex);
pContext->master = 1;
@@ -149,7 +150,7 @@ void cqStart(void *handle) {
void cqStop(void *handle) {
SCqContext *pContext = handle;
- cTrace("vgId:%d, stop all CQs", pContext->vgId);
+ cInfo("vgId:%d, stop all CQs", pContext->vgId);
if (pContext->dbConn == NULL || pContext->master == 0) return;
pthread_mutex_lock(&pContext->mutex);
@@ -160,7 +161,7 @@ void cqStop(void *handle) {
if (pObj->pStream) {
taos_close_stream(pObj->pStream);
pObj->pStream = NULL;
- cTrace("vgId:%d, id:%d CQ:%s is closed", pContext->vgId, pObj->tid, pObj->sqlStr);
+ cInfo("vgId:%d, id:%d CQ:%s is closed", pContext->vgId, pObj->tid, pObj->sqlStr);
} else {
taosTmrStop(pObj->tmrId);
pObj->tmrId = 0;
@@ -188,7 +189,7 @@ void *cqCreate(void *handle, uint64_t uid, int tid, char *sqlStr, STSchema *pSch
pObj->pSchema = tdDupSchema(pSchema);
pObj->rowSize = schemaTLen(pSchema);
- cTrace("vgId:%d, id:%d CQ:%s is created", pContext->vgId, pObj->tid, pObj->sqlStr);
+ cInfo("vgId:%d, id:%d CQ:%s is created", pContext->vgId, pObj->tid, pObj->sqlStr);
pthread_mutex_lock(&pContext->mutex);
@@ -228,7 +229,7 @@ void cqDrop(void *handle) {
pObj->tmrId = 0;
}
- cTrace("vgId:%d, id:%d CQ:%s is dropped", pContext->vgId, pObj->tid, pObj->sqlStr);
+ cInfo("vgId:%d, id:%d CQ:%s is dropped", pContext->vgId, pObj->tid, pObj->sqlStr);
tdFreeSchema(pObj->pSchema);
free(pObj->sqlStr);
free(pObj);
@@ -236,24 +237,31 @@ void cqDrop(void *handle) {
pthread_mutex_unlock(&pContext->mutex);
}
+static void doCreateStream(void *param, TAOS_RES *result, int code) {
+ SCqObj* pObj = (SCqObj*)param;
+ SCqContext* pContext = pObj->pContext;
+ SSqlObj* pSql = (SSqlObj*)result;
+ pContext->dbConn = pSql->pTscObj;
+ cqCreateStream(pContext, pObj);
+}
+
static void cqProcessCreateTimer(void *param, void *tmrId) {
SCqObj* pObj = (SCqObj*)param;
SCqContext* pContext = pObj->pContext;
if (pContext->dbConn == NULL) {
- pContext->dbConn = taos_connect("localhost", pContext->user, pContext->pass, pContext->db, 0);
- if (pContext->dbConn == NULL) {
- cError("vgId:%d, failed to connect to TDengine(%s)", pContext->vgId, tstrerror(terrno));
- }
+ cDebug("vgId:%d, try connect to TDengine", pContext->vgId);
+ taos_connect_a(NULL, pContext->user, pContext->pass, pContext->db, 0, doCreateStream, param, NULL);
+ } else {
+ cqCreateStream(pContext, pObj);
}
-
- cqCreateStream(pContext, pObj);
}
static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
pObj->pContext = pContext;
if (pContext->dbConn == NULL) {
+ cDebug("vgId:%d, create dbConn after 1000 ms", pContext->vgId);
pObj->tmrId = taosTmrStart(cqProcessCreateTimer, 1000, pObj, pContext->tmrCtrl);
return;
}
@@ -262,7 +270,7 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
pObj->pStream = taos_open_stream(pContext->dbConn, pObj->sqlStr, cqProcessStreamRes, 0, pObj, NULL);
if (pObj->pStream) {
pContext->num++;
- cTrace("vgId:%d, id:%d CQ:%s is openned", pContext->vgId, pObj->tid, pObj->sqlStr);
+ cInfo("vgId:%d, id:%d CQ:%s is openned", pContext->vgId, pObj->tid, pObj->sqlStr);
} else {
cError("vgId:%d, id:%d CQ:%s, failed to open", pContext->vgId, pObj->tid, pObj->sqlStr);
}
@@ -278,7 +286,7 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) {
STSchema *pSchema = pObj->pSchema;
if (pObj->pStream == NULL) return;
- cTrace("vgId:%d, id:%d CQ:%s stream result is ready", pContext->vgId, pObj->tid, pObj->sqlStr);
+ cDebug("vgId:%d, id:%d CQ:%s stream result is ready", pContext->vgId, pObj->tid, pObj->sqlStr);
int size = sizeof(SWalHead) + sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + TD_DATA_ROW_HEAD_SIZE + pObj->rowSize;
char *buffer = calloc(size, 1);
diff --git a/src/dnode/src/dnodeMgmt.c b/src/dnode/src/dnodeMgmt.c
index c968246a68..8f2b687dc4 100644
--- a/src/dnode/src/dnodeMgmt.c
+++ b/src/dnode/src/dnodeMgmt.c
@@ -464,7 +464,30 @@ void dnodeUpdateMnodeEpSetForPeer(SRpcEpSet *pEpSet) {
dInfo("mnode EP list for peer is changed, numOfEps:%d inUse:%d", pEpSet->numOfEps, pEpSet->inUse);
for (int i = 0; i < pEpSet->numOfEps; ++i) {
pEpSet->port[i] -= TSDB_PORT_DNODEDNODE;
- dInfo("mnode index:%d %s:%u", i, pEpSet->fqdn[i], pEpSet->port[i])
+ dInfo("mnode index:%d %s:%u", i, pEpSet->fqdn[i], pEpSet->port[i]);
+
+ if (!mnodeIsRunning()) {
+ if (strcmp(pEpSet->fqdn[i], tsLocalFqdn) == 0 && pEpSet->port[i] == tsServerPort) {
+ dInfo("mnode index:%d %s:%u should work as mnode", i, pEpSet->fqdn[i], pEpSet->port[i]);
+ bool find = false;
+ for (int i = 0; i < tsDMnodeInfos.nodeNum; ++i) {
+ if (tsDMnodeInfos.nodeInfos[i].nodeId == dnodeGetDnodeId()) {
+ dInfo("localEp found in mnode infos");
+ find = true;
+ break;
+ }
+ }
+
+ if (!find) {
+ dInfo("localEp not found in mnode infos, will set into mnode infos");
+ tstrncpy(tsDMnodeInfos.nodeInfos[tsDMnodeInfos.nodeNum].nodeEp, tsLocalEp, TSDB_EP_LEN);
+ tsDMnodeInfos.nodeInfos[tsDMnodeInfos.nodeNum].nodeId = dnodeGetDnodeId();
+ tsDMnodeInfos.nodeNum++;
+ }
+
+ dnodeStartMnode();
+ }
+ }
}
tsDMnodeEpSet = *pEpSet;
diff --git a/src/dnode/src/dnodeModule.c b/src/dnode/src/dnodeModule.c
index 0a5b9b550c..ba7cdf2664 100644
--- a/src/dnode/src/dnodeModule.c
+++ b/src/dnode/src/dnodeModule.c
@@ -146,19 +146,16 @@ void dnodeProcessModuleStatus(uint32_t moduleStatus) {
}
}
-bool dnodeCheckMnodeStarting() {
- if (tsModuleStatus & TSDB_MOD_MNODE) return false;
-
- SDMMnodeInfos *mnodes = dnodeGetMnodeInfos();
- for (int32_t i = 0; i < mnodes->nodeNum; ++i) {
- SDMMnodeInfo *node = &mnodes->nodeInfos[i];
- if (node->nodeId == dnodeGetDnodeId()) {
- uint32_t moduleStatus = tsModuleStatus | (1 << TSDB_MOD_MNODE);;
- dInfo("start mnode module, module status:%d, new status:%d", tsModuleStatus, moduleStatus);
- dnodeProcessModuleStatus(moduleStatus);
- return true;
- }
+bool dnodeStartMnode() {
+ if (tsModuleStatus & (1 << TSDB_MOD_MNODE)) {
+ dDebug("mnode module is already started, module status:%d", tsModuleStatus);
+ return false;
}
- return false;
+ uint32_t moduleStatus = tsModuleStatus | (1 << TSDB_MOD_MNODE);
+ dInfo("start mnode module, module status:%d, new status:%d", tsModuleStatus, moduleStatus);
+ dnodeProcessModuleStatus(moduleStatus);
+
+ sdbUpdateSync();
+ return true;
}
diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c
index d66ebf9772..fb4ffcdafa 100644
--- a/src/dnode/src/dnodeVRead.c
+++ b/src/dnode/src/dnodeVRead.c
@@ -187,6 +187,7 @@ void dnodeSendRpcReadRsp(void *pVnode, SReadMsg *pRead, int32_t code) {
}
void dnodeDispatchNonRspMsg(void *pVnode, SReadMsg *pRead, int32_t code) {
+ rpcFreeCont(pRead->rpcMsg.pCont);
vnodeRelease(pVnode);
return;
}
diff --git a/src/inc/dnode.h b/src/inc/dnode.h
index fda9c1c1dd..017241c4f8 100644
--- a/src/inc/dnode.h
+++ b/src/inc/dnode.h
@@ -43,7 +43,7 @@ void dnodeGetMnodeEpSetForPeer(void *epSet);
void dnodeGetMnodeEpSetForShell(void *epSet);
void * dnodeGetMnodeInfos();
int32_t dnodeGetDnodeId();
-bool dnodeCheckMnodeStarting();
+bool dnodeStartMnode();
void dnodeAddClientRspHandle(uint8_t msgType, void (*fp)(SRpcMsg *rpcMsg));
void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg);
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index 1a40f3b56d..25814a748e 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -64,7 +64,7 @@ extern const int32_t TYPE_BYTES[11];
// TODO: replace and remove code below
#define CHAR_BYTES sizeof(char)
#define SHORT_BYTES sizeof(int16_t)
-#define INT_BYTES sizeof(int)
+#define INT_BYTES sizeof(int32_t)
#define LONG_BYTES sizeof(int64_t)
#define FLOAT_BYTES sizeof(float)
#define DOUBLE_BYTES sizeof(double)
@@ -73,7 +73,7 @@ extern const int32_t TYPE_BYTES[11];
#define TSDB_DATA_BOOL_NULL 0x02
#define TSDB_DATA_TINYINT_NULL 0x80
#define TSDB_DATA_SMALLINT_NULL 0x8000
-#define TSDB_DATA_INT_NULL 0x80000000
+#define TSDB_DATA_INT_NULL 0x80000000L
#define TSDB_DATA_BIGINT_NULL 0x8000000000000000L
#define TSDB_DATA_FLOAT_NULL 0x7FF00000 // it is an NAN
@@ -101,6 +101,7 @@ extern const int32_t TYPE_BYTES[11];
#define TSDB_TIME_PRECISION_MILLI 0
#define TSDB_TIME_PRECISION_MICRO 1
#define TSDB_TIME_PRECISION_NANO 2
+#define TSDB_TICK_PER_SECOND(precision) ((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L))
#define TSDB_TIME_PRECISION_MILLI_STR "ms"
#define TSDB_TIME_PRECISION_MICRO_STR "us"
@@ -130,22 +131,31 @@ do { \
#define GET_INT16_VAL(x) (*(int16_t *)(x))
#define GET_INT32_VAL(x) (*(int32_t *)(x))
#define GET_INT64_VAL(x) (*(int64_t *)(x))
-#ifdef _TD_ARM_32_
- #define GET_FLOAT_VAL(x) taos_align_get_float(x)
- #define GET_DOUBLE_VAL(x) taos_align_get_double(x)
-
- float taos_align_get_float(const char* pBuf);
- double taos_align_get_double(const char* pBuf);
+#ifdef _TD_ARM_32
//#define __float_align_declear() float __underlyFloat = 0.0;
//#define __float_align_declear()
//#define GET_FLOAT_VAL_ALIGN(x) (*(int32_t*)&(__underlyFloat) = *(int32_t*)(x); __underlyFloat);
// notes: src must be float or double type variable !!!
- #define SET_FLOAT_VAL_ALIGN(dst, src) (*(int32_t*) dst = *(int32_t*)src);
- #define SET_DOUBLE_VAL_ALIGN(dst, src) (*(int64_t*) dst = *(int64_t*)src);
+ //#define SET_FLOAT_VAL_ALIGN(dst, src) (*(int32_t*) dst = *(int32_t*)src);
+ //#define SET_DOUBLE_VAL_ALIGN(dst, src) (*(int64_t*) dst = *(int64_t*)src);
+
+ float taos_align_get_float(const char* pBuf);
+ double taos_align_get_double(const char* pBuf);
+
+ #define GET_FLOAT_VAL(x) taos_align_get_float(x)
+ #define GET_DOUBLE_VAL(x) taos_align_get_double(x)
+ #define SET_FLOAT_VAL(x, y) { float z = (float)(y); (*(int32_t*) x = *(int32_t*)(&z)); }
+ #define SET_DOUBLE_VAL(x, y) { double z = (double)(y); (*(int64_t*) x = *(int64_t*)(&z)); }
+ #define SET_FLOAT_PTR(x, y) { (*(int32_t*) x = *(int32_t*)y); }
+ #define SET_DOUBLE_PTR(x, y) { (*(int64_t*) x = *(int64_t*)y); }
#else
- #define GET_FLOAT_VAL(x) (*(float *)(x))
- #define GET_DOUBLE_VAL(x) (*(double *)(x))
+ #define GET_FLOAT_VAL(x) (*(float *)(x))
+ #define GET_DOUBLE_VAL(x) (*(double *)(x))
+ #define SET_FLOAT_VAL(x, y) { (*(float *)(x)) = (float)(y); }
+ #define SET_DOUBLE_VAL(x, y) { (*(double *)(x)) = (double)(y); }
+ #define SET_FLOAT_PTR(x, y) { (*(float *)(x)) = (*(float *)(y)); }
+ #define SET_DOUBLE_PTR(x, y) { (*(double *)(x)) = (*(double *)(y)); }
#endif
typedef struct tDataTypeDescriptor {
@@ -198,7 +208,7 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
void* getNullValue(int32_t type);
void assignVal(char *val, const char *src, int32_t len, int32_t type);
-void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
+void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf);
// TODO: check if below is necessary
#define TSDB_RELATION_INVALID 0
@@ -209,21 +219,24 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_RELATION_GREATER_EQUAL 5
#define TSDB_RELATION_NOT_EQUAL 6
#define TSDB_RELATION_LIKE 7
-#define TSDB_RELATION_IN 8
+#define TSDB_RELATION_ISNULL 8
+#define TSDB_RELATION_NOTNULL 9
+#define TSDB_RELATION_IN 10
-#define TSDB_RELATION_AND 9
-#define TSDB_RELATION_OR 10
-#define TSDB_RELATION_NOT 11
+#define TSDB_RELATION_AND 11
+#define TSDB_RELATION_OR 12
+#define TSDB_RELATION_NOT 13
-#define TSDB_BINARY_OP_ADD 12
-#define TSDB_BINARY_OP_SUBTRACT 13
-#define TSDB_BINARY_OP_MULTIPLY 14
-#define TSDB_BINARY_OP_DIVIDE 15
-#define TSDB_BINARY_OP_REMAINDER 16
+#define TSDB_BINARY_OP_ADD 30
+#define TSDB_BINARY_OP_SUBTRACT 31
+#define TSDB_BINARY_OP_MULTIPLY 32
+#define TSDB_BINARY_OP_DIVIDE 33
+#define TSDB_BINARY_OP_REMAINDER 34
#define TS_PATH_DELIMITER_LEN 1
#define TSDB_UNI_LEN 24
#define TSDB_USER_LEN TSDB_UNI_LEN
+
// ACCOUNT is a 32 bit positive integer
// this is the length of its string representation
// including the terminator zero
@@ -241,7 +254,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 64
#define TSDB_MAX_SQL_LEN TSDB_PAYLOAD_SIZE
#define TSDB_MAX_SQL_SHOW_LEN 256
-#define TSDB_MAX_ALLOWED_SQL_LEN (8*1024*1024U) // sql length should be less than 8mb
+#define TSDB_MAX_ALLOWED_SQL_LEN (1*1024*1024U) // sql length should be less than 8mb
#define TSDB_MAX_BYTES_PER_ROW 16384
#define TSDB_MAX_TAGS_LEN 16384
@@ -290,6 +303,8 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_CQ_SQL_SIZE 1024
#define TSDB_MIN_VNODES 64
#define TSDB_MAX_VNODES 2048
+#define TSDB_MIN_VNODES_PER_DB 2
+#define TSDB_MAX_VNODES_PER_DB 64
#define TSDB_DNODE_ROLE_ANY 0
#define TSDB_DNODE_ROLE_MGMT 1
diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h
index d8e5c8f1d7..b5d22ea80c 100644
--- a/src/inc/taoserror.h
+++ b/src/inc/taoserror.h
@@ -97,8 +97,10 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_APP_ERROR, 0, 0x0211, "Applicatio
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_ACTION_IN_PROGRESS, 0, 0x0212, "Action in progress")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DISCONNECTED, 0, 0x0213, "Disconnected from service")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_WRITE_AUTH, 0, 0x0214, "No write permission")
-TAOS_DEFINE_ERROR(TSDB_CODE_TSC_CONN_KILLED, 0, 0x0215, "Connection killed")
-TAOS_DEFINE_ERROR(TSDB_CODE_TSC_SQL_SYNTAX_ERROR, 0, 0x0216, "Syntax errr in SQL")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_CONN_KILLED, 0, 0x0215, "Connection killed")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_SQL_SYNTAX_ERROR, 0, 0x0216, "Syntax error in SQL")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DB_NOT_SELECTED, 0, 0x0217, "Database not specified or available")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TABLE_NAME, 0, 0x0218, "Table does not exist")
// mnode
TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED, 0, 0x0300, "Message not processed")
@@ -246,6 +248,107 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SYN_NOT_ENABLED, 0, 0x0901, "Sync modul
// wal
TAOS_DEFINE_ERROR(TSDB_CODE_WAL_APP_ERROR, 0, 0x1000, "Unexpected generic error in wal")
+TAOS_DEFINE_ERROR(TSDB_CODE_WAL_FILE_CORRUPTED, 0, 0x1001, "WAL file is corrupted")
+
+// http
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_SERVER_OFFLINE, 0, 0x1100, "http server is not onlin")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_UNSUPPORT_URL, 0, 0x1101, "url is not support")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVLALID_URL, 0, 0x1102, "invalid url format")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_ENOUGH_MEMORY, 0, 0x1103, "no enough memory")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_REQUSET_TOO_BIG, 0, 0x1104, "request size is too big")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_AUTH_INFO, 0, 0x1105, "no auth info input")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_MSG_INPUT, 0, 0x1106, "request is empty")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_SQL_INPUT, 0, 0x1107, "no sql input")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_EXEC_USEDB, 0, 0x1108, "no need to execute use db cmd")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_SESSION_FULL, 0, 0x1109, "session list was full")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_GEN_TAOSD_TOKEN_ERR, 0, 0x110A, "generate taosd token error")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_MULTI_REQUEST, 0, 0x110B, "size of multi request is 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_CREATE_GZIP_FAILED, 0, 0x110C, "failed to create gzip")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_FINISH_GZIP_FAILED, 0, 0x110D, "failed to finish gzip")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_LOGIN_FAILED, 0, 0x110E, "failed to login")
+
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_VERSION, 0, 0x1120, "invalid http version")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_CONTENT_LENGTH, 0, 0x1121, "invalid content length")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_AUTH_TYPE, 0, 0x1122, "invalid type of Authorization")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_AUTH_FORMAT, 0, 0x1123, "invalid format of Authorization")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_BASIC_AUTH, 0, 0x1124, "invalid basic Authorization")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_TAOSD_AUTH, 0, 0x1125, "invalid taosd Authorization")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_METHOD_FAILED, 0, 0x1126, "failed to parse method")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_TARGET_FAILED, 0, 0x1127, "failed to parse target")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_VERSION_FAILED, 0, 0x1128, "failed to parse http version")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_SP_FAILED, 0, 0x1129, "failed to parse sp")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_STATUS_FAILED, 0, 0x112A, "failed to parse status")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_PHRASE_FAILED, 0, 0x112B, "failed to parse phrase")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_CRLF_FAILED, 0, 0x112C, "failed to parse crlf")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_HEADER_FAILED, 0, 0x112D, "failed to parse header")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED, 0, 0x112E, "failed to parse header key")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_HEADER_VAL_FAILED, 0, 0x112F, "failed to parse header val")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED, 0, 0x1130, "failed to parse chunk size")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_CHUNK_FAILED, 0, 0x1131, "failed to parse chunk")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_END_FAILED, 0, 0x1132, "failed to parse end section")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_INVALID_STATE, 0, 0x1134, "invalid parse state")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_ERROR_STATE, 0, 0x1135, "failed to parse error section")
+
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_GC_QUERY_NULL, 0, 0x1150, "query size is 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_GC_QUERY_SIZE, 0, 0x1151, "query size can not more than 100")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_GC_REQ_PARSE_ERROR, 0, 0x1152, "parse grafana json error")
+
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_DB_NOT_INPUT, 0, 0x1160, "database name can not be null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_DB_TOO_LONG, 0, 0x1161, "database name too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_INVALID_JSON, 0, 0x1162, "invalid telegraf json fromat")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRICS_NULL, 0, 0x1163, "metrics size is 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRICS_SIZE, 0, 0x1164, "metrics size can not more than 1K")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRIC_NULL, 0, 0x1165, "metric name not find")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRIC_TYPE, 0, 0x1166, "metric name type should be string")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRIC_NAME_NULL, 0, 0x1167, "metric name length is 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRIC_NAME_LONG, 0, 0x1168, "metric name length too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TIMESTAMP_NULL, 0, 0x1169, "timestamp not find")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TIMESTAMP_TYPE, 0, 0x116A, "timestamp type should be integer")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TIMESTAMP_VAL_NULL, 0, 0x116B, "timestamp value smaller than 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAGS_NULL, 0, 0x116C, "tags not find")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAGS_SIZE_0, 0, 0x116D, "tags size is 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAGS_SIZE_LONG, 0, 0x116E, "tags size too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_NULL, 0, 0x116F, "tag is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_NAME_NULL, 0, 0x1170, "tag name is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_NAME_SIZE, 0, 0x1171, "tag name length too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_VALUE_TYPE, 0, 0x1172, "tag value type should be number or string")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_VALUE_NULL, 0, 0x1173, "tag value is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TABLE_NULL, 0, 0x1174, "table is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TABLE_SIZE, 0, 0x1175, "table name length too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELDS_NULL, 0, 0x1176, "fields not find")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELDS_SIZE_0, 0, 0x1177, "fields size is 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELDS_SIZE_LONG, 0, 0x1178, "fields size too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_NULL, 0, 0x1179, "field is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_NAME_NULL, 0, 0x117A, "field name is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_NAME_SIZE, 0, 0x117B, "field name length too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_VALUE_TYPE, 0, 0x117C, "field value type should be number or string")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_VALUE_NULL, 0, 0x117D, "field value is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_HOST_NOT_STRING, 0, 0x117E, "host type should be string")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_STABLE_NOT_EXIST, 0, 0x117F, "stable not exist")
+
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_DB_NOT_INPUT, 0, 0x1190, "database name can not be null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_DB_TOO_LONG, 0, 0x1191, "database name too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_INVALID_JSON, 0, 0x1192, "invalid opentsdb json fromat")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRICS_NULL, 0, 0x1193, "metrics size is 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRICS_SIZE, 0, 0x1194, "metrics size can not more than 10K")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRIC_NULL, 0, 0x1195, "metric name not find")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRIC_TYPE, 0, 0x1196, "metric name type should be string")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRIC_NAME_NULL, 0, 0x1197, "metric name length is 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRIC_NAME_LONG, 0, 0x1198, "metric name length can not more than 22")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TIMESTAMP_NULL, 0, 0x1199, "timestamp not find")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TIMESTAMP_TYPE, 0, 0x119A, "timestamp type should be integer")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TIMESTAMP_VAL_NULL, 0, 0x119B, "timestamp value smaller than 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAGS_NULL, 0, 0x119C, "tags not find")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAGS_SIZE_0, 0, 0x119D, "tags size is 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAGS_SIZE_LONG, 0, 0x119E, "tags size too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_NULL, 0, 0x119F, "tag is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_NAME_NULL, 0, 0x11A0, "tag name is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_NAME_SIZE, 0, 0x11A1, "tag name length too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_VALUE_TYPE, 0, 0x11A2, "tag value type should be boolean, number or string")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_VALUE_NULL, 0, 0x11A3, "tag value is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_VALUE_TOO_LONG, 0, 0x11A4, "tag value can not more than 64")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_VALUE_NULL, 0, 0x11A5, "value not find")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_VALUE_TYPE, 0, 0x11A6, "value type should be boolean, number or string")
#ifdef TAOS_ERROR_C
};
diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h
index e2df886320..50b31a86cc 100644
--- a/src/inc/taosmsg.h
+++ b/src/inc/taosmsg.h
@@ -460,11 +460,7 @@ typedef struct {
int16_t order;
int16_t orderColId;
int16_t numOfCols; // the number of columns will be load from vnode
- int64_t intervalTime; // time interval for aggregation, in million second
- int64_t intervalOffset; // start offset for interval query
- int64_t slidingTime; // value for sliding window
- char intervalTimeUnit;
- char slidingTimeUnit; // time interval type, for revisement of interval(1d)
+ SInterval interval;
uint16_t tagCondLen; // tag length in current query
int16_t numOfGroupCols; // num of group by columns
int16_t orderByIdx;
@@ -677,6 +673,7 @@ typedef struct {
typedef struct STableMetaMsg {
int32_t contLen;
char tableId[TSDB_TABLE_FNAME_LEN]; // table id
+ char sTableId[TSDB_TABLE_FNAME_LEN];
uint8_t numOfTags;
uint8_t precision;
uint8_t tableType;
diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h
index c5831a9b8a..a94cdaad15 100644
--- a/src/inc/ttokendef.h
+++ b/src/inc/ttokendef.h
@@ -16,6 +16,7 @@
#ifndef TDENGINE_TTOKENDEF_H
#define TDENGINE_TTOKENDEF_H
+
#define TK_ID 1
#define TK_BOOL 2
#define TK_TINYINT 3
@@ -75,24 +76,24 @@
#define TK_VNODES 57
#define TK_IPTOKEN 58
#define TK_DOT 59
-#define TK_TABLES 60
-#define TK_STABLES 61
-#define TK_VGROUPS 62
-#define TK_DROP 63
-#define TK_TABLE 64
-#define TK_DATABASE 65
-#define TK_DNODE 66
-#define TK_USER 67
-#define TK_ACCOUNT 68
-#define TK_USE 69
-#define TK_DESCRIBE 70
-#define TK_ALTER 71
-#define TK_PASS 72
-#define TK_PRIVILEGE 73
-#define TK_LOCAL 74
-#define TK_IF 75
-#define TK_EXISTS 76
-#define TK_CREATE 77
+#define TK_CREATE 60
+#define TK_TABLE 61
+#define TK_DATABASE 62
+#define TK_TABLES 63
+#define TK_STABLES 64
+#define TK_VGROUPS 65
+#define TK_DROP 66
+#define TK_DNODE 67
+#define TK_USER 68
+#define TK_ACCOUNT 69
+#define TK_USE 70
+#define TK_DESCRIBE 71
+#define TK_ALTER 72
+#define TK_PASS 73
+#define TK_PRIVILEGE 74
+#define TK_LOCAL 75
+#define TK_IF 76
+#define TK_EXISTS 77
#define TK_PPS 78
#define TK_TSERIES 79
#define TK_DBS 80
@@ -222,7 +223,6 @@
#define TK_INTO 204
#define TK_VALUES 205
-
#define TK_SPACE 300
#define TK_COMMENT 301
#define TK_ILLEGAL 302
diff --git a/src/inc/twal.h b/src/inc/twal.h
index 4fdb7aa275..cf570aefdc 100644
--- a/src/inc/twal.h
+++ b/src/inc/twal.h
@@ -44,6 +44,7 @@ typedef void* twalh; // WAL HANDLE
typedef int (*FWalWrite)(void *ahandle, void *pHead, int type);
twalh walOpen(const char *path, const SWalCfg *pCfg);
+int walAlter(twalh pWal, const SWalCfg *pCfg);
void walClose(twalh);
int walRenew(twalh);
int walWrite(twalh, SWalHead *);
diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c
index 277dc45f8e..d5e826fbaa 100644
--- a/src/kit/shell/src/shellEngine.c
+++ b/src/kit/shell/src/shellEngine.c
@@ -419,16 +419,16 @@ static void dumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32_
char buf[TSDB_MAX_BYTES_PER_ROW];
switch (field->type) {
case TSDB_DATA_TYPE_BOOL:
- fprintf(fp, "%d", ((((int)(*((char *)val))) == 1) ? 1 : 0));
+ fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 1 : 0));
break;
case TSDB_DATA_TYPE_TINYINT:
- fprintf(fp, "%d", (int)(*((char *)val)));
+ fprintf(fp, "%d", *((int8_t *)val));
break;
case TSDB_DATA_TYPE_SMALLINT:
- fprintf(fp, "%d", (int)(*((short *)val)));
+ fprintf(fp, "%d", *((int16_t *)val));
break;
case TSDB_DATA_TYPE_INT:
- fprintf(fp, "%d", *((int *)val));
+ fprintf(fp, "%d", *((int32_t *)val));
break;
case TSDB_DATA_TYPE_BIGINT:
fprintf(fp, "%" PRId64, *((int64_t *)val));
@@ -559,16 +559,16 @@ static void printField(const char* val, TAOS_FIELD* field, int width, int32_t le
char buf[TSDB_MAX_BYTES_PER_ROW];
switch (field->type) {
case TSDB_DATA_TYPE_BOOL:
- printf("%*s", width, ((((int)(*((char *)val))) == 1) ? "true" : "false"));
+ printf("%*s", width, ((((int32_t)(*((char *)val))) == 1) ? "true" : "false"));
break;
case TSDB_DATA_TYPE_TINYINT:
- printf("%*d", width, (int)(*((char *)val)));
+ printf("%*d", width, *((int8_t *)val));
break;
case TSDB_DATA_TYPE_SMALLINT:
- printf("%*d", width, (int)(*((short *)val)));
+ printf("%*d", width, *((int16_t *)val));
break;
case TSDB_DATA_TYPE_INT:
- printf("%*d", width, *((int *)val));
+ printf("%*d", width, *((int32_t *)val));
break;
case TSDB_DATA_TYPE_BIGINT:
printf("%*" PRId64, width, *((int64_t *)val));
@@ -765,7 +765,9 @@ void read_history() {
FILE *f = fopen(f_history, "r");
if (f == NULL) {
#ifndef WINDOWS
- fprintf(stderr, "Failed to open file %s\n", f_history);
+ if (errno != ENOENT) {
+ fprintf(stderr, "Failed to open file %s, reason:%s\n", f_history, strerror(errno));
+ }
#endif
return;
}
@@ -792,7 +794,7 @@ void write_history() {
FILE *f = fopen(f_history, "w");
if (f == NULL) {
#ifndef WINDOWS
- fprintf(stderr, "Failed to open file %s for write\n", f_history);
+ fprintf(stderr, "Failed to open file %s for write, reason:%s\n", f_history, strerror(errno));
#endif
return;
}
diff --git a/src/mnode/inc/mnodeDef.h b/src/mnode/inc/mnodeDef.h
index 682986b29f..8a2947dd18 100644
--- a/src/mnode/inc/mnodeDef.h
+++ b/src/mnode/inc/mnodeDef.h
@@ -69,7 +69,8 @@ typedef struct SDnodeObj {
int16_t cpuAvgUsage; // calc from sys.cpu
int16_t memoryAvgUsage; // calc from sys.mem
int16_t bandwidthUsage; // calc from sys.band
- int8_t reserved2[2];
+ int8_t offlineReason;
+ int8_t reserved2[1];
} SDnodeObj;
typedef struct SMnodeObj {
diff --git a/src/mnode/inc/mnodeDnode.h b/src/mnode/inc/mnodeDnode.h
index 13b6ec4411..b6ddb7a9bf 100644
--- a/src/mnode/inc/mnodeDnode.h
+++ b/src/mnode/inc/mnodeDnode.h
@@ -33,6 +33,28 @@ typedef enum {
TAOS_DN_ALTERNATIVE_ROLE_VNODE
} EDnodeAlternativeRole;
+typedef enum EDnodeOfflineReason {
+ TAOS_DN_OFF_ONLINE = 0,
+ TAOS_DN_OFF_STATUS_MSG_TIMEOUT,
+ TAOS_DN_OFF_STATUS_NOT_RECEIVED,
+ TAOS_DN_OFF_RESET_BY_MNODE,
+ TAOS_DN_OFF_VERSION_NOT_MATCH,
+ TAOS_DN_OFF_DNODE_ID_NOT_MATCH,
+ TAOS_DN_OFF_CLUSTER_ID_NOT_MATCH,
+ TAOS_DN_OFF_NUM_OF_MNODES_NOT_MATCH,
+ TAOS_DN_OFF_ENABLE_BALANCE_NOT_MATCH,
+ TAOS_DN_OFF_MN_EQUAL_VN_NOT_MATCH,
+ TAOS_DN_OFF_OFFLINE_THRESHOLD_NOT_MATCH,
+ TAOS_DN_OFF_STATUS_INTERVAL_NOT_MATCH,
+ TAOS_DN_OFF_MAX_TAB_PER_VN_NOT_MATCH,
+ TAOS_DN_OFF_MAX_VG_PER_DB_NOT_MATCH,
+ TAOS_DN_OFF_ARBITRATOR_NOT_MATCH,
+ TAOS_DN_OFF_TIME_ZONE_NOT_MATCH,
+ TAOS_DN_OFF_LOCALE_NOT_MATCH,
+ TAOS_DN_OFF_CHARSET_NOT_MATCH,
+ TAOS_DN_OFF_OTHERS
+} EDnodeOfflineReason;
+
int32_t mnodeInitDnodes();
void mnodeCleanupDnodes();
diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c
index 54c049d242..a06152080e 100644
--- a/src/mnode/src/mnodeDb.c
+++ b/src/mnode/src/mnodeDb.c
@@ -760,6 +760,8 @@ static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void
}
pShow->numOfReads += numOfRows;
+ mnodeVacuumResult(data, cols, numOfRows, rows, pShow);
+
mnodeDecUserRef(pUser);
return numOfRows;
}
@@ -910,13 +912,13 @@ static SDbCfg mnodeGetAlterDbOption(SDbObj *pDb, SCMAlterDbMsg *pAlter) {
}
if (walLevel > 0 && walLevel != pDb->cfg.walLevel) {
- mError("db:%s, can't alter walLevel option", pDb->name);
- terrno = TSDB_CODE_MND_INVALID_DB_OPTION;
+ mDebug("db:%s, walLevel:%d change to %d", pDb->name, pDb->cfg.walLevel, walLevel);
+ newCfg.walLevel = walLevel;
}
if (fsyncPeriod >= 0 && fsyncPeriod != pDb->cfg.fsyncPeriod) {
- mError("db:%s, can't alter fsyncPeriod option", pDb->name);
- terrno = TSDB_CODE_MND_INVALID_DB_OPTION;
+ mDebug("db:%s, fsyncPeriod:%d change to %d", pDb->name, pDb->cfg.fsyncPeriod, fsyncPeriod);
+ newCfg.fsyncPeriod = fsyncPeriod;
}
if (replications > 0 && replications != pDb->cfg.replications) {
diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c
index ac8730b0cc..61c1d4113f 100644
--- a/src/mnode/src/mnodeDnode.c
+++ b/src/mnode/src/mnodeDnode.c
@@ -60,6 +60,28 @@ static int32_t mnodeGetDnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
static int32_t mnodeRetrieveDnodes(SShowObj *pShow, char *data, int32_t rows, void *pConn);
static char* mnodeGetDnodeAlternativeRoleStr(int32_t alternativeRole);
+static char* offlineReason[] = {
+ "",
+ "status msg timeout",
+ "status not received",
+ "status reset by mnode",
+ "version not match",
+ "dnodeId not match",
+ "clusterId not match",
+ "numOfMnodes not match",
+ "balance not match",
+ "mnEqualVn not match",
+ "offThreshold not match",
+ "interval not match",
+ "maxTabPerVn not match",
+ "maxVgPerDb not match",
+ "arbitrator not match",
+ "timezone not match",
+ "locale not match",
+ "charset not match",
+ "unknown",
+};
+
static int32_t mnodeDnodeActionDestroy(SSdbOper *pOper) {
taosTFree(pOper->pObj);
return TSDB_CODE_SUCCESS;
@@ -70,6 +92,7 @@ static int32_t mnodeDnodeActionInsert(SSdbOper *pOper) {
if (pDnode->status != TAOS_DN_STATUS_DROPPING) {
pDnode->status = TAOS_DN_STATUS_OFFLINE;
pDnode->lastAccess = tsAccessSquence;
+ pDnode->offlineReason = TAOS_DN_OFF_STATUS_NOT_RECEIVED;
}
mInfo("dnode:%d, fqdn:%s ep:%s port:%d, do insert action", pDnode->dnodeId, pDnode->dnodeFqdn, pDnode->dnodeEp, pDnode->dnodePort);
@@ -334,74 +357,85 @@ static void mnodeProcessCfgDnodeMsgRsp(SRpcMsg *rpcMsg) {
mInfo("cfg dnode rsp is received");
}
-static bool mnodeCheckClusterCfgPara(const SClusterCfg *clusterCfg) {
+static int32_t mnodeCheckClusterCfgPara(const SClusterCfg *clusterCfg) {
if (clusterCfg->numOfMnodes != htonl(tsNumOfMnodes)) {
mError("\"numOfMnodes\"[%d - %d] cfg parameters inconsistent", clusterCfg->numOfMnodes, htonl(tsNumOfMnodes));
- return false;
- }
- if (clusterCfg->enableBalance != htonl(tsEnableBalance)) {
+ return TAOS_DN_OFF_NUM_OF_MNODES_NOT_MATCH;
+ }
+ if (clusterCfg->enableBalance != htonl(tsEnableBalance)) {
mError("\"balance\"[%d - %d] cfg parameters inconsistent", clusterCfg->enableBalance, htonl(tsEnableBalance));
- return false;
+ return TAOS_DN_OFF_ENABLE_BALANCE_NOT_MATCH;
}
if (clusterCfg->mnodeEqualVnodeNum != htonl(tsMnodeEqualVnodeNum)) {
- mError("\"mnodeEqualVnodeNum\"[%d - %d] cfg parameters inconsistent", clusterCfg->mnodeEqualVnodeNum, htonl(tsMnodeEqualVnodeNum));
- return false;
+ mError("\"mnodeEqualVnodeNum\"[%d - %d] cfg parameters inconsistent", clusterCfg->mnodeEqualVnodeNum,
+ htonl(tsMnodeEqualVnodeNum));
+ return TAOS_DN_OFF_MN_EQUAL_VN_NOT_MATCH;
}
- if (clusterCfg->offlineThreshold != htonl(tsOfflineThreshold)) {
- mError("\"offlineThreshold\"[%d - %d] cfg parameters inconsistent", clusterCfg->offlineThreshold, htonl(tsOfflineThreshold));
- return false;
+ if (clusterCfg->offlineThreshold != htonl(tsOfflineThreshold)) {
+ mError("\"offlineThreshold\"[%d - %d] cfg parameters inconsistent", clusterCfg->offlineThreshold,
+ htonl(tsOfflineThreshold));
+ return TAOS_DN_OFF_OFFLINE_THRESHOLD_NOT_MATCH;
}
- if (clusterCfg->statusInterval != htonl(tsStatusInterval)) {
- mError("\"statusInterval\"[%d - %d] cfg parameters inconsistent", clusterCfg->statusInterval, htonl(tsStatusInterval));
- return false;
+ if (clusterCfg->statusInterval != htonl(tsStatusInterval)) {
+ mError("\"statusInterval\"[%d - %d] cfg parameters inconsistent", clusterCfg->statusInterval,
+ htonl(tsStatusInterval));
+ return TAOS_DN_OFF_STATUS_INTERVAL_NOT_MATCH;
}
- if (clusterCfg->maxtablesPerVnode != htonl(tsMaxTablePerVnode)) {
- mError("\"maxTablesPerVnode\"[%d - %d] cfg parameters inconsistent", clusterCfg->maxtablesPerVnode, htonl(tsMaxTablePerVnode));
- return false;
+ if (clusterCfg->maxtablesPerVnode != htonl(tsMaxTablePerVnode)) {
+ mError("\"maxTablesPerVnode\"[%d - %d] cfg parameters inconsistent", clusterCfg->maxtablesPerVnode,
+ htonl(tsMaxTablePerVnode));
+ return TAOS_DN_OFF_MAX_TAB_PER_VN_NOT_MATCH;
}
- if (clusterCfg->maxVgroupsPerDb != htonl(tsMaxVgroupsPerDb)) {
- mError("\"maxVgroupsPerDb\"[%d - %d] cfg parameters inconsistent", clusterCfg->maxVgroupsPerDb, htonl(tsMaxVgroupsPerDb));
- return false;
+ if (clusterCfg->maxVgroupsPerDb != htonl(tsMaxVgroupsPerDb)) {
+ mError("\"maxVgroupsPerDb\"[%d - %d] cfg parameters inconsistent", clusterCfg->maxVgroupsPerDb,
+ htonl(tsMaxVgroupsPerDb));
+ return TAOS_DN_OFF_MAX_VG_PER_DB_NOT_MATCH;
}
if (0 != strncasecmp(clusterCfg->arbitrator, tsArbitrator, strlen(tsArbitrator))) {
mError("\"arbitrator\"[%s - %s] cfg parameters inconsistent", clusterCfg->arbitrator, tsArbitrator);
- return false;
+ return TAOS_DN_OFF_ARBITRATOR_NOT_MATCH;
}
int64_t checkTime = 0;
- char timestr[32] = "1970-01-01 00:00:00.00";
+ char timestr[32] = "1970-01-01 00:00:00.00";
(void)taosParseTime(timestr, &checkTime, strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0);
- if ((0 != strncasecmp(clusterCfg->timezone, tsTimezone, strlen(tsTimezone))) && (checkTime != clusterCfg->checkTime)) {
- mError("\"timezone\"[%s - %s] [%" PRId64 " - %" PRId64"] cfg parameters inconsistent", clusterCfg->timezone, tsTimezone, clusterCfg->checkTime, checkTime);
- return false;
+ if ((0 != strncasecmp(clusterCfg->timezone, tsTimezone, strlen(tsTimezone))) &&
+ (checkTime != clusterCfg->checkTime)) {
+ mError("\"timezone\"[%s - %s] [%" PRId64 " - %" PRId64 "] cfg parameters inconsistent", clusterCfg->timezone,
+ tsTimezone, clusterCfg->checkTime, checkTime);
+ return TAOS_DN_OFF_TIME_ZONE_NOT_MATCH;
}
if (0 != strncasecmp(clusterCfg->locale, tsLocale, strlen(tsLocale))) {
mError("\"locale\"[%s - %s] cfg parameters inconsistent", clusterCfg->locale, tsLocale);
- return false;
+ return TAOS_DN_OFF_LOCALE_NOT_MATCH;
}
if (0 != strncasecmp(clusterCfg->charset, tsCharset, strlen(tsCharset))) {
mError("\"charset\"[%s - %s] cfg parameters inconsistent.", clusterCfg->charset, tsCharset);
- return false;
+ return TAOS_DN_OFF_CHARSET_NOT_MATCH;
}
-
- return true;
+
+ return 0;
}
static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
+ SDnodeObj *pDnode = NULL;
SDMStatusMsg *pStatus = pMsg->rpcMsg.pCont;
pStatus->dnodeId = htonl(pStatus->dnodeId);
pStatus->moduleStatus = htonl(pStatus->moduleStatus);
pStatus->lastReboot = htonl(pStatus->lastReboot);
pStatus->numOfCores = htons(pStatus->numOfCores);
-
+
uint32_t version = htonl(pStatus->version);
if (version != tsVersion) {
- mError("status msg version:%d not equal with mnode:%d", version, tsVersion);
+ pDnode = mnodeGetDnodeByEp(pStatus->dnodeEp);
+ if (pDnode != NULL && pDnode->status != TAOS_DN_STATUS_READY) {
+ pDnode->offlineReason = TAOS_DN_OFF_VERSION_NOT_MATCH;
+ }
+ mError("dnode:%d, status msg version:%d not equal with cluster:%d", pStatus->dnodeId, version, tsVersion);
return TSDB_CODE_MND_INVALID_MSG_VERSION;
}
- SDnodeObj *pDnode = NULL;
if (pStatus->dnodeId == 0) {
pDnode = mnodeGetDnodeByEp(pStatus->dnodeEp);
if (pDnode == NULL) {
@@ -411,7 +445,11 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
} else {
pDnode = mnodeGetDnode(pStatus->dnodeId);
if (pDnode == NULL) {
- mError("dnode id:%d, %s not exist", pStatus->dnodeId, pStatus->dnodeEp);
+ pDnode = mnodeGetDnodeByEp(pStatus->dnodeEp);
+ if (pDnode != NULL && pDnode->status != TAOS_DN_STATUS_READY) {
+ pDnode->offlineReason = TAOS_DN_OFF_DNODE_ID_NOT_MATCH;
+ }
+ mError("dnode:%d, %s not exist", pStatus->dnodeId, pStatus->dnodeEp);
return TSDB_CODE_MND_DNODE_NOT_EXIST;
}
}
@@ -426,6 +464,9 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
mDebug("dnode:%d %s, first access, set clusterId %s", pDnode->dnodeId, pDnode->dnodeEp, mnodeGetClusterId());
} else {
if (strncmp(pStatus->clusterId, mnodeGetClusterId(), TSDB_CLUSTER_ID_LEN - 1) != 0) {
+ if (pDnode != NULL && pDnode->status != TAOS_DN_STATUS_READY) {
+ pDnode->offlineReason = TAOS_DN_OFF_CLUSTER_ID_NOT_MATCH;
+ }
mError("dnode:%d, input clusterId %s not match with exist %s", pDnode->dnodeId, pStatus->clusterId,
mnodeGetClusterId());
return TSDB_CODE_MND_INVALID_CLUSTER_ID;
@@ -469,16 +510,19 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
if (pDnode->status == TAOS_DN_STATUS_OFFLINE) {
// Verify whether the cluster parameters are consistent when status change from offline to ready
- bool ret = mnodeCheckClusterCfgPara(&(pStatus->clusterCfg));
- if (false == ret) {
+ int32_t ret = mnodeCheckClusterCfgPara(&(pStatus->clusterCfg));
+ if (0 != ret) {
+ pDnode->offlineReason = ret;
mnodeDecDnodeRef(pDnode);
rpcFreeCont(pRsp);
- mError("dnode:%d, %s cluster cfg parameters inconsistent", pDnode->dnodeId, pStatus->dnodeEp);
+ mError("dnode:%d, %s cluster cfg parameters inconsistent, reason:%s", pDnode->dnodeId, pStatus->dnodeEp,
+ offlineReason[ret]);
return TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT;
}
-
+
mDebug("dnode:%d, from offline to online", pDnode->dnodeId);
pDnode->status = TAOS_DN_STATUS_READY;
+ pDnode->offlineReason = TAOS_DN_OFF_ONLINE;
balanceSyncNotify();
balanceAsyncNotify();
}
@@ -529,6 +573,7 @@ static int32_t mnodeCreateDnode(char *ep, SMnodeMsg *pMsg) {
pDnode = (SDnodeObj *) calloc(1, sizeof(SDnodeObj));
pDnode->createdTime = taosGetTimestampMs();
pDnode->status = TAOS_DN_STATUS_OFFLINE;
+ pDnode->offlineReason = TAOS_DN_OFF_STATUS_NOT_RECEIVED;
tstrncpy(pDnode->dnodeEp, ep, TSDB_EP_LEN);
taosGetFqdnPortFromEp(ep, pDnode->dnodeFqdn, &pDnode->dnodePort);
@@ -654,13 +699,13 @@ static int32_t mnodeGetDnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
- pShow->bytes[cols] = 12 + VARSTR_HEADER_SIZE;
+ pShow->bytes[cols] = 10 + VARSTR_HEADER_SIZE;
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
strcpy(pSchema[cols].name, "status");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
- pShow->bytes[cols] = 6 + VARSTR_HEADER_SIZE;
+ pShow->bytes[cols] = 5 + VARSTR_HEADER_SIZE;
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
strcpy(pSchema[cols].name, "role");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
@@ -672,6 +717,12 @@ static int32_t mnodeGetDnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
+ pShow->bytes[cols] = 24 + VARSTR_HEADER_SIZE;
+ pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
+ strcpy(pSchema[cols].name, "offline reason");
+ pSchema[cols].bytes = htons(pShow->bytes[cols]);
+ cols++;
+
pMeta->numOfColumns = htons(cols);
pShow->numOfColumns = cols;
@@ -731,8 +782,11 @@ static int32_t mnodeRetrieveDnodes(SShowObj *pShow, char *data, int32_t rows, vo
*(int64_t *)pWrite = pDnode->createdTime;
cols++;
-
- numOfRows++;
+ pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+ STR_TO_VARSTR(pWrite, offlineReason[pDnode->offlineReason]);
+ cols++;
+
+ numOfRows++;
mnodeDecDnodeRef(pDnode);
}
diff --git a/src/mnode/src/mnodeInt.c b/src/mnode/src/mnodeInt.c
index 91c8dcb6e5..fb1b8741a9 100644
--- a/src/mnode/src/mnodeInt.c
+++ b/src/mnode/src/mnodeInt.c
@@ -39,6 +39,11 @@ void mnodeCreateMsg(SMnodeMsg *pMsg, SRpcMsg *rpcMsg) {
}
int32_t mnodeInitMsg(SMnodeMsg *pMsg) {
+ if (pMsg->pUser != NULL) {
+ mDebug("app:%p:%p, user info already inited", pMsg->rpcMsg.ahandle, pMsg);
+ return TSDB_CODE_SUCCESS;
+ }
+
pMsg->pUser = mnodeGetUserFromConn(pMsg->rpcMsg.handle);
if (pMsg->pUser == NULL) {
return TSDB_CODE_MND_INVALID_USER;
diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c
index 646c17b2b8..7654536122 100644
--- a/src/mnode/src/mnodeSdb.c
+++ b/src/mnode/src/mnodeSdb.c
@@ -91,7 +91,6 @@ typedef struct {
} SSdbWriteWorkerPool;
extern void * tsMnodeTmr;
-static void * tsUpdateSyncTmr;
static SSdbObject tsSdbObj = {0};
static taos_qset tsSdbWriteQset;
static taos_qall tsSdbWriteQall;
@@ -185,7 +184,11 @@ static int32_t sdbInitWal() {
}
sdbInfo("open sdb wal for restore");
- walRestore(tsSdbObj.wal, NULL, sdbWrite);
+ int code = walRestore(tsSdbObj.wal, NULL, sdbWrite);
+ if (code != TSDB_CODE_SUCCESS) {
+ sdbError("failed to open wal for restore, reason:%s", tstrerror(code));
+ return -1;
+ }
return 0;
}
@@ -294,16 +297,12 @@ static void sdbConfirmForward(void *ahandle, void *param, int32_t code) {
taosFreeQitem(pOper);
}
-static void sdbUpdateSyncTmrFp(void *param, void *tmrId) { sdbUpdateSync(); }
-
void sdbUpdateSync() {
if (!mnodeIsRunning()) {
mDebug("mnode not start yet, update sync info later");
- if (dnodeCheckMnodeStarting()) {
- taosTmrReset(sdbUpdateSyncTmrFp, 1000, NULL, tsMnodeTmr, &tsUpdateSyncTmr);
- }
return;
}
+
mDebug("update sync info in sdb");
SSyncCfg syncCfg = {0};
diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c
index f66ef6b7a3..e587758e46 100644
--- a/src/mnode/src/mnodeShow.c
+++ b/src/mnode/src/mnodeShow.c
@@ -65,7 +65,7 @@ int32_t mnodeInitShow() {
mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_CONNECT, mnodeProcessConnectMsg);
mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_USE_DB, mnodeProcessUseMsg);
- tsMnodeShowCache = taosCacheInit(TSDB_DATA_TYPE_BIGINT, 5, false, mnodeFreeShowObj, "show");
+ tsMnodeShowCache = taosCacheInit(TSDB_CACHE_PTR_KEY, 5, true, mnodeFreeShowObj, "show");
return 0;
}
@@ -378,8 +378,8 @@ static bool mnodeCheckShowFinished(SShowObj *pShow) {
}
static bool mnodeAccquireShowObj(SShowObj *pShow) {
- uint64_t handleVal = (uint64_t)pShow;
- SShowObj **ppShow = taosCacheAcquireByKey(tsMnodeShowCache, &handleVal, sizeof(int64_t));
+ TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE)pShow;
+ SShowObj **ppShow = taosCacheAcquireByKey(tsMnodeShowCache, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE));
if (ppShow) {
mDebug("%p, show is accquired from cache, data:%p, index:%d", pShow, ppShow, pShow->index);
return true;
@@ -389,10 +389,12 @@ static bool mnodeAccquireShowObj(SShowObj *pShow) {
}
static void* mnodePutShowObj(SShowObj *pShow) {
+ const int32_t DEFAULT_SHOWHANDLE_LIFE_SPAN = tsShellActivityTimer * 6 * 1000;
+
if (tsMnodeShowCache != NULL) {
pShow->index = atomic_add_fetch_32(&tsShowObjIndex, 1);
- uint64_t handleVal = (uint64_t)pShow;
- SShowObj **ppShow = taosCachePut(tsMnodeShowCache, &handleVal, sizeof(int64_t), &pShow, sizeof(int64_t), 6000);
+ TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE)pShow;
+ SShowObj **ppShow = taosCachePut(tsMnodeShowCache, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &pShow, sizeof(TSDB_CACHE_PTR_TYPE), DEFAULT_SHOWHANDLE_LIFE_SPAN);
pShow->ppShow = (void**)ppShow;
mDebug("%p, show is put into cache, data:%p index:%d", pShow, ppShow, pShow->index);
return pShow;
diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c
index 4400927e9b..1bc328800e 100644
--- a/src/mnode/src/mnodeTable.c
+++ b/src/mnode/src/mnodeTable.c
@@ -1384,6 +1384,9 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows,
}
pShow->numOfReads += numOfRows;
+ const int32_t NUM_OF_COLUMNS = 5;
+
+ mnodeVacuumResult(data, NUM_OF_COLUMNS, numOfRows, rows, pShow);
mnodeDecDbRef(pDb);
return numOfRows;
@@ -2090,8 +2093,11 @@ static int32_t mnodeDoGetChildTableMeta(SMnodeMsg *pMsg, STableMetaMsg *pMeta) {
pMeta->precision = pDb->cfg.precision;
pMeta->tableType = pTable->info.type;
tstrncpy(pMeta->tableId, pTable->info.tableId, TSDB_TABLE_FNAME_LEN);
+ if (pTable->superTable != NULL) {
+ tstrncpy(pMeta->sTableId, pTable->superTable->info.tableId, TSDB_TABLE_FNAME_LEN);
+ }
- if (pTable->info.type == TSDB_CHILD_TABLE) {
+ if (pTable->info.type == TSDB_CHILD_TABLE && pTable->superTable != NULL) {
pMeta->sversion = htons(pTable->superTable->sversion);
pMeta->tversion = htons(pTable->superTable->tversion);
pMeta->numOfTags = (int8_t)pTable->superTable->numOfTags;
@@ -2122,8 +2128,8 @@ static int32_t mnodeDoGetChildTableMeta(SMnodeMsg *pMsg, STableMetaMsg *pMeta) {
}
pMeta->vgroup.vgId = htonl(pMsg->pVgroup->vgId);
- mDebug("app:%p:%p, table:%s, uid:%" PRIu64 " table meta is retrieved", pMsg->rpcMsg.ahandle, pMsg,
- pTable->info.tableId, pTable->uid);
+ mDebug("app:%p:%p, table:%s, uid:%" PRIu64 " table meta is retrieved, vgId:%d sid:%d", pMsg->rpcMsg.ahandle, pMsg,
+ pTable->info.tableId, pTable->uid, pTable->vgId, pTable->sid);
return TSDB_CODE_SUCCESS;
}
diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c
index aa6631ff83..7dbf605405 100644
--- a/src/mnode/src/mnodeVgroup.c
+++ b/src/mnode/src/mnodeVgroup.c
@@ -434,7 +434,8 @@ int32_t mnodeGetAvailableVgroup(SMnodeMsg *pMsg, SVgObj **ppVgroup, int32_t *pSi
int maxVgroupsPerDb = tsMaxVgroupsPerDb;
if (maxVgroupsPerDb <= 0) {
maxVgroupsPerDb = mnodeGetOnlinDnodesCpuCoreNum();
- maxVgroupsPerDb = MAX(maxVgroupsPerDb, 2);
+ maxVgroupsPerDb = MAX(maxVgroupsPerDb, TSDB_MIN_VNODES_PER_DB);
+ maxVgroupsPerDb = MIN(maxVgroupsPerDb, TSDB_MAX_VNODES_PER_DB);
}
int32_t code = TSDB_CODE_MND_NO_ENOUGH_DNODES;
@@ -660,13 +661,13 @@ static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
for (int32_t i = 0; i < pShow->maxReplica; ++i) {
pShow->bytes[cols] = 2;
pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT;
- strcpy(pSchema[cols].name, "dnode");
+ snprintf(pSchema[cols].name, TSDB_COL_NAME_LEN, "dnode%d", i + 1);
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 9 + VARSTR_HEADER_SIZE;
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
- strcpy(pSchema[cols].name, "vstatus");
+ snprintf(pSchema[cols].name, TSDB_COL_NAME_LEN, "v%dstatus", i + 1);
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
}
diff --git a/src/os/inc/os.h b/src/os/inc/os.h
index d4b71173a0..86e16db8b1 100644
--- a/src/os/inc/os.h
+++ b/src/os/inc/os.h
@@ -24,10 +24,14 @@ extern "C" {
#include "osDarwin.h"
#endif
-#ifdef _TD_ARM_64_
+#ifdef _TD_ARM_64
#include "osArm64.h"
#endif
+#ifdef _TD_ARM_32
+#include "osArm32.h"
+#endif
+
#ifdef _TD_LINUX_64
#include "osLinux64.h"
#endif
@@ -40,7 +44,7 @@ extern "C" {
#include "osAlpine.h"
#endif
-#ifdef _TD_NINGSI_60_
+#ifdef _TD_NINGSI_60
#include "osNingsi.h"
#endif
diff --git a/src/os/inc/osArm32.h b/src/os/inc/osArm32.h
new file mode 100644
index 0000000000..17b4d2dbd5
--- /dev/null
+++ b/src/os/inc/osArm32.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TDENGINE_OS_ARM32_H
+#define TDENGINE_OS_ARM32_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define TAOS_OS_FUNC_LZ4
+ #define BUILDIN_CLZL(val) __builtin_clzll(val)
+ #define BUILDIN_CTZL(val) __builtin_ctzll(val)
+ #define BUILDIN_CLZ(val) __builtin_clz(val)
+ #define BUILDIN_CTZ(val) __builtin_ctz(val)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/os/inc/osDir.h b/src/os/inc/osDir.h
index 17683743e3..67cfdb3b53 100644
--- a/src/os/inc/osDir.h
+++ b/src/os/inc/osDir.h
@@ -20,11 +20,14 @@
extern "C" {
#endif
+#include
+
// TAOS_OS_FUNC_DIR
void taosRemoveDir(char *rootDir);
int taosMkDir(const char *pathname, mode_t mode);
void taosRename(char* oldName, char *newName);
void taosRemoveOldLogFiles(char *rootDir, int32_t keepDays);
+int32_t taosCompressFile(char *srcFileName, char *destFileName);
#ifdef __cplusplus
}
diff --git a/src/os/inc/osNingsi.h b/src/os/inc/osNingsi.h
index da7f796b59..d88d279745 100644
--- a/src/os/inc/osNingsi.h
+++ b/src/os/inc/osNingsi.h
@@ -129,8 +129,6 @@ void* atomic_exchange_ptr_impl( void **ptr, void *val );
#define atomic_fetch_xor_64(ptr, val) __sync_fetch_and_xor((ptr), (val))
#define atomic_fetch_xor_ptr(ptr, val) __sync_fetch_and_xor((ptr), (val))
-
-
#ifdef __cplusplus
}
#endif
diff --git a/src/os/inc/osTime.h b/src/os/inc/osTime.h
index 97432ca241..6b209219c6 100644
--- a/src/os/inc/osTime.h
+++ b/src/os/inc/osTime.h
@@ -30,8 +30,6 @@ extern "C" {
#define MILLISECOND_PER_HOUR (MILLISECOND_PER_MINUTE * 60)
#define MILLISECOND_PER_DAY (MILLISECOND_PER_HOUR * 24)
#define MILLISECOND_PER_WEEK (MILLISECOND_PER_DAY * 7)
-#define MILLISECOND_PER_MONTH (MILLISECOND_PER_DAY * 30)
-#define MILLISECOND_PER_YEAR (MILLISECOND_PER_DAY * 365)
//@return timestamp in second
int32_t taosGetTimestampSec();
@@ -63,8 +61,23 @@ static FORCE_INLINE int64_t taosGetTimestamp(int32_t precision) {
}
}
-int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts);
-int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit);
+
+typedef struct SInterval {
+ int32_t tz; // query client timezone
+ char intervalUnit;
+ char slidingUnit;
+ char offsetUnit;
+ int64_t interval;
+ int64_t sliding;
+ int64_t offset;
+} SInterval;
+
+int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);
+int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision);
+int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision);
+
+int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* ts);
+int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit);
int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth);
void deltaToUtcInitOnce();
diff --git a/src/os/inc/osWindows.h b/src/os/inc/osWindows.h
index d4f3d6d2af..dc1da35037 100644
--- a/src/os/inc/osWindows.h
+++ b/src/os/inc/osWindows.h
@@ -51,8 +51,6 @@
extern "C" {
#endif
-#define TAOS_OS_FUNC_ATOMIC
-
#define TAOS_OS_FUNC_LZ4
int32_t BUILDIN_CLZL(uint64_t val);
int32_t BUILDIN_CLZ(uint32_t val);
@@ -351,4 +349,4 @@ void wordfree(wordexp_t *pwordexp);
#ifdef __cplusplus
}
#endif
-#endif
\ No newline at end of file
+#endif
diff --git a/src/os/src/detail/CMakeLists.txt b/src/os/src/detail/CMakeLists.txt
index 9f710e3ddf..0d5c130d6e 100644
--- a/src/os/src/detail/CMakeLists.txt
+++ b/src/os/src/detail/CMakeLists.txt
@@ -2,9 +2,14 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(.)
+INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc)
AUX_SOURCE_DIRECTORY(. SRC)
SET_SOURCE_FILES_PROPERTIES(osSysinfo.c PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(osCoredump.c PROPERTIES COMPILE_FLAGS -w)
ADD_LIBRARY(osdetail ${SRC})
TARGET_LINK_LIBRARIES(osdetail os)
+
+IF (TD_ARM_32 OR TD_LINUX_32)
+ TARGET_LINK_LIBRARIES(osdetail atomic)
+ENDIF ()
diff --git a/src/os/src/detail/osDir.c b/src/os/src/detail/osDir.c
index 93651c78ef..d3f0fda1a5 100644
--- a/src/os/src/detail/osDir.c
+++ b/src/os/src/detail/osDir.c
@@ -17,6 +17,9 @@
#include "os.h"
#include "tglobal.h"
#include "tulog.h"
+#include "zlib.h"
+
+#define COMPRESS_STEP_SIZE 163840
void taosRemoveDir(char *rootDir) {
DIR *dir = opendir(rootDir);
@@ -73,11 +76,11 @@ void taosRemoveOldLogFiles(char *rootDir, int32_t keepDays) {
if (de->d_type & DT_DIR) {
continue;
} else {
- // struct stat fState;
- // if (stat(fname, &fState) < 0) {
- // continue;
- // }
int32_t len = (int32_t)strlen(filename);
+ if (len > 3 && strcmp(filename + len - 3, ".gz") == 0) {
+ len -= 3;
+ }
+
int64_t fileSec = 0;
for (int i = len - 1; i >= 0; i--) {
if (filename[i] == '.') {
@@ -100,3 +103,46 @@ void taosRemoveOldLogFiles(char *rootDir, int32_t keepDays) {
closedir(dir);
rmdir(rootDir);
}
+
+int32_t taosCompressFile(char *srcFileName, char *destFileName) {
+ int32_t ret = 0;
+ int32_t len = 0;
+ char * data = malloc(COMPRESS_STEP_SIZE);
+ FILE * srcFp = NULL;
+ gzFile dstFp = NULL;
+
+ srcFp = fopen(srcFileName, "r");
+ if (srcFp == NULL) {
+ ret = -1;
+ goto cmp_end;
+ }
+
+ int32_t fd = open(destFileName, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO);
+ if (fd < 0) {
+ ret = -2;
+ goto cmp_end;
+ }
+
+ dstFp = gzdopen(fd, "wb6f");
+ if (dstFp == NULL) {
+ ret = -3;
+ close(fd);
+ goto cmp_end;
+ }
+
+ while (!feof(srcFp)) {
+ len = (int32_t)fread(data, 1, COMPRESS_STEP_SIZE, srcFp);
+ (void)gzwrite(dstFp, data, len);
+ }
+
+cmp_end:
+ if (srcFp) {
+ fclose(srcFp);
+ }
+ if (dstFp) {
+ gzclose(dstFp);
+ }
+ free(data);
+
+ return ret;
+}
diff --git a/src/os/src/detail/osSysinfo.c b/src/os/src/detail/osSysinfo.c
index 83ecd85809..f6470fc3e1 100644
--- a/src/os/src/detail/osSysinfo.c
+++ b/src/os/src/detail/osSysinfo.c
@@ -569,7 +569,6 @@ int taosSystem(const char *cmd) {
}
}
-int _sysctl(struct __sysctl_args *args );
void taosSetCoreDump() {
if (0 == tsEnableCoreFile) {
return;
diff --git a/src/os/src/detail/osTime.c b/src/os/src/detail/osTime.c
index 9d8328a71b..b78627f46f 100644
--- a/src/os/src/detail/osTime.c
+++ b/src/os/src/detail/osTime.c
@@ -321,7 +321,7 @@ int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec) {
}
-static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* result) {
+static int32_t getDurationInUs(int64_t val, char unit, int64_t* result) {
*result = val;
int64_t factor = 1000L;
@@ -342,19 +342,12 @@ static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* resu
case 'w':
(*result) *= MILLISECOND_PER_WEEK*factor;
break;
- case 'n':
- (*result) *= MILLISECOND_PER_MONTH*factor;
- break;
- case 'y':
- (*result) *= MILLISECOND_PER_YEAR*factor;
- break;
case 'a':
(*result) *= factor;
break;
case 'u':
break;
default: {
- ;
return -1;
}
}
@@ -373,7 +366,7 @@ static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* resu
* n - Months (30 days)
* y - Years (365 days)
*/
-int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts) {
+int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* duration) {
errno = 0;
char* endPtr = NULL;
@@ -383,10 +376,16 @@ int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts) {
return -1;
}
- return getTimestampInUsFromStrImpl(timestamp, token[tokenlen - 1], ts);
+ /* natural month/year are not allowed in absolute duration */
+ char unit = token[tokenlen - 1];
+ if (unit == 'n' || unit == 'y') {
+ return -1;
+ }
+
+ return getDurationInUs(timestamp, unit, duration);
}
-int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit) {
+int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit) {
errno = 0;
/* get the basic numeric value */
@@ -400,7 +399,121 @@ int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, ch
return 0;
}
- return getTimestampInUsFromStrImpl(*duration, *unit, duration);
+ return getDurationInUs(*duration, *unit, duration);
+}
+
+int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) {
+ if (duration == 0) {
+ return t;
+ }
+ if (unit == 'y') {
+ duration *= 12;
+ } else if (unit != 'n') {
+ return t + duration;
+ }
+
+ struct tm tm;
+ time_t tt = (time_t)(t / TSDB_TICK_PER_SECOND(precision));
+ localtime_r(&tt, &tm);
+ int mon = tm.tm_year * 12 + tm.tm_mon + (int)duration;
+ tm.tm_year = mon / 12;
+ tm.tm_mon = mon % 12;
+
+ return (int64_t)(mktime(&tm) * TSDB_TICK_PER_SECOND(precision));
+}
+
+int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision) {
+ if (ekey < skey) {
+ int64_t tmp = ekey;
+ ekey = skey;
+ skey = tmp;
+ }
+ if (unit != 'n' && unit != 'y') {
+ return (int32_t)((ekey - skey) / interval);
+ }
+
+ skey /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
+ ekey /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
+
+ struct tm tm;
+ time_t t = (time_t)skey;
+ localtime_r(&t, &tm);
+ int smon = tm.tm_year * 12 + tm.tm_mon;
+
+ t = (time_t)ekey;
+ localtime_r(&t, &tm);
+ int emon = tm.tm_year * 12 + tm.tm_mon;
+
+ if (unit == 'y') {
+ interval *= 12;
+ }
+
+ return (emon - smon) / (int32_t)interval;
+}
+
+int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision) {
+ if (pInterval->sliding == 0) {
+ assert(pInterval->interval == 0);
+ return t;
+ }
+
+ int64_t start = t;
+ if (pInterval->slidingUnit == 'n' || pInterval->slidingUnit == 'y') {
+ start /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
+ struct tm tm;
+ time_t tt = (time_t)start;
+ localtime_r(&tt, &tm);
+ tm.tm_sec = 0;
+ tm.tm_min = 0;
+ tm.tm_hour = 0;
+ tm.tm_mday = 1;
+
+ if (pInterval->slidingUnit == 'y') {
+ tm.tm_mon = 0;
+ tm.tm_year = (int)(tm.tm_year / pInterval->sliding * pInterval->sliding);
+ } else {
+ int mon = tm.tm_year * 12 + tm.tm_mon;
+ mon = (int)(mon / pInterval->sliding * pInterval->sliding);
+ tm.tm_year = mon / 12;
+ tm.tm_mon = mon % 12;
+ }
+
+ start = (int64_t)(mktime(&tm) * TSDB_TICK_PER_SECOND(precision));
+ } else {
+ int64_t delta = t - pInterval->interval;
+ int32_t factor = (delta >= 0) ? 1 : -1;
+
+ start = (delta / pInterval->sliding + factor) * pInterval->sliding;
+
+ if (pInterval->intervalUnit == 'd' || pInterval->intervalUnit == 'w') {
+ /*
+ * here we revised the start time of day according to the local time zone,
+ * but in case of DST, the start time of one day need to be dynamically decided.
+ */
+ // todo refactor to extract function that is available for Linux/Windows/Mac platform
+ #if defined(WINDOWS) && _MSC_VER >= 1900
+ // see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
+ int64_t timezone = _timezone;
+ int32_t daylight = _daylight;
+ char** tzname = _tzname;
+ #endif
+
+ start += (int64_t)(timezone * TSDB_TICK_PER_SECOND(precision));
+ }
+
+ int64_t end = start + pInterval->interval - 1;
+ if (end < t) {
+ start += pInterval->sliding;
+ }
+ }
+
+ if (pInterval->offset > 0) {
+ start = taosTimeAdd(start, pInterval->offset, pInterval->offsetUnit, precision);
+ if (start > t) {
+ start = taosTimeAdd(start, -pInterval->interval, pInterval->intervalUnit, precision);
+ }
+ }
+ return start;
}
// internal function, when program is paused in debugger,
@@ -411,24 +524,38 @@ int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, ch
// 2020-07-03 17:48:42
// and the parameter can also be a variable.
const char* fmtts(int64_t ts) {
- static char buf[32];
+ static char buf[96];
+ size_t pos = 0;
+ struct tm tm;
- time_t tt;
if (ts > -62135625943 && ts < 32503651200) {
- tt = ts;
- } else if (ts > -62135625943000 && ts < 32503651200000) {
- tt = ts / 1000;
- } else {
- tt = ts / 1000000;
+ time_t t = (time_t)ts;
+ localtime_r(&t, &tm);
+ pos += strftime(buf + pos, sizeof(buf) - pos, "s=%Y-%m-%d %H:%M:%S", &tm);
}
- struct tm* ptm = localtime(&tt);
- size_t pos = strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", ptm);
+ if (ts > -62135625943000 && ts < 32503651200000) {
+ time_t t = (time_t)(ts / 1000);
+ localtime_r(&t, &tm);
+ if (pos > 0) {
+ buf[pos++] = ' ';
+ buf[pos++] = '|';
+ buf[pos++] = ' ';
+ }
+ pos += strftime(buf + pos, sizeof(buf) - pos, "ms=%Y-%m-%d %H:%M:%S", &tm);
+ pos += sprintf(buf + pos, ".%03d", (int)(ts % 1000));
+ }
- if (ts <= -62135625943000 || ts >= 32503651200000) {
- sprintf(buf + pos, ".%06d", (int)(ts % 1000000));
- } else if (ts <= -62135625943 || ts >= 32503651200) {
- sprintf(buf + pos, ".%03d", (int)(ts % 1000));
+ {
+ time_t t = (time_t)(ts / 1000000);
+ localtime_r(&t, &tm);
+ if (pos > 0) {
+ buf[pos++] = ' ';
+ buf[pos++] = '|';
+ buf[pos++] = ' ';
+ }
+ pos += strftime(buf + pos, sizeof(buf) - pos, "us=%Y-%m-%d %H:%M:%S", &tm);
+ pos += sprintf(buf + pos, ".%06d", (int)(ts % 1000000));
}
return buf;
diff --git a/src/os/src/linux/ningsi.c b/src/os/src/linux/ningsi.c
index 793ccac84a..2514c38ece 100644
--- a/src/os/src/linux/ningsi.c
+++ b/src/os/src/linux/ningsi.c
@@ -16,7 +16,7 @@
#define _DEFAULT_SOURCE
#include "os.h"
-#ifdef _TD_NINGSI_60_
+#ifdef _TD_NINGSI_60
void* atomic_exchange_ptr_impl(void** ptr, void* val ) {
void *old;
do {
diff --git a/src/plugins/http/inc/httpAuth.h b/src/plugins/http/inc/httpAuth.h
index b8fabbe1ec..2ce9725d4b 100644
--- a/src/plugins/http/inc/httpAuth.h
+++ b/src/plugins/http/inc/httpAuth.h
@@ -16,8 +16,8 @@
#ifndef TDENGINE_HTTP_TOKEN_H
#define TDENGINE_HTTP_TOKEN_H
-bool httpParseBasicAuthToken(HttpContext *pContext, char *token, int len);
-bool httpParseTaosdAuthToken(HttpContext *pContext, char *token, int len);
-bool httpGenTaosdAuthToken(HttpContext *pContext, char *token, int maxLen);
+int32_t httpParseBasicAuthToken(HttpContext *pContext, char *token, int32_t len);
+int32_t httpParseTaosdAuthToken(HttpContext *pContext, char *token, int32_t len);
+int32_t httpGenTaosdAuthToken(HttpContext *pContext, char *token, int32_t maxLen);
#endif
\ No newline at end of file
diff --git a/src/plugins/http/inc/httpCode.h b/src/plugins/http/inc/httpCode.h
deleted file mode 100644
index 08111260e9..0000000000
--- a/src/plugins/http/inc/httpCode.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#ifndef TDENGINE_HTTP_CODE_H
-#define TDENGINE_HTTP_CODE_H
-
-//for fixed msg info
-#define HTTP_SUCCESS 0
-#define HTTP_SERVER_OFFLINE 1
-#define HTTP_UNSUPPORT_URL 2
-#define HTTP_PARSE_HTTP_METHOD_ERROR 3
-#define HTTP_PARSE_HTTP_VERSION_ERROR 4
-#define HTTP_PARSE_HEAD_ERROR 5
-#define HTTP_REQUSET_TOO_BIG 6
-#define HTTP_PARSE_BODY_ERROR 7
-#define HTTP_PARSE_CHUNKED_BODY_ERROR 8
-#define HTTP_PARSE_URL_ERROR 9
-#define HTTP_INVALID_AUTH_TOKEN 10
-#define HTTP_PARSE_USR_ERROR 11
-#define HTTP_NO_SQL_INPUT 12
-#define HTTP_SESSION_FULL 13
-#define HTTP_NO_ENOUGH_MEMORY 14
-#define HTTP_GEN_TAOSD_TOKEN_ERR 15
-#define HTTP_INVALID_DB_TABLE 16
-#define HTTP_NO_EXEC_USEDB 17
-#define HTTP_PARSE_GC_REQ_ERROR 18
-#define HTTP_INVALID_MULTI_REQUEST 19
-#define HTTP_NO_MSG_INPUT 20
-#define HTTP_NO_ENOUGH_SESSIONS 21
-
-//telegraf
-#define HTTP_TG_DB_NOT_INPUT 22
-#define HTTP_TG_DB_TOO_LONG 23
-#define HTTP_TG_INVALID_JSON 24
-#define HTTP_TG_METRICS_NULL 25
-#define HTTP_TG_METRICS_SIZE 26
-#define HTTP_TG_METRIC_NULL 27
-#define HTTP_TG_METRIC_TYPE 28
-#define HTTP_TG_METRIC_NAME_NULL 29
-#define HTTP_TG_METRIC_NAME_LONG 30
-#define HTTP_TG_TIMESTAMP_NULL 31
-#define HTTP_TG_TIMESTAMP_TYPE 32
-#define HTTP_TG_TIMESTAMP_VAL_NULL 33
-#define HTTP_TG_TAGS_NULL 34
-#define HTTP_TG_TAGS_SIZE_0 35
-#define HTTP_TG_TAGS_SIZE_LONG 36
-#define HTTP_TG_TAG_NULL 37
-#define HTTP_TG_TAG_NAME_NULL 38
-#define HTTP_TG_TAG_NAME_SIZE 39
-#define HTTP_TG_TAG_VALUE_TYPE 40
-#define HTTP_TG_TAG_VALUE_NULL 41
-#define HTTP_TG_TABLE_NULL 42
-#define HTTP_TG_TABLE_SIZE 43
-#define HTTP_TG_FIELDS_NULL 44
-#define HTTP_TG_FIELDS_SIZE_0 45
-#define HTTP_TG_FIELDS_SIZE_LONG 46
-#define HTTP_TG_FIELD_NULL 47
-#define HTTP_TG_FIELD_NAME_NULL 48
-#define HTTP_TG_FIELD_NAME_SIZE 49
-#define HTTP_TG_FIELD_VALUE_TYPE 50
-#define HTTP_TG_FIELD_VALUE_NULL 51
-#define HTTP_INVALID_BASIC_AUTH_TOKEN 52
-#define HTTP_INVALID_TAOSD_AUTH_TOKEN 53
-#define HTTP_TG_HOST_NOT_STRING 54
-
-//grafana
-#define HTTP_GC_QUERY_NULL 55
-#define HTTP_GC_QUERY_SIZE 56
-
-//opentsdb
-#define HTTP_OP_DB_NOT_INPUT 57
-#define HTTP_OP_DB_TOO_LONG 58
-#define HTTP_OP_INVALID_JSON 59
-#define HTTP_OP_METRICS_NULL 60
-#define HTTP_OP_METRICS_SIZE 61
-#define HTTP_OP_METRIC_NULL 62
-#define HTTP_OP_METRIC_TYPE 63
-#define HTTP_OP_METRIC_NAME_NULL 64
-#define HTTP_OP_METRIC_NAME_LONG 65
-#define HTTP_OP_TIMESTAMP_NULL 66
-#define HTTP_OP_TIMESTAMP_TYPE 67
-#define HTTP_OP_TIMESTAMP_VAL_NULL 68
-#define HTTP_OP_TAGS_NULL 69
-#define HTTP_OP_TAGS_SIZE_0 70
-#define HTTP_OP_TAGS_SIZE_LONG 71
-#define HTTP_OP_TAG_NULL 72
-#define HTTP_OP_TAG_NAME_NULL 73
-#define HTTP_OP_TAG_NAME_SIZE 74
-#define HTTP_OP_TAG_VALUE_TYPE 75
-#define HTTP_OP_TAG_VALUE_NULL 76
-#define HTTP_OP_TAG_VALUE_TOO_LONG 77
-#define HTTP_OP_VALUE_NULL 78
-#define HTTP_OP_VALUE_TYPE 79
-
-//tgf
-#define HTTP_TG_STABLE_NOT_EXIST 80
-
-extern char *httpMsg[];
-
-#endif
\ No newline at end of file
diff --git a/src/plugins/http/inc/httpContext.h b/src/plugins/http/inc/httpContext.h
index a2d50d6b7f..260858c5cc 100644
--- a/src/plugins/http/inc/httpContext.h
+++ b/src/plugins/http/inc/httpContext.h
@@ -25,7 +25,7 @@ const char *httpContextStateStr(HttpContextState state);
HttpContext *httpCreateContext(int32_t fd);
bool httpInitContext(HttpContext *pContext);
HttpContext *httpGetContext(void * pContext);
-void httpReleaseContext(HttpContext *pContext);
+void httpReleaseContext(HttpContext *pContext, bool clearRes);
void httpCloseContextByServer(HttpContext *pContext);
void httpCloseContextByApp(HttpContext *pContext);
void httpNotifyContextClose(HttpContext *pContext);
diff --git a/src/plugins/http/inc/gcHandle.h b/src/plugins/http/inc/httpGcHandle.h
similarity index 100%
rename from src/plugins/http/inc/gcHandle.h
rename to src/plugins/http/inc/httpGcHandle.h
diff --git a/src/plugins/http/inc/gcJson.h b/src/plugins/http/inc/httpGcJson.h
similarity index 96%
rename from src/plugins/http/inc/gcJson.h
rename to src/plugins/http/inc/httpGcJson.h
index 609bb9b95e..0ba860687d 100644
--- a/src/plugins/http/inc/gcJson.h
+++ b/src/plugins/http/inc/httpGcJson.h
@@ -24,7 +24,7 @@ void gcCleanQueryJson(HttpContext *pContext);
void gcStartQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result);
void gcStopQueryJson(HttpContext *pContext, HttpSqlCmd *cmd);
-bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int numOfRows);
+bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int32_t numOfRows);
void gcSendHeartBeatResp(HttpContext *pContext, HttpSqlCmd *cmd);
diff --git a/src/plugins/http/inc/httpGzip.h b/src/plugins/http/inc/httpGzip.h
new file mode 100644
index 0000000000..aeac79c975
--- /dev/null
+++ b/src/plugins/http/inc/httpGzip.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#ifndef HTTP_GZIP_H
+#define HTTP_GZIP_H
+
+#define EHTTP_GZIP_CHUNK_SIZE_DEFAULT (1024*16)
+
+typedef struct ehttp_gzip_s ehttp_gzip_t;
+
+typedef struct ehttp_gzip_callbacks_s ehttp_gzip_callbacks_t;
+typedef struct ehttp_gzip_conf_s ehttp_gzip_conf_t;
+
+struct ehttp_gzip_callbacks_s {
+ void (*on_data)(ehttp_gzip_t *gzip, void *arg, const char *buf, int32_t len);
+};
+
+struct ehttp_gzip_conf_s {
+ int32_t get_header:2; // 0: not fetching header info
+ int32_t chunk_size; // 0: fallback to default: EHTTP_GZIP_CHUNK_SIZE_DEFAULT
+};
+
+ehttp_gzip_t* ehttp_gzip_create_decompressor(ehttp_gzip_conf_t conf, ehttp_gzip_callbacks_t callbacks, void *arg);
+ehttp_gzip_t* ehttp_gzip_create_compressor(ehttp_gzip_conf_t conf, ehttp_gzip_callbacks_t callbacks, void *arg);
+void ehttp_gzip_destroy(ehttp_gzip_t *gzip);
+
+int32_t ehttp_gzip_write(ehttp_gzip_t *gzip, const char *buf, int32_t len);
+int32_t ehttp_gzip_finish(ehttp_gzip_t *gzip);
+
+#endif // HTTP_GZIP_H
+
diff --git a/src/plugins/http/inc/httpInt.h b/src/plugins/http/inc/httpInt.h
index ffd621be7a..36a022159f 100644
--- a/src/plugins/http/inc/httpInt.h
+++ b/src/plugins/http/inc/httpInt.h
@@ -24,56 +24,31 @@
#include "tutil.h"
#include "zlib.h"
#include "http.h"
-#include "httpCode.h"
#include "httpLog.h"
#include "httpJson.h"
+#include "httpParser.h"
#define HTTP_MAX_CMD_SIZE 1024
-#define HTTP_MAX_BUFFER_SIZE 1024*1024
-
+#define HTTP_MAX_BUFFER_SIZE 1024*1024*8
#define HTTP_LABEL_SIZE 8
#define HTTP_MAX_EVENTS 10
-#define HTTP_BUFFER_SIZE 1024*65 //65k
-#define HTTP_DECOMPRESS_BUF_SIZE 1024*64
-#define HTTP_STEP_SIZE 1024 //http message get process step by step
-#define HTTP_MAX_URL 5 //http url stack size
+#define HTTP_BUFFER_INIT 4096
+#define HTTP_BUFFER_SIZE 8388608
+#define HTTP_STEP_SIZE 4096 //http message get process step by step
#define HTTP_METHOD_SCANNER_SIZE 7 //http method fp size
#define HTTP_GC_TARGET_SIZE 512
-
-#define HTTP_VERSION_10 0
-#define HTTP_VERSION_11 1
-//#define HTTP_VERSION_12 2
-
-#define HTTP_UNCUNKED 0
-#define HTTP_CHUNKED 1
-
-#define HTTP_KEEPALIVE_NO_INPUT 0
-#define HTTP_KEEPALIVE_ENABLE 1
-#define HTTP_KEEPALIVE_DISABLE 2
-
-#define HTTP_REQTYPE_OTHERS 0
-#define HTTP_REQTYPE_LOGIN 1
-#define HTTP_REQTYPE_HEARTBEAT 2
-#define HTTP_REQTYPE_SINGLE_SQL 3
-#define HTTP_REQTYPE_MULTI_SQL 4
-
-#define HTTP_CHECK_BODY_ERROR -1
-#define HTTP_CHECK_BODY_CONTINUE 0
-#define HTTP_CHECK_BODY_SUCCESS 1
-
-#define HTTP_READ_DATA_SUCCESS 0
-#define HTTP_READ_DATA_FAILED 1
-
#define HTTP_WRITE_RETRY_TIMES 500
#define HTTP_WRITE_WAIT_TIME_MS 5
-#define HTTP_EXPIRED_TIME 60000
-#define HTTP_DELAY_CLOSE_TIME_MS 500
-
-#define HTTP_COMPRESS_IDENTITY 0
-#define HTTP_COMPRESS_GZIP 2
-
#define HTTP_SESSION_ID_LEN (TSDB_USER_LEN + TSDB_PASSWORD_LEN)
+typedef enum HttpReqType {
+ HTTP_REQTYPE_OTHERS = 0,
+ HTTP_REQTYPE_LOGIN = 1,
+ HTTP_REQTYPE_HEARTBEAT = 2,
+ HTTP_REQTYPE_SINGLE_SQL = 3,
+ HTTP_REQTYPE_MULTI_SQL = 4
+} HttpReqType;
+
typedef enum {
HTTP_SERVER_INIT,
HTTP_SERVER_RUNNING,
@@ -82,21 +57,12 @@ typedef enum {
} HttpServerStatus;
typedef enum {
- HTTP_CONTEXT_STATE_READY,
- HTTP_CONTEXT_STATE_HANDLING,
- HTTP_CONTEXT_STATE_DROPPING,
- HTTP_CONTEXT_STATE_CLOSED
+ HTTP_CONTEXT_STATE_READY,
+ HTTP_CONTEXT_STATE_HANDLING,
+ HTTP_CONTEXT_STATE_DROPPING,
+ HTTP_CONTEXT_STATE_CLOSED
} HttpContextState;
-struct HttpContext;
-struct HttpThread;
-
-typedef struct {
- char id[HTTP_SESSION_ID_LEN];
- int refCount;
- void *taos;
-} HttpSession;
-
typedef enum {
HTTP_CMD_TYPE_UN_SPECIFIED,
HTTP_CMD_TYPE_CREATE_DB,
@@ -108,6 +74,15 @@ typedef enum { HTTP_CMD_STATE_NOT_RUN_YET, HTTP_CMD_STATE_RUN_FINISHED } HttpSql
typedef enum { HTTP_CMD_RETURN_TYPE_WITH_RETURN, HTTP_CMD_RETURN_TYPE_NO_RETURN } HttpSqlCmdReturnType;
+struct HttpContext;
+struct HttpThread;
+
+typedef struct {
+ char id[HTTP_SESSION_ID_LEN];
+ int32_t refCount;
+ void * taos;
+} HttpSession;
+
typedef struct {
// used by single cmd
char *nativSql;
@@ -157,34 +132,17 @@ typedef struct {
void (*setNextCmdFp)(struct HttpContext *pContext, HttpSqlCmd *cmd, int code);
} HttpEncodeMethod;
-typedef struct {
- char *pos;
- int32_t len;
-} HttpBuf;
-
-typedef struct {
- char buffer[HTTP_BUFFER_SIZE];
- int bufsize;
- char *pLast;
- char *pCur;
- HttpBuf method;
- HttpBuf path[HTTP_MAX_URL]; // url: dbname/meter/query
- HttpBuf data; // body content
- HttpBuf token; // auth token
- HttpDecodeMethod *pMethod;
-} HttpParser;
+typedef enum {
+ EHTTP_CONTEXT_PROCESS_FAILED = 0x01,
+ EHTTP_CONTEXT_PARSER_FAILED = 0x02
+} EHTTP_CONTEXT_FAILED_CAUSE;
typedef struct HttpContext {
int32_t refCount;
- int fd;
+ int32_t fd;
uint32_t accessTimes;
uint32_t lastAccessTime;
int32_t state;
- uint8_t httpVersion;
- uint8_t httpChunked;
- uint8_t httpKeepAlive; // http1.0 and not keep-alive, close connection immediately
- uint8_t acceptEncoding;
- uint8_t contentEncoding;
uint8_t reqType;
uint8_t parsed;
char ipstr[22];
@@ -194,12 +152,12 @@ typedef struct HttpContext {
void * ppContext;
HttpSession *session;
z_stream gzipStream;
- HttpParser parser;
+ HttpParser *parser;
HttpSqlCmd singleCmd;
HttpSqlCmds *multiCmds;
JsonBuf * jsonBuf;
- void * timer;
- HttpEncodeMethod * encodeMethod;
+ HttpEncodeMethod *encodeMethod;
+ HttpDecodeMethod *decodeMethod;
struct HttpThread *pThread;
} HttpContext;
@@ -208,9 +166,9 @@ typedef struct HttpThread {
HttpContext * pHead;
pthread_mutex_t threadMutex;
bool stop;
- int pollFd;
- int numOfContexts;
- int threadId;
+ int32_t pollFd;
+ int32_t numOfContexts;
+ int32_t threadId;
char label[HTTP_LABEL_SIZE];
bool (*processData)(HttpContext *pContext);
} HttpThread;
@@ -219,9 +177,9 @@ typedef struct HttpServer {
char label[HTTP_LABEL_SIZE];
uint32_t serverIp;
uint16_t serverPort;
- int fd;
- int numOfThreads;
- int methodScannerLen;
+ int32_t fd;
+ int32_t numOfThreads;
+ int32_t methodScannerLen;
int32_t requestNum;
int32_t status;
pthread_t thread;
diff --git a/src/plugins/http/inc/httpJson.h b/src/plugins/http/inc/httpJson.h
index 905460c67b..fcb74253b9 100644
--- a/src/plugins/http/inc/httpJson.h
+++ b/src/plugins/http/inc/httpJson.h
@@ -19,7 +19,7 @@
#include
#include
-#define JSON_BUFFER_SIZE 10240
+#define JSON_BUFFER_SIZE 16384
struct HttpContext;
enum { JsonNumber, JsonString, JsonBoolean, JsonArray, JsonObject, JsonNull };
@@ -37,65 +37,65 @@ extern char JsonTrueTkn[];
extern char JsonFalseTkn[];
typedef struct {
- int size;
- int total;
- char* lst;
- char buf[JSON_BUFFER_SIZE];
- struct HttpContext* pContext;
+ int32_t size;
+ int32_t total;
+ char* lst;
+ char buf[JSON_BUFFER_SIZE];
+ struct HttpContext* pContext;
} JsonBuf;
// http response
-int httpWriteBuf(struct HttpContext* pContext, const char* buf, int sz);
-int httpWriteBufNoTrace(struct HttpContext* pContext, const char* buf, int sz);
-int httpWriteBufByFd(struct HttpContext* pContext, const char* buf, int sz);
+int32_t httpWriteBuf(struct HttpContext* pContext, const char* buf, int32_t sz);
+int32_t httpWriteBufNoTrace(struct HttpContext* pContext, const char* buf, int32_t sz);
+int32_t httpWriteBufByFd(struct HttpContext* pContext, const char* buf, int32_t sz);
// builder callback
typedef void (*httpJsonBuilder)(JsonBuf* buf, void* jsnHandle);
// buffer
-void httpInitJsonBuf(JsonBuf* buf, struct HttpContext* pContext);
-void httpWriteJsonBufHead(JsonBuf* buf);
-int httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast);
-void httpWriteJsonBufEnd(JsonBuf* buf);
+void httpInitJsonBuf(JsonBuf* buf, struct HttpContext* pContext);
+void httpWriteJsonBufHead(JsonBuf* buf);
+int32_t httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast);
+void httpWriteJsonBufEnd(JsonBuf* buf);
// value
-void httpJsonString(JsonBuf* buf, char* sVal, int len);
-void httpJsonOriginString(JsonBuf* buf, char* sVal, int len);
-void httpJsonStringForTransMean(JsonBuf* buf, char* SVal, int maxLen);
+void httpJsonString(JsonBuf* buf, char* sVal, int32_t len);
+void httpJsonOriginString(JsonBuf* buf, char* sVal, int32_t len);
+void httpJsonStringForTransMean(JsonBuf* buf, char* SVal, int32_t maxLen);
void httpJsonInt64(JsonBuf* buf, int64_t num);
void httpJsonTimestamp(JsonBuf* buf, int64_t t, bool us);
void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, bool us);
-void httpJsonInt(JsonBuf* buf, int num);
+void httpJsonInt(JsonBuf* buf, int32_t num);
void httpJsonFloat(JsonBuf* buf, float num);
void httpJsonDouble(JsonBuf* buf, double num);
void httpJsonNull(JsonBuf* buf);
-void httpJsonBool(JsonBuf* buf, int val);
+void httpJsonBool(JsonBuf* buf, int32_t val);
// pair
-void httpJsonPair(JsonBuf* buf, char* name, int nameLen, char* sVal, int valLen);
-void httpJsonPairOriginString(JsonBuf* buf, char* name, int nameLen, char* sVal, int valLen);
-void httpJsonPairHead(JsonBuf* buf, char* name, int len);
-void httpJsonPairIntVal(JsonBuf* buf, char* name, int nNameLen, int num);
-void httpJsonPairInt64Val(JsonBuf* buf, char* name, int nNameLen, int64_t num);
-void httpJsonPairBoolVal(JsonBuf* buf, char* name, int nNameLen, int num);
-void httpJsonPairFloatVal(JsonBuf* buf, char* name, int nNameLen, float num);
-void httpJsonPairDoubleVal(JsonBuf* buf, char* name, int nNameLen, double num);
-void httpJsonPairNullVal(JsonBuf* buf, char* name, int nNameLen);
+void httpJsonPair(JsonBuf* buf, char* name, int32_t nameLen, char* sVal, int32_t valLen);
+void httpJsonPairOriginString(JsonBuf* buf, char* name, int32_t nameLen, char* sVal, int32_t valLen);
+void httpJsonPairHead(JsonBuf* buf, char* name, int32_t len);
+void httpJsonPairIntVal(JsonBuf* buf, char* name, int32_t nNameLen, int32_t num);
+void httpJsonPairInt64Val(JsonBuf* buf, char* name, int32_t nNameLen, int64_t num);
+void httpJsonPairBoolVal(JsonBuf* buf, char* name, int32_t nNameLen, int32_t num);
+void httpJsonPairFloatVal(JsonBuf* buf, char* name, int32_t nNameLen, float num);
+void httpJsonPairDoubleVal(JsonBuf* buf, char* name, int32_t nNameLen, double num);
+void httpJsonPairNullVal(JsonBuf* buf, char* name, int32_t nNameLen);
// object
-void httpJsonPairArray(JsonBuf* buf, char* name, int nLen, httpJsonBuilder builder, void* dsHandle);
-void httpJsonPairObject(JsonBuf* buf, char* name, int nLen, httpJsonBuilder builder, void* dsHandle);
+void httpJsonPairArray(JsonBuf* buf, char* name, int32_t nLen, httpJsonBuilder builder, void* dsHandle);
+void httpJsonPairObject(JsonBuf* buf, char* name, int32_t nLen, httpJsonBuilder builder, void* dsHandle);
void httpJsonObject(JsonBuf* buf, httpJsonBuilder fnBuilder, void* dsHandle);
void httpJsonArray(JsonBuf* buf, httpJsonBuilder fnBuidler, void* jsonHandle);
// print
-void httpJsonTestBuf(JsonBuf* buf, int safety);
+void httpJsonTestBuf(JsonBuf* buf, int32_t safety);
void httpJsonToken(JsonBuf* buf, char c);
void httpJsonItemToken(JsonBuf* buf);
-void httpJsonPrint(JsonBuf* buf, const char* json, int len);
+void httpJsonPrint(JsonBuf* buf, const char* json, int32_t len);
// quick
-void httpJsonPairStatus(JsonBuf* buf, int code);
+void httpJsonPairStatus(JsonBuf* buf, int32_t code);
// http json printer
JsonBuf* httpMallocJsonBuf(struct HttpContext* pContext);
diff --git a/src/plugins/http/inc/httpParser.h b/src/plugins/http/inc/httpParser.h
new file mode 100644
index 0000000000..85ba843716
--- /dev/null
+++ b/src/plugins/http/inc/httpParser.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#ifndef HTTP_PARSER_H
+#define HTTP_PARSER_H
+#include "httpGzip.h"
+
+#define HTTP_MAX_URL 5 // http url stack size
+
+typedef enum HTTP_PARSER_STATE {
+ HTTP_PARSER_BEGIN,
+ HTTP_PARSER_REQUEST_OR_RESPONSE,
+ HTTP_PARSER_METHOD,
+ HTTP_PARSER_TARGET,
+ HTTP_PARSER_HTTP_VERSION,
+ HTTP_PARSER_SP,
+ HTTP_PARSER_STATUS_CODE,
+ HTTP_PARSER_REASON_PHRASE,
+ HTTP_PARSER_CRLF,
+ HTTP_PARSER_HEADER,
+ HTTP_PARSER_HEADER_KEY,
+ HTTP_PARSER_HEADER_VAL,
+ HTTP_PARSER_CHUNK_SIZE,
+ HTTP_PARSER_CHUNK,
+ HTTP_PARSER_END,
+ HTTP_PARSER_ERROR,
+} HTTP_PARSER_STATE;
+
+typedef enum HTTP_AUTH_TYPE {
+ HTTP_INVALID_AUTH,
+ HTTP_BASIC_AUTH,
+ HTTP_TAOSD_AUTH
+} HTTP_AUTH_TYPE;
+
+typedef enum HTTP_VERSION {
+ HTTP_VERSION_10 = 0,
+ HTTP_VERSION_11 = 1,
+ HTTP_VERSION_12 = 2,
+ HTTP_INVALID_VERSION
+} HTTP_VERSION;
+
+typedef enum HTTP_KEEPALIVE {
+ HTTP_KEEPALIVE_NO_INPUT = 0,
+ HTTP_KEEPALIVE_ENABLE = 1,
+ HTTP_KEEPALIVE_DISABLE = 2
+} HTTP_KEEPALIVE;
+
+typedef struct HttpString {
+ char * str;
+ int32_t pos;
+ int32_t size;
+} HttpString;
+
+typedef struct HttpStatus {
+ int32_t code;
+ char * desc;
+} HttpStatus;
+
+typedef struct HttpStack{
+ int8_t *stacks;
+ int32_t pos;
+ int32_t size;
+} HttpStack;
+
+struct HttpContext;
+typedef struct HttpParser {
+ struct HttpContext *pContext;
+ ehttp_gzip_t *gzip;
+ HttpStack stacks;
+ HttpString str;
+ HttpString body;
+ HttpString path[HTTP_MAX_URL];
+ char * method;
+ char * target;
+ char * version;
+ char * reasonPhrase;
+ char * key;
+ char * val;
+ char * authContent;
+ int8_t httpVersion;
+ int8_t acceptEncodingGzip;
+ int8_t acceptEncodingChunked;
+ int8_t contentLengthSpecified;
+ int8_t contentChunked;
+ int8_t transferGzip;
+ int8_t transferChunked;
+ int8_t keepAlive;
+ int8_t authType;
+ int32_t contentLength;
+ int32_t chunkSize;
+ int32_t receivedChunkSize;
+ int32_t receivedSize;
+ int32_t statusCode;
+ int8_t inited;
+ int8_t parsed;
+ int16_t httpCode;
+ int32_t parseCode;
+} HttpParser;
+
+void httpInitParser(HttpParser *parser);
+HttpParser *httpCreateParser(struct HttpContext *pContext);
+void httpClearParser(HttpParser *parser);
+void httpDestroyParser(HttpParser *parser);
+int32_t httpParseBuf(HttpParser *parser, const char *buf, int32_t len);
+char * httpGetStatusDesc(int32_t statusCode);
+
+#endif
diff --git a/src/plugins/http/inc/httpResp.h b/src/plugins/http/inc/httpResp.h
index 5eaaa2a037..a528bcc39e 100644
--- a/src/plugins/http/inc/httpResp.h
+++ b/src/plugins/http/inc/httpResp.h
@@ -32,9 +32,7 @@ enum _httpRespTempl {
extern const char *httpRespTemplate[];
-void httpSendErrorResp(HttpContext *pContext, int errNo);
-void httpSendErrorRespWithDesc(HttpContext *pContext, int errNo, char *desc);
-void httpSendTaosdErrorResp(HttpContext *pContext, int errCode);
+void httpSendErrorResp(HttpContext *pContext, int32_t errNo);
void httpSendTaosdInvalidSqlErrorResp(HttpContext *pContext, char* errMsg);
void httpSendSuccResp(HttpContext *pContext, char *desc);
void httpSendOptionResp(HttpContext *pContext, char *desc);
diff --git a/src/plugins/http/inc/restHandle.h b/src/plugins/http/inc/httpRestHandle.h
similarity index 100%
rename from src/plugins/http/inc/restHandle.h
rename to src/plugins/http/inc/httpRestHandle.h
diff --git a/src/plugins/http/inc/restJson.h b/src/plugins/http/inc/httpRestJson.h
similarity index 91%
rename from src/plugins/http/inc/restJson.h
rename to src/plugins/http/inc/httpRestJson.h
index 7cff21d190..112e845f36 100644
--- a/src/plugins/http/inc/restJson.h
+++ b/src/plugins/http/inc/httpRestJson.h
@@ -43,12 +43,12 @@
#define REST_TIMESTAMP_FMT_TIMESTAMP 1
#define REST_TIMESTAMP_FMT_UTC_STRING 2
-void restBuildSqlAffectRowsJson(HttpContext *pContext, HttpSqlCmd *cmd, int affect_rows);
+void restBuildSqlAffectRowsJson(HttpContext *pContext, HttpSqlCmd *cmd, int32_t affect_rows);
void restStartSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result);
-bool restBuildSqlTimestampJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int numOfRows);
-bool restBuildSqlLocalTimeStringJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int numOfRows);
-bool restBuildSqlUtcTimeStringJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int numOfRows);
+bool restBuildSqlTimestampJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int32_t numOfRows);
+bool restBuildSqlLocalTimeStringJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int32_t numOfRows);
+bool restBuildSqlUtcTimeStringJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int32_t numOfRows);
void restStopSqlJson(HttpContext *pContext, HttpSqlCmd *cmd);
#endif
\ No newline at end of file
diff --git a/src/plugins/http/inc/httpServer.h b/src/plugins/http/inc/httpServer.h
index 508baa6112..58ed3545f3 100644
--- a/src/plugins/http/inc/httpServer.h
+++ b/src/plugins/http/inc/httpServer.h
@@ -21,8 +21,7 @@
bool httpInitConnect();
void httpCleanUpConnect();
-void *httpInitServer(char *ip, uint16_t port, char *label, int numOfThreads, void *fp, void *shandle);
-void httpCleanUpServer(HttpServer *pServer);
-int httpReadDataImp(HttpContext *pContext);
+void *httpInitServer(char *ip, uint16_t port, char *label, int32_t numOfThreads, void *fp, void *shandle);
+void httpCleanUpServer(HttpServer *pServer);
#endif
diff --git a/src/plugins/http/inc/httpSql.h b/src/plugins/http/inc/httpSql.h
index 09f5b142fb..db3e3a3b16 100644
--- a/src/plugins/http/inc/httpSql.h
+++ b/src/plugins/http/inc/httpSql.h
@@ -19,20 +19,20 @@
int32_t httpAddToSqlCmdBuffer(HttpContext *pContext, const char *const format, ...);
int32_t httpAddToSqlCmdBufferNoTerminal(HttpContext *pContext, const char *const format, ...);
-int32_t httpAddToSqlCmdBufferWithSize(HttpContext *pContext, int mallocSize);
+int32_t httpAddToSqlCmdBufferWithSize(HttpContext *pContext, int32_t mallocSize);
int32_t httpAddToSqlCmdBufferTerminal(HttpContext *pContext);
-bool httpMallocMultiCmds(HttpContext *pContext, int cmdSize, int bufferSize);
-bool httpReMallocMultiCmdsSize(HttpContext *pContext, int cmdSize);
-bool httpReMallocMultiCmdsBuffer(HttpContext *pContext, int bufferSize);
+bool httpMallocMultiCmds(HttpContext *pContext, int32_t cmdSize, int32_t bufferSize);
+bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize);
+bool httpReMallocMultiCmdsBuffer(HttpContext *pContext, int32_t bufferSize);
void httpFreeMultiCmds(HttpContext *pContext);
HttpSqlCmd *httpNewSqlCmd(HttpContext *pContext);
HttpSqlCmd *httpCurrSqlCmd(HttpContext *pContext);
-int httpCurSqlCmdPos(HttpContext *pContext);
+int32_t httpCurSqlCmdPos(HttpContext *pContext);
-void httpTrimTableName(char *name);
-int httpShrinkTableName(HttpContext *pContext, int pos, char *name);
-char *httpGetCmdsString(HttpContext *pContext, int pos);
+void httpTrimTableName(char *name);
+int32_t httpShrinkTableName(HttpContext *pContext, int32_t pos, char *name);
+char * httpGetCmdsString(HttpContext *pContext, int32_t pos);
#endif
diff --git a/src/plugins/http/inc/tgHandle.h b/src/plugins/http/inc/httpTgHandle.h
similarity index 100%
rename from src/plugins/http/inc/tgHandle.h
rename to src/plugins/http/inc/httpTgHandle.h
diff --git a/src/plugins/http/inc/tgJson.h b/src/plugins/http/inc/httpTgJson.h
similarity index 94%
rename from src/plugins/http/inc/tgJson.h
rename to src/plugins/http/inc/httpTgJson.h
index bf3aa093ae..6b7d0681b6 100644
--- a/src/plugins/http/inc/tgJson.h
+++ b/src/plugins/http/inc/httpTgJson.h
@@ -24,8 +24,8 @@ void tgInitQueryJson(HttpContext *pContext);
void tgCleanQueryJson(HttpContext *pContext);
void tgStartQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result);
void tgStopQueryJson(HttpContext *pContext, HttpSqlCmd *cmd);
-void tgBuildSqlAffectRowsJson(HttpContext *pContext, HttpSqlCmd *cmd, int affect_rows);
-bool tgCheckFinished(struct HttpContext *pContext, HttpSqlCmd *cmd, int code);
-void tgSetNextCmd(struct HttpContext *pContext, HttpSqlCmd *cmd, int code);
+void tgBuildSqlAffectRowsJson(HttpContext *pContext, HttpSqlCmd *cmd, int32_t affect_rows);
+bool tgCheckFinished(struct HttpContext *pContext, HttpSqlCmd *cmd, int32_t code);
+void tgSetNextCmd(struct HttpContext *pContext, HttpSqlCmd *cmd, int32_t code);
#endif
\ No newline at end of file
diff --git a/src/plugins/http/inc/httpUtil.h b/src/plugins/http/inc/httpUtil.h
index c82f702ebc..61cd50a77a 100644
--- a/src/plugins/http/inc/httpUtil.h
+++ b/src/plugins/http/inc/httpUtil.h
@@ -17,21 +17,19 @@
#define TDENGINE_HTTP_UTIL_H
bool httpCheckUsedbSql(char *sql);
-void httpTimeToString(time_t t, char *buf, int buflen);
+void httpTimeToString(time_t t, char *buf, int32_t buflen);
-bool httpUrlMatch(HttpContext *pContext, int pos, char *cmp);
-bool httpParseRequest(HttpContext *pContext);
-int httpCheckReadCompleted(HttpContext *pContext);
-void httpReadDirtyData(HttpContext *pContext);
+bool httpUrlMatch(HttpContext *pContext, int32_t pos, char *cmp);
+bool httpParseRequest(HttpContext *pContext);
+int32_t httpCheckReadCompleted(HttpContext *pContext);
+void httpReadDirtyData(HttpContext *pContext);
-int httpGzipDeCompress(char *srcData, int32_t nSrcData, char *destData, int32_t *nDestData);
-int httpGzipCompressInit(HttpContext *pContext);
-int httpGzipCompress(HttpContext *pContext, char *inSrcData, int32_t inSrcDataLen,
+int32_t httpGzipDeCompress(char *srcData, int32_t nSrcData, char *destData, int32_t *nDestData);
+int32_t httpGzipCompressInit(HttpContext *pContext);
+int32_t httpGzipCompress(HttpContext *pContext, char *inSrcData, int32_t inSrcDataLen,
char *outDestData, int32_t *outDestDataLen, bool isTheLast);
// http request parser
void httpAddMethod(HttpServer *pServer, HttpDecodeMethod *pMethod);
-
-
#endif
diff --git a/src/plugins/http/src/httpAuth.c b/src/plugins/http/src/httpAuth.c
index ea7024fad6..8beef7c042 100644
--- a/src/plugins/http/src/httpAuth.c
+++ b/src/plugins/http/src/httpAuth.c
@@ -23,85 +23,80 @@
#define KEY_DES_4 4971256377704625728L
-bool httpParseBasicAuthToken(HttpContext *pContext, char *token, int len) {
+int32_t httpParseBasicAuthToken(HttpContext *pContext, char *token, int32_t len) {
token[len] = '\0';
- int outlen = 0;
+ int32_t outlen = 0;
char *base64 = (char *)base64_decode(token, len, &outlen);
if (base64 == NULL || outlen == 0) {
- httpError("context:%p, fd:%d, ip:%s, basic token:%s parsed error", pContext, pContext->fd, pContext->ipstr, token);
+ httpError("context:%p, fd:%d, basic token:%s parsed error", pContext, pContext->fd, token);
free(base64);
- return false;
+ return -1;
}
char *user = strstr(base64, ":");
if (user == NULL) {
- httpError("context:%p, fd:%d, ip:%s, basic token:%s invalid format", pContext, pContext->fd, pContext->ipstr,
- token);
+ httpError("context:%p, fd:%d, basic token:%s invalid format", pContext, pContext->fd, token);
free(base64);
- return false;
+ return -1;
}
- int user_len = (int)(user - base64);
+ int32_t user_len = (int32_t)(user - base64);
if (user_len < 1 || user_len >= TSDB_USER_LEN) {
- httpError("context:%p, fd:%d, ip:%s, basic token:%s parse user error", pContext, pContext->fd, pContext->ipstr,
- token);
+ httpError("context:%p, fd:%d, basic token:%s parse user error", pContext, pContext->fd, token);
free(base64);
- return false;
+ return -1;
}
strncpy(pContext->user, base64, (size_t)user_len);
pContext->user[user_len] = 0;
char *password = user + 1;
- int pass_len = (int)((base64 + outlen) - password);
+ int32_t pass_len = (int32_t)((base64 + outlen) - password);
if (pass_len < 1 || pass_len >= TSDB_PASSWORD_LEN) {
- httpError("context:%p, fd:%d, ip:%s, basic token:%s parse password error", pContext, pContext->fd, pContext->ipstr,
- token);
+ httpError("context:%p, fd:%d, basic token:%s parse password error", pContext, pContext->fd, token);
free(base64);
- return false;
+ return -1;
}
strncpy(pContext->pass, password, (size_t)pass_len);
pContext->pass[pass_len] = 0;
free(base64);
- httpDebug("context:%p, fd:%d, ip:%s, basic token parsed success, user:%s", pContext, pContext->fd, pContext->ipstr,
- pContext->user);
- return true;
+ httpDebug("context:%p, fd:%d, basic token parsed success, user:%s", pContext, pContext->fd, pContext->user);
+ return 0;
}
-bool httpParseTaosdAuthToken(HttpContext *pContext, char *token, int len) {
+int32_t httpParseTaosdAuthToken(HttpContext *pContext, char *token, int32_t len) {
token[len] = '\0';
- int outlen = 0;
+ int32_t outlen = 0;
unsigned char *base64 = base64_decode(token, len, &outlen);
if (base64 == NULL || outlen == 0) {
- httpError("context:%p, fd:%d, ip:%s, taosd token:%s parsed error", pContext, pContext->fd, pContext->ipstr, token);
+ httpError("context:%p, fd:%d, taosd token:%s parsed error", pContext, pContext->fd, token);
if (base64) free(base64);
- return false;
+ return -1;
}
if (outlen != (TSDB_USER_LEN + TSDB_PASSWORD_LEN)) {
- httpError("context:%p, fd:%d, ip:%s, taosd token:%s length error", pContext, pContext->fd, pContext->ipstr, token);
+ httpError("context:%p, fd:%d, taosd token:%s length error", pContext, pContext->fd, token);
free(base64);
- return false;
+ return -1;
}
char *descrypt = taosDesDecode(KEY_DES_4, (char *)base64, outlen);
if (descrypt == NULL) {
- httpError("context:%p, fd:%d, ip:%s, taosd token:%s descrypt error", pContext, pContext->fd, pContext->ipstr,
- token);
+ httpError("context:%p, fd:%d, taosd token:%s descrypt error", pContext, pContext->fd, token);
free(base64);
- return false;
+ return -1;
} else {
tstrncpy(pContext->user, descrypt, sizeof(pContext->user));
tstrncpy(pContext->pass, descrypt + TSDB_USER_LEN, sizeof(pContext->pass));
- httpDebug("context:%p, fd:%d, ip:%s, taosd token:%s parsed success, user:%s", pContext, pContext->fd,
- pContext->ipstr, token, pContext->user);
+ httpDebug("context:%p, fd:%d, taosd token:%s parsed success, user:%s", pContext, pContext->fd, token,
+ pContext->user);
free(base64);
free(descrypt);
- return true;
+ return 0;
}
}
-bool httpGenTaosdAuthToken(HttpContext *pContext, char *token, int maxLen) {
+int32_t httpGenTaosdAuthToken(HttpContext *pContext, char *token, int32_t maxLen) {
char buffer[sizeof(pContext->user) + sizeof(pContext->pass)] = {0};
size_t size = sizeof(pContext->user);
tstrncpy(buffer, pContext->user, size);
@@ -116,7 +111,7 @@ bool httpGenTaosdAuthToken(HttpContext *pContext, char *token, int maxLen) {
free(encrypt);
free(base64);
- httpDebug("context:%p, fd:%d, ip:%s, gen taosd token:%s", pContext, pContext->fd, pContext->ipstr, token);
+ httpDebug("context:%p, fd:%d, generate taosd token:%s", pContext, pContext->fd, token);
- return true;
+ return 0;
}
diff --git a/src/plugins/http/src/httpCode.c b/src/plugins/http/src/httpCode.c
deleted file mode 100644
index 9ec07fd851..0000000000
--- a/src/plugins/http/src/httpCode.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define _DEFAULT_SOURCE
-
-
-char* httpMsg[] = {
- "success", // 0
- "http server is not online", // 1
- "http url is not support", // 2
- "http method parse error", // 3
- "http version should be 1.0, 1.1 or 1.2", // 4
- "http head parse error", // 5
- "request size is too big",
- "http body size invalid",
- "http chunked body parse error", // 8
- "http url parse error", // 9
- "invalid type of Authorization",
- "no auth info input",
- "no sql input",
- "session list was full",
- "no enough memory to alloc sqls",
- "generate taosd token error",
- "db and table can not be null",
- "no need to execute use db cmd",
- "parse grafana json error",
- "size of multi request is 0", // 19
- "request is empty", // 20
- "no enough connections for http", // 21
-
- // telegraf
- "database name can not be null", // 22
- "database name too long",
- "invalid telegraf json fromat",
- "metrics size is 0",
- "metrics size can not more than 1K", // 26
- "metric name not find",
- "metric name type should be string",
- "metric name length is 0",
- "metric name length too long",
- "timestamp not find", // 31
- "timestamp type should be integer",
- "timestamp value smaller than 0",
- "tags not find",
- "tags size is 0",
- "tags size too long", // 36
- "tag is null",
- "tag name is null",
- "tag name length too long", // 39
- "tag value type should be number or string",
- "tag value is null",
- "table is null", // 42
- "table name length too long",
- "fields not find", // 44
- "fields size is 0",
- "fields size too long",
- "field is null", // 47
- "field name is null",
- "field name length too long", // 49
- "field value type should be number or string",
- "field value is null", // 51
- "parse basic auth token error",
- "parse http auth token error",
- "host type should be string",
-
- // grafana
- "query size is 0", // 55
- "query size can not more than 100",
-
- // opentsdb
- "database name can not be null", // 57
- "database name too long",
- "invalid opentsdb json fromat", // 59
- "metrics size is 0",
- "metrics size can not more than 10K", // 61
- "metric name not find",
- "metric name type should be string",
- "metric name length is 0",
- "metric name length can not more than 22",
- "timestamp not find",
- "timestamp type should be integer",
- "timestamp value smaller than 0",
- "tags not find",
- "tags size is 0",
- "tags size too long", // 71
- "tag is null",
- "tag name is null",
- "tag name length too long", // 74
- "tag value type should be boolean, number or string",
- "tag value is null",
- "tag value can not more than 64", // 77
- "value not find",
- "value type should be boolean, number or string",
- "stable not exist",
-
-};
diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c
index 5ef3c9a66e..ec60b984b2 100644
--- a/src/plugins/http/src/httpContext.c
+++ b/src/plugins/http/src/httpContext.c
@@ -26,6 +26,10 @@
#include "httpResp.h"
#include "httpSql.h"
#include "httpSession.h"
+#include "httpContext.h"
+#include "httpParser.h"
+
+static void httpDestroyContext(void *data);
static void httpRemoveContextFromEpoll(HttpContext *pContext) {
HttpThread *pThread = pContext->pThread;
@@ -54,11 +58,16 @@ static void httpDestroyContext(void *data) {
httpFreeJsonBuf(pContext);
httpFreeMultiCmds(pContext);
+ if (pContext->parser) {
+ httpDestroyParser(pContext->parser);
+ pContext->parser = NULL;
+ }
+
taosTFree(pContext);
}
bool httpInitContexts() {
- tsHttpServer.contextCache = taosCacheInit(TSDB_DATA_TYPE_BIGINT, 2, true, httpDestroyContext, "restc");
+ tsHttpServer.contextCache = taosCacheInit(TSDB_CACHE_PTR_KEY, 2, true, httpDestroyContext, "restc");
if (tsHttpServer.contextCache == NULL) {
httpError("failed to init context cache");
return false;
@@ -104,12 +113,13 @@ HttpContext *httpCreateContext(int32_t fd) {
if (pContext == NULL) return NULL;
pContext->fd = fd;
- pContext->httpVersion = HTTP_VERSION_10;
pContext->lastAccessTime = taosGetTimestampSec();
pContext->state = HTTP_CONTEXT_STATE_READY;
+ pContext->parser = httpCreateParser(pContext);
- uint64_t handleVal = (uint64_t)pContext;
- HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &handleVal, sizeof(int64_t), &pContext, sizeof(int64_t), 3000);
+ TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE)pContext;
+ HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &pContext,
+ sizeof(TSDB_CACHE_PTR_TYPE), 3000);
pContext->ppContext = ppContext;
httpDebug("context:%p, fd:%d, is created, data:%p", pContext, fd, ppContext);
@@ -120,29 +130,33 @@ HttpContext *httpCreateContext(int32_t fd) {
}
HttpContext *httpGetContext(void *ptr) {
- uint64_t handleVal = (uint64_t)ptr;
- HttpContext **ppContext = taosCacheAcquireByKey(tsHttpServer.contextCache, &handleVal, sizeof(HttpContext *));
+ TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE)ptr;
+ HttpContext **ppContext = taosCacheAcquireByKey(tsHttpServer.contextCache, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE));
if (ppContext) {
HttpContext *pContext = *ppContext;
if (pContext) {
int32_t refCount = atomic_add_fetch_32(&pContext->refCount, 1);
- httpDebug("context:%p, fd:%d, is accquired, data:%p refCount:%d", pContext, pContext->fd, ppContext, refCount);
+ httpTrace("context:%p, fd:%d, is accquired, data:%p refCount:%d", pContext, pContext->fd, ppContext, refCount);
return pContext;
}
}
return NULL;
}
-void httpReleaseContext(HttpContext *pContext) {
+void httpReleaseContext(HttpContext *pContext, bool clearRes) {
int32_t refCount = atomic_sub_fetch_32(&pContext->refCount, 1);
if (refCount < 0) {
httpError("context:%p, is already released, refCount:%d", pContext, refCount);
return;
}
+ if (clearRes) {
+ httpClearParser(pContext->parser);
+ }
+
HttpContext **ppContext = pContext->ppContext;
- httpDebug("context:%p, is released, data:%p refCount:%d", pContext, ppContext, refCount);
+ httpTrace("context:%p, is released, data:%p refCount:%d", pContext, ppContext, refCount);
if (tsHttpServer.contextCache != NULL) {
taosCacheRelease(tsHttpServer.contextCache, (void **)(&ppContext), false);
@@ -155,79 +169,67 @@ void httpReleaseContext(HttpContext *pContext) {
bool httpInitContext(HttpContext *pContext) {
pContext->accessTimes++;
pContext->lastAccessTime = taosGetTimestampSec();
- pContext->httpVersion = HTTP_VERSION_10;
- pContext->httpKeepAlive = HTTP_KEEPALIVE_NO_INPUT;
- pContext->httpChunked = HTTP_UNCUNKED;
- pContext->acceptEncoding = HTTP_COMPRESS_IDENTITY;
- pContext->contentEncoding = HTTP_COMPRESS_IDENTITY;
+
pContext->reqType = HTTP_REQTYPE_OTHERS;
pContext->encodeMethod = NULL;
- pContext->timer = NULL;
memset(&pContext->singleCmd, 0, sizeof(HttpSqlCmd));
- HttpParser *pParser = &pContext->parser;
- memset(pParser, 0, sizeof(HttpParser));
- pParser->pCur = pParser->pLast = pParser->buffer;
- httpDebug("context:%p, fd:%d, ip:%s, accessTimes:%d, parsed:%d", pContext, pContext->fd, pContext->ipstr,
- pContext->accessTimes, pContext->parsed);
+ httpTrace("context:%p, fd:%d, parsed:%d", pContext, pContext->fd, pContext->parsed);
return true;
}
void httpCloseContextByApp(HttpContext *pContext) {
+ HttpParser *parser = pContext->parser;
pContext->parsed = false;
bool keepAlive = true;
- if (pContext->httpVersion == HTTP_VERSION_10 && pContext->httpKeepAlive != HTTP_KEEPALIVE_ENABLE) {
+ if (parser->httpVersion == HTTP_VERSION_10 && parser->keepAlive != HTTP_KEEPALIVE_ENABLE) {
keepAlive = false;
- } else if (pContext->httpVersion != HTTP_VERSION_10 && pContext->httpKeepAlive == HTTP_KEEPALIVE_DISABLE) {
+ } else if (parser->httpVersion != HTTP_VERSION_10 && parser->keepAlive == HTTP_KEEPALIVE_DISABLE) {
keepAlive = false;
} else {
}
if (keepAlive) {
if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_HANDLING, HTTP_CONTEXT_STATE_READY)) {
- httpDebug("context:%p, fd:%d, ip:%s, last state:handling, keepAlive:true, reuse context", pContext, pContext->fd,
- pContext->ipstr);
+ httpTrace("context:%p, fd:%d, last state:handling, keepAlive:true, reuse context", pContext, pContext->fd);
} else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_DROPPING, HTTP_CONTEXT_STATE_CLOSED)) {
httpRemoveContextFromEpoll(pContext);
- httpDebug("context:%p, fd:%d, ip:%s, last state:dropping, keepAlive:true, close connect", pContext, pContext->fd,
- pContext->ipstr);
+ httpTrace("context:%p, fd:%d, last state:dropping, keepAlive:true, close connect", pContext, pContext->fd);
} else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_READY)) {
- httpDebug("context:%p, fd:%d, ip:%s, last state:ready, keepAlive:true, reuse context", pContext, pContext->fd,
- pContext->ipstr);
+ httpTrace("context:%p, fd:%d, last state:ready, keepAlive:true, reuse context", pContext, pContext->fd);
} else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_CLOSED, HTTP_CONTEXT_STATE_CLOSED)) {
httpRemoveContextFromEpoll(pContext);
- httpDebug("context:%p, fd:%d, ip:%s, last state:ready, keepAlive:true, close connect", pContext, pContext->fd,
- pContext->ipstr);
+ httpTrace("context:%p, fd:%d, last state:ready, keepAlive:true, close connect", pContext, pContext->fd);
} else {
httpRemoveContextFromEpoll(pContext);
- httpError("context:%p, fd:%d, ip:%s, last state:%s:%d, keepAlive:true, close connect", pContext, pContext->fd,
- pContext->ipstr, httpContextStateStr(pContext->state), pContext->state);
+ httpError("context:%p, fd:%d, last state:%s:%d, keepAlive:true, close connect", pContext, pContext->fd,
+ httpContextStateStr(pContext->state), pContext->state);
}
} else {
httpRemoveContextFromEpoll(pContext);
- httpDebug("context:%p, fd:%d, ip:%s, last state:%s:%d, keepAlive:false, close context", pContext, pContext->fd,
- pContext->ipstr, httpContextStateStr(pContext->state), pContext->state);
+ httpTrace("context:%p, fd:%d, last state:%s:%d, keepAlive:false, close context", pContext, pContext->fd,
+ httpContextStateStr(pContext->state), pContext->state);
}
- httpReleaseContext(pContext);
+ httpReleaseContext(pContext, true);
}
void httpCloseContextByServer(HttpContext *pContext) {
if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_HANDLING, HTTP_CONTEXT_STATE_DROPPING)) {
- httpDebug("context:%p, fd:%d, ip:%s, epoll finished, still used by app", pContext, pContext->fd, pContext->ipstr);
+ httpTrace("context:%p, fd:%d, epoll finished, still used by app", pContext, pContext->fd);
} else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_DROPPING, HTTP_CONTEXT_STATE_DROPPING)) {
- httpDebug("context:%p, fd:%d, ip:%s, epoll already finished, wait app finished", pContext, pContext->fd, pContext->ipstr);
+ httpTrace("context:%p, fd:%d, epoll already finished, wait app finished", pContext, pContext->fd);
} else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_CLOSED)) {
- httpDebug("context:%p, fd:%d, ip:%s, epoll finished, close connect", pContext, pContext->fd, pContext->ipstr);
+ httpTrace("context:%p, fd:%d, epoll finished, close connect", pContext, pContext->fd);
} else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_CLOSED, HTTP_CONTEXT_STATE_CLOSED)) {
- httpDebug("context:%p, fd:%d, ip:%s, epoll finished, will be closed soon", pContext, pContext->fd, pContext->ipstr);
+ httpTrace("context:%p, fd:%d, epoll finished, will be closed soon", pContext, pContext->fd);
} else {
- httpError("context:%p, fd:%d, ip:%s, unknown state:%d", pContext, pContext->fd, pContext->ipstr, pContext->state);
+ httpError("context:%p, fd:%d, unknown state:%d", pContext, pContext->fd, pContext->state);
}
pContext->parsed = false;
httpRemoveContextFromEpoll(pContext);
- httpReleaseContext(pContext);
+ httpReleaseContext(pContext, true);
}
diff --git a/src/plugins/http/src/gcHandle.c b/src/plugins/http/src/httpGcHandle.c
similarity index 72%
rename from src/plugins/http/src/gcHandle.c
rename to src/plugins/http/src/httpGcHandle.c
index 72b73b4bad..5d4cb0c680 100644
--- a/src/plugins/http/src/gcHandle.c
+++ b/src/plugins/http/src/httpGcHandle.c
@@ -15,11 +15,12 @@
#define _DEFAULT_SOURCE
#include "os.h"
+#include "taosdef.h"
+#include "taoserror.h"
#include "cJSON.h"
#include "httpLog.h"
-#include "gcHandle.h"
-#include "gcJson.h"
-#include "taosdef.h"
+#include "httpGcHandle.h"
+#include "httpGcJson.h"
static HttpDecodeMethod gcDecodeMethod = {"grafana", gcProcessRequest};
static HttpEncodeMethod gcHeartBeatMethod = {
@@ -47,28 +48,27 @@ static HttpEncodeMethod gcQueryMethod = {
void gcInitHandle(HttpServer* pServer) { httpAddMethod(pServer, &gcDecodeMethod); }
bool gcGetUserFromUrl(HttpContext* pContext) {
- HttpParser* pParser = &pContext->parser;
- if (pParser->path[GC_USER_URL_POS].len >= TSDB_USER_LEN || pParser->path[GC_USER_URL_POS].len <= 0) {
+ HttpParser* pParser = pContext->parser;
+ if (pParser->path[GC_USER_URL_POS].pos >= TSDB_USER_LEN || pParser->path[GC_USER_URL_POS].pos <= 0) {
return false;
}
- tstrncpy(pContext->user, pParser->path[GC_USER_URL_POS].pos, TSDB_USER_LEN);
+ tstrncpy(pContext->user, pParser->path[GC_USER_URL_POS].str, TSDB_USER_LEN);
return true;
}
bool gcGetPassFromUrl(HttpContext* pContext) {
- HttpParser* pParser = &pContext->parser;
- if (pParser->path[GC_PASS_URL_POS].len >= TSDB_PASSWORD_LEN || pParser->path[GC_PASS_URL_POS].len <= 0) {
+ HttpParser* pParser = pContext->parser;
+ if (pParser->path[GC_PASS_URL_POS].pos >= TSDB_PASSWORD_LEN || pParser->path[GC_PASS_URL_POS].pos <= 0) {
return false;
}
- tstrncpy(pContext->pass, pParser->path[GC_PASS_URL_POS].pos, TSDB_PASSWORD_LEN);
+ tstrncpy(pContext->pass, pParser->path[GC_PASS_URL_POS].str, TSDB_PASSWORD_LEN);
return true;
}
bool gcProcessLoginRequest(HttpContext* pContext) {
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, process grafana login msg", pContext, pContext->fd, pContext->ipstr,
- pContext->user);
+ httpDebug("context:%p, fd:%d, user:%s, process grafana login msg", pContext, pContext->fd, pContext->user);
pContext->reqType = HTTP_REQTYPE_LOGIN;
return true;
}
@@ -143,65 +143,61 @@ bool gcProcessLoginRequest(HttpContext* pContext) {
//}]
bool gcProcessQueryRequest(HttpContext* pContext) {
- httpDebug("context:%p, fd:%d, ip:%s, process grafana query msg", pContext, pContext->fd, pContext->ipstr);
+ httpDebug("context:%p, fd:%d, process grafana query msg", pContext, pContext->fd);
- HttpParser* pParser = &pContext->parser;
- char* filter = pParser->data.pos;
+ char* filter = pContext->parser->body.str;
if (filter == NULL) {
- httpSendErrorResp(pContext, HTTP_NO_MSG_INPUT);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_MSG_INPUT);
return false;
}
cJSON* root = cJSON_Parse(filter);
if (root == NULL) {
- httpSendErrorResp(pContext, HTTP_PARSE_GC_REQ_ERROR);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_GC_REQ_PARSE_ERROR);
return false;
}
- int size = cJSON_GetArraySize(root);
+ int32_t size = cJSON_GetArraySize(root);
if (size <= 0) {
- httpSendErrorResp(pContext, HTTP_GC_QUERY_NULL);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_GC_QUERY_NULL);
cJSON_Delete(root);
return false;
}
if (size > 100) {
- httpSendErrorResp(pContext, HTTP_GC_QUERY_SIZE);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_GC_QUERY_SIZE);
cJSON_Delete(root);
return false;
}
if (!httpMallocMultiCmds(pContext, size, HTTP_BUFFER_SIZE)) {
- httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_ENOUGH_MEMORY);
cJSON_Delete(root);
return false;
}
- for (int i = 0; i < size; ++i) {
+ for (int32_t i = 0; i < size; ++i) {
cJSON* query = cJSON_GetArrayItem(root, i);
if (query == NULL) continue;
cJSON* refId = cJSON_GetObjectItem(query, "refId");
if (refId == NULL || refId->valuestring == NULL || strlen(refId->valuestring) == 0) {
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, refId is null", pContext, pContext->fd, pContext->ipstr,
- pContext->user);
+ httpDebug("context:%p, fd:%d, user:%s, refId is null", pContext, pContext->fd, pContext->user);
continue;
}
- int refIdBuffer = httpAddToSqlCmdBuffer(pContext, refId->valuestring);
+ int32_t refIdBuffer = httpAddToSqlCmdBuffer(pContext, refId->valuestring);
if (refIdBuffer == -1) {
- httpWarn("context:%p, fd:%d, ip:%s, user:%s, refId buffer is full", pContext, pContext->fd, pContext->ipstr,
- pContext->user);
+ httpWarn("context:%p, fd:%d, user:%s, refId buffer is full", pContext, pContext->fd, pContext->user);
break;
}
cJSON* alias = cJSON_GetObjectItem(query, "alias");
- int aliasBuffer = -1;
+ int32_t aliasBuffer = -1;
if (!(alias == NULL || alias->valuestring == NULL || strlen(alias->valuestring) == 0)) {
aliasBuffer = httpAddToSqlCmdBuffer(pContext, alias->valuestring);
if (aliasBuffer == -1) {
- httpWarn("context:%p, fd:%d, ip:%s, user:%s, alias buffer is full", pContext, pContext->fd, pContext->ipstr,
- pContext->user);
+ httpWarn("context:%p, fd:%d, user:%s, alias buffer is full", pContext, pContext->fd, pContext->user);
break;
}
}
@@ -211,21 +207,19 @@ bool gcProcessQueryRequest(HttpContext* pContext) {
cJSON* sql = cJSON_GetObjectItem(query, "sql");
if (sql == NULL || sql->valuestring == NULL || strlen(sql->valuestring) == 0) {
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, sql is null", pContext, pContext->fd, pContext->ipstr,
- pContext->user);
+ httpDebug("context:%p, fd:%d, user:%s, sql is null", pContext, pContext->fd, pContext->user);
continue;
}
- int sqlBuffer = httpAddToSqlCmdBuffer(pContext, sql->valuestring);
+ int32_t sqlBuffer = httpAddToSqlCmdBuffer(pContext, sql->valuestring);
if (sqlBuffer == -1) {
- httpWarn("context:%p, fd:%d, ip:%s, user:%s, sql buffer is full", pContext, pContext->fd, pContext->ipstr,
- pContext->user);
+ httpWarn("context:%p, fd:%d, user:%s, sql buffer is full", pContext, pContext->fd, pContext->user);
break;
}
HttpSqlCmd* cmd = httpNewSqlCmd(pContext);
if (cmd == NULL) {
- httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_ENOUGH_MEMORY);
cJSON_Delete(root);
return false;
}
@@ -237,8 +231,8 @@ bool gcProcessQueryRequest(HttpContext* pContext) {
cmd->timestamp = httpAddToSqlCmdBufferWithSize(pContext, HTTP_GC_TARGET_SIZE + 1); // hack way
if (cmd->timestamp == -1) {
- httpWarn("context:%p, fd:%d, ip:%s, user:%s, cant't malloc target size, sql buffer is full",
- pContext, pContext->fd, pContext->ipstr, pContext->user);
+ httpWarn("context:%p, fd:%d, user:%s, cant't malloc target size, sql buffer is full", pContext, pContext->fd,
+ pContext->user);
break;
}
}
@@ -251,7 +245,7 @@ bool gcProcessQueryRequest(HttpContext* pContext) {
}
bool gcProcessHeartbeatRequest(HttpContext* pContext) {
- httpDebug("context:%p, fd:%d, ip:%s, process grafana heartbeat msg", pContext, pContext->fd, pContext->ipstr);
+ httpDebug("context:%p, fd:%d, process grafana heartbeat msg", pContext, pContext->fd);
pContext->reqType = HTTP_REQTYPE_HEARTBEAT;
pContext->encodeMethod = &gcHeartBeatMethod;
return true;
@@ -267,7 +261,7 @@ bool gcProcessRequest(struct HttpContext* pContext) {
}
if (strlen(pContext->user) == 0 || strlen(pContext->pass) == 0) {
- httpSendErrorResp(pContext, HTTP_PARSE_USR_ERROR);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_AUTH_INFO);
return false;
}
diff --git a/src/plugins/http/src/gcJson.c b/src/plugins/http/src/httpGcJson.c
similarity index 85%
rename from src/plugins/http/src/gcJson.c
rename to src/plugins/http/src/httpGcJson.c
index 94d53db6ef..80e4ae3bc2 100644
--- a/src/plugins/http/src/gcJson.c
+++ b/src/plugins/http/src/httpGcJson.c
@@ -15,8 +15,8 @@
#define _DEFAULT_SOURCE
#include "os.h"
-#include "gcHandle.h"
-#include "gcJson.h"
+#include "httpGcHandle.h"
+#include "httpGcJson.h"
#include "httpJson.h"
#include "httpResp.h"
@@ -54,8 +54,8 @@ void gcWriteTargetStartJson(JsonBuf *jsonBuf, char *refId, char *target) {
httpJsonToken(jsonBuf, JsonObjStt);
// target section
- httpJsonPair(jsonBuf, "refId", 5, refId, (int)strlen(refId));
- httpJsonPair(jsonBuf, "target", 6, target, (int)strlen(target));
+ httpJsonPair(jsonBuf, "refId", 5, refId, (int32_t)strlen(refId));
+ httpJsonPair(jsonBuf, "target", 6, target, (int32_t)strlen(target));
// data begin
httpJsonPairHead(jsonBuf, "datapoints", 10);
@@ -82,25 +82,25 @@ void gcStopQueryJson(HttpContext *pContext, HttpSqlCmd *cmd) {
}
}
-bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int numOfRows) {
+bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int32_t numOfRows) {
JsonBuf *jsonBuf = httpMallocJsonBuf(pContext);
if (jsonBuf == NULL) return false;
- int num_fields = taos_num_fields(result);
+ int32_t num_fields = taos_num_fields(result);
TAOS_FIELD *fields = taos_fetch_fields(result);
if (num_fields == 0) {
return false;
}
- int precision = taos_result_precision(result);
+ int32_t precision = taos_result_precision(result);
// such as select count(*) from sys.cpu
// such as select count(*) from sys.cpu group by ipaddr
// such as select count(*) from sys.cpu interval(1d)
// such as select count(*) from sys.cpu interval(1d) group by ipaddr
// such as select count(*) count(*) from sys.cpu group by ipaddr interval(1d)
- int dataFields = -1;
- int groupFields = -1;
+ int32_t dataFields = -1;
+ int32_t groupFields = -1;
bool hasTimestamp = fields[0].type == TSDB_DATA_TYPE_TIMESTAMP;
if (hasTimestamp) {
dataFields = 1;
@@ -119,7 +119,7 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
}
cmd->numOfRows += numOfRows;
- for (int k = 0; k < numOfRows; ++k) {
+ for (int32_t k = 0; k < numOfRows; ++k) {
TAOS_ROW row = taos_fetch_row(result);
if (row == NULL) {
cmd->numOfRows--;
@@ -130,9 +130,9 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
// for group by
if (groupFields != -1) {
char target[HTTP_GC_TARGET_SIZE] = {0};
- int len;
+ int32_t len;
len = snprintf(target,HTTP_GC_TARGET_SIZE,"%s{",aliasBuffer);
- for (int i = dataFields + 1; i >= 0; i--) {
+ for (int32_t i = dataFields; i >= 0; i--) {
httpJsonItemToken(jsonBuf);
if (row[i] == NULL) {
httpJsonOriginString(jsonBuf, "null", 4);
@@ -210,10 +210,10 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
httpJsonInt64(jsonBuf, *((int64_t *)row[i]));
break;
case TSDB_DATA_TYPE_FLOAT:
- httpJsonFloat(jsonBuf, *((float *)row[i]));
+ httpJsonFloat(jsonBuf, GET_FLOAT_VAL(row[i]));
break;
case TSDB_DATA_TYPE_DOUBLE:
- httpJsonDouble(jsonBuf, *((double *)row[i]));
+ httpJsonDouble(jsonBuf, GET_DOUBLE_VAL(row[i]));
break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
@@ -253,13 +253,13 @@ void gcSendHeartBeatResp(HttpContext *pContext, HttpSqlCmd *cmd) {
httpInitJsonBuf(jsonBuf, pContext);
httpJsonToken(jsonBuf, JsonObjStt);
- httpJsonPair(jsonBuf, "message", (int)strlen("message"), desc, (int)strlen(desc));
+ httpJsonPair(jsonBuf, "message", (int32_t)strlen("message"), desc, (int32_t)strlen(desc));
httpJsonToken(jsonBuf, JsonObjEnd);
char head[1024];
- int hLen = sprintf(head, httpRespTemplate[HTTP_RESPONSE_GRAFANA], httpVersionStr[pContext->httpVersion],
- httpKeepAliveStr[pContext->httpKeepAlive], (jsonBuf->lst - jsonBuf->buf));
+ int32_t hLen = sprintf(head, httpRespTemplate[HTTP_RESPONSE_GRAFANA], httpVersionStr[pContext->parser->httpVersion],
+ httpKeepAliveStr[pContext->parser->keepAlive], (jsonBuf->lst - jsonBuf->buf));
httpWriteBuf(pContext, head, hLen);
- httpWriteBuf(pContext, jsonBuf->buf, (int)(jsonBuf->lst - jsonBuf->buf));
+ httpWriteBuf(pContext, jsonBuf->buf, (int32_t)(jsonBuf->lst - jsonBuf->buf));
}
diff --git a/src/plugins/http/src/httpGzip.c b/src/plugins/http/src/httpGzip.c
new file mode 100644
index 0000000000..54f900c755
--- /dev/null
+++ b/src/plugins/http/src/httpGzip.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define _DEFAULT_SOURCE
+#include "os.h"
+#include "zlib.h"
+#include "httpGzip.h"
+
+typedef enum {
+ EHTTP_GZIP_INITING,
+ EHTTP_GZIP_READY,
+ EHTTP_GZIP_CLOSED,
+} EHTTP_GZIP_STATE;
+
+struct ehttp_gzip_s {
+ ehttp_gzip_conf_t conf;
+ ehttp_gzip_callbacks_t callbacks;
+ void *arg;
+ z_stream *gzip;
+ gz_header *header;
+ char *chunk;
+
+ int32_t state;
+};
+
+static void dummy_on_data(ehttp_gzip_t *gzip, void *arg, const char *buf, int32_t len) {
+}
+
+static void ehttp_gzip_cleanup(ehttp_gzip_t *gzip) {
+ switch(gzip->state) {
+ case EHTTP_GZIP_READY: {
+ inflateEnd(gzip->gzip);
+ } break;
+ default: break;
+ }
+ if (gzip->gzip) {
+ free(gzip->gzip);
+ gzip->gzip = NULL;
+ }
+ if (gzip->header) {
+ free(gzip->header);
+ gzip->header = NULL;
+ }
+ if (gzip->chunk) {
+ free(gzip->chunk);
+ gzip->chunk = NULL;
+ }
+ gzip->state = EHTTP_GZIP_CLOSED;
+}
+
+ehttp_gzip_t* ehttp_gzip_create_decompressor(ehttp_gzip_conf_t conf, ehttp_gzip_callbacks_t callbacks, void *arg) {
+ ehttp_gzip_t *gzip = (ehttp_gzip_t*)calloc(1, sizeof(*gzip));
+ if (!gzip) return NULL;
+
+ do {
+ gzip->conf = conf;
+ gzip->callbacks = callbacks;
+ gzip->arg = arg;
+ if (gzip->callbacks.on_data == NULL) gzip->callbacks.on_data = dummy_on_data;
+ gzip->gzip = (z_stream*)calloc(1, sizeof(*gzip->gzip));
+ if (gzip->conf.get_header) {
+ gzip->header = (gz_header*)calloc(1, sizeof(*gzip->header));
+ }
+ if (gzip->conf.chunk_size<=0) gzip->conf.chunk_size = EHTTP_GZIP_CHUNK_SIZE_DEFAULT;
+ gzip->chunk = (char*)malloc(gzip->conf.chunk_size);
+ if (!gzip->gzip || (gzip->conf.get_header && !gzip->header) || !gzip->chunk) break;
+ gzip->gzip->zalloc = Z_NULL;
+ gzip->gzip->zfree = Z_NULL;
+ gzip->gzip->opaque = Z_NULL;
+
+ // 863 windowBits can also be greater than 15 for optional gzip decoding. Add
+ // 864 32 to windowBits to enable zlib and gzip decoding with automatic header
+ // 865 detection, or add 16 to decode only the gzip format (the zlib format will
+ // 866 return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is a
+ // 867 CRC-32 instead of an Adler-32. Unlike the gunzip utility and gzread() (see
+ // 868 below), inflate() will not automatically decode concatenated gzip streams.
+ // 869 inflate() will return Z_STREAM_END at the end of the gzip stream. The state
+ // 870 would need to be reset to continue decoding a subsequent gzip stream.
+ int32_t ret = inflateInit2(gzip->gzip, 32); // 32/16? 32/16 + MAX_WBITS
+ if (ret != Z_OK) break;
+ if (gzip->header) {
+ ret = inflateGetHeader(gzip->gzip, gzip->header);
+ }
+ if (ret != Z_OK) break;
+
+ gzip->gzip->next_out = (z_const Bytef*)gzip->chunk;
+ gzip->gzip->avail_out = gzip->conf.chunk_size;
+ gzip->state = EHTTP_GZIP_READY;
+ return gzip;
+ } while (0);
+
+ ehttp_gzip_destroy(gzip);
+ return NULL;
+}
+
+ehttp_gzip_t* ehttp_gzip_create_compressor(ehttp_gzip_conf_t conf, ehttp_gzip_callbacks_t callbacks, void *arg);
+
+void ehttp_gzip_destroy(ehttp_gzip_t *gzip) {
+ ehttp_gzip_cleanup(gzip);
+
+ free(gzip);
+}
+
+int32_t ehttp_gzip_write(ehttp_gzip_t *gzip, const char *buf, int32_t len) {
+ if (gzip->state != EHTTP_GZIP_READY) return -1;
+ if (len <= 0) return 0;
+
+ gzip->gzip->next_in = (z_const Bytef*)buf;
+ gzip->gzip->avail_in = len;
+
+ while (gzip->gzip->avail_in) {
+ int32_t ret;
+ if (gzip->header) {
+ ret = inflate(gzip->gzip, Z_BLOCK);
+ } else {
+ ret = inflate(gzip->gzip, Z_SYNC_FLUSH);
+ }
+ if (ret != Z_OK && ret != Z_STREAM_END) return -1;
+
+ if (gzip->gzip->avail_out>0) {
+ if (ret!=Z_STREAM_END) continue;
+ }
+
+ int32_t len = gzip->gzip->next_out - (z_const Bytef*)gzip->chunk;
+
+ gzip->gzip->next_out[0] = '\0';
+ gzip->callbacks.on_data(gzip, gzip->arg, gzip->chunk, len);
+ gzip->gzip->next_out = (z_const Bytef*)gzip->chunk;
+ gzip->gzip->avail_out = gzip->conf.chunk_size;
+ }
+
+ return 0;
+}
+
+int32_t ehttp_gzip_finish(ehttp_gzip_t *gzip) {
+ if (gzip->state != EHTTP_GZIP_READY) return -1;
+
+ gzip->gzip->next_in = NULL;
+ gzip->gzip->avail_in = 0;
+
+ int32_t ret;
+ ret = inflate(gzip->gzip, Z_FINISH);
+
+ if (ret != Z_STREAM_END) return -1;
+
+ int32_t len = gzip->gzip->next_out - (z_const Bytef*)gzip->chunk;
+
+ gzip->gzip->next_out[0] = '\0';
+ gzip->callbacks.on_data(gzip, gzip->arg, gzip->chunk, len);
+ gzip->gzip->next_out = NULL;
+ gzip->gzip->avail_out = 0;
+
+ return 0;
+}
+
diff --git a/src/plugins/http/src/httpHandle.c b/src/plugins/http/src/httpHandle.c
index 407d19b307..b50217cfc4 100644
--- a/src/plugins/http/src/httpHandle.c
+++ b/src/plugins/http/src/httpHandle.c
@@ -15,382 +15,33 @@
#define _DEFAULT_SOURCE
#include "os.h"
-#include "taos.h"
-#include "tglobal.h"
-#include "tsocket.h"
-#include "ttimer.h"
#include "httpInt.h"
#include "httpResp.h"
-#include "httpAuth.h"
-#include "httpServer.h"
#include "httpContext.h"
#include "httpHandle.h"
-void httpToLowerUrl(char* url) {
- /*ignore case */
- while (*url) {
- if (*url >= 'A' && *url <= 'Z') {
- *url = *url | 0x20;
- }
- url++;
- }
-}
-
-bool httpUrlMatch(HttpContext* pContext, int pos, char* cmp) {
- HttpParser* pParser = &pContext->parser;
-
- if (pos < 0 || pos >= HTTP_MAX_URL) {
- return false;
- }
-
- if (pParser->path[pos].len <= 0) {
- return false;
- }
-
- if (strcmp(pParser->path[pos].pos, cmp) != 0) {
- return false;
- }
-
- return true;
-}
-
-// /account/db/meter HTTP/1.1\r\nHost
-bool httpParseURL(HttpContext* pContext) {
- HttpParser* pParser = &pContext->parser;
- char* pSeek;
- char* pEnd = strchr(pParser->pLast, ' ');
- if (pEnd == NULL) {
- httpSendErrorResp(pContext, HTTP_UNSUPPORT_URL);
- return false;
- }
-
- if (*pParser->pLast != '/') {
- httpSendErrorResp(pContext, HTTP_UNSUPPORT_URL);
- return false;
- }
- pParser->pLast++;
-
- for (int i = 0; i < HTTP_MAX_URL; i++) {
- pSeek = strchr(pParser->pLast, '/');
- if (pSeek == NULL) {
- break;
- }
- pParser->path[i].pos = pParser->pLast;
- if (pSeek <= pEnd) {
- pParser->path[i].len = (int16_t)(pSeek - pParser->pLast);
- pParser->path[i].pos[pParser->path[i].len] = 0;
- httpToLowerUrl(pParser->path[i].pos);
- pParser->pLast = pSeek + 1;
- } else {
- pParser->path[i].len = (int16_t)(pEnd - pParser->pLast);
- pParser->path[i].pos[pParser->path[i].len] = 0;
- httpToLowerUrl(pParser->path[i].pos);
- pParser->pLast = pEnd + 1;
- break;
- }
- }
- pParser->pLast = pEnd + 1;
-
- if (pParser->path[0].len == 0) {
- httpSendErrorResp(pContext, HTTP_UNSUPPORT_URL);
- return false;
- }
-
- return true;
-}
-
-bool httpParseHttpVersion(HttpContext* pContext) {
- HttpParser* pParser = &pContext->parser;
- char* pEnd = strchr(pParser->pLast, '1');
- if (pEnd == NULL) {
- httpError("context:%p, fd:%d, ip:%s, can't find http version at position:%s", pContext, pContext->fd,
- pContext->ipstr, pParser->pLast);
- httpSendErrorResp(pContext, HTTP_PARSE_HTTP_VERSION_ERROR);
- return false;
- }
-
- if (*(pEnd + 1) != '.') {
- httpError("context:%p, fd:%d, ip:%s, can't find http version at position:%s", pContext, pContext->fd,
- pContext->ipstr, pParser->pLast);
- httpSendErrorResp(pContext, HTTP_PARSE_HTTP_VERSION_ERROR);
- return false;
- }
-
- if (*(pEnd + 2) == '0')
- pContext->httpVersion = HTTP_VERSION_10;
- else if (*(pEnd + 2) == '1')
- pContext->httpVersion = HTTP_VERSION_11;
- else if (*(pEnd + 2) == '2')
- pContext->httpVersion = HTTP_VERSION_11;
- else
- pContext->httpVersion = HTTP_VERSION_10;
-
- httpDebug("context:%p, fd:%d, ip:%s, httpVersion:1.%d", pContext, pContext->fd, pContext->ipstr,
- pContext->httpVersion);
- return true;
-}
-
-bool httpGetNextLine(HttpContext* pContext) {
- HttpParser* pParser = &pContext->parser;
- while (pParser->buffer + pParser->bufsize - pParser->pCur++ > 0) {
- if (*(pParser->pCur) == '\n' && *(pParser->pCur - 1) == '\r') {
- // cut the string
- *pParser->pCur = 0;
- return true;
- }
- }
-
- httpSendErrorResp(pContext, HTTP_PARSE_HEAD_ERROR);
-
- return false;
-}
-
-bool httpGetHttpMethod(HttpContext* pContext) {
- HttpParser* pParser = &pContext->parser;
-
- char* pSeek = strchr(pParser->pLast, ' ');
- if (pSeek == NULL) {
- httpSendErrorResp(pContext, HTTP_PARSE_HTTP_METHOD_ERROR);
- return false;
- }
- pParser->method.pos = pParser->pLast;
- pParser->method.len = (int16_t)(pSeek - pParser->pLast);
- pParser->method.pos[pParser->method.len] = 0;
- pParser->pLast = pSeek + 1;
-
- httpTrace("context:%p, fd:%d, ip:%s, httpMethod:%s", pContext, pContext->fd, pContext->ipstr, pParser->method.pos);
- return true;
-}
-
-bool httpGetDecodeMethod(HttpContext* pContext) {
- HttpParser* pParser = &pContext->parser;
-
- HttpServer* pServer = &tsHttpServer;
- int methodLen = pServer->methodScannerLen;
- for (int i = 0; i < methodLen; i++) {
- HttpDecodeMethod* method = pServer->methodScanner[i];
- if (strcmp(method->module, pParser->path[0].pos) != 0) {
- continue;
- }
- pParser->pMethod = method;
- return true;
- }
-
- httpError("context:%p, fd:%d, ip:%s, error:the url is not support, method:%s, path:%s",
- pContext, pContext->fd, pContext->ipstr, pParser->method.pos, pParser->path[0].pos);
- httpSendErrorResp(pContext, HTTP_UNSUPPORT_URL);
-
- return false;
-}
-
-bool httpParseHead(HttpContext* pContext) {
- HttpParser* pParser = &pContext->parser;
- if (strncasecmp(pParser->pLast, "Content-Length: ", 16) == 0) {
- pParser->data.len = (int32_t)atoi(pParser->pLast + 16);
- httpTrace("context:%p, fd:%d, ip:%s, Content-Length:%d", pContext, pContext->fd, pContext->ipstr,
- pParser->data.len);
- } else if (strncasecmp(pParser->pLast, "Accept-Encoding: ", 17) == 0) {
- if (tsHttpEnableCompress && strstr(pParser->pLast + 17, "gzip") != NULL) {
- pContext->acceptEncoding = HTTP_COMPRESS_GZIP;
- httpTrace("context:%p, fd:%d, ip:%s, Accept-Encoding:gzip", pContext, pContext->fd, pContext->ipstr);
- } else {
- pContext->acceptEncoding = HTTP_COMPRESS_IDENTITY;
- httpTrace("context:%p, fd:%d, ip:%s, Accept-Encoding:identity", pContext, pContext->fd, pContext->ipstr);
- }
- } else if (strncasecmp(pParser->pLast, "Content-Encoding: ", 18) == 0) {
- if (strstr(pParser->pLast + 18, "gzip") != NULL) {
- pContext->contentEncoding = HTTP_COMPRESS_GZIP;
- httpTrace("context:%p, fd:%d, ip:%s, Content-Encoding:gzip", pContext, pContext->fd, pContext->ipstr);
- } else {
- pContext->contentEncoding = HTTP_COMPRESS_IDENTITY;
- httpTrace("context:%p, fd:%d, ip:%s, Content-Encoding:identity", pContext, pContext->fd, pContext->ipstr);
- }
- } else if (strncasecmp(pParser->pLast, "Connection: ", 12) == 0) {
- if (strncasecmp(pParser->pLast + 12, "Keep-Alive", 10) == 0) {
- pContext->httpKeepAlive = HTTP_KEEPALIVE_ENABLE;
- } else {
- pContext->httpKeepAlive = HTTP_KEEPALIVE_DISABLE;
- }
- httpTrace("context:%p, fd:%d, ip:%s, keepAlive:%d", pContext, pContext->fd, pContext->ipstr,
- pContext->httpKeepAlive);
- } else if (strncasecmp(pParser->pLast, "Transfer-Encoding: ", 19) == 0) {
- if (strncasecmp(pParser->pLast + 19, "chunked", 7) == 0) {
- pContext->httpChunked = HTTP_CHUNKED;
- }
- } else if (strncasecmp(pParser->pLast, "Authorization: ", 15) == 0) {
- if (strncasecmp(pParser->pLast + 15, "Basic ", 6) == 0) {
- pParser->token.pos = pParser->pLast + 21;
- pParser->token.len = (int16_t)(pParser->pCur - pParser->token.pos - 1);
- bool parsed = httpParseBasicAuthToken(pContext, pParser->token.pos, pParser->token.len);
- if (!parsed) {
- httpSendErrorResp(pContext, HTTP_INVALID_BASIC_AUTH_TOKEN);
- return false;
- }
- } else if (strncasecmp(pParser->pLast + 15, "Taosd ", 6) == 0) {
- pParser->token.pos = pParser->pLast + 21;
- pParser->token.len = (int16_t)(pParser->pCur - pParser->token.pos - 1);
- bool parsed = httpParseTaosdAuthToken(pContext, pParser->token.pos, pParser->token.len);
- if (!parsed) {
- httpSendErrorResp(pContext, HTTP_INVALID_TAOSD_AUTH_TOKEN);
- return false;
- }
- } else {
- httpSendErrorResp(pContext, HTTP_INVALID_AUTH_TOKEN);
- return false;
- }
- } else {
- }
-
- return true;
-}
-
-bool httpParseChunkedBody(HttpContext* pContext, HttpParser* pParser, bool test) {
- char* pEnd = pParser->buffer + pParser->bufsize;
- char* pRet = pParser->data.pos;
- char* pSize = pParser->data.pos;
- size_t size = strtoul(pSize, NULL, 16);
- if (size <= 0) return false;
-
- while (size > 0) {
- char* pData = strstr(pSize, "\r\n");
- if (pData == NULL || pData >= pEnd) return false;
- pData += 2;
-
- pSize = strstr(pData, "\r\n");
- if (pSize == NULL || pSize >= pEnd) return false;
- if ((size_t)(pSize - pData) != size) return false;
- pSize += 2;
-
- if (!test) {
- memmove(pRet, pData, size);
- pRet += size;
- }
-
- size = strtoul(pSize, NULL, 16);
- }
-
- if (!test) {
- *pRet = '\0';
- }
-
- return true;
-}
-
-int httpReadChunkedBody(HttpContext* pContext, HttpParser* pParser) {
- bool parsedOk = httpParseChunkedBody(pContext, pParser, true);
- if (parsedOk) {
- httpParseChunkedBody(pContext, pParser, false);
- return HTTP_CHECK_BODY_SUCCESS;
- } else {
- httpTrace("context:%p, fd:%d, ip:%s, chunked body not finished, continue read", pContext, pContext->fd, pContext->ipstr);
- if (httpReadDataImp(pContext) != HTTP_READ_DATA_SUCCESS) {
- httpError("context:%p, fd:%d, ip:%s, read chunked request error", pContext, pContext->fd, pContext->ipstr);
- return HTTP_CHECK_BODY_ERROR;
- } else {
- return HTTP_CHECK_BODY_CONTINUE;
- }
- }
-}
-
-int httpReadUnChunkedBody(HttpContext* pContext, HttpParser* pParser) {
- int dataReadLen = pParser->bufsize - (int)(pParser->data.pos - pParser->buffer);
- if (dataReadLen > pParser->data.len) {
- httpError("context:%p, fd:%d, ip:%s, un-chunked body length invalid, read size:%d dataReadLen:%d > pContext->data.len:%d",
- pContext, pContext->fd, pContext->ipstr, pContext->parser.bufsize, dataReadLen, pParser->data.len);
- return HTTP_CHECK_BODY_ERROR;
- } else if (dataReadLen < pParser->data.len) {
- httpTrace("context:%p, fd:%d, ip:%s, un-chunked body not finished, read size:%d dataReadLen:%d < pContext->data.len:%d, continue read",
- pContext, pContext->fd, pContext->ipstr, pContext->parser.bufsize, dataReadLen, pParser->data.len);
- return HTTP_CHECK_BODY_CONTINUE;
- } else {
- return HTTP_CHECK_BODY_SUCCESS;
- }
-}
-
-bool httpParseRequest(HttpContext* pContext) {
- HttpParser *pParser = &pContext->parser;
- if (pContext->parsed) {
- return true;
- }
-
- httpTraceL("context:%p, fd:%d, ip:%s, thread:%s, numOfContexts:%d, read size:%d, raw data:\n%s", pContext,
- pContext->fd, pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfContexts,
- pContext->parser.bufsize, pContext->parser.buffer);
-
- if (!httpGetHttpMethod(pContext)) {
- return false;
- }
-
- if (!httpParseURL(pContext)) {
- return false;
- }
-
- if (!httpParseHttpVersion(pContext)) {
- return false;
- }
-
- if (!httpGetDecodeMethod(pContext)) {
- return false;
- }
-
- do {
- if (!httpGetNextLine(pContext)) {
- return false;
- }
-
- // Empty line, end of the HTTP HEAD
- if (pParser->pCur - pParser->pLast == 1) {
- pParser->data.pos = ++pParser->pCur;
- break;
- }
-
- if (!httpParseHead(pContext)) {
- return false;
- }
-
- pParser->pLast = ++pParser->pCur;
- } while (1);
-
- httpDebug("context:%p, fd:%d, ip:%s, parse http head ok", pContext, pContext->fd, pContext->ipstr);
-
- pContext->parsed = true;
- return true;
-}
-
-int httpCheckReadCompleted(HttpContext* pContext) {
- HttpParser* pParser = &pContext->parser;
-
- if (pContext->httpChunked == HTTP_UNCUNKED) {
- return httpReadUnChunkedBody(pContext, pParser);
- } else {
- return httpReadChunkedBody(pContext, pParser);
- }
-}
-
bool httpDecodeRequest(HttpContext* pContext) {
- HttpParser* pParser = &pContext->parser;
- if (pParser->pMethod->decodeFp == NULL) {
+ if (pContext->decodeMethod->decodeFp == NULL) {
return false;
}
- return (*pParser->pMethod->decodeFp)(pContext);
+ return (*pContext->decodeMethod->decodeFp)(pContext);
}
/**
* Process the request from http pServer
*/
bool httpProcessData(HttpContext* pContext) {
-
if (!httpAlterContextState(pContext, HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_HANDLING)) {
- httpDebug("context:%p, fd:%d, ip:%s, state:%s not in ready state, stop process request",
- pContext, pContext->fd, pContext->ipstr, httpContextStateStr(pContext->state));
+ httpTrace("context:%p, fd:%d, state:%s not in ready state, stop process request", pContext, pContext->fd,
+ httpContextStateStr(pContext->state));
httpCloseContextByApp(pContext);
return false;
}
// handle Cross-domain request
- if (strcmp(pContext->parser.method.pos, "OPTIONS") == 0) {
- httpDebug("context:%p, fd:%d, ip:%s, process options request", pContext, pContext->fd, pContext->ipstr);
+ if (strcmp(pContext->parser->method, "OPTIONS") == 0) {
+ httpTrace("context:%p, fd:%d, process options request", pContext, pContext->fd);
httpSendOptionResp(pContext, "process options request success");
} else {
if (!httpDecodeRequest(pContext)) {
diff --git a/src/plugins/http/src/httpJson.c b/src/plugins/http/src/httpJson.c
index 4748f03b66..1aa6cfac4b 100644
--- a/src/plugins/http/src/httpJson.c
+++ b/src/plugins/http/src/httpJson.c
@@ -17,9 +17,9 @@
#include "os.h"
#include "taosmsg.h"
#include "taoserror.h"
+#include "tglobal.h"
#include "http.h"
#include "httpLog.h"
-#include "httpCode.h"
#include "httpJson.h"
#include "httpResp.h"
#include "httpUtil.h"
@@ -38,28 +38,26 @@ char JsonNulTkn[] = "null";
char JsonTrueTkn[] = "true";
char JsonFalseTkn[] = "false";
-int httpWriteBufByFd(struct HttpContext* pContext, const char* buf, int sz) {
- int len;
- int countWait = 0;
- int writeLen = 0;
+int32_t httpWriteBufByFd(struct HttpContext* pContext, const char* buf, int32_t sz) {
+ int32_t len;
+ int32_t countWait = 0;
+ int32_t writeLen = 0;
do {
if (pContext->fd > 2){
- len = (int)taosSend(pContext->fd, buf + writeLen, (size_t)(sz - writeLen), MSG_NOSIGNAL);
+ len = (int32_t)taosSend(pContext->fd, buf + writeLen, (size_t)(sz - writeLen), MSG_NOSIGNAL);
}
else {
return sz;
}
if (len < 0) {
- httpDebug("context:%p, fd:%d, ip:%s, socket write errno:%d, times:%d",
- pContext, pContext->fd, pContext->ipstr, errno, countWait);
+ httpDebug("context:%p, fd:%d, socket write errno:%d:%s, times:%d", pContext, pContext->fd, errno, strerror(errno), countWait);
if (++countWait > HTTP_WRITE_RETRY_TIMES) break;
taosMsleep(HTTP_WRITE_WAIT_TIME_MS);
continue;
} else if (len == 0) {
- httpDebug("context:%p, fd:%d, ip:%s, socket write errno:%d, connect already closed",
- pContext, pContext->fd, pContext->ipstr, errno);
+ httpDebug("context:%p, fd:%d, socket write errno:%d:%s, connect already closed", pContext, pContext->fd, errno, strerror(errno));
break;
} else {
countWait = 0;
@@ -70,36 +68,35 @@ int httpWriteBufByFd(struct HttpContext* pContext, const char* buf, int sz) {
return writeLen;
}
-int httpWriteBuf(struct HttpContext *pContext, const char *buf, int sz) {
- int writeSz = httpWriteBufByFd(pContext, buf, sz);
+int32_t httpWriteBuf(struct HttpContext* pContext, const char* buf, int32_t sz) {
+ int32_t writeSz = httpWriteBufByFd(pContext, buf, sz);
if (writeSz != sz) {
- httpError("context:%p, fd:%d, ip:%s, dataSize:%d, writeSize:%d, failed to send response:\n%s",
- pContext, pContext->fd, pContext->ipstr, sz, writeSz, buf);
+ httpError("context:%p, fd:%d, dataSize:%d, writeSize:%d, failed to send response:\n%s", pContext, pContext->fd, sz,
+ writeSz, buf);
} else {
- httpTrace("context:%p, fd:%d, ip:%s, dataSize:%d, writeSize:%d, response:\n%s", pContext, pContext->fd,
- pContext->ipstr, sz, writeSz, buf);
+ httpTrace("context:%p, fd:%d, dataSize:%d, writeSize:%d, response:\n%s", pContext, pContext->fd, sz, writeSz, buf);
}
return writeSz;
}
-int httpWriteBufNoTrace(struct HttpContext *pContext, const char *buf, int sz) {
- int writeSz = httpWriteBufByFd(pContext, buf, sz);
+int32_t httpWriteBufNoTrace(struct HttpContext *pContext, const char *buf, int32_t sz) {
+ int32_t writeSz = httpWriteBufByFd(pContext, buf, sz);
if (writeSz != sz) {
- httpError("context:%p, fd:%d, ip:%s, dataSize:%d, writeSize:%d, failed to send response",
- pContext, pContext->fd, pContext->ipstr, sz, writeSz);
+ httpError("context:%p, fd:%d, dataSize:%d, writeSize:%d, failed to send response", pContext, pContext->fd, sz,
+ writeSz);
}
return writeSz;
}
-int httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast) {
- int remain = 0;
+int32_t httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast) {
+ int32_t remain = 0;
char sLen[24];
uint64_t srcLen = (uint64_t) (buf->lst - buf->buf);
if (buf->pContext->fd <= 0) {
- httpTrace("context:%p, fd:%d, ip:%s, write json body error", buf->pContext, buf->pContext->fd, buf->pContext->ipstr);
+ httpTrace("context:%p, fd:%d, write json body error", buf->pContext, buf->pContext->fd);
buf->pContext->fd = -1;
}
@@ -111,44 +108,44 @@ int httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast) {
* The remote endpoint then decodes the stream by concatenating the chunks and uncompressing the result.
*/
- if (buf->pContext->acceptEncoding == HTTP_COMPRESS_IDENTITY) {
+ if (buf->pContext->parser->acceptEncodingGzip == 0 || !tsHttpEnableCompress) {
if (buf->lst == buf->buf) {
- httpTrace("context:%p, fd:%d, ip:%s, no data need dump", buf->pContext, buf->pContext->fd, buf->pContext->ipstr);
+ httpTrace("context:%p, fd:%d, no data need dump", buf->pContext, buf->pContext->fd);
return 0; // there is no data to dump.
} else {
- int len = sprintf(sLen, "%lx\r\n", srcLen);
- httpTrace("context:%p, fd:%d, ip:%s, write body, chunkSize:%" PRIu64 ", response:\n%s",
- buf->pContext, buf->pContext->fd, buf->pContext->ipstr, srcLen, buf->buf);
+ int32_t len = sprintf(sLen, "%" PRIx64 "\r\n", srcLen);
+ httpTrace("context:%p, fd:%d, write body, chunkSize:%" PRIu64 ", response:\n%s", buf->pContext, buf->pContext->fd,
+ srcLen, buf->buf);
httpWriteBufNoTrace(buf->pContext, sLen, len);
- remain = httpWriteBufNoTrace(buf->pContext, buf->buf, (int) srcLen);
+ remain = httpWriteBufNoTrace(buf->pContext, buf->buf, (int32_t)srcLen);
}
} else {
char compressBuf[JSON_BUFFER_SIZE] = {0};
int32_t compressBufLen = JSON_BUFFER_SIZE;
- int ret = httpGzipCompress(buf->pContext, buf->buf, srcLen, compressBuf, &compressBufLen, isTheLast);
+ int32_t ret = httpGzipCompress(buf->pContext, buf->buf, srcLen, compressBuf, &compressBufLen, isTheLast);
if (ret == 0) {
if (compressBufLen > 0) {
- int len = sprintf(sLen, "%x\r\n", compressBufLen);
- httpTrace("context:%p, fd:%d, ip:%s, write body, chunkSize:%" PRIu64 ", compressSize:%d, last:%d, response:\n%s",
- buf->pContext, buf->pContext->fd, buf->pContext->ipstr, srcLen, compressBufLen, isTheLast, buf->buf);
+ int32_t len = sprintf(sLen, "%x\r\n", compressBufLen);
+ httpTrace("context:%p, fd:%d, write body, chunkSize:%" PRIu64 ", compressSize:%d, last:%d, response:\n%s",
+ buf->pContext, buf->pContext->fd, srcLen, compressBufLen, isTheLast, buf->buf);
httpWriteBufNoTrace(buf->pContext, sLen, len);
- remain = httpWriteBufNoTrace(buf->pContext, (const char *) compressBuf, (int) compressBufLen);
+ remain = httpWriteBufNoTrace(buf->pContext, (const char*)compressBuf, compressBufLen);
} else {
- httpTrace("context:%p, fd:%d, ip:%s, last:%d, compress already dumped, response:\n%s",
- buf->pContext, buf->pContext->fd, buf->pContext->ipstr, isTheLast, buf->buf);
- return 0; // there is no data to dump.
+ httpDebug("context:%p, fd:%d, last:%d, compress already dumped, response:\n%s", buf->pContext,
+ buf->pContext->fd, isTheLast, buf->buf);
+ remain = 0; // there is no data to dump.
}
} else {
- httpError("context:%p, fd:%d, ip:%s, failed to compress data, chunkSize:%" PRIu64 ", last:%d, error:%d, response:\n%s",
- buf->pContext, buf->pContext->fd, buf->pContext->ipstr, srcLen, isTheLast, ret, buf->buf);
- return 0;
+ httpError("context:%p, fd:%d, failed to compress data, chunkSize:%" PRIu64 ", last:%d, error:%d, response:\n%s",
+ buf->pContext, buf->pContext->fd, srcLen, isTheLast, ret, buf->buf);
+ remain = 0;
}
}
httpWriteBufNoTrace(buf->pContext, "\r\n", 2);
- buf->total += (int) (buf->lst - buf->buf);
+ buf->total += (int32_t)(buf->lst - buf->buf);
buf->lst = buf->buf;
- memset(buf->buf, 0, (size_t) buf->size);
+ memset(buf->buf, 0, (size_t)buf->size);
return remain;
}
@@ -158,14 +155,14 @@ void httpWriteJsonBufHead(JsonBuf* buf) {
}
char msg[1024] = {0};
- int len = -1;
+ int32_t len = -1;
- if (buf->pContext->acceptEncoding == HTTP_COMPRESS_IDENTITY) {
- len = sprintf(msg, httpRespTemplate[HTTP_RESPONSE_CHUNKED_UN_COMPRESS], httpVersionStr[buf->pContext->httpVersion],
- httpKeepAliveStr[buf->pContext->httpKeepAlive]);
+ if (buf->pContext->parser->acceptEncodingGzip == 0 || !tsHttpEnableCompress) {
+ len = sprintf(msg, httpRespTemplate[HTTP_RESPONSE_CHUNKED_UN_COMPRESS], httpVersionStr[buf->pContext->parser->httpVersion],
+ httpKeepAliveStr[buf->pContext->parser->keepAlive]);
} else {
- len = sprintf(msg, httpRespTemplate[HTTP_RESPONSE_CHUNKED_COMPRESS], httpVersionStr[buf->pContext->httpVersion],
- httpKeepAliveStr[buf->pContext->httpKeepAlive]);
+ len = sprintf(msg, httpRespTemplate[HTTP_RESPONSE_CHUNKED_COMPRESS], httpVersionStr[buf->pContext->parser->httpVersion],
+ httpKeepAliveStr[buf->pContext->parser->keepAlive]);
}
httpWriteBuf(buf->pContext, (const char*)msg, len);
@@ -173,7 +170,7 @@ void httpWriteJsonBufHead(JsonBuf* buf) {
void httpWriteJsonBufEnd(JsonBuf* buf) {
if (buf->pContext->fd <= 0) {
- httpTrace("context:%p, fd:%d, ip:%s, json buf fd is 0", buf->pContext, buf->pContext->fd, buf->pContext->ipstr);
+ httpTrace("context:%p, fd:%d, json buf fd is 0", buf->pContext, buf->pContext->fd);
buf->pContext->fd = -1;
}
@@ -188,11 +185,11 @@ void httpInitJsonBuf(JsonBuf* buf, struct HttpContext* pContext) {
buf->pContext = pContext;
memset(buf->lst, 0, JSON_BUFFER_SIZE);
- if (pContext->acceptEncoding == HTTP_COMPRESS_GZIP) {
+ if (pContext->parser->acceptEncodingGzip == 1 && tsHttpEnableCompress) {
httpGzipCompressInit(buf->pContext);
}
- httpDebug("context:%p, fd:%d, ip:%s, json buffer initialized", buf->pContext, buf->pContext->fd, buf->pContext->ipstr);
+ httpTrace("context:%p, fd:%d, json buffer initialized", buf->pContext, buf->pContext->fd);
}
void httpJsonItemToken(JsonBuf* buf) {
@@ -203,19 +200,19 @@ void httpJsonItemToken(JsonBuf* buf) {
if (buf->lst > buf->buf) httpJsonToken(buf, JsonItmTkn);
}
-void httpJsonString(JsonBuf* buf, char* sVal, int len) {
+void httpJsonString(JsonBuf* buf, char* sVal, int32_t len) {
httpJsonItemToken(buf);
httpJsonToken(buf, JsonStrStt);
httpJsonPrint(buf, sVal, len);
httpJsonToken(buf, JsonStrEnd);
}
-void httpJsonOriginString(JsonBuf* buf, char* sVal, int len) {
+void httpJsonOriginString(JsonBuf* buf, char* sVal, int32_t len) {
httpJsonItemToken(buf);
httpJsonPrint(buf, sVal, len);
}
-void httpJsonStringForTransMean(JsonBuf* buf, char* sVal, int maxLen) {
+void httpJsonStringForTransMean(JsonBuf* buf, char* sVal, int32_t maxLen) {
httpJsonItemToken(buf);
httpJsonToken(buf, JsonStrStt);
@@ -224,18 +221,18 @@ void httpJsonStringForTransMean(JsonBuf* buf, char* sVal, int maxLen) {
char* lastPos = sVal;
char* curPos = sVal;
- for (int i = 0; i < maxLen; ++i) {
+ for (int32_t i = 0; i < maxLen; ++i) {
if (*curPos == 0) {
break;
}
if (*curPos == '\"') {
- httpJsonPrint(buf, lastPos, (int)(curPos - lastPos));
+ httpJsonPrint(buf, lastPos, (int32_t)(curPos - lastPos));
curPos++;
lastPos = curPos;
httpJsonPrint(buf, "\\\"", 2);
} else if (*curPos == '\\') {
- httpJsonPrint(buf, lastPos, (int)(curPos - lastPos));
+ httpJsonPrint(buf, lastPos, (int32_t)(curPos - lastPos));
curPos++;
lastPos = curPos;
httpJsonPrint(buf, "\\\\", 2);
@@ -245,7 +242,7 @@ void httpJsonStringForTransMean(JsonBuf* buf, char* sVal, int maxLen) {
}
if (*lastPos) {
- httpJsonPrint(buf, lastPos, (int)(curPos - lastPos));
+ httpJsonPrint(buf, lastPos, (int32_t)(curPos - lastPos));
}
}
@@ -261,18 +258,18 @@ void httpJsonInt64(JsonBuf* buf, int64_t num) {
void httpJsonTimestamp(JsonBuf* buf, int64_t t, bool us) {
char ts[35] = {0};
struct tm *ptm;
- int precision = 1000;
+ int32_t precision = 1000;
if (us) {
precision = 1000000;
}
time_t tt = t / precision;
ptm = localtime(&tt);
- int length = (int) strftime(ts, 35, "%Y-%m-%d %H:%M:%S", ptm);
+ int32_t length = (int32_t) strftime(ts, 35, "%Y-%m-%d %H:%M:%S", ptm);
if (us) {
- length += snprintf(ts + length, 8, ".%06ld", t % precision);
+ length += snprintf(ts + length, 8, ".%06" PRId64, t % precision);
} else {
- length += snprintf(ts + length, 5, ".%03ld", t % precision);
+ length += snprintf(ts + length, 5, ".%03" PRId64, t % precision);
}
httpJsonString(buf, ts, length);
@@ -281,25 +278,25 @@ void httpJsonTimestamp(JsonBuf* buf, int64_t t, bool us) {
void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, bool us) {
char ts[40] = {0};
struct tm *ptm;
- int precision = 1000;
+ int32_t precision = 1000;
if (us) {
precision = 1000000;
}
time_t tt = t / precision;
ptm = localtime(&tt);
- int length = (int) strftime(ts, 40, "%Y-%m-%dT%H:%M:%S", ptm);
+ int32_t length = (int32_t)strftime(ts, 40, "%Y-%m-%dT%H:%M:%S", ptm);
if (us) {
- length += snprintf(ts + length, 8, ".%06ld", t % precision);
+ length += snprintf(ts + length, 8, ".%06" PRId64, t % precision);
} else {
- length += snprintf(ts + length, 5, ".%03ld", t % precision);
+ length += snprintf(ts + length, 5, ".%03" PRId64, t % precision);
}
- length += (int) strftime(ts + length, 40 - length, "%z", ptm);
+ length += (int32_t)strftime(ts + length, 40 - length, "%z", ptm);
httpJsonString(buf, ts, length);
}
-void httpJsonInt(JsonBuf* buf, int num) {
+void httpJsonInt(JsonBuf* buf, int32_t num) {
httpJsonItemToken(buf);
httpJsonTestBuf(buf, MAX_NUM_STR_SZ);
buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%d", num);
@@ -331,65 +328,65 @@ void httpJsonDouble(JsonBuf* buf, double num) {
void httpJsonNull(JsonBuf* buf) { httpJsonString(buf, "null", 4); }
-void httpJsonBool(JsonBuf* buf, int val) {
+void httpJsonBool(JsonBuf* buf, int32_t val) {
if (val == 0)
httpJsonPrint(buf, JsonFalseTkn, sizeof(JsonFalseTkn));
else
httpJsonPrint(buf, JsonTrueTkn, sizeof(JsonTrueTkn));
}
-void httpJsonPairHead(JsonBuf* buf, char* name, int len) {
+void httpJsonPairHead(JsonBuf* buf, char* name, int32_t len) {
httpJsonItemToken(buf);
httpJsonString(buf, name, len);
httpJsonToken(buf, JsonPairTkn);
}
-void httpJsonPair(JsonBuf* buf, char* name, int nameLen, char* sVal, int valLen) {
+void httpJsonPair(JsonBuf* buf, char* name, int32_t nameLen, char* sVal, int32_t valLen) {
httpJsonPairHead(buf, name, nameLen);
httpJsonString(buf, sVal, valLen);
}
-void httpJsonPairOriginString(JsonBuf* buf, char* name, int nameLen, char* sVal, int valLen) {
+void httpJsonPairOriginString(JsonBuf* buf, char* name, int32_t nameLen, char* sVal, int32_t valLen) {
httpJsonPairHead(buf, name, nameLen);
httpJsonOriginString(buf, sVal, valLen);
}
-void httpJsonPairIntVal(JsonBuf* buf, char* name, int nNameLen, int num) {
+void httpJsonPairIntVal(JsonBuf* buf, char* name, int32_t nNameLen, int32_t num) {
httpJsonPairHead(buf, name, nNameLen);
httpJsonInt(buf, num);
}
-void httpJsonPairInt64Val(JsonBuf* buf, char* name, int nNameLen, int64_t num) {
+void httpJsonPairInt64Val(JsonBuf* buf, char* name, int32_t nNameLen, int64_t num) {
httpJsonPairHead(buf, name, nNameLen);
httpJsonInt64(buf, num);
}
-void httpJsonPairBoolVal(JsonBuf* buf, char* name, int nNameLen, int num) {
+void httpJsonPairBoolVal(JsonBuf* buf, char* name, int32_t nNameLen, int32_t num) {
httpJsonPairHead(buf, name, nNameLen);
httpJsonBool(buf, num);
}
-void httpJsonPairFloatVal(JsonBuf* buf, char* name, int nNameLen, float num) {
+void httpJsonPairFloatVal(JsonBuf* buf, char* name, int32_t nNameLen, float num) {
httpJsonPairHead(buf, name, nNameLen);
httpJsonFloat(buf, num);
}
-void httpJsonPairDoubleVal(JsonBuf* buf, char* name, int nNameLen, double num) {
+void httpJsonPairDoubleVal(JsonBuf* buf, char* name, int32_t nNameLen, double num) {
httpJsonPairHead(buf, name, nNameLen);
httpJsonDouble(buf, num);
}
-void httpJsonPairNullVal(JsonBuf* buf, char* name, int nNameLen) {
+void httpJsonPairNullVal(JsonBuf* buf, char* name, int32_t nNameLen) {
httpJsonPairHead(buf, name, nNameLen);
httpJsonNull(buf);
}
-void httpJsonPairArray(JsonBuf* buf, char* name, int len, httpJsonBuilder fnBuilder, void* dsHandle) {
+void httpJsonPairArray(JsonBuf* buf, char* name, int32_t len, httpJsonBuilder fnBuilder, void* dsHandle) {
httpJsonPairHead(buf, name, len);
httpJsonArray(buf, fnBuilder, dsHandle);
}
-void httpJsonPairObject(JsonBuf* buf, char* name, int len, httpJsonBuilder fnBuilder, void* dsHandle) {
+void httpJsonPairObject(JsonBuf* buf, char* name, int32_t len, httpJsonBuilder fnBuilder, void* dsHandle) {
httpJsonPairHead(buf, name, len);
httpJsonObject(buf, fnBuilder, dsHandle);
}
@@ -408,7 +405,7 @@ void httpJsonArray(JsonBuf* buf, httpJsonBuilder fnBuilder, void* jsonHandle) {
httpJsonToken(buf, JsonArrEnd);
}
-void httpJsonTestBuf(JsonBuf* buf, int safety) {
+void httpJsonTestBuf(JsonBuf* buf, int32_t safety) {
if ((buf->lst - buf->buf + safety) < buf->size) return;
// buf->slot = *buf->lst;
httpWriteJsonBufBody(buf, false);
@@ -419,7 +416,7 @@ void httpJsonToken(JsonBuf* buf, char c) {
*buf->lst++ = c;
}
-void httpJsonPrint(JsonBuf* buf, const char* json, int len) {
+void httpJsonPrint(JsonBuf* buf, const char* json, int32_t len) {
if (len == 0 || len >= JSON_BUFFER_SIZE) {
return;
}
@@ -435,7 +432,7 @@ void httpJsonPrint(JsonBuf* buf, const char* json, int len) {
buf->lst += len;
}
-void httpJsonPairStatus(JsonBuf* buf, int code) {
+void httpJsonPairStatus(JsonBuf* buf, int32_t code) {
if (code == 0) {
httpJsonPair(buf, "status", 6, "succ", 4);
} else {
@@ -448,7 +445,7 @@ void httpJsonPairStatus(JsonBuf* buf, int code) {
} else if (code == TSDB_CODE_MND_INVALID_TABLE_NAME) {
httpJsonPair(buf, "desc", 4, "failed to create table", 22);
} else {
- httpJsonPair(buf, "desc", 4, (char*)tstrerror(code), (int)strlen(tstrerror(code)));
+ httpJsonPair(buf, "desc", 4, (char*)tstrerror(code), (int32_t)strlen(tstrerror(code)));
}
}
}
diff --git a/src/plugins/http/src/httpParser.c b/src/plugins/http/src/httpParser.c
new file mode 100644
index 0000000000..b844834537
--- /dev/null
+++ b/src/plugins/http/src/httpParser.c
@@ -0,0 +1,1139 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define _DEFAULT_SOURCE
+#include "os.h"
+#include "taoserror.h"
+#include "httpLog.h"
+#include "httpContext.h"
+#include "httpParser.h"
+#include "httpGzip.h"
+#include "httpAuth.h"
+
+static void httpOnData(ehttp_gzip_t *gzip, void *arg, const char *buf, int32_t len);
+
+static HttpStatus httpStatusCodes[] = {
+ {100, "Continue"},
+ {101, "Switching Protocol"},
+ {102, "Processing (WebDAV)"},
+ {103, "Early Hints"},
+ {200, "OK"},
+ {201, "Created"},
+ {202, "Accepted"},
+ {203, "Non-Authoritative Information"},
+ {204, "No Content"},
+ {205, "Reset Content"},
+ {206, "Partial Content"},
+ {207, "Multi-Status (WebDAV)"},
+ {208, "Already Reported (WebDAV)"},
+ {226, "IM Used (HTTP Delta encoding)"},
+ {300, "Multiple Choice"},
+ {301, "Moved Permanently"},
+ {302, "Found"},
+ {303, "See Other"},
+ {304, "Not Modified"},
+ {305, "Use Proxy"},
+ {306, "unused"},
+ {307, "Temporary Redirect"},
+ {308, "Permanent Redirect"},
+ {400, "Bad Request"},
+ {401, "Unauthorized"},
+ {402, "Payment Required"},
+ {403, "Forbidden"},
+ {404, "Not Found"},
+ {405, "Method Not Allowed"},
+ {406, "Not Acceptable"},
+ {407, "Proxy Authentication Required"},
+ {408, "Request Timeout"},
+ {409, "Conflict"},
+ {410, "Gone"},
+ {411, "Length Required"},
+ {412, "Precondition Failed"},
+ {413, "Payload Too Large"},
+ {414, "URI Too Long"},
+ {415, "Unsupported Media Type"},
+ {416, "Range Not Satisfiable"},
+ {417, "Expectation Failed"},
+ {418, "I'm a teapot"},
+ {421, "Misdirected Request"},
+ {422, "Unprocessable Entity (WebDAV)"},
+ {423, "Locked (WebDAV)"},
+ {424, "Failed Dependency (WebDAV)"},
+ {425, "Too Early"},
+ {426, "Upgrade Required"},
+ {428, "Precondition Required"},
+ {429, "Too Many Requests"},
+ {431, "Request Header Fields Too Large"},
+ {451, "Unavailable For Legal Reasons"},
+ {500, "Internal Server Error"},
+ {501, "Not Implemented"},
+ {502, "Bad Gateway"},
+ {503, "Service Unavailable"},
+ {504, "Gateway Timeout"},
+ {505, "HTTP Version Not Supported"},
+ {506, "Variant Also Negotiates"},
+ {507, "Insufficient Storage"},
+ {508, "Loop Detected (WebDAV)"},
+ {510, "Not Extended"},
+ {511, "Network Authentication Required"},
+ {0, NULL}
+};
+
+// Map an HTTP status code to its reason phrase.
+// Returns a pointer into the static httpStatusCodes table, or a fallback
+// string for unknown codes. The returned string must not be freed.
+char *httpGetStatusDesc(int32_t statusCode) {
+ HttpStatus *p = httpStatusCodes;
+ while (p->code != 0) {
+ if (p->code == statusCode) return p->desc;
+ ++p;
+ }
+ return "Unknown status code"; // fixed typo: was "Unknow status code"
+}
+
+// Release the heap buffer owned by str and reset it to the empty state.
+static void httpCleanupString(HttpString *str) {
+ if (str->str != NULL) {
+ free(str->str);
+ str->str = NULL;
+ }
+ str->size = 0;
+ str->pos = 0;
+}
+
+// Append len bytes of s to str, growing the buffer as needed and keeping the
+// contents NUL-terminated. Returns 0 on success, -1 on allocation failure;
+// on failure the previous contents are left intact (no leak, no dangling ptr).
+static int32_t httpAppendString(HttpString *str, const char *s, int32_t len) {
+ if (str->size == 0) {
+ str->pos = 0;
+ str->size = 64;
+ str->str = malloc(str->size);
+ if (str->str == NULL) {
+ str->size = 0; // keep the struct consistent for a later retry
+ return -1;
+ }
+ } else if (str->pos + len + 1 >= str->size) {
+ // grow geometrically so repeated appends stay amortized O(1)
+ int32_t newSize = (str->size + len) * 4;
+ char *newStr = realloc(str->str, newSize);
+ // on realloc failure the old block is still valid; the original code
+ // overwrote str->str with NULL and leaked it
+ if (newStr == NULL) return -1;
+ str->str = newStr;
+ str->size = newSize;
+ } else {
+ }
+
+ memcpy(str->str + str->pos, s, len);
+ str->pos += len;
+ str->str[str->pos] = 0;
+ return 0;
+}
+
+// Reset str to an empty string without releasing its buffer.
+static void httpClearString(HttpString *str) {
+ if (str->str == NULL) return;
+ str->pos = 0;
+ str->str[0] = '\0';
+}
+
+// Record a parse failure: remember the HTTP status code and the internal
+// parse code (a zero argument means "keep the previous value") and log it.
+// Always returns 0; callers decide whether to abort.
+static int32_t httpOnError(HttpParser *parser, int32_t httpCode, int32_t parseCode) {
+ HttpContext *pContext = parser->pContext;
+ if (httpCode != 0) parser->httpCode = httpCode;
+ if (parseCode != 0) parser->parseCode = parseCode;
+
+ httpError("context:%p, fd:%d, parse failed, httpCode:%d parseCode:%d reason:%s", pContext, pContext->fd, httpCode,
+ parseCode & 0XFFFF, tstrerror(parseCode));
+ return 0;
+}
+
+// Called once the request line (method, target, version) is complete.
+// Splits the URL target into up to HTTP_MAX_URL path segments, selects the
+// decode method registered for the first segment, and validates the HTTP
+// version. Returns 0 on success, -1 if no module handles the URL.
+static int32_t httpOnRequestLine(HttpParser *pParser, char *method, char *target, char *version) {
+ HttpContext *pContext = pParser->pContext;
+ httpDebug("context:%p, fd:%d, method:%s target:%s version:%s", pContext, pContext->fd, method, target, version);
+
+ // parse url: split "/a/b/c" on '/' into path segments
+ char *pStart = target + 1;
+ for (int32_t i = 0; i < HTTP_MAX_URL; i++) {
+ char *pSeek = strchr(pStart, '/');
+ if (pSeek == NULL) {
+ (void)httpAppendString(pParser->path + i, pStart, strlen(pStart));
+ break;
+ } else {
+ (void)httpAppendString(pParser->path + i, pStart, (int32_t)(pSeek - pStart));
+ }
+ pStart = pSeek + 1;
+ }
+
+ // parse decode method. The loop variable is named decodeMethod so it no
+ // longer shadows the 'method' parameter (the request method string).
+ for (int32_t i = 0; i < tsHttpServer.methodScannerLen; i++) {
+ HttpDecodeMethod *decodeMethod = tsHttpServer.methodScanner[i];
+ if (strcmp(decodeMethod->module, pParser->path[0].str) == 0) {
+ pContext->decodeMethod = decodeMethod;
+ break;
+ }
+ }
+
+ if (pContext->decodeMethod != NULL) {
+ httpTrace("context:%p, fd:%d, decode method is %s", pContext, pContext->fd, pContext->decodeMethod->module);
+ } else {
+ httpError("context:%p, fd:%d, the url is not support, target:%s", pContext, pContext->fd, target);
+ httpOnError(pParser, 0, TSDB_CODE_HTTP_UNSUPPORT_URL);
+ return -1;
+ }
+
+ // parse version: only 1.0 - 1.2 accepted; others are logged but parsing
+ // still continues (non-fatal, matching the original behavior)
+ if (pParser->httpVersion < HTTP_VERSION_10 || pParser->httpVersion > HTTP_VERSION_12) {
+ httpError("context:%p, fd:%d, unsupport httpVersion %d", pContext, pContext->fd, pParser->httpVersion);
+ httpOnError(pParser, 0, TSDB_CODE_HTTP_INVALID_VERSION);
+ } else {
+ httpTrace("context:%p, fd:%d, httpVersion:1.%d", pContext, pContext->fd, pParser->httpVersion);
+ }
+
+ return 0;
+}
+
+// Invoked when a response status line is parsed. The server does not expect
+// to receive responses, so this is logged at error level. Always returns 0.
+static int32_t httpOnStatusLine(HttpParser *pParser, int32_t code, const char *reason) {
+ HttpContext *pContext = pParser->pContext;
+ httpError("context:%p, fd:%d, status line, code:%d reason:%s", pContext, pContext->fd, code, reason);
+ return 0;
+}
+
+// Handle one parsed "key: value" header pair. Recognizes Content-Length,
+// Accept-Encoding, Connection, Transfer-Encoding/Content-Encoding and
+// Authorization; all other headers are silently ignored.
+// Returns 0 on success, -1 on a malformed header or failed authorization
+// (the error has already been recorded via httpOnError).
+static int32_t httpOnParseHeaderField(HttpParser *parser, const char *key, const char *val) {
+ HttpContext *pContext = parser->pContext;
+ httpTrace("context:%p, fd:%d, key:%s val:%s", pContext, pContext->fd, key, val);
+
+ if (0 == strcasecmp(key, "Content-Length")) {
+ int32_t len = 0;
+ int32_t bytes = 0;
+ int32_t n = sscanf(val, "%d%n", &len, &bytes);
+ // require the whole value to be a single decimal number
+ if (n == 1 && bytes == strlen(val)) {
+ parser->contentLength = len;
+ parser->chunkSize = len;
+ parser->contentLengthSpecified = 1;
+ httpTrace("context:%p, fd:%d, contentLength:%d chunkSize:%d contentLengthSpecified:%d", pContext, pContext->fd,
+ parser->contentLength, parser->chunkSize, parser->contentLengthSpecified);
+ return 0;
+ } else {
+ httpError("context:%p, fd:%d, failed to parser %s:%s", pContext, pContext->fd, key, val);
+ httpOnError(parser, 0, TSDB_CODE_HTTP_INVALID_CONTENT_LENGTH);
+ return -1;
+ }
+ }
+
+ else if (0 == strcasecmp(key, "Accept-Encoding")) {
+ if (strstr(val, "gzip")) {
+ parser->acceptEncodingGzip = 1;
+ httpTrace("context:%p, fd:%d, acceptEncodingGzip:%d", pContext, pContext->fd, parser->acceptEncodingGzip);
+ }
+ if (strstr(val, "chunked")) {
+ parser->acceptEncodingChunked = 1;
+ httpTrace("context:%p, fd:%d, acceptEncodingChunked:%d", pContext, pContext->fd, parser->acceptEncodingChunked);
+ }
+ return 0;
+ }
+
+ // BUG FIX: the key produced by httpParserOnHeaderKey never contains ": "
+ // (parsing stops at the colon), so the old strncasecmp(key, "Connection: ", 12)
+ // could never match and keep-alive negotiation was dead code.
+ else if (0 == strcasecmp(key, "Connection")) {
+ if (strncasecmp(val, "Keep-Alive", 10) == 0) {
+ parser->keepAlive = HTTP_KEEPALIVE_ENABLE;
+ } else {
+ parser->keepAlive = HTTP_KEEPALIVE_DISABLE;
+ }
+ httpTrace("context:%p, fd:%d, keepAlive:%d", pContext, pContext->fd, pContext->parser->keepAlive);
+ }
+
+#if 0
+ else if (0 == strcasecmp(key, "Content-Encoding")) {
+ if (0 == strcmp(val, "gzip")) {
+ parser->contentChunked = 1;
+ httpTrace("context:%p, fd:%d, contentChunked:%d", pContext, pContext->fd, parser->contentChunked);
+ }
+ return 0;
+ }
+ #endif
+
+ else if (0 == strcasecmp(key, "Transfer-Encoding") || 0 == strcasecmp(key, "Content-Encoding")) {
+ if (strstr(val, "gzip")) {
+ parser->transferGzip = 1;
+ ehttp_gzip_conf_t conf = {0};
+ ehttp_gzip_callbacks_t callbacks = {0};
+
+ // decompressed data is routed back into the body buffer via httpOnData
+ callbacks.on_data = httpOnData;
+
+ parser->gzip = ehttp_gzip_create_decompressor(conf, callbacks, parser);
+
+ if (!parser->gzip) {
+ httpError("context:%p, fd:%d, failed to create gzip decompressor", pContext, pContext->fd);
+ httpOnError(parser, 0, TSDB_CODE_HTTP_CREATE_GZIP_FAILED);
+ return -1;
+ }
+ }
+ if (strstr(val, "chunked")) {
+ parser->transferChunked = 1;
+ httpTrace("context:%p, fd:%d, transferChunked:%d", pContext, pContext->fd, parser->transferChunked);
+ }
+ return 0;
+ }
+
+ else if (0 == strcasecmp(key, "Authorization")) {
+ char * t = NULL;
+ char * s = NULL;
+ int32_t bytes = 0;
+ // %ms makes sscanf malloc the strings; both must be freed below
+ int32_t n = sscanf(val, "%ms %ms%n", &t, &s, &bytes);
+ if (n == 2 && t && s && bytes == strlen(val)) {
+ if (strcmp(t, "Basic") == 0) {
+ free(parser->authContent);
+ parser->authContent = s; // ownership moves to the parser
+ parser->authType = HTTP_BASIC_AUTH;
+ s = NULL;
+ free(t);
+ free(s);
+ httpTrace("context:%p, fd:%d, basic auth:%s", pContext, pContext->fd, parser->authContent);
+ int32_t ok = httpParseBasicAuthToken(pContext, parser->authContent, strlen(parser->authContent));
+ if (ok != 0) {
+ httpOnError(parser, 0, TSDB_CODE_HTTP_INVALID_BASIC_AUTH);
+ return -1;
+ }
+ return 0;
+ } else if (strcmp(t, "Taosd") == 0) {
+ free(parser->authContent);
+ parser->authContent = s; // ownership moves to the parser
+ parser->authType = HTTP_TAOSD_AUTH;
+ s = NULL;
+ free(t);
+ free(s);
+ httpTrace("context:%p, fd:%d, taosd auth:%s", pContext, pContext->fd, parser->authContent);
+ int32_t ok = httpParseTaosdAuthToken(pContext, parser->authContent, strlen(parser->authContent));
+ if (ok != 0) {
+ httpOnError(parser, 0, TSDB_CODE_HTTP_INVALID_TAOSD_AUTH);
+ return -1;
+ }
+ return 0;
+ } else {
+ parser->authType = HTTP_INVALID_AUTH;
+ httpError("context:%p, fd:%d, invalid auth, t:%s s:%s", pContext, pContext->fd, t, s);
+ httpOnError(parser, 0, TSDB_CODE_HTTP_INVALID_AUTH_TYPE);
+ free(t);
+ free(s);
+ return -1;
+ }
+ } else {
+ parser->authType = HTTP_INVALID_AUTH;
+ httpError("context:%p, fd:%d, parse auth failed, t:%s s:%s", pContext, pContext->fd, t, s);
+ httpOnError(parser, 0, TSDB_CODE_HTTP_INVALID_AUTH_FORMAT);
+ free(t);
+ free(s);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+// Accumulate a decoded body chunk into parser->body, growing the buffer up to
+// HTTP_BUFFER_SIZE. Returns 0 on success, -1 on overflow or allocation
+// failure (error recorded via httpOnError).
+static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) {
+ HttpContext *pContext = parser->pContext;
+ HttpString * buf = &parser->body;
+ if (parser->parseCode != TSDB_CODE_SUCCESS) return -1;
+
+ if (buf->size <= 0) {
+ buf->size = MIN(len + 2, HTTP_BUFFER_SIZE);
+ buf->str = malloc(buf->size);
+ // the original code left this malloc unchecked and crashed in memcpy
+ if (buf->str == NULL) {
+ httpError("context:%p, fd:%d, failed parse body, malloc %d failed", pContext, pContext->fd, buf->size);
+ buf->size = 0;
+ httpOnError(parser, 0, TSDB_CODE_HTTP_NO_ENOUGH_MEMORY);
+ return -1;
+ }
+ }
+
+ int32_t newSize = buf->pos + len + 1;
+ if (newSize >= buf->size) {
+ if (buf->size >= HTTP_BUFFER_SIZE) {
+ httpError("context:%p, fd:%d, failed parse body, exceeding buffer size %d", pContext, pContext->fd, buf->size);
+ httpOnError(parser, 0, TSDB_CODE_HTTP_REQUSET_TOO_BIG);
+ return -1;
+ }
+
+ newSize = MAX(newSize, HTTP_BUFFER_INIT);
+ newSize *= 4;
+ newSize = MIN(newSize, HTTP_BUFFER_SIZE);
+ // use a temporary so a failed realloc does not leak the old buffer or
+ // leave buf->size describing memory we do not own
+ char *newStr = realloc(buf->str, newSize);
+ if (newStr == NULL) {
+ httpError("context:%p, fd:%d, failed parse body, realloc %d failed", pContext, pContext->fd, newSize);
+ httpOnError(parser, 0, TSDB_CODE_HTTP_NO_ENOUGH_MEMORY);
+ return -1;
+ }
+ buf->str = newStr;
+ buf->size = newSize;
+ }
+
+ memcpy(buf->str + buf->pos, chunk, len);
+ buf->pos += len;
+ buf->str[buf->pos] = 0;
+
+ return 0;
+}
+
+static int32_t httpOnEnd(HttpParser *parser) {
+ HttpContext *pContext = parser->pContext;
+ parser->parsed = true;
+
+ if (parser->parseCode != TSDB_CODE_SUCCESS) {
+ return -1;
+ }
+
+ httpTrace("context:%p, fd:%d, parse success", pContext, pContext->fd);
+ return 0;
+}
+
+// Return the parser state on top of the stack without popping it.
+// Precondition: the stack is non-empty (enforced by ASSERT).
+static HTTP_PARSER_STATE httpTopStack(HttpParser *parser) {
+ HttpStack *stack = &parser->stacks;
+ ASSERT(stack->pos >= 1);
+
+ return stack->stacks[stack->pos - 1];
+}
+
+// Push a parser state onto the state stack, growing the backing array on
+// demand (initial capacity 32, then doubling). Returns 0 on success, -1 on
+// allocation failure; on failure the existing stack is left intact.
+static int32_t httpPushStack(HttpParser *parser, HTTP_PARSER_STATE state) {
+ HttpStack *stack = &parser->stacks;
+ if (stack->size == 0) {
+ stack->pos = 0;
+ stack->size = 32;
+ stack->stacks = malloc(stack->size * sizeof(int8_t));
+ if (stack->stacks == NULL) {
+ stack->size = 0;
+ return -1;
+ }
+ } else if (stack->pos + 1 > stack->size) {
+ int32_t newSize = stack->size * 2;
+ // use a temporary so a failed realloc does not leak the old array
+ // (the original overwrote stack->stacks with NULL and leaked it)
+ int8_t *newStacks = realloc(stack->stacks, newSize * sizeof(int8_t));
+ if (newStacks == NULL) return -1;
+ stack->stacks = newStacks;
+ stack->size = newSize;
+ } else {
+ }
+
+ stack->stacks[stack->pos] = state;
+ stack->pos++;
+
+ return 0;
+}
+
+// Pop the top parser state. Precondition: the stack is non-empty.
+// Always returns 0.
+static int32_t httpPopStack(HttpParser *parser) {
+ HttpStack *stack = &parser->stacks;
+ ASSERT(stack->pos >= 1);
+ stack->pos--;
+ return 0;
+}
+
+// Empty the state stack without releasing its storage.
+static void httpClearStack(HttpStack *stack) {
+ stack->pos = 0;
+}
+
+// Free the state-stack storage and zero all bookkeeping fields.
+// Always returns 0.
+static int32_t httpCleanupStack(HttpStack *stack) {
+ if (stack->stacks != NULL) {
+ free(stack->stacks);
+ }
+ memset(stack, 0, sizeof(HttpStack));
+ return 0;
+}
+
+// Reset a parser for a fresh request on the same context: clear all flags
+// and counters, release per-request strings, empty (but keep) the buffers,
+// destroy any gzip decompressor, and seed the state stack with BEGIN.
+void httpInitParser(HttpParser *parser) {
+ HttpContext *pContext = parser->pContext;
+ httpTrace("context:%p, fd:%d, init parser", pContext, pContext->fd);
+
+ parser->parsed = false;
+ parser->inited = 1;
+ parser->httpVersion = 0;
+ parser->acceptEncodingGzip = 0;
+ parser->acceptEncodingChunked = 0;
+ parser->contentLengthSpecified = 0;
+ parser->contentChunked = 0;
+ parser->transferGzip = 0;
+ parser->transferChunked = 0;
+ parser->keepAlive = 0;
+ parser->authType = 0;
+ parser->contentLength = 0;
+ parser->chunkSize = 0;
+ parser->receivedChunkSize = 0;
+ parser->receivedSize = 0;
+ parser->statusCode = 0;
+ parser->httpCode = 0;
+ parser->parseCode = 0;
+
+ // free(NULL) is safe, so these are valid even on first use
+ free(parser->method); parser->method = NULL;
+ free(parser->target); parser->target = NULL;
+ free(parser->version); parser->version = NULL;
+ free(parser->reasonPhrase); parser->reasonPhrase = NULL;
+ free(parser->key); parser->key = NULL;
+ free(parser->val); parser->val = NULL;
+ free(parser->authContent); parser->authContent = NULL;
+
+ // buffers are cleared, not freed, so their capacity is reused
+ httpClearStack(&parser->stacks);
+ httpClearString(&parser->str);
+ httpClearString(&parser->body);
+ for (int32_t i = 0; i < HTTP_MAX_URL; ++i) {
+ httpClearString(&parser->path[i]);
+ }
+
+ if (parser->gzip != NULL) {
+ ehttp_gzip_destroy(parser->gzip);
+ parser->gzip = NULL;
+ }
+
+ httpPushStack(parser, HTTP_PARSER_BEGIN);
+}
+
+// Allocate a zero-initialized parser bound to pContext.
+// Returns NULL on allocation failure; caller frees with httpDestroyParser.
+HttpParser *httpCreateParser(HttpContext *pContext) {
+ HttpParser *parser = calloc(1, sizeof(HttpParser));
+ if (!parser) return NULL;
+ httpTrace("context:%p, fd:%d, create parser", pContext, pContext->fd);
+
+ parser->pContext = pContext;
+ return parser;
+}
+
+// Mark the context's parser as uninitialized/unparsed so the connection can
+// be reused. NOTE(review): writes go through pContext->parser rather than
+// the 'parser' argument — presumably the same object; confirm with callers.
+void httpClearParser(HttpParser *parser) {
+ HttpContext *pContext = parser->pContext;
+ httpTrace("context:%p, fd:%d, clear parser", pContext, pContext->fd);
+
+ pContext->parser->inited = 0;
+ pContext->parser->parsed = false;
+}
+
+// Release everything a parser owns (request strings, buffers, state stack,
+// gzip decompressor) and the parser itself. Safe to call with NULL.
+void httpDestroyParser(HttpParser *parser) {
+ if (!parser) return;
+
+ HttpContext *pContext = parser->pContext;
+ httpTrace("context:%p, fd:%d, destroy parser", pContext, pContext->fd);
+
+ free(parser->method); parser->method = NULL;
+ free(parser->target); parser->target = NULL;
+ free(parser->version); parser->version = NULL;
+ free(parser->reasonPhrase); parser->reasonPhrase = NULL;
+ free(parser->key); parser->key = NULL;
+ free(parser->val); parser->val = NULL;
+ free(parser->authContent); parser->authContent = NULL;
+
+ httpCleanupStack(&parser->stacks);
+ httpCleanupString(&parser->str);
+ httpCleanupString(&parser->body);
+ for (int32_t i = 0; i < HTTP_MAX_URL; ++i) {
+ httpCleanupString(&parser->path[i]);
+ }
+
+ if (parser->gzip != NULL) {
+ ehttp_gzip_destroy(parser->gzip);
+ parser->gzip = NULL;
+ }
+
+ free(parser);
+}
+
+#define is_token(c) (strchr("!#$%&'*+-.^_`|~", c) || isdigit(c) || isalpha(c))
+
+// Decode %XX percent-escapes in enc into a freshly allocated string.
+// Returns a malloc'd decoded copy (caller frees), or NULL if the input is
+// malformed, empty, or allocation fails.
+char *httpDecodeUrl(const char *enc) {
+ int32_t ok = 1;
+ HttpString str = {0};
+ while (*enc) {
+ char *p = strchr(enc, '%');
+ if (!p) break;
+ // initialize cnt: if sscanf fails before %n it is never written,
+ // and the original code then read it uninitialized
+ int32_t hex = 0, cnt = 0;
+ int32_t n = sscanf(p+1, "%2x%n", &hex, &cnt);
+ // BUG FIX: was (n!=1 && cnt!=2), which let malformed escapes such as
+ // "%g" slip through; both conditions must hold for a valid escape
+ if (n != 1 || cnt != 2) { ok = 0; break; }
+ if (httpAppendString(&str, enc, p-enc)) { ok = 0; break; }
+ char c = (char)hex;
+ if (httpAppendString(&str, &c, 1)) { ok = 0; break; }
+ enc = p+3;
+ }
+ char *dec = NULL;
+ if (ok && *enc) {
+ // copy the tail after the last escape
+ if (httpAppendString(&str, enc, strlen(enc))) { ok = 0; }
+ }
+ if (ok) {
+ dec = str.str; // transfer ownership to the caller
+ str.str = NULL;
+ }
+ httpCleanupString(&str);
+ return dec;
+}
+
+// gzip decompressor callback: forward decompressed bytes into the body
+// buffer. The gzip handle itself is unused; arg is the owning HttpParser.
+static void httpOnData(ehttp_gzip_t *gzip, void *arg, const char *buf, int32_t len) {
+ HttpParser *parser = (HttpParser*)arg;
+ httpOnBody(parser, buf, len);
+}
+
+// BEGIN state: accept the first byte of a request method (or of "HTTP" for a
+// response) and switch to REQUEST_OR_RESPONSE. Any other byte is a 400.
+static int32_t httpParserOnBegin(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) {
+ HttpContext *pContext = parser->pContext;
+ int32_t ok = 0;
+ do {
+ // first letters of GET/POST/PUT/HEAD/DELETE/CONNECT/OPTIONS/TRACE/HTTP
+ if (c == 'G' || c == 'P' || c == 'H' || c == 'D' || c == 'C' || c == 'O' || c == 'T') {
+ if (httpAppendString(&parser->str, &c, 1)) {
+ httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_METHOD_FAILED);
+ break;
+ }
+ httpPopStack(parser);
+ httpPushStack(parser, HTTP_PARSER_REQUEST_OR_RESPONSE);
+ break;
+ }
+ httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_METHOD_FAILED);
+ } while (0);
+ return ok;
+}
+
+// Second byte decides request vs response: "HT..." means a status line is
+// coming, anything else is a request line. Either way the full remaining
+// grammar is pushed onto the stack (in reverse order) and the byte is
+// replayed (*again) under the new top state.
+static int32_t httpParserOnRquestOrResponse(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) {
+ HttpContext *pContext = parser->pContext;
+ int32_t ok = 0;
+ do {
+ if (parser->str.pos == 1) {
+ if (c == 'T' && parser->str.str[0] == 'H') {
+ // response: HTTP-version SP status-code SP reason-phrase CRLF headers
+ httpPopStack(parser);
+ httpPushStack(parser, HTTP_PARSER_END);
+ httpPushStack(parser, HTTP_PARSER_HEADER);
+ httpPushStack(parser, HTTP_PARSER_CRLF);
+ httpPushStack(parser, HTTP_PARSER_REASON_PHRASE);
+ httpPushStack(parser, HTTP_PARSER_SP);
+ httpPushStack(parser, HTTP_PARSER_STATUS_CODE);
+ httpPushStack(parser, HTTP_PARSER_SP);
+ httpPushStack(parser, HTTP_PARSER_HTTP_VERSION);
+ *again = 1;
+ break;
+ }
+ // request: method SP target SP HTTP-version CRLF headers
+ httpPopStack(parser);
+ httpPushStack(parser, HTTP_PARSER_END);
+ httpPushStack(parser, HTTP_PARSER_HEADER);
+ httpPushStack(parser, HTTP_PARSER_CRLF);
+ httpPushStack(parser, HTTP_PARSER_HTTP_VERSION);
+ httpPushStack(parser, HTTP_PARSER_SP);
+ httpPushStack(parser, HTTP_PARSER_TARGET);
+ httpPushStack(parser, HTTP_PARSER_SP);
+ httpPushStack(parser, HTTP_PARSER_METHOD);
+ *again = 1;
+ break;
+ }
+
+ httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_METHOD_FAILED);
+ } while (0);
+ return ok;
+}
+
+// METHOD state: accumulate token characters; the first non-token byte ends
+// the method, which is duplicated into parser->method, and the byte is
+// replayed under the next state.
+static int32_t httpParserOnMethod(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) {
+ HttpContext *pContext = parser->pContext;
+ int32_t ok = 0;
+ do {
+ // RFC 7230 token characters
+ if (isalnum(c) || strchr("!#$%&'*+-.^_`|~", c)) {
+ if (httpAppendString(&parser->str, &c, 1)) {
+ httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_METHOD_FAILED);
+ break;
+ }
+ break;
+ }
+ parser->method = strdup(parser->str.str);
+ if (!parser->method) {
+ httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_METHOD_FAILED);
+ break;
+ } else {
+ httpTrace("context:%p, fd:%d, httpMethod:%s", pContext, pContext->fd, parser->method);
+ }
+ httpClearString(&parser->str);
+ httpPopStack(parser);
+ *again = 1;
+ } while (0);
+ return ok;
+}
+
+// TARGET state: accumulate the request target up to the first whitespace,
+// duplicate it into parser->target, then replay the terminator.
+static int32_t httpParserOnTarget(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) {
+ HttpContext *pContext = parser->pContext;
+ int32_t ok = 0;
+ do {
+ if (!isspace(c) && c != '\r' && c != '\n') {
+ if (httpAppendString(&parser->str, &c, 1)) {
+ httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_TARGET_FAILED);
+ break;
+ }
+ break;
+ }
+ parser->target = strdup(parser->str.str);
+ if (!parser->target) {
+ httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_TARGET_FAILED);
+ break;
+ }
+ httpClearString(&parser->str);
+ httpPopStack(parser);
+ *again = 1;
+ } while (0);
+ return ok;
+}
+
+// HTTP_VERSION state: match the literal prefix "HTTP/1." byte by byte, then
+// accept a single minor-version digit 0/1/2. On completion stores the
+// version string, records the enum version, and — for requests — fires
+// httpOnRequestLine (responses have no method yet at this point).
+static int32_t httpParserOnVersion(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) {
+ HttpContext *pContext = parser->pContext;
+ int32_t ok = 0;
+ do {
+ const char *prefix = "HTTP/1.";
+ int32_t len = strlen(prefix);
+ if (parser->str.pos < len) {
+ if (prefix[parser->str.pos] != c) {
+ httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_VERSION_FAILED);
+ break;
+ }
+ if (httpAppendString(&parser->str, &c, 1)) {
+ httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_VERSION_FAILED);
+ break;
+ }
+ break;
+ }
+
+ if (c != '0' && c != '1' && c != '2') {
+ httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_VERSION_FAILED);
+ break;
+ }
+
+ if (httpAppendString(&parser->str, &c, 1)) {
+ httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_VERSION_FAILED);
+ break;
+ }
+
+ if (c == '0')
+ parser->httpVersion = HTTP_VERSION_10;
+ else if (c == '1')
+ parser->httpVersion = HTTP_VERSION_11;
+ else if (c == '2')
+ parser->httpVersion = HTTP_VERSION_12;
+ else {
+ }
+
+ parser->version = strdup(parser->str.str);
+ if (!parser->version) {
+ httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_VERSION_FAILED);
+ break;
+ }
+
+ // method is only set for requests; for responses the line is not complete
+ if (parser->method) {
+ ok = httpOnRequestLine(parser, parser->method, parser->target, parser->version);
+ }
+
+ httpClearString(&parser->str);
+ httpPopStack(parser);
+ } while (0);
+ return ok;
+}
+
+// SP state: consume exactly one mandatory space separator.
+// Returns 0 on a space, -1 (400 Bad Request) on anything else.
+static int32_t httpParserOnSp(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) {
+ HttpContext *pContext = parser->pContext;
+ int32_t ok = 0;
+ do {
+ if (c == ' ') {
+ httpPopStack(parser);
+ break;
+ }
+ // a non-space here is a malformed request, not an out-of-memory
+ // condition: report "unexpected char" with 400 instead of the
+ // copy-pasted "oom"/507 the original used
+ httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_SP_FAILED);
+ } while (0);
+ return ok;
+}
+
+// STATUS_CODE state: collect exactly three digits into parser->statusCode.
+// A non-digit before three digits arrive is a parse error.
+static int32_t httpParserOnStatusCode(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) {
+ HttpContext *pContext = parser->pContext;
+ int32_t ok = 0;
+ do {
+ if (isdigit(c)) {
+ if (httpAppendString(&parser->str, &c, 1)) {
+ httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_STATUS_FAILED);
+ break;
+ }
+ if (parser->str.pos < 3) break;
+
+ sscanf(parser->str.str, "%d", &parser->statusCode);
+ httpClearString(&parser->str);
+ httpPopStack(parser);
+ break;
+ }
+ httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_STATUS_FAILED);
+ } while (0);
+ return ok;
+}
+
+// REASON_PHRASE state: accumulate bytes until '\r', then store the phrase,
+// report the status line, and replay the '\r' under the CRLF state.
+static int32_t httpParserOnReasonPhrase(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) {
+ HttpContext *pContext = parser->pContext;
+ int32_t ok = 0;
+ do {
+ if (c == '\r') {
+ parser->reasonPhrase = strdup(parser->str.str);
+ if (!parser->reasonPhrase) {
+ httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_PHRASE_FAILED);
+ break;
+ }
+ ok = httpOnStatusLine(parser, parser->statusCode, parser->reasonPhrase);
+ httpClearString(&parser->str);
+ httpPopStack(parser);
+ *again = 1;
+ break;
+ }
+ if (httpAppendString(&parser->str, &c, 1)) {
+ httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_PHRASE_FAILED);
+ break;
+ }
+ } while (0);
+ return ok;
+}
+
+// Run once the whole message has been consumed: flush the gzip decompressor
+// (if any) and mark the parse finished. Returns 0 on success, -1 if the
+// gzip stream could not be finalized.
+static int32_t httpParserPostProcess(HttpParser *parser) {
+ HttpContext *pContext = parser->pContext;
+ if (parser->gzip) {
+ if (ehttp_gzip_finish(parser->gzip)) {
+ httpError("context:%p, fd:%d, gzip failed", pContext, pContext->fd);
+ httpOnError(parser, 507, TSDB_CODE_HTTP_FINISH_GZIP_FAILED);
+ return -1;
+ }
+ }
+ httpOnEnd(parser);
+ return 0;
+}
+
+// CRLF state: match the two-byte "\r\n" sequence. When the sequence that
+// completes the message is consumed (END on top of the stack), trigger
+// post-processing.
+static int32_t httpParserOnCrlf(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) {
+ HttpContext *pContext = parser->pContext;
+ int32_t ok = 0;
+ do {
+ const char *s = "\r\n";
+ int32_t len = strlen(s);
+ if (s[parser->str.pos] != c) {
+ httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_CRLF_FAILED);
+ break;
+ }
+ if (httpAppendString(&parser->str, &c, 1)) {
+ httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_CRLF_FAILED);
+ break;
+ }
+ if (parser->str.pos == len) {
+ httpClearString(&parser->str);
+ httpPopStack(parser);
+ if (httpTopStack(parser) == HTTP_PARSER_END) {
+ ok = httpParserPostProcess(parser);
+ }
+ }
+ break;
+ } while (0);
+ return ok;
+}
+
+// HEADER state: '\r' ends the header section (push the body/chunk grammar),
+// a token character starts a new header line (push key/SP/val/CRLF and
+// replay nothing — the char was already buffered), anything else is a 400.
+static int32_t httpParserOnHeader(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) {
+ HttpContext *pContext = parser->pContext;
+ int32_t ok = 0;
+ do {
+ if (c == '\r') {
+ httpPopStack(parser);
+ if (parser->transferChunked) {
+ httpPushStack(parser, HTTP_PARSER_CHUNK_SIZE);
+ httpPushStack(parser, HTTP_PARSER_CRLF);
+ } else {
+ // only expect a body when Content-Length said there is one
+ if (parser->contentLength > 0) {
+ httpPushStack(parser, HTTP_PARSER_CHUNK);
+ }
+ httpPushStack(parser, HTTP_PARSER_CRLF);
+ }
+ *again = 1;
+ break;
+ }
+ if (c != ' ' && c != '\t' && c != ':') {
+ if (httpAppendString(&parser->str, &c, 1)) {
+ httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_HEADER_FAILED);
+ break;
+ }
+ httpPushStack(parser, HTTP_PARSER_CRLF);
+ httpPushStack(parser, HTTP_PARSER_HEADER_VAL);
+ httpPushStack(parser, HTTP_PARSER_SP);
+ httpPushStack(parser, HTTP_PARSER_HEADER_KEY);
+ break;
+ }
+ httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_HEADER_FAILED);
+ } while (0);
+ return ok;
+}
+
+// HEADER_KEY state: accumulate token characters; ':' ends the key (the key
+// stored in parser->key therefore never contains the colon).
+static int32_t httpParserOnHeaderKey(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) {
+ HttpContext *pContext = parser->pContext;
+ int32_t ok = 0;
+ do {
+ if (isalnum(c) || strchr("!#$%&'*+-.^_`|~", c)) {
+ if (httpAppendString(&parser->str, &c, 1)) {
+ httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED);
+ break;
+ }
+ break;
+ }
+ if (c == ':') {
+ parser->key = strdup(parser->str.str);
+ if (!parser->key) {
+ httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED);
+ break;
+ }
+ httpClearString(&parser->str);
+ httpPopStack(parser);
+ break;
+ }
+ httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED);
+ } while (0);
+ return ok;
+}
+
+// HEADER_VAL state: accumulate the value (leading whitespace skipped until
+// the first non-space byte); CR/LF ends it and hands key/val to
+// httpOnParseHeaderField, then replays the terminator.
+static int32_t httpParserOnHeaderVal(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) {
+ HttpContext *pContext = parser->pContext;
+ int32_t ok = 0;
+ do {
+ if (c != '\r' && c != '\n' && (!isspace(c) || parser->str.pos > 0)) {
+ if (httpAppendString(&parser->str, &c, 1)) {
+ httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ parser->parseCode = TSDB_CODE_HTTP_PARSE_HEADER_VAL_FAILED;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_HEADER_VAL_FAILED);
+ break;
+ }
+ break;
+ }
+ const char *val = parser->str.str;
+ ok = httpOnParseHeaderField(parser, parser->key, val);
+ free(parser->key);
+ parser->key = NULL;
+ val = NULL;
+ if (ok == -1) break;
+ httpClearString(&parser->str);
+ httpPopStack(parser);
+ *again = 1;
+ } while (0);
+ return ok;
+}
+
+// CHUNK_SIZE state: accumulate hex digits; on '\r' parse the chunk size.
+// Size 0 means the final chunk (push two trailing CRLFs); a positive size
+// pushes CRLF/CHUNK/CRLF and loops back to CHUNK_SIZE. Sizes inconsistent
+// with a declared Content-Length are rejected.
+static int32_t httpParserOnChunkSize(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) {
+ HttpContext *pContext = parser->pContext;
+ int32_t ok = 0;
+ int32_t bytes;
+ int32_t len;
+ int32_t n;
+ do {
+ if (isxdigit(c)) {
+ if (httpAppendString(&parser->str, &c, 1)) {
+ httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED);
+ break;
+ }
+ break;
+ }
+ if (c == '\r') {
+ n = sscanf(parser->str.str, "%x%n", &len, &bytes);
+ if (n == 1 && bytes == strlen(parser->str.str) && len >= 0) {
+ if (len == 0) {
+ if (parser->contentLengthSpecified == 0 || parser->receivedSize == parser->contentLength) {
+ httpClearString(&parser->str);
+ httpPopStack(parser);
+ httpPushStack(parser, HTTP_PARSER_CRLF);
+ httpPushStack(parser, HTTP_PARSER_CRLF);
+ *again = 1;
+ break;
+ }
+ } else {
+ if (parser->contentLengthSpecified == 0 || parser->receivedSize + len <= parser->contentLength) {
+ parser->chunkSize = len;
+ httpClearString(&parser->str);
+ httpPopStack(parser);
+ httpPushStack(parser, HTTP_PARSER_CHUNK_SIZE);
+ httpPushStack(parser, HTTP_PARSER_CRLF);
+ httpPushStack(parser, HTTP_PARSER_CHUNK);
+ httpPushStack(parser, HTTP_PARSER_CRLF);
+ *again = 1;
+ break;
+ }
+ }
+ }
+ }
+ httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED);
+ } while (0);
+ return ok;
+}
+
+// CHUNK state: buffer body bytes until chunkSize bytes have arrived, then
+// route the buffered data through the gzip decompressor (if active) or
+// straight into the body, and finish the message if END is reached.
+static int32_t httpParserOnChunk(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) {
+ HttpContext *pContext = parser->pContext;
+ int32_t ok = 0;
+ do {
+ if (httpAppendString(&parser->str, &c, 1)) {
+ httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
+ ok = -1;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_CHUNK_FAILED);
+ break;
+ }
+ ++parser->receivedSize;
+ ++parser->receivedChunkSize;
+ if (parser->receivedChunkSize < parser->chunkSize) break;
+
+ if (parser->gzip) {
+ if (ehttp_gzip_write(parser->gzip, parser->str.str, parser->str.pos)) {
+ httpError("context:%p, fd:%d, gzip failed", pContext, pContext->fd);
+ ok = -1;
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_CHUNK_FAILED);
+ break;
+ }
+ } else {
+ httpOnBody(parser, parser->str.str, parser->str.pos);
+ }
+ parser->receivedChunkSize = 0;
+ httpClearString(&parser->str);
+ httpPopStack(parser);
+ if (httpTopStack(parser) == HTTP_PARSER_END) {
+ ok = httpParserPostProcess(parser);
+ }
+ } while (0);
+ return ok;
+}
+
+// END state: any byte arriving after the message completed is a protocol
+// violation; record the error and fail.
+static int32_t httpParserOnEnd(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) {
+ HttpContext *pContext = parser->pContext;
+ httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c);
+ httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_END_FAILED);
+ return -1;
+}
+
+// Dispatch one input byte to the handler for the parser's current top state.
+// Sets *again when the same byte must be re-examined under the new state.
+// Returns 0 on success, -1 on failure (the ERROR state is pushed so all
+// subsequent bytes fail fast).
+static int32_t httpParseChar(HttpParser *parser, const char c, int32_t *again) {
+ HttpContext *pContext = parser->pContext;
+ int32_t ok = 0;
+ HTTP_PARSER_STATE state = httpTopStack(parser);
+
+ switch (state) {
+ case HTTP_PARSER_BEGIN:
+ ok = httpParserOnBegin(parser, state, c, again);
+ break;
+ case HTTP_PARSER_REQUEST_OR_RESPONSE:
+ ok = httpParserOnRquestOrResponse(parser, state, c, again);
+ break;
+ case HTTP_PARSER_METHOD:
+ ok = httpParserOnMethod(parser, state, c, again);
+ break;
+ case HTTP_PARSER_TARGET:
+ ok = httpParserOnTarget(parser, state, c, again);
+ break;
+ case HTTP_PARSER_HTTP_VERSION:
+ ok = httpParserOnVersion(parser, state, c, again);
+ break;
+ case HTTP_PARSER_SP:
+ ok = httpParserOnSp(parser, state, c, again);
+ break;
+ case HTTP_PARSER_STATUS_CODE:
+ ok = httpParserOnStatusCode(parser, state, c, again);
+ break;
+ case HTTP_PARSER_REASON_PHRASE:
+ ok = httpParserOnReasonPhrase(parser, state, c, again);
+ break;
+ case HTTP_PARSER_CRLF:
+ ok = httpParserOnCrlf(parser, state, c, again);
+ break;
+ case HTTP_PARSER_HEADER:
+ ok = httpParserOnHeader(parser, state, c, again);
+ break;
+ case HTTP_PARSER_HEADER_KEY:
+ ok = httpParserOnHeaderKey(parser, state, c, again);
+ break;
+ case HTTP_PARSER_HEADER_VAL:
+ ok = httpParserOnHeaderVal(parser, state, c, again);
+ break;
+ case HTTP_PARSER_CHUNK_SIZE:
+ ok = httpParserOnChunkSize(parser, state, c, again);
+ break;
+ case HTTP_PARSER_CHUNK:
+ ok = httpParserOnChunk(parser, state, c, again);
+ break;
+ case HTTP_PARSER_END:
+ ok = httpParserOnEnd(parser, state, c, again);
+ break;
+ case HTTP_PARSER_ERROR:
+ ok = -2; // already failed earlier; reported below
+ break;
+ default:
+ ok = -1;
+ httpError("context:%p, fd:%d, unknown parse state:%d", pContext, pContext->fd, state);
+ httpOnError(parser, 500, TSDB_CODE_HTTP_PARSE_INVALID_STATE);
+ break;
+ }
+
+ if (ok == -1) {
+ httpError("context:%p, fd:%d, failed to parse, state:%d", pContext, pContext->fd, state);
+ httpPushStack(parser, HTTP_PARSER_ERROR);
+ }
+
+ if (ok == -2) {
+ ok = -1;
+ httpError("context:%p, fd:%d, failed to parse, invalid state", pContext, pContext->fd);
+ httpOnError(parser, 500, TSDB_CODE_HTTP_PARSE_ERROR_STATE);
+ }
+
+ return ok;
+}
+
+// Feed len bytes of buf through the state machine one byte at a time.
+// A byte is re-dispatched (without advancing) while a handler sets 'again'.
+// Returns 0 when all bytes were consumed, the failing handler's code otherwise.
+int32_t httpParseBuf(HttpParser *parser, const char *buf, int32_t len) {
+ HttpContext *pContext = parser->pContext;
+ const char *p = buf;
+ int32_t ret = 0;
+ int32_t i = 0;
+
+ while (i < len) {
+ int32_t again = 0;
+ ret = httpParseChar(parser, *p, &again);
+ if (ret != 0) {
+ httpError("context:%p, fd:%d, parse failed, ret:%d i:%d len:%d buf:%s", pContext, pContext->fd, ret, i, len, buf);
+ break;
+ }
+ if (again) continue;
+ ++p;
+ ++i;
+ }
+
+ return ret;
+}
diff --git a/src/plugins/http/src/httpQueue.c b/src/plugins/http/src/httpQueue.c
index 9625102f74..86a97a6abe 100644
--- a/src/plugins/http/src/httpQueue.c
+++ b/src/plugins/http/src/httpQueue.c
@@ -39,15 +39,15 @@ typedef struct {
typedef struct {
void *param;
void *result;
- int numOfRows;
- void (*fp)(void *param, void *result, int numOfRows);
+ int32_t numOfRows;
+ void (*fp)(void *param, void *result, int32_t numOfRows);
} SHttpResult;
static SHttpWorkerPool tsHttpPool;
static taos_qset tsHttpQset;
static taos_queue tsHttpQueue;
-void httpDispatchToResultQueue(void *param, TAOS_RES *result, int numOfRows, void (*fp)(void *param, void *result, int numOfRows)) {
+void httpDispatchToResultQueue(void *param, TAOS_RES *result, int32_t numOfRows, void (*fp)(void *param, void *result, int32_t numOfRows)) {
if (tsHttpQueue != NULL) {
SHttpResult *pMsg = (SHttpResult *)taosAllocateQitem(sizeof(SHttpResult));
pMsg->param = param;
@@ -71,7 +71,7 @@ static void *httpProcessResultQueue(void *param) {
break;
}
- httpDebug("context:%p, res:%p will be processed in result queue", pMsg->param, pMsg->result);
+ httpTrace("context:%p, res:%p will be processed in result queue", pMsg->param, pMsg->result);
(*pMsg->fp)(pMsg->param, pMsg->result, pMsg->numOfRows);
taosFreeQitem(pMsg);
}
diff --git a/src/plugins/http/src/httpResp.c b/src/plugins/http/src/httpResp.c
index f53aff7831..755dad2d85 100644
--- a/src/plugins/http/src/httpResp.c
+++ b/src/plugins/http/src/httpResp.c
@@ -19,7 +19,6 @@
#include "taosmsg.h"
#include "httpLog.h"
#include "httpResp.h"
-#include "httpCode.h"
#include "httpJson.h"
#include "httpContext.h"
@@ -45,155 +44,112 @@ const char *httpRespTemplate[] = {
"%s 200 OK\r\nAccess-Control-Allow-Origin:*\r\n%sAccess-Control-Allow-Methods:POST, GET, OPTIONS, DELETE, PUT\r\nAccess-Control-Allow-Headers:Accept, Content-Type\r\nContent-Type: application/json;charset=utf-8\r\nContent-Length: %d\r\n\r\n"
};
-static void httpSendErrorRespImp(HttpContext *pContext, int httpCode, char *httpCodeStr, int errNo, char *desc) {
- httpError("context:%p, fd:%d, ip:%s, code:%d, error:%s", pContext, pContext->fd, pContext->ipstr, httpCode, desc);
+static void httpSendErrorRespImp(HttpContext *pContext, int32_t httpCode, char *httpCodeStr, int32_t errNo, const char *desc) {
+ httpError("context:%p, fd:%d, code:%d, error:%s", pContext, pContext->fd, httpCode, desc);
char head[512] = {0};
char body[512] = {0};
- int bodyLen = sprintf(body, httpRespTemplate[HTTP_RESPONSE_JSON_ERROR], errNo, desc);
- int headLen = sprintf(head, httpRespTemplate[HTTP_RESPONSE_ERROR], httpVersionStr[pContext->httpVersion], httpCode,
- httpCodeStr, httpKeepAliveStr[pContext->httpKeepAlive], bodyLen);
+ int32_t bodyLen = sprintf(body, httpRespTemplate[HTTP_RESPONSE_JSON_ERROR], errNo, desc);
+ int32_t headLen = sprintf(head, httpRespTemplate[HTTP_RESPONSE_ERROR], httpVersionStr[pContext->parser->httpVersion],
+ httpCode, httpCodeStr, httpKeepAliveStr[pContext->parser->keepAlive], bodyLen);
httpWriteBuf(pContext, head, headLen);
httpWriteBuf(pContext, body, bodyLen);
httpCloseContextByApp(pContext);
}
-void httpSendErrorRespWithDesc(HttpContext *pContext, int errNo, char *desc) {
- int httpCode = 500;
- char *httpCodeStr = "Internal Server Error";
- switch (errNo) {
- case HTTP_SUCCESS:
- httpCode = 200;
- httpCodeStr = "OK";
- break;
- case HTTP_SERVER_OFFLINE:
- case HTTP_UNSUPPORT_URL:
- httpCode = 404;
- httpCodeStr = "Not Found";
- break;
- case HTTP_PARSE_HTTP_METHOD_ERROR:
- httpCode = 405;
- httpCodeStr = "Method Not Allowed";
- break;
- case HTTP_PARSE_HTTP_VERSION_ERROR:
- httpCode = 505;
- httpCodeStr = "HTTP Version Not Supported";
- break;
- case HTTP_PARSE_HEAD_ERROR:
- httpCode = 406;
- httpCodeStr = "Not Acceptable";
- break;
- case HTTP_REQUSET_TOO_BIG:
- httpCode = 413;
- httpCodeStr = "Request Entity Too Large";
- break;
- case HTTP_PARSE_BODY_ERROR:
- case HTTP_PARSE_CHUNKED_BODY_ERROR:
- httpCode = 409;
- httpCodeStr = "Conflict";
- break;
- case HTTP_PARSE_URL_ERROR:
- httpCode = 414;
- httpCodeStr = "Request-URI Invalid";
- break;
- case HTTP_INVALID_AUTH_TOKEN:
- case HTTP_PARSE_USR_ERROR:
- httpCode = 401;
- httpCodeStr = "Unauthorized";
- break;
- case HTTP_NO_SQL_INPUT:
- httpCode = 400;
- httpCodeStr = "Bad Request";
- break;
- case HTTP_SESSION_FULL:
- httpCode = 421;
- httpCodeStr = "Too many connections";
- break;
- case HTTP_NO_ENOUGH_MEMORY:
- case HTTP_GEN_TAOSD_TOKEN_ERR:
- httpCode = 507;
- httpCodeStr = "Insufficient Storage";
- break;
- case HTTP_INVALID_DB_TABLE:
- case HTTP_NO_EXEC_USEDB:
- case HTTP_PARSE_GC_REQ_ERROR:
- case HTTP_INVALID_MULTI_REQUEST:
- case HTTP_NO_MSG_INPUT:
- httpCode = 400;
- httpCodeStr = "Bad Request";
- break;
- case HTTP_NO_ENOUGH_SESSIONS:
- httpCode = 421;
- httpCodeStr = "Too many connections";
- break;
- // telegraf
- case HTTP_TG_DB_NOT_INPUT:
- case HTTP_TG_DB_TOO_LONG:
- case HTTP_TG_INVALID_JSON:
- case HTTP_TG_METRICS_NULL:
- case HTTP_TG_METRICS_SIZE:
- case HTTP_TG_METRIC_NULL:
- case HTTP_TG_METRIC_TYPE:
- case HTTP_TG_METRIC_NAME_NULL:
- case HTTP_TG_METRIC_NAME_LONG:
- case HTTP_TG_TIMESTAMP_NULL:
- case HTTP_TG_TIMESTAMP_TYPE:
- case HTTP_TG_TIMESTAMP_VAL_NULL:
- case HTTP_TG_TAGS_NULL:
- case HTTP_TG_TAGS_SIZE_0:
- case HTTP_TG_TAGS_SIZE_LONG:
- case HTTP_TG_TAG_NULL:
- case HTTP_TG_TAG_NAME_NULL:
- case HTTP_TG_TAG_NAME_SIZE:
- case HTTP_TG_TAG_VALUE_TYPE:
- case HTTP_TG_TAG_VALUE_NULL:
- case HTTP_TG_TABLE_NULL:
- case HTTP_TG_TABLE_SIZE:
- case HTTP_TG_FIELDS_NULL:
- case HTTP_TG_FIELDS_SIZE_0:
- case HTTP_TG_FIELDS_SIZE_LONG:
- case HTTP_TG_FIELD_NULL:
- case HTTP_TG_FIELD_NAME_NULL:
- case HTTP_TG_FIELD_NAME_SIZE:
- case HTTP_TG_FIELD_VALUE_TYPE:
- case HTTP_TG_FIELD_VALUE_NULL:
- case HTTP_INVALID_BASIC_AUTH_TOKEN:
- case HTTP_INVALID_TAOSD_AUTH_TOKEN:
- case HTTP_TG_HOST_NOT_STRING:
- // grafana
- case HTTP_GC_QUERY_NULL:
- case HTTP_GC_QUERY_SIZE:
- httpCode = 400;
- httpCodeStr = "Bad Request";
- break;
- default:
- httpError("context:%p, fd:%d, ip:%s, error:%d not recognized", pContext, pContext->fd, pContext->ipstr, errNo);
- break;
+void httpSendErrorResp(HttpContext *pContext, int32_t errNo) {
+ int32_t httpCode = 500;
+ if (errNo == TSDB_CODE_SUCCESS)
+ httpCode = 200;
+ else if (errNo == TSDB_CODE_HTTP_SERVER_OFFLINE)
+ httpCode = 404;
+ else if (errNo == TSDB_CODE_HTTP_UNSUPPORT_URL)
+ httpCode = 404;
+ else if (errNo == TSDB_CODE_HTTP_INVLALID_URL)
+ httpCode = 404;
+ else if (errNo == TSDB_CODE_HTTP_NO_ENOUGH_MEMORY)
+ httpCode = 507;
+ else if (errNo == TSDB_CODE_HTTP_REQUSET_TOO_BIG)
+ httpCode = 413;
+ else if (errNo == TSDB_CODE_HTTP_NO_AUTH_INFO)
+ httpCode = 401;
+ else if (errNo == TSDB_CODE_HTTP_NO_MSG_INPUT)
+ httpCode = 400;
+ else if (errNo == TSDB_CODE_HTTP_NO_SQL_INPUT)
+ httpCode = 400;
+ else if (errNo == TSDB_CODE_HTTP_NO_EXEC_USEDB)
+ httpCode = 400;
+ else if (errNo == TSDB_CODE_HTTP_SESSION_FULL)
+ httpCode = 421;
+ else if (errNo == TSDB_CODE_HTTP_GEN_TAOSD_TOKEN_ERR)
+ httpCode = 507;
+ else if (errNo == TSDB_CODE_HTTP_INVALID_MULTI_REQUEST)
+ httpCode = 400;
+ else if (errNo == TSDB_CODE_HTTP_CREATE_GZIP_FAILED)
+ httpCode = 507;
+ else if (errNo == TSDB_CODE_HTTP_FINISH_GZIP_FAILED)
+ httpCode = 507;
+ else if (errNo == TSDB_CODE_HTTP_INVALID_VERSION)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_INVALID_CONTENT_LENGTH)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_INVALID_AUTH_TYPE)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_INVALID_AUTH_FORMAT)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_INVALID_BASIC_AUTH)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_INVALID_TAOSD_AUTH)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_PARSE_METHOD_FAILED)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_PARSE_TARGET_FAILED)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_PARSE_VERSION_FAILED)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_PARSE_SP_FAILED)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_PARSE_STATUS_FAILED)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_PARSE_PHRASE_FAILED)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_PARSE_CRLF_FAILED)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_PARSE_HEADER_FAILED)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_PARSE_HEADER_VAL_FAILED)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_PARSE_CHUNK_FAILED)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_PARSE_END_FAILED)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_PARSE_INVALID_STATE)
+ httpCode = 406;
+ else if (errNo == TSDB_CODE_HTTP_PARSE_ERROR_STATE)
+ httpCode = 406;
+ else
+ httpCode = 400;
+
+ if (pContext->parser->httpCode != 0) {
+ httpCode = pContext->parser->httpCode;
}
- if (desc == NULL) {
- httpSendErrorRespImp(pContext, httpCode, httpCodeStr, errNo + 5000, httpMsg[errNo]);
- } else {
- httpSendErrorRespImp(pContext, httpCode, httpCodeStr, errNo + 5000, desc);
- }
+ char *httpCodeStr = httpGetStatusDesc(httpCode);
+ httpSendErrorRespImp(pContext, httpCode, httpCodeStr, errNo & 0XFFFF, tstrerror(errNo));
}
-void httpSendErrorResp(HttpContext *pContext, int errNo) { httpSendErrorRespWithDesc(pContext, errNo, NULL); }
+void httpSendTaosdInvalidSqlErrorResp(HttpContext *pContext, char *errMsg) {
+ int32_t httpCode = 400;
+ char temp[512] = {0};
+ int32_t len = sprintf(temp, "invalid SQL: %s", errMsg);
-void httpSendTaosdErrorResp(HttpContext *pContext, int errCode) {
- int httpCode = 400;
-
- httpSendErrorRespImp(pContext, httpCode, "Bad Request", errCode & 0XFFFF, (char*)tstrerror(errCode));
-}
-
-void httpSendTaosdInvalidSqlErrorResp(HttpContext *pContext, char* errMsg) {
- int httpCode = 400;
- char temp[512] = {0};
- int len = sprintf(temp, "invalid SQL: %s", errMsg);
-
- for (int i = 0; i < len; ++i) {
+ for (int32_t i = 0; i < len; ++i) {
if (temp[i] == '\"') {
temp[i] = '\'';
} else if (temp[i] == '\n') {
@@ -208,9 +164,9 @@ void httpSendSuccResp(HttpContext *pContext, char *desc) {
char head[1024] = {0};
char body[1024] = {0};
- int bodyLen = sprintf(body, httpRespTemplate[HTTP_RESPONSE_JSON_OK], HTTP_SUCCESS, desc);
- int headLen = sprintf(head, httpRespTemplate[HTTP_RESPONSE_OK], httpVersionStr[pContext->httpVersion],
- httpKeepAliveStr[pContext->httpKeepAlive], bodyLen);
+ int32_t bodyLen = sprintf(body, httpRespTemplate[HTTP_RESPONSE_JSON_OK], TSDB_CODE_SUCCESS, desc);
+ int32_t headLen = sprintf(head, httpRespTemplate[HTTP_RESPONSE_OK], httpVersionStr[pContext->parser->httpVersion],
+ httpKeepAliveStr[pContext->parser->keepAlive], bodyLen);
httpWriteBuf(pContext, head, headLen);
httpWriteBuf(pContext, body, bodyLen);
@@ -221,9 +177,9 @@ void httpSendOptionResp(HttpContext *pContext, char *desc) {
char head[1024] = {0};
char body[1024] = {0};
- int bodyLen = sprintf(body, httpRespTemplate[HTTP_RESPONSE_JSON_OK], HTTP_SUCCESS, desc);
- int headLen = sprintf(head, httpRespTemplate[HTTP_RESPONSE_OPTIONS], httpVersionStr[pContext->httpVersion],
- httpKeepAliveStr[pContext->httpKeepAlive], bodyLen);
+ int32_t bodyLen = sprintf(body, httpRespTemplate[HTTP_RESPONSE_JSON_OK], TSDB_CODE_SUCCESS, desc);
+ int32_t headLen = sprintf(head, httpRespTemplate[HTTP_RESPONSE_OPTIONS], httpVersionStr[pContext->parser->httpVersion],
+ httpKeepAliveStr[pContext->parser->keepAlive], bodyLen);
httpWriteBuf(pContext, head, headLen);
httpWriteBuf(pContext, body, bodyLen);
diff --git a/src/plugins/http/src/restHandle.c b/src/plugins/http/src/httpRestHandle.c
similarity index 78%
rename from src/plugins/http/src/restHandle.c
rename to src/plugins/http/src/httpRestHandle.c
index f0841e2f99..0a28c431ef 100644
--- a/src/plugins/http/src/restHandle.c
+++ b/src/plugins/http/src/httpRestHandle.c
@@ -15,9 +15,10 @@
#define _DEFAULT_SOURCE
#include "os.h"
+#include "taoserror.h"
#include "httpLog.h"
-#include "restHandle.h"
-#include "restJson.h"
+#include "httpRestHandle.h"
+#include "httpRestJson.h"
static HttpDecodeMethod restDecodeMethod = {"rest", restProcessRequest};
static HttpDecodeMethod restDecodeMethod2 = {"restful", restProcessRequest};
@@ -60,39 +61,37 @@ void restInitHandle(HttpServer* pServer) {
}
bool restGetUserFromUrl(HttpContext* pContext) {
- HttpParser* pParser = &pContext->parser;
- if (pParser->path[REST_USER_URL_POS].len >= TSDB_USER_LEN || pParser->path[REST_USER_URL_POS].len <= 0) {
+ HttpParser* pParser = pContext->parser;
+ if (pParser->path[REST_USER_URL_POS].pos >= TSDB_USER_LEN || pParser->path[REST_USER_URL_POS].pos <= 0) {
return false;
}
- tstrncpy(pContext->user, pParser->path[REST_USER_URL_POS].pos, TSDB_USER_LEN);
+ tstrncpy(pContext->user, pParser->path[REST_USER_URL_POS].str, TSDB_USER_LEN);
return true;
}
bool restGetPassFromUrl(HttpContext* pContext) {
- HttpParser* pParser = &pContext->parser;
- if (pParser->path[REST_PASS_URL_POS].len >= TSDB_PASSWORD_LEN || pParser->path[REST_PASS_URL_POS].len <= 0) {
+ HttpParser* pParser = pContext->parser;
+ if (pParser->path[REST_PASS_URL_POS].pos >= TSDB_PASSWORD_LEN || pParser->path[REST_PASS_URL_POS].pos <= 0) {
return false;
}
- tstrncpy(pContext->pass, pParser->path[REST_PASS_URL_POS].pos, TSDB_PASSWORD_LEN);
+ tstrncpy(pContext->pass, pParser->path[REST_PASS_URL_POS].str, TSDB_PASSWORD_LEN);
return true;
}
bool restProcessLoginRequest(HttpContext* pContext) {
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, process restful login msg", pContext, pContext->fd, pContext->ipstr,
- pContext->user);
+ httpDebug("context:%p, fd:%d, user:%s, process restful login msg", pContext, pContext->fd, pContext->user);
pContext->reqType = HTTP_REQTYPE_LOGIN;
return true;
}
-bool restProcessSqlRequest(HttpContext* pContext, int timestampFmt) {
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, process restful sql msg", pContext, pContext->fd, pContext->ipstr,
- pContext->user);
+bool restProcessSqlRequest(HttpContext* pContext, int32_t timestampFmt) {
+ httpDebug("context:%p, fd:%d, user:%s, process restful sql msg", pContext, pContext->fd, pContext->user);
- char* sql = pContext->parser.data.pos;
+ char* sql = pContext->parser->body.str;
if (sql == NULL) {
- httpSendErrorResp(pContext, HTTP_NO_SQL_INPUT);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_SQL_INPUT);
return false;
}
@@ -101,7 +100,7 @@ bool restProcessSqlRequest(HttpContext* pContext, int timestampFmt) {
* for async test
*
if (httpCheckUsedbSql(sql)) {
- httpSendErrorResp(pContext, HTTP_NO_EXEC_USEDB);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_EXEC_USEDB);
return false;
}
*/
@@ -128,7 +127,7 @@ bool restProcessRequest(struct HttpContext* pContext) {
}
if (strlen(pContext->user) == 0 || strlen(pContext->pass) == 0) {
- httpSendErrorResp(pContext, HTTP_PARSE_USR_ERROR);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_AUTH_INFO);
return false;
}
@@ -143,6 +142,6 @@ bool restProcessRequest(struct HttpContext* pContext) {
} else {
}
- httpSendErrorResp(pContext, HTTP_PARSE_URL_ERROR);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_INVLALID_URL);
return false;
}
diff --git a/src/plugins/http/src/restJson.c b/src/plugins/http/src/httpRestJson.c
similarity index 78%
rename from src/plugins/http/src/restJson.c
rename to src/plugins/http/src/httpRestJson.c
index 7a73f6559f..954678c24c 100644
--- a/src/plugins/http/src/restJson.c
+++ b/src/plugins/http/src/httpRestJson.c
@@ -18,10 +18,10 @@
#include "tglobal.h"
#include "httpLog.h"
#include "httpJson.h"
-#include "restHandle.h"
-#include "restJson.h"
+#include "httpRestHandle.h"
+#include "httpRestJson.h"
-void restBuildSqlAffectRowsJson(HttpContext *pContext, HttpSqlCmd *cmd, int affect_rows) {
+void restBuildSqlAffectRowsJson(HttpContext *pContext, HttpSqlCmd *cmd, int32_t affect_rows) {
JsonBuf *jsonBuf = httpMallocJsonBuf(pContext);
if (jsonBuf == NULL) return;
@@ -43,7 +43,7 @@ void restStartSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result)
if (jsonBuf == NULL) return;
TAOS_FIELD *fields = taos_fetch_fields(result);
- int num_fields = taos_num_fields(result);
+ int32_t num_fields = taos_num_fields(result);
httpInitJsonBuf(jsonBuf, pContext);
httpWriteJsonBufHead(jsonBuf);
@@ -66,9 +66,9 @@ void restStartSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result)
httpJsonItemToken(jsonBuf);
httpJsonString(jsonBuf, REST_JSON_AFFECT_ROWS, REST_JSON_AFFECT_ROWS_LEN);
} else {
- for (int i = 0; i < num_fields; ++i) {
+ for (int32_t i = 0; i < num_fields; ++i) {
httpJsonItemToken(jsonBuf);
- httpJsonString(jsonBuf, fields[i].name, (int)strlen(fields[i].name));
+ httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name));
}
}
@@ -83,19 +83,16 @@ void restStartSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result)
httpJsonToken(jsonBuf, JsonArrStt);
}
-bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int numOfRows, int timestampFormat) {
+bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int32_t numOfRows, int32_t timestampFormat) {
JsonBuf *jsonBuf = httpMallocJsonBuf(pContext);
if (jsonBuf == NULL) return false;
- cmd->numOfRows += numOfRows;
-
- int num_fields = taos_num_fields(result);
+ int32_t num_fields = taos_num_fields(result);
TAOS_FIELD *fields = taos_fetch_fields(result);
- for (int k = 0; k < numOfRows; ++k) {
+ for (int32_t k = 0; k < numOfRows; ++k) {
TAOS_ROW row = taos_fetch_row(result);
if (row == NULL) {
- cmd->numOfRows--;
continue;
}
int32_t* length = taos_fetch_lengths(result);
@@ -104,7 +101,7 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
httpJsonItemToken(jsonBuf);
httpJsonToken(jsonBuf, JsonArrStt);
- for (int i = 0; i < num_fields; i++) {
+ for (int32_t i = 0; i < num_fields; i++) {
httpJsonItemToken(jsonBuf);
if (row[i] == NULL) {
@@ -127,10 +124,10 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
httpJsonInt64(jsonBuf, *((int64_t *)row[i]));
break;
case TSDB_DATA_TYPE_FLOAT:
- httpJsonFloat(jsonBuf, *((float *)row[i]));
+ httpJsonFloat(jsonBuf, GET_FLOAT_VAL(row[i]));
break;
case TSDB_DATA_TYPE_DOUBLE:
- httpJsonDouble(jsonBuf, *((double *)row[i]));
+ httpJsonDouble(jsonBuf, GET_DOUBLE_VAL(row[i]));
break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
@@ -151,37 +148,34 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
}
// data row array end
- httpJsonToken(jsonBuf, JsonArrEnd);
- }
+ httpJsonToken(jsonBuf, JsonArrEnd);
+ cmd->numOfRows ++;
- if (cmd->numOfRows >= tsRestRowLimit) {
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, retrieve rows:%d larger than limit:%d, abort retrieve", pContext,
- pContext->fd, pContext->ipstr, pContext->user, cmd->numOfRows, tsRestRowLimit);
- return false;
- }
- else {
if (pContext->fd <= 0) {
- httpError("context:%p, fd:%d, ip:%s, user:%s, connection is closed, abort retrieve", pContext, pContext->fd,
- pContext->ipstr, pContext->user);
+ httpError("context:%p, fd:%d, user:%s, conn closed, abort retrieve", pContext, pContext->fd, pContext->user);
return false;
}
- else {
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, total rows:%d retrieved", pContext, pContext->fd, pContext->ipstr,
- pContext->user, cmd->numOfRows);
- return true;
+
+ if (cmd->numOfRows >= tsRestRowLimit) {
+ httpDebug("context:%p, fd:%d, user:%s, retrieve rows:%d larger than limit:%d, abort retrieve", pContext,
+ pContext->fd, pContext->user, cmd->numOfRows, tsRestRowLimit);
+ return false;
}
}
+
+ httpDebug("context:%p, fd:%d, user:%s, retrieved row:%d", pContext, pContext->fd, pContext->user, cmd->numOfRows);
+ return true;
}
-bool restBuildSqlTimestampJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int numOfRows) {
+bool restBuildSqlTimestampJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int32_t numOfRows) {
return restBuildSqlJson(pContext,cmd, result, numOfRows, REST_TIMESTAMP_FMT_TIMESTAMP);
}
-bool restBuildSqlLocalTimeStringJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int numOfRows) {
+bool restBuildSqlLocalTimeStringJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int32_t numOfRows) {
return restBuildSqlJson(pContext,cmd, result, numOfRows, REST_TIMESTAMP_FMT_LOCAL_STRING);
}
-bool restBuildSqlUtcTimeStringJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int numOfRows) {
+bool restBuildSqlUtcTimeStringJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int32_t numOfRows) {
return restBuildSqlJson(pContext,cmd, result, numOfRows, REST_TIMESTAMP_FMT_UTC_STRING);
}
diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c
index f4aca91cba..f0a7249b51 100644
--- a/src/plugins/http/src/httpServer.c
+++ b/src/plugins/http/src/httpServer.c
@@ -29,6 +29,8 @@
#define EPOLLWAKEUP (1u << 29)
#endif
+static bool httpReadData(HttpContext *pContext);
+
static void httpStopThread(HttpThread* pThread) {
pThread->stop = true;
@@ -59,7 +61,7 @@ void httpCleanUpConnect() {
if (pServer->pThreads == NULL) return;
pthread_join(pServer->thread, NULL);
- for (int i = 0; i < pServer->numOfThreads; ++i) {
+ for (int32_t i = 0; i < pServer->numOfThreads; ++i) {
HttpThread* pThread = pServer->pThreads + i;
if (pThread != NULL) {
httpStopThread(pThread);
@@ -69,119 +71,11 @@ void httpCleanUpConnect() {
httpDebug("http server:%s is cleaned up", pServer->label);
}
-int httpReadDataImp(HttpContext *pContext) {
- HttpParser *pParser = &pContext->parser;
-
- while (pParser->bufsize <= (HTTP_BUFFER_SIZE - HTTP_STEP_SIZE)) {
- int nread = (int)taosReadSocket(pContext->fd, pParser->buffer + pParser->bufsize, HTTP_STEP_SIZE);
- if (nread >= 0 && nread < HTTP_STEP_SIZE) {
- pParser->bufsize += nread;
- break;
- } else if (nread < 0) {
- if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) {
- httpDebug("context:%p, fd:%d, ip:%s, read from socket error:%d, wait another event",
- pContext, pContext->fd, pContext->ipstr, errno);
- break;
- } else {
- httpError("context:%p, fd:%d, ip:%s, read from socket error:%d, close connect",
- pContext, pContext->fd, pContext->ipstr, errno);
- return HTTP_READ_DATA_FAILED;
- }
- } else {
- pParser->bufsize += nread;
- }
-
- if (pParser->bufsize >= (HTTP_BUFFER_SIZE - HTTP_STEP_SIZE)) {
- httpError("context:%p, fd:%d, ip:%s, thread:%s, request big than:%d",
- pContext, pContext->fd, pContext->ipstr, pContext->pThread->label, HTTP_BUFFER_SIZE);
- return HTTP_REQUSET_TOO_BIG;
- }
- }
-
- pParser->buffer[pParser->bufsize] = 0;
-
- return HTTP_READ_DATA_SUCCESS;
-}
-
-static bool httpDecompressData(HttpContext *pContext) {
- if (pContext->contentEncoding != HTTP_COMPRESS_GZIP) {
- httpTraceL("context:%p, fd:%d, ip:%s, content:%s", pContext, pContext->fd, pContext->ipstr, pContext->parser.data.pos);
- return true;
- }
-
- char *decompressBuf = calloc(HTTP_DECOMPRESS_BUF_SIZE, 1);
- int32_t decompressBufLen = HTTP_DECOMPRESS_BUF_SIZE;
- size_t bufsize = sizeof(pContext->parser.buffer) - (pContext->parser.data.pos - pContext->parser.buffer) - 1;
- if (decompressBufLen > (int)bufsize) {
- decompressBufLen = (int)bufsize;
- }
-
- int ret = httpGzipDeCompress(pContext->parser.data.pos, pContext->parser.data.len, decompressBuf, &decompressBufLen);
-
- if (ret == 0) {
- memcpy(pContext->parser.data.pos, decompressBuf, decompressBufLen);
- pContext->parser.data.pos[decompressBufLen] = 0;
- httpTraceL("context:%p, fd:%d, ip:%s, rawSize:%d, decompressSize:%d, content:%s", pContext, pContext->fd,
- pContext->ipstr, pContext->parser.data.len, decompressBufLen, decompressBuf);
- pContext->parser.data.len = decompressBufLen;
- } else {
- httpError("context:%p, fd:%d, ip:%s, failed to decompress data, rawSize:%d, error:%d",
- pContext, pContext->fd, pContext->ipstr, pContext->parser.data.len, ret);
- }
-
- free(decompressBuf);
- return ret == 0;
-}
-
-static bool httpReadData(HttpContext *pContext) {
- if (!pContext->parsed) {
- httpInitContext(pContext);
- }
-
- int32_t code = httpReadDataImp(pContext);
- if (code != HTTP_READ_DATA_SUCCESS) {
- if (code == HTTP_READ_DATA_FAILED) {
- httpReleaseContext(pContext);
- } else {
- httpSendErrorResp(pContext, code);
- httpNotifyContextClose(pContext);
- }
- return false;
- }
-
- if (!httpParseRequest(pContext)) {
- httpNotifyContextClose(pContext);
- return false;
- }
-
- int ret = httpCheckReadCompleted(pContext);
- if (ret == HTTP_CHECK_BODY_CONTINUE) {
- //httpDebug("context:%p, fd:%d, ip:%s, not finished yet, wait another event", pContext, pContext->fd, pContext->ipstr);
- httpReleaseContext(pContext);
- return false;
- } else if (ret == HTTP_CHECK_BODY_SUCCESS){
- httpDebug("context:%p, fd:%d, ip:%s, thread:%s, read size:%d, dataLen:%d",
- pContext, pContext->fd, pContext->ipstr, pContext->pThread->label, pContext->parser.bufsize, pContext->parser.data.len);
- if (httpDecompressData(pContext)) {
- return true;
- } else {
- httpNotifyContextClose(pContext);
- httpReleaseContext(pContext);
- return false;
- }
- } else {
- httpError("context:%p, fd:%d, ip:%s, failed to read http body, close connect", pContext, pContext->fd, pContext->ipstr);
- httpNotifyContextClose(pContext);
- httpReleaseContext(pContext);
- return false;
- }
-}
-
static void httpProcessHttpData(void *param) {
HttpServer *pServer = &tsHttpServer;
HttpThread *pThread = (HttpThread *)param;
HttpContext *pContext;
- int fdNum;
+ int32_t fdNum;
sigset_t set;
sigemptyset(&set);
@@ -198,7 +92,7 @@ static void httpProcessHttpData(void *param) {
}
if (fdNum <= 0) continue;
- for (int i = 0; i < fdNum; ++i) {
+ for (int32_t i = 0; i < fdNum; ++i) {
pContext = httpGetContext(events[i].data.ptr);
if (pContext == NULL) {
httpError("context:%p, is already released, close connect", events[i].data.ptr);
@@ -208,49 +102,51 @@ static void httpProcessHttpData(void *param) {
}
if (events[i].events & EPOLLPRI) {
- httpDebug("context:%p, fd:%d, ip:%s, state:%s, EPOLLPRI events occured, accessed:%d, close connect",
- pContext, pContext->fd, pContext->ipstr, httpContextStateStr(pContext->state), pContext->accessTimes);
+ httpDebug("context:%p, fd:%d, state:%s, EPOLLPRI events occured, accessed:%d, close connect", pContext,
+ pContext->fd, httpContextStateStr(pContext->state), pContext->accessTimes);
httpCloseContextByServer(pContext);
continue;
}
if (events[i].events & EPOLLRDHUP) {
- httpDebug("context:%p, fd:%d, ip:%s, state:%s, EPOLLRDHUP events occured, accessed:%d, close connect",
- pContext, pContext->fd, pContext->ipstr, httpContextStateStr(pContext->state), pContext->accessTimes);
+ httpDebug("context:%p, fd:%d, state:%s, EPOLLRDHUP events occured, accessed:%d, close connect", pContext,
+ pContext->fd, httpContextStateStr(pContext->state), pContext->accessTimes);
httpCloseContextByServer(pContext);
continue;
}
if (events[i].events & EPOLLERR) {
- httpDebug("context:%p, fd:%d, ip:%s, state:%s, EPOLLERR events occured, accessed:%d, close connect",
- pContext, pContext->fd, pContext->ipstr, httpContextStateStr(pContext->state), pContext->accessTimes);
+ httpDebug("context:%p, fd:%d, state:%s, EPOLLERR events occured, accessed:%d, close connect", pContext,
+ pContext->fd, httpContextStateStr(pContext->state), pContext->accessTimes);
httpCloseContextByServer(pContext);
continue;
}
if (events[i].events & EPOLLHUP) {
- httpDebug("context:%p, fd:%d, ip:%s, state:%s, EPOLLHUP events occured, accessed:%d, close connect",
- pContext, pContext->fd, pContext->ipstr, httpContextStateStr(pContext->state), pContext->accessTimes);
+ httpDebug("context:%p, fd:%d, state:%s, EPOLLHUP events occured, accessed:%d, close connect", pContext,
+ pContext->fd, httpContextStateStr(pContext->state), pContext->accessTimes);
httpCloseContextByServer(pContext);
continue;
}
if (!httpAlterContextState(pContext, HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_READY)) {
- httpDebug("context:%p, fd:%d, ip:%s, state:%s, not in ready state, ignore read events",
- pContext, pContext->fd, pContext->ipstr, httpContextStateStr(pContext->state));
- httpReleaseContext(pContext);
+ httpDebug("context:%p, fd:%d, state:%s, not in ready state, ignore read events", pContext, pContext->fd,
+ httpContextStateStr(pContext->state));
+ httpReleaseContext(pContext, true);
continue;
}
if (pServer->status != HTTP_SERVER_RUNNING) {
- httpDebug("context:%p, fd:%d, ip:%s, state:%s, server is not running, accessed:%d, close connect", pContext,
- pContext->fd, pContext->ipstr, httpContextStateStr(pContext->state), pContext->accessTimes);
- httpSendErrorResp(pContext, HTTP_SERVER_OFFLINE);
+ httpDebug("context:%p, fd:%d, state:%s, server is not running, accessed:%d, close connect", pContext,
+ pContext->fd, httpContextStateStr(pContext->state), pContext->accessTimes);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_SERVER_OFFLINE);
httpNotifyContextClose(pContext);
} else {
if (httpReadData(pContext)) {
(*(pThread->processData))(pContext);
atomic_fetch_add_32(&pServer->requestNum, 1);
+ } else {
+ httpReleaseContext(pContext, false);
}
}
}
@@ -258,13 +154,13 @@ static void httpProcessHttpData(void *param) {
}
static void *httpAcceptHttpConnection(void *arg) {
- int connFd = -1;
+ int32_t connFd = -1;
struct sockaddr_in clientAddr;
- int threadId = 0;
+ int32_t threadId = 0;
HttpServer * pServer = &tsHttpServer;
HttpThread * pThread = NULL;
HttpContext * pContext = NULL;
- int totalFds = 0;
+ int32_t totalFds = 0;
sigset_t set;
sigemptyset(&set);
@@ -284,7 +180,7 @@ static void *httpAcceptHttpConnection(void *arg) {
while (1) {
socklen_t addrlen = sizeof(clientAddr);
- connFd = (int)accept(pServer->fd, (struct sockaddr *)&clientAddr, &addrlen);
+ connFd = (int32_t)accept(pServer->fd, (struct sockaddr *)&clientAddr, &addrlen);
if (connFd == -1) {
if (errno == EINVAL) {
httpDebug("http server:%s socket was shutdown, exiting...", pServer->label);
@@ -295,7 +191,7 @@ static void *httpAcceptHttpConnection(void *arg) {
}
totalFds = 1;
- for (int i = 0; i < pServer->numOfThreads; ++i) {
+ for (int32_t i = 0; i < pServer->numOfThreads; ++i) {
totalFds += pServer->pThreads[i].numOfContexts;
}
@@ -332,7 +228,7 @@ static void *httpAcceptHttpConnection(void *arg) {
httpError("context:%p, fd:%d, ip:%s, thread:%s, failed to add http fd for epoll, error:%s", pContext, connFd,
pContext->ipstr, pThread->label, strerror(errno));
taosClose(pContext->fd);
- httpReleaseContext(pContext);
+ httpReleaseContext(pContext, true);
continue;
}
@@ -359,7 +255,7 @@ bool httpInitConnect() {
}
HttpThread *pThread = pServer->pThreads;
- for (int i = 0; i < pServer->numOfThreads; ++i) {
+ for (int32_t i = 0; i < pServer->numOfThreads; ++i) {
sprintf(pThread->label, "%s%d", pServer->label, i);
pThread->processData = pServer->processData;
pThread->threadId = i;
@@ -405,3 +301,62 @@ bool httpInitConnect() {
pServer->serverPort, pServer->numOfThreads);
return true;
}
+
+static bool httpReadData(HttpContext *pContext) {
+ HttpParser *pParser = pContext->parser;
+ if (!pParser->inited) {
+ httpInitParser(pParser);
+ }
+
+ if (pParser->parsed) {
+ httpDebug("context:%p, fd:%d, not in ready state, parsed:%d", pContext, pContext->fd, pParser->parsed);
+ return false;
+ }
+
+ pContext->accessTimes++;
+ pContext->lastAccessTime = taosGetTimestampSec();
+ char buf[HTTP_STEP_SIZE + 1] = {0};
+
+ while (1) {
+ int32_t nread = (int32_t)taosReadSocket(pContext->fd, buf, HTTP_STEP_SIZE);
+ if (nread > 0) {
+ buf[nread] = '\0';
+ httpTraceL("context:%p, fd:%d, nread:%d content:%s", pContext, pContext->fd, nread, buf);
+ int32_t ok = httpParseBuf(pParser, buf, nread);
+
+ if (ok) {
+ httpError("context:%p, fd:%d, parse failed, ret:%d code:%d close connect", pContext, pContext->fd, ok,
+ pParser->parseCode);
+ httpSendErrorResp(pContext, pParser->parseCode);
+ httpNotifyContextClose(pContext);
+ return false;
+ }
+
+ if (pParser->parseCode) {
+ httpError("context:%p, fd:%d, parse failed, code:%d close connect", pContext, pContext->fd, pParser->parseCode);
+ httpSendErrorResp(pContext, pParser->parseCode);
+ httpNotifyContextClose(pContext);
+ return false;
+ }
+
+ if (!pParser->parsed) {
+ httpTrace("context:%p, fd:%d, read not finished", pContext, pContext->fd);
+ continue;
+ } else {
+ httpDebug("context:%p, fd:%d, bodyLen:%d", pContext, pContext->fd, pParser->body.pos);
+ return true;
+ }
+ } else if (nread < 0) {
+ if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) {
+ httpDebug("context:%p, fd:%d, read from socket error:%d, wait another event", pContext, pContext->fd, errno);
+ return false; // later again
+ } else {
+ httpError("context:%p, fd:%d, read from socket error:%d, close connect", pContext, pContext->fd, errno);
+ return false;
+ }
+ } else {
+ httpError("context:%p, fd:%d, nread:%d, wait another event", pContext, pContext->fd, nread);
+ return false;
+ }
+ }
+}
diff --git a/src/plugins/http/src/httpSession.c b/src/plugins/http/src/httpSession.c
index f19679e072..a96e4433b0 100644
--- a/src/plugins/http/src/httpSession.c
+++ b/src/plugins/http/src/httpSession.c
@@ -16,6 +16,7 @@
#define _DEFAULT_SOURCE
#include "os.h"
#include "taos.h"
+#include "taoserror.h"
#include "tglobal.h"
#include "tcache.h"
#include "httpInt.h"
@@ -39,15 +40,15 @@ void httpCreateSession(HttpContext *pContext, void *taos) {
// taosCacheRelease(server->sessionCache, (void **)&temp, false);
if (pContext->session == NULL) {
- httpError("context:%p, fd:%d, ip:%s, user:%s, error:%s", pContext, pContext->fd, pContext->ipstr, pContext->user,
- httpMsg[HTTP_SESSION_FULL]);
+ httpError("context:%p, fd:%d, user:%s, error:%s", pContext, pContext->fd, pContext->user,
+ tstrerror(TSDB_CODE_HTTP_SESSION_FULL));
taos_close(taos);
pthread_mutex_unlock(&server->serverMutex);
return;
}
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, create a new session:%p:%p sessionRef:%d", pContext, pContext->fd,
- pContext->ipstr, pContext->user, pContext->session, pContext->session->taos, pContext->session->refCount);
+ httpDebug("context:%p, fd:%d, user:%s, create a new session:%p:%p sessionRef:%d", pContext, pContext->fd,
+ pContext->user, pContext->session, pContext->session->taos, pContext->session->refCount);
pthread_mutex_unlock(&server->serverMutex);
}
@@ -61,11 +62,10 @@ static void httpFetchSessionImp(HttpContext *pContext) {
pContext->session = taosCacheAcquireByKey(server->sessionCache, sessionId, len);
if (pContext->session != NULL) {
atomic_add_fetch_32(&pContext->session->refCount, 1);
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, find an exist session:%p:%p, sessionRef:%d", pContext, pContext->fd,
- pContext->ipstr, pContext->user, pContext->session, pContext->session->taos, pContext->session->refCount);
+ httpDebug("context:%p, fd:%d, user:%s, find an exist session:%p:%p, sessionRef:%d", pContext, pContext->fd,
+ pContext->user, pContext->session, pContext->session->taos, pContext->session->refCount);
} else {
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, session not found", pContext, pContext->fd, pContext->ipstr,
- pContext->user);
+ httpDebug("context:%p, fd:%d, user:%s, session not found", pContext, pContext->fd, pContext->user);
}
pthread_mutex_unlock(&server->serverMutex);
diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c
index 07cdea1380..883fa574ff 100644
--- a/src/plugins/http/src/httpSql.c
+++ b/src/plugins/http/src/httpSql.c
@@ -17,6 +17,7 @@
#include "os.h"
#include "tnote.h"
#include "taos.h"
+#include "taoserror.h"
#include "tsclient.h"
#include "httpInt.h"
#include "httpContext.h"
@@ -50,24 +51,20 @@ void httpProcessMultiSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int n
}
}
- // if (tscResultsetFetchCompleted(result)) {
- // isContinue = false;
- // }
-
if (isContinue) {
// retrieve next batch of rows
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, continue retrieve, numOfRows:%d, sql:%s",
- pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, numOfRows, sql);
+ httpDebug("context:%p, fd:%d, user:%s, process pos:%d, continue retrieve, numOfRows:%d, sql:%s", pContext,
+ pContext->fd, pContext->user, multiCmds->pos, numOfRows, sql);
taos_fetch_rows_a(result, httpProcessMultiSqlRetrieveCallBack, param);
} else {
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, stop retrieve, numOfRows:%d, sql:%s",
- pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, numOfRows, sql);
+ httpDebug("context:%p, fd:%d, user:%s, process pos:%d, stop retrieve, numOfRows:%d, sql:%s", pContext, pContext->fd,
+ pContext->user, multiCmds->pos, numOfRows, sql);
if (numOfRows < 0) {
- httpError("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, retrieve failed code:%s, sql:%s",
- pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, tstrerror(numOfRows), sql);
- }
-
+ httpError("context:%p, fd:%d, user:%s, process pos:%d, retrieve failed code:%s, sql:%s", pContext, pContext->fd,
+ pContext->user, multiCmds->pos, tstrerror(numOfRows), sql);
+ }
+
taos_free_result(result);
if (singleCmd->cmdReturnType == HTTP_CMD_RETURN_TYPE_WITH_RETURN && encode->stopJsonFp) {
@@ -94,20 +91,20 @@ void httpProcessMultiSqlCallBackImp(void *param, TAOS_RES *result, int code) {
char * sql = httpGetCmdsString(pContext, singleCmd->sql);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
- httpWarn("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, code:%s:inprogress, sql:%s",
- pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, tstrerror(code), sql);
+ httpWarn("context:%p, fd:%d, user:%s, process pos:%d, code:%s:inprogress, sql:%s", pContext, pContext->fd,
+ pContext->user, multiCmds->pos, tstrerror(code), sql);
return;
}
if (code < 0) {
if (encode->checkFinishedFp != NULL && !encode->checkFinishedFp(pContext, singleCmd, code)) {
singleCmd->code = code;
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, process pos jump to:%d, last code:%s, last sql:%s",
- pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos + 1, tstrerror(code), sql);
+ httpDebug("context:%p, fd:%d, user:%s, process pos jump to:%d, last code:%s, last sql:%s", pContext, pContext->fd,
+ pContext->user, multiCmds->pos + 1, tstrerror(code), sql);
} else {
singleCmd->code = code;
- httpError("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, error code:%s, sql:%s",
- pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, tstrerror(code), sql);
+ httpError("context:%p, fd:%d, user:%s, process pos:%d, error code:%s, sql:%s", pContext, pContext->fd,
+ pContext->user, multiCmds->pos, tstrerror(code), sql);
if (singleCmd->cmdReturnType == HTTP_CMD_RETURN_TYPE_WITH_RETURN) {
if (encode->startJsonFp) (encode->startJsonFp)(pContext, singleCmd, result);
@@ -125,8 +122,8 @@ void httpProcessMultiSqlCallBackImp(void *param, TAOS_RES *result, int code) {
if (isUpdate) {
// not select or show commands
int affectRows = taos_affected_rows(result);
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, affect rows:%d, sql:%s",
- pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, affectRows, sql);
+ httpDebug("context:%p, fd:%d, user:%s, process pos:%d, affect rows:%d, sql:%s", pContext, pContext->fd,
+ pContext->user, multiCmds->pos, affectRows, sql);
singleCmd->code = 0;
@@ -151,8 +148,8 @@ void httpProcessMultiSqlCallBackImp(void *param, TAOS_RES *result, int code) {
taos_free_result(result);
httpProcessMultiSql(pContext);
} else {
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, start retrieve, sql:%s",
- pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, sql);
+ httpDebug("context:%p, fd:%d, user:%s, process pos:%d, start retrieve, sql:%s", pContext, pContext->fd,
+ pContext->user, multiCmds->pos, sql);
if (singleCmd->cmdReturnType == HTTP_CMD_RETURN_TYPE_WITH_RETURN && encode->startJsonFp) {
(encode->startJsonFp)(pContext, singleCmd, result);
@@ -170,8 +167,8 @@ void httpProcessMultiSql(HttpContext *pContext) {
HttpEncodeMethod *encode = pContext->encodeMethod;
if (multiCmds->pos >= multiCmds->size) {
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, size:%d, stop mulit-querys",
- pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, multiCmds->size);
+ httpDebug("context:%p, fd:%d, user:%s, process pos:%d, size:%d, stop mulit-querys", pContext, pContext->fd,
+ pContext->user, multiCmds->pos, multiCmds->size);
if (encode->cleanJsonFp) {
(encode->cleanJsonFp)(pContext);
}
@@ -182,8 +179,8 @@ void httpProcessMultiSql(HttpContext *pContext) {
HttpSqlCmd *cmd = multiCmds->cmds + multiCmds->pos;
char *sql = httpGetCmdsString(pContext, cmd->sql);
- httpTraceL("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, start query, sql:%s", pContext, pContext->fd,
- pContext->ipstr, pContext->user, multiCmds->pos, sql);
+ httpTraceL("context:%p, fd:%d, user:%s, process pos:%d, start query, sql:%s", pContext, pContext->fd, pContext->user,
+ multiCmds->pos, sql);
taosNotePrintHttp(sql);
taos_query_a(pContext->session->taos, sql, httpProcessMultiSqlCallBack, (void *)pContext);
}
@@ -193,12 +190,12 @@ void httpProcessMultiSqlCmd(HttpContext *pContext) {
HttpSqlCmds *multiCmds = pContext->multiCmds;
if (multiCmds == NULL || multiCmds->size <= 0 || multiCmds->pos >= multiCmds->size || multiCmds->pos < 0) {
- httpSendErrorResp(pContext, HTTP_INVALID_MULTI_REQUEST);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_INVALID_MULTI_REQUEST);
return;
}
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, start multi-querys pos:%d, size:%d", pContext, pContext->fd,
- pContext->ipstr, pContext->user, multiCmds->pos, multiCmds->size);
+ httpDebug("context:%p, fd:%d, user:%s, start multi-querys pos:%d, size:%d", pContext, pContext->fd, pContext->user,
+ multiCmds->pos, multiCmds->size);
HttpEncodeMethod *encode = pContext->encodeMethod;
if (encode->initJsonFp) {
(encode->initJsonFp)(pContext);
@@ -223,27 +220,18 @@ void httpProcessSingleSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int
}
}
-#if 0
- // todo refactor
- if (tscResultsetFetchCompleted(result)) {
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, resultset fetch completed", pContext, pContext->fd, pContext->ipstr,
- pContext->user);
- isContinue = false;
- }
-#endif
-
if (isContinue) {
// retrieve next batch of rows
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, continue retrieve, numOfRows:%d", pContext, pContext->fd,
- pContext->ipstr, pContext->user, numOfRows);
+ httpDebug("context:%p, fd:%d, user:%s, continue retrieve, numOfRows:%d", pContext, pContext->fd, pContext->user,
+ numOfRows);
taos_fetch_rows_a(result, httpProcessSingleSqlRetrieveCallBack, param);
} else {
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, stop retrieve, numOfRows:%d", pContext, pContext->fd, pContext->ipstr,
- pContext->user, numOfRows);
+ httpDebug("context:%p, fd:%d, user:%s, stop retrieve, numOfRows:%d", pContext, pContext->fd, pContext->user,
+ numOfRows);
if (numOfRows < 0) {
- httpError("context:%p, fd:%d, ip:%s, user:%s, retrieve failed, code:%s", pContext, pContext->fd, pContext->ipstr,
- pContext->user, tstrerror(numOfRows));
+ httpError("context:%p, fd:%d, user:%s, retrieve failed, code:%s", pContext, pContext->fd, pContext->user,
+ tstrerror(numOfRows));
}
taos_free_result(result);
@@ -269,21 +257,21 @@ void httpProcessSingleSqlCallBackImp(void *param, TAOS_RES *result, int unUsedCo
HttpEncodeMethod *encode = pContext->encodeMethod;
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
- httpError("context:%p, fd:%d, ip:%s, user:%s, query error, taos:%p, code:%s:inprogress, sqlObj:%p",
- pContext, pContext->fd, pContext->ipstr, pContext->user, pContext->session->taos, tstrerror(code), (SSqlObj *)result);
+ httpError("context:%p, fd:%d, user:%s, query error, code:%s:inprogress, sqlObj:%p", pContext, pContext->fd,
+ pContext->user, tstrerror(code), (SSqlObj *)result);
return;
}
if (code < 0) {
SSqlObj *pObj = (SSqlObj *)result;
if (code == TSDB_CODE_TSC_INVALID_SQL) {
- httpError("context:%p, fd:%d, ip:%s, user:%s, query error, taos:%p, code:%s, sqlObj:%p, error:%s",
- pContext, pContext->fd, pContext->ipstr, pContext->user, pContext->session->taos, tstrerror(code), pObj, pObj->cmd.payload);
+ httpError("context:%p, fd:%d, user:%s, query error, code:%s, sqlObj:%p, error:%s", pContext,
+ pContext->fd, pContext->user, tstrerror(code), pObj, pObj->cmd.payload);
httpSendTaosdInvalidSqlErrorResp(pContext, pObj->cmd.payload);
} else {
- httpError("context:%p, fd:%d, ip:%s, user:%s, query error, taos:%p, code:%s, sqlObj:%p",
- pContext, pContext->fd, pContext->ipstr, pContext->user, pContext->session->taos, tstrerror(code), pObj);
- httpSendTaosdErrorResp(pContext, code);
+ httpError("context:%p, fd:%d, user:%s, query error, code:%s, sqlObj:%p", pContext, pContext->fd,
+ pContext->user, tstrerror(code), pObj);
+ httpSendErrorResp(pContext, code);
}
taos_free_result(result);
return;
@@ -294,8 +282,8 @@ void httpProcessSingleSqlCallBackImp(void *param, TAOS_RES *result, int unUsedCo
// not select or show commands
int affectRows = taos_affected_rows(result);
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, affect rows:%d, stop query, sqlObj:%p",
- pContext, pContext->fd, pContext->ipstr, pContext->user, affectRows, result);
+ httpDebug("context:%p, fd:%d, user:%s, affect rows:%d, stop query, sqlObj:%p", pContext, pContext->fd,
+ pContext->user, affectRows, result);
if (encode->startJsonFp) {
(encode->startJsonFp)(pContext, &pContext->singleCmd, result);
@@ -312,8 +300,7 @@ void httpProcessSingleSqlCallBackImp(void *param, TAOS_RES *result, int unUsedCo
taos_free_result(result);
httpCloseContextByApp(pContext);
} else {
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, start retrieve", pContext, pContext->fd, pContext->ipstr,
- pContext->user);
+ httpDebug("context:%p, fd:%d, user:%s, start retrieve", pContext, pContext->fd, pContext->user);
if (encode->startJsonFp) {
(encode->startJsonFp)(pContext, &pContext->singleCmd, result);
@@ -333,25 +320,23 @@ void httpProcessSingleSqlCmd(HttpContext *pContext) {
HttpSession *pSession = pContext->session;
if (sql == NULL || sql[0] == 0) {
- httpError("context:%p, fd:%d, ip:%s, user:%s, error:no sql input", pContext, pContext->fd, pContext->ipstr,
- pContext->user);
- httpSendErrorResp(pContext, HTTP_NO_SQL_INPUT);
+ httpError("context:%p, fd:%d, user:%s, error:no sql input", pContext, pContext->fd, pContext->user);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_SQL_INPUT);
return;
}
- httpTraceL("context:%p, fd:%d, ip:%s, user:%s, start query, sql:%s", pContext, pContext->fd, pContext->ipstr,
- pContext->user, sql);
+ httpTraceL("context:%p, fd:%d, user:%s, start query, sql:%s", pContext, pContext->fd, pContext->user, sql);
taosNotePrintHttp(sql);
taos_query_a(pSession->taos, sql, httpProcessSingleSqlCallBack, (void *)pContext);
}
void httpProcessLoginCmd(HttpContext *pContext) {
char token[128] = {0};
- if (!httpGenTaosdAuthToken(pContext, token, 128)) {
- httpSendErrorResp(pContext, HTTP_GEN_TAOSD_TOKEN_ERR);
+ if (httpGenTaosdAuthToken(pContext, token, 128) != 0) {
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_GEN_TAOSD_TOKEN_ERR);
} else {
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, login via http, return token:%s",
- pContext, pContext->fd, pContext->ipstr, pContext->user, token);
+ httpDebug("context:%p, fd:%d, user:%s, login via http, return token:%s", pContext, pContext->fd, pContext->user,
+ token);
httpSendSuccResp(pContext, token);
}
}
@@ -397,25 +382,24 @@ void httpProcessRequestCb(void *param, TAOS_RES *result, int code) {
if (pContext == NULL) return;
if (code < 0) {
- httpError("context:%p, fd:%d, ip:%s, user:%s, login error, code:%s", pContext, pContext->fd, pContext->ipstr,
- pContext->user, tstrerror(code));
- httpSendTaosdErrorResp(pContext, code);
+ httpError("context:%p, fd:%d, user:%s, login error, code:%s", pContext, pContext->fd, pContext->user,
+ tstrerror(code));
+ httpSendErrorResp(pContext, code);
return;
}
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, connect tdengine success, taos:%p", pContext, pContext->fd,
- pContext->ipstr, pContext->user, pContext->taos);
+ httpDebug("context:%p, fd:%d, user:%s, connect tdengine success, taos:%p", pContext, pContext->fd, pContext->user,
+ pContext->taos);
if (pContext->taos == NULL) {
- httpError("context:%p, fd:%d, ip:%s, user:%s, login error, taos is empty", pContext, pContext->fd, pContext->ipstr,
- pContext->user);
- httpSendErrorResp(pContext, HTTP_NO_ENOUGH_SESSIONS);
+ httpError("context:%p, fd:%d, user:%s, login error, taos is empty", pContext, pContext->fd, pContext->user);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_LOGIN_FAILED);
return;
}
httpCreateSession(pContext, pContext->taos);
if (pContext->session == NULL) {
- httpSendErrorResp(pContext, HTTP_SESSION_FULL);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_SESSION_FULL);
httpCloseContextByApp(pContext);
} else {
httpExecCmd(pContext);
@@ -428,8 +412,8 @@ void httpProcessRequest(HttpContext *pContext) {
if (pContext->session == NULL || pContext->reqType == HTTP_REQTYPE_LOGIN) {
taos_connect_a(NULL, pContext->user, pContext->pass, "", 0, httpProcessRequestCb, (void *)pContext,
&(pContext->taos));
- httpDebug("context:%p, fd:%d, ip:%s, user:%s, try connect tdengine, taos:%p", pContext, pContext->fd,
- pContext->ipstr, pContext->user, pContext->taos);
+ httpDebug("context:%p, fd:%d, user:%s, try connect tdengine, taos:%p", pContext, pContext->fd, pContext->user,
+ pContext->taos);
} else {
httpExecCmd(pContext);
}
diff --git a/src/plugins/http/src/httpSystem.c b/src/plugins/http/src/httpSystem.c
index e51c8dd4f7..8993b233dd 100644
--- a/src/plugins/http/src/httpSystem.c
+++ b/src/plugins/http/src/httpSystem.c
@@ -27,9 +27,9 @@
#include "httpResp.h"
#include "httpHandle.h"
#include "httpQueue.h"
-#include "gcHandle.h"
-#include "restHandle.h"
-#include "tgHandle.h"
+#include "httpGcHandle.h"
+#include "httpRestHandle.h"
+#include "httpTgHandle.h"
#ifndef _ADMIN
void adminInitHandle(HttpServer* pServer) {}
@@ -37,9 +37,9 @@ void opInitHandle(HttpServer* pServer) {}
#endif
HttpServer tsHttpServer;
-void taosInitNote(int numOfNoteLines, int maxNotes, char* lable);
+void taosInitNote(int32_t numOfNoteLines, int32_t maxNotes, char* lable);
-int httpInitSystem() {
+int32_t httpInitSystem() {
strcpy(tsHttpServer.label, "rest");
tsHttpServer.serverIp = 0;
tsHttpServer.serverPort = tsHttpPort;
@@ -60,7 +60,7 @@ int httpInitSystem() {
return 0;
}
-int httpStartSystem() {
+int32_t httpStartSystem() {
httpInfo("start http server ...");
if (tsHttpServer.status != HTTP_SERVER_INIT) {
diff --git a/src/plugins/http/src/tgHandle.c b/src/plugins/http/src/httpTgHandle.c
similarity index 79%
rename from src/plugins/http/src/tgHandle.c
rename to src/plugins/http/src/httpTgHandle.c
index 48c66c4c07..e2b57b87bb 100644
--- a/src/plugins/http/src/tgHandle.c
+++ b/src/plugins/http/src/httpTgHandle.c
@@ -19,8 +19,8 @@
#include "taosdef.h"
#include "taosmsg.h"
#include "httpInt.h"
-#include "tgHandle.h"
-#include "tgJson.h"
+#include "httpTgHandle.h"
+#include "httpTgJson.h"
#include "cJSON.h"
/*
@@ -83,16 +83,16 @@ static const char DEFAULT_TELEGRAF_CFG[] =
"]}";
typedef struct {
- char *name;
- char *tbName;
- char **fields;
- int fieldNum;
+ char * name;
+ char * tbName;
+ char ** fields;
+ int32_t fieldNum;
} STgSchema;
typedef struct {
STgSchema *schemas;
- int size;
- int pos;
+ int32_t size;
+ int32_t pos;
} STgSchemas;
static STgSchemas tgSchemas = {0};
@@ -107,7 +107,7 @@ void tgFreeSchema(STgSchema *schema) {
schema->tbName = NULL;
}
if (schema->fields != NULL) {
- for (int f = 0; f < schema->fieldNum; ++f) {
+ for (int32_t f = 0; f < schema->fieldNum; ++f) {
if (schema->fields[f] != NULL) {
free(schema->fields[f]);
schema->fields[f] = NULL;
@@ -121,7 +121,7 @@ void tgFreeSchema(STgSchema *schema) {
void tgFreeSchemas() {
if (tgSchemas.schemas != NULL) {
- for (int s = 0; s < tgSchemas.size; ++s) {
+ for (int32_t s = 0; s < tgSchemas.size; ++s) {
tgFreeSchema(&tgSchemas.schemas[s]);
}
free(tgSchemas.schemas);
@@ -130,7 +130,7 @@ void tgFreeSchemas() {
}
}
-void tgInitSchemas(int size) {
+void tgInitSchemas(int32_t size) {
tgFreeSchemas();
tgSchemas.schemas = calloc(sizeof(STgSchema), size);
tgSchemas.size = 0;
@@ -154,7 +154,7 @@ void tgParseSchemaMetric(cJSON *metric) {
parsedOk = false;
goto ParseEnd;
}
- int nameLen = (int)strlen(name->valuestring);
+ int32_t nameLen = (int32_t)strlen(name->valuestring);
if (nameLen == 0) {
parsedOk = false;
goto ParseEnd;
@@ -177,7 +177,7 @@ void tgParseSchemaMetric(cJSON *metric) {
parsedOk = false;
goto ParseEnd;
}
- int tbnameLen = (int)strlen(tbname->valuestring);
+ int32_t tbnameLen = (int32_t)strlen(tbname->valuestring);
if (tbnameLen == 0) {
parsedOk = false;
goto ParseEnd;
@@ -191,7 +191,7 @@ void tgParseSchemaMetric(cJSON *metric) {
if (fields == NULL) {
goto ParseEnd;
}
- int fieldSize = cJSON_GetArraySize(fields);
+ int32_t fieldSize = cJSON_GetArraySize(fields);
if (fieldSize <= 0 || fieldSize > TSDB_MAX_COLUMNS) {
goto ParseEnd;
}
@@ -199,7 +199,7 @@ void tgParseSchemaMetric(cJSON *metric) {
if (fieldSize > 0) {
schema.fields = calloc(sizeof(STgSchema), (size_t)fieldSize);
schema.fieldNum = fieldSize;
- for (int i = 0; i < fieldSize; i++) {
+ for (int32_t i = 0; i < fieldSize; i++) {
cJSON *field = cJSON_GetArrayItem(fields, i);
if (field == NULL) {
parsedOk = false;
@@ -209,7 +209,7 @@ void tgParseSchemaMetric(cJSON *metric) {
parsedOk = false;
goto ParseEnd;
}
- int nameLen = (int)strlen(field->valuestring);
+ int32_t nameLen = (int32_t)strlen(field->valuestring);
if (nameLen == 0 || nameLen >= TSDB_TABLE_NAME_LEN) {
parsedOk = false;
goto ParseEnd;
@@ -227,13 +227,13 @@ ParseEnd:
}
}
-int tgParseSchema(const char *content, char*fileName) {
+int32_t tgParseSchema(const char *content, char*fileName) {
cJSON *root = cJSON_Parse(content);
if (root == NULL) {
httpError("failed to parse telegraf schema file:%s, invalid json format, content:%s", fileName, content);
return -1;
}
- int size = 0;
+ int32_t size = 0;
cJSON *metrics = cJSON_GetObjectItem(root, "metrics");
if (metrics != NULL) {
size = cJSON_GetArraySize(metrics);
@@ -244,7 +244,7 @@ int tgParseSchema(const char *content, char*fileName) {
}
tgInitSchemas(size);
- for (int i = 0; i < size; i++) {
+ for (int32_t i = 0; i < size; i++) {
cJSON *metric = cJSON_GetArrayItem(metrics, i);
if (metric != NULL) {
tgParseSchemaMetric(metric);
@@ -260,7 +260,7 @@ int tgParseSchema(const char *content, char*fileName) {
return size;
}
-int tgReadSchema(char *fileName) {
+int32_t tgReadSchema(char *fileName) {
FILE *fp = fopen(fileName, "r");
if (fp == NULL) {
return -1;
@@ -286,7 +286,7 @@ int tgReadSchema(char *fileName) {
}
content[contentSize] = 0;
- int schemaNum = tgParseSchema(content, fileName);
+ int32_t schemaNum = tgParseSchema(content, fileName);
free(content);
fclose(fp);
@@ -313,53 +313,53 @@ void tgCleanupHandle() {
}
bool tgGetUserFromUrl(HttpContext *pContext) {
- HttpParser *pParser = &pContext->parser;
- if (pParser->path[TG_USER_URL_POS].len >= TSDB_USER_LEN || pParser->path[TG_USER_URL_POS].len <= 0) {
+ HttpParser *pParser = pContext->parser;
+ if (pParser->path[TG_USER_URL_POS].pos >= TSDB_USER_LEN || pParser->path[TG_USER_URL_POS].pos <= 0) {
return false;
}
- tstrncpy(pContext->user, pParser->path[TG_USER_URL_POS].pos, sizeof(pContext->user));
+ tstrncpy(pContext->user, pParser->path[TG_USER_URL_POS].str, sizeof(pContext->user));
return true;
}
bool tgGetPassFromUrl(HttpContext *pContext) {
- HttpParser *pParser = &pContext->parser;
- if (pParser->path[TG_PASS_URL_POS].len >= TSDB_PASSWORD_LEN || pParser->path[TG_PASS_URL_POS].len <= 0) {
+ HttpParser *pParser = pContext->parser;
+ if (pParser->path[TG_PASS_URL_POS].pos >= TSDB_PASSWORD_LEN || pParser->path[TG_PASS_URL_POS].pos <= 0) {
return false;
}
- tstrncpy(pContext->pass, pParser->path[TG_PASS_URL_POS].pos, sizeof(pContext->pass));
+ tstrncpy(pContext->pass, pParser->path[TG_PASS_URL_POS].str, sizeof(pContext->pass));
return true;
}
char *tgGetDbFromUrl(HttpContext *pContext) {
- HttpParser *pParser = &pContext->parser;
- if (pParser->path[TG_DB_URL_POS].len <= 0) {
- httpSendErrorResp(pContext, HTTP_TG_DB_NOT_INPUT);
+ HttpParser *pParser = pContext->parser;
+ if (pParser->path[TG_DB_URL_POS].pos <= 0) {
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_DB_NOT_INPUT);
return NULL;
}
- if (pParser->path[TG_DB_URL_POS].len >= TSDB_DB_NAME_LEN) {
- httpSendErrorResp(pContext, HTTP_TG_DB_TOO_LONG);
+ if (pParser->path[TG_DB_URL_POS].pos >= TSDB_DB_NAME_LEN) {
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_DB_TOO_LONG);
return NULL;
}
- return pParser->path[TG_DB_URL_POS].pos;
+ return pParser->path[TG_DB_URL_POS].str;
}
-char *tgGetStableName(char *stname, cJSON *fields, int fieldsSize) {
- for (int s = 0; s < tgSchemas.size; ++s) {
+char *tgGetStableName(char *stname, cJSON *fields, int32_t fieldsSize) {
+ for (int32_t s = 0; s < tgSchemas.size; ++s) {
STgSchema *schema = &tgSchemas.schemas[s];
if (strcasecmp(schema->name, stname) != 0) {
continue;
}
bool schemaMatched = true;
- for (int f = 0; f < schema->fieldNum; ++f) {
+ for (int32_t f = 0; f < schema->fieldNum; ++f) {
char *fieldName = schema->fields[f];
bool fieldMatched = false;
- for (int i = 0; i < fieldsSize; i++) {
+ for (int32_t i = 0; i < fieldsSize; i++) {
cJSON *field = cJSON_GetArrayItem(fields, i);
if (strcasecmp(field->string, fieldName) == 0) {
fieldMatched = true;
@@ -401,70 +401,70 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) {
// metric name
cJSON *name = cJSON_GetObjectItem(metric, "name");
if (name == NULL) {
- httpSendErrorResp(pContext, HTTP_TG_METRIC_NULL);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_METRIC_NULL);
return false;
}
if (name->type != cJSON_String) {
- httpSendErrorResp(pContext, HTTP_TG_METRIC_TYPE);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_METRIC_TYPE);
return false;
}
if (name->valuestring == NULL) {
- httpSendErrorResp(pContext, HTTP_TG_METRIC_NAME_NULL);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_METRIC_NAME_NULL);
return false;
}
- int nameLen = (int)strlen(name->valuestring);
+ int32_t nameLen = (int32_t)strlen(name->valuestring);
if (nameLen == 0) {
- httpSendErrorResp(pContext, HTTP_TG_METRIC_NAME_NULL);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_METRIC_NAME_NULL);
return false;
}
if (nameLen >= TSDB_TABLE_NAME_LEN - 8) {
- httpSendErrorResp(pContext, HTTP_TG_METRIC_NAME_LONG);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_METRIC_NAME_LONG);
return false;
}
// timestamp
cJSON *timestamp = cJSON_GetObjectItem(metric, "timestamp");
if (timestamp == NULL) {
- httpSendErrorResp(pContext, HTTP_TG_TIMESTAMP_NULL);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_TIMESTAMP_NULL);
return false;
}
if (timestamp->type != cJSON_Number) {
- httpSendErrorResp(pContext, HTTP_TG_TIMESTAMP_TYPE);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_TIMESTAMP_TYPE);
return false;
}
if (timestamp->valueint <= 0) {
- httpSendErrorResp(pContext, HTTP_TG_TIMESTAMP_VAL_NULL);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_TIMESTAMP_VAL_NULL);
return false;
}
// tags
cJSON *tags = cJSON_GetObjectItem(metric, "tags");
if (tags == NULL) {
- httpSendErrorResp(pContext, HTTP_TG_TAGS_NULL);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_TAGS_NULL);
return false;
}
- int tagsSize = cJSON_GetArraySize(tags);
+ int32_t tagsSize = cJSON_GetArraySize(tags);
if (tagsSize <= 0) {
- httpSendErrorResp(pContext, HTTP_TG_TAGS_SIZE_0);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_TAGS_SIZE_0);
return false;
}
if (tagsSize > TG_MAX_SORT_TAG_SIZE) {
- httpSendErrorResp(pContext, HTTP_TG_TAGS_SIZE_LONG);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_TAGS_SIZE_LONG);
return false;
}
cJSON *host = NULL;
- for (int i = 0; i < tagsSize; i++) {
+ for (int32_t i = 0; i < tagsSize; i++) {
cJSON *tag = cJSON_GetArrayItem(tags, i);
if (tag == NULL) {
- httpSendErrorResp(pContext, HTTP_TG_TAG_NULL);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_TAG_NULL);
return false;
}
if (tag->string == NULL || strlen(tag->string) == 0) {
- httpSendErrorResp(pContext, HTTP_TG_TAG_NAME_NULL);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_TAG_NAME_NULL);
return false;
}
@@ -474,19 +474,19 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) {
*/
if (0) {
if (strlen(tag->string) >= TSDB_COL_NAME_LEN) {
- httpSendErrorResp(pContext, HTTP_TG_TAG_NAME_SIZE);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_TAG_NAME_SIZE);
return false;
}
}
if (tag->type != cJSON_Number && tag->type != cJSON_String) {
- httpSendErrorResp(pContext, HTTP_TG_TAG_VALUE_TYPE);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_TAG_VALUE_TYPE);
return false;
}
if (tag->type == cJSON_String) {
if (tag->valuestring == NULL || strlen(tag->valuestring) == 0) {
- httpSendErrorResp(pContext, HTTP_TG_TAG_VALUE_NULL);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_TAG_VALUE_NULL);
return false;
}
}
@@ -497,46 +497,46 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) {
}
if (host == NULL) {
- httpSendErrorResp(pContext, HTTP_TG_TABLE_NULL);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_TABLE_NULL);
return false;
}
if (host->type != cJSON_String) {
- httpSendErrorResp(pContext, HTTP_TG_HOST_NOT_STRING);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_HOST_NOT_STRING);
return false;
}
if (strlen(host->valuestring) >= TSDB_TABLE_NAME_LEN - 1) {
- httpSendErrorResp(pContext, HTTP_TG_TABLE_SIZE);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_TABLE_SIZE);
return false;
}
// fields
cJSON *fields = cJSON_GetObjectItem(metric, "fields");
if (fields == NULL) {
- httpSendErrorResp(pContext, HTTP_TG_FIELDS_NULL);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_FIELDS_NULL);
return false;
}
- int fieldsSize = cJSON_GetArraySize(fields);
+ int32_t fieldsSize = cJSON_GetArraySize(fields);
if (fieldsSize <= 0) {
- httpSendErrorResp(pContext, HTTP_TG_FIELDS_SIZE_0);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_FIELDS_SIZE_0);
return false;
}
if (fieldsSize > (TSDB_MAX_COLUMNS - TSDB_MAX_TAGS - 1)) {
- httpSendErrorResp(pContext, HTTP_TG_FIELDS_SIZE_LONG);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_FIELDS_SIZE_LONG);
return false;
}
- for (int i = 0; i < fieldsSize; i++) {
+ for (int32_t i = 0; i < fieldsSize; i++) {
cJSON *field = cJSON_GetArrayItem(fields, i);
if (field == NULL) {
- httpSendErrorResp(pContext, HTTP_TG_FIELD_NULL);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_FIELD_NULL);
return false;
}
if (field->string == NULL || strlen(field->string) == 0) {
- httpSendErrorResp(pContext, HTTP_TG_FIELD_NAME_NULL);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_FIELD_NAME_NULL);
return false;
}
/*
@@ -545,17 +545,17 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) {
*/
if (0) {
if (strlen(field->string) >= TSDB_COL_NAME_LEN) {
- httpSendErrorResp(pContext, HTTP_TG_FIELD_NAME_SIZE);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_FIELD_NAME_SIZE);
return false;
}
}
if (field->type != cJSON_Number && field->type != cJSON_String) {
- httpSendErrorResp(pContext, HTTP_TG_FIELD_VALUE_TYPE);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_FIELD_VALUE_TYPE);
return false;
}
if (field->type == cJSON_String) {
if (field->valuestring == NULL || strlen(field->valuestring) == 0) {
- httpSendErrorResp(pContext, HTTP_TG_FIELD_VALUE_NULL);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_FIELD_VALUE_NULL);
return false;
}
}
@@ -564,7 +564,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) {
// assembling cmds
HttpSqlCmd *stable_cmd = httpNewSqlCmd(pContext);
if (stable_cmd == NULL) {
- httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_ENOUGH_MEMORY);
return false;
}
stable_cmd->cmdType = HTTP_CMD_TYPE_CREATE_STBALE;
@@ -572,18 +572,18 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) {
HttpSqlCmd *table_cmd = httpNewSqlCmd(pContext);
if (table_cmd == NULL) {
- httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_ENOUGH_MEMORY);
return false;
}
table_cmd->cmdType = HTTP_CMD_TYPE_INSERT;
// order by tag name
cJSON *orderedTags[TG_MAX_SORT_TAG_SIZE] = {0};
- int orderTagsLen = 0;
- for (int i = 0; i < tagsSize; ++i) {
+ int32_t orderTagsLen = 0;
+ for (int32_t i = 0; i < tagsSize; ++i) {
cJSON *tag = cJSON_GetArrayItem(tags, i);
orderedTags[orderTagsLen++] = tag;
- for (int j = orderTagsLen - 1; j >= 1; --j) {
+ for (int32_t j = orderTagsLen - 1; j >= 1; --j) {
cJSON *tag1 = orderedTags[j];
cJSON *tag2 = orderedTags[j - 1];
if (strcasecmp(tag1->string, "host") == 0 || strcmp(tag1->string, tag2->string) < 0) {
@@ -609,7 +609,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) {
httpShrinkTableName(pContext, table_cmd->stable, httpGetCmdsString(pContext, table_cmd->stable));
// stable tag for detail
- for (int i = 0; i < orderTagsLen; ++i) {
+ for (int32_t i = 0; i < orderTagsLen; ++i) {
cJSON *tag = orderedTags[i];
stable_cmd->tagNames[i] = table_cmd->tagNames[i] = httpAddToSqlCmdBuffer(pContext, tag->string);
@@ -631,7 +631,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) {
} else {
table_cmd->table = stable_cmd->table = httpAddToSqlCmdBufferNoTerminal(pContext, "%s_%d_%d_%s", stname, fieldsSize, orderTagsLen, host->valuestring);
}
- for (int i = 0; i < orderTagsLen; ++i) {
+ for (int32_t i = 0; i < orderTagsLen; ++i) {
cJSON *tag = orderedTags[i];
if (tag == host) continue;
if (tag->type == cJSON_String)
@@ -653,7 +653,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) {
// assembling create stable sql
stable_cmd->sql = httpAddToSqlCmdBufferNoTerminal(pContext, "create table if not exists %s.%s(ts timestamp", db,
httpGetCmdsString(pContext, table_cmd->stable));
- for (int i = 0; i < fieldsSize; ++i) {
+ for (int32_t i = 0; i < fieldsSize; ++i) {
cJSON *field = cJSON_GetArrayItem(fields, i);
char * field_type = "double";
if (field->type == cJSON_String)
@@ -668,7 +668,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) {
}
httpAddToSqlCmdBufferNoTerminal(pContext, ") tags(");
- for (int i = 0; i < orderTagsLen; ++i) {
+ for (int32_t i = 0; i < orderTagsLen; ++i) {
cJSON *tag = orderedTags[i];
char * tag_type = "bigint";
if (tag->type == cJSON_String)
@@ -689,7 +689,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) {
table_cmd->sql = httpAddToSqlCmdBufferNoTerminal(pContext, "import into %s.%s using %s.%s tags(", db,
httpGetCmdsString(pContext, table_cmd->table), db,
httpGetCmdsString(pContext, table_cmd->stable));
- for (int i = 0; i < orderTagsLen; ++i) {
+ for (int32_t i = 0; i < orderTagsLen; ++i) {
cJSON *tag = orderedTags[i];
if (i != orderTagsLen - 1) {
if (tag->type == cJSON_Number)
@@ -719,7 +719,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) {
}
httpAddToSqlCmdBufferNoTerminal(pContext, " values(%" PRId64 ",", timestamp->valueint);
- for (int i = 0; i < fieldsSize; ++i) {
+ for (int32_t i = 0; i < fieldsSize; ++i) {
cJSON *field = cJSON_GetArrayItem(fields, i);
if (i != fieldsSize - 1) {
if (field->type == cJSON_Number)
@@ -800,48 +800,46 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) {
}
*/
bool tgProcessQueryRequest(HttpContext *pContext, char *db) {
- httpDebug("context:%p, fd:%d, ip:%s, process telegraf query msg", pContext, pContext->fd, pContext->ipstr);
+ httpDebug("context:%p, fd:%d, process telegraf query msg", pContext, pContext->fd);
- HttpParser *pParser = &pContext->parser;
- char * filter = pParser->data.pos;
+ char *filter = pContext->parser->body.str;
if (filter == NULL) {
- httpSendErrorResp(pContext, HTTP_NO_MSG_INPUT);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_MSG_INPUT);
return false;
}
cJSON *root = cJSON_Parse(filter);
if (root == NULL) {
- httpSendErrorResp(pContext, HTTP_TG_INVALID_JSON);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_INVALID_JSON);
return false;
}
cJSON *metrics = cJSON_GetObjectItem(root, "metrics");
if (metrics != NULL) {
- int size = cJSON_GetArraySize(metrics);
- httpDebug("context:%p, fd:%d, ip:%s, multiple metrics:%d at one time", pContext, pContext->fd, pContext->ipstr,
- size);
+ int32_t size = cJSON_GetArraySize(metrics);
+ httpDebug("context:%p, fd:%d, multiple metrics:%d at one time", pContext, pContext->fd, size);
if (size <= 0) {
- httpSendErrorResp(pContext, HTTP_TG_METRICS_NULL);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_METRICS_NULL);
cJSON_Delete(root);
return false;
}
- int cmdSize = size * 2 + 1;
+ int32_t cmdSize = size * 2 + 1;
if (cmdSize > HTTP_MAX_CMD_SIZE) {
- httpSendErrorResp(pContext, HTTP_TG_METRICS_SIZE);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_METRICS_SIZE);
cJSON_Delete(root);
return false;
}
if (!httpMallocMultiCmds(pContext, cmdSize, HTTP_BUFFER_SIZE)) {
- httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_ENOUGH_MEMORY);
cJSON_Delete(root);
return false;
}
HttpSqlCmd *cmd = httpNewSqlCmd(pContext);
if (cmd == NULL) {
- httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_ENOUGH_MEMORY);
cJSON_Delete(root);
return false;
}
@@ -849,7 +847,7 @@ bool tgProcessQueryRequest(HttpContext *pContext, char *db) {
cmd->cmdReturnType = HTTP_CMD_RETURN_TYPE_NO_RETURN;
cmd->sql = httpAddToSqlCmdBuffer(pContext, "create database if not exists %s", db);
- for (int i = 0; i < size; i++) {
+ for (int32_t i = 0; i < size; i++) {
cJSON *metric = cJSON_GetArrayItem(metrics, i);
if (metric != NULL) {
if (!tgProcessSingleMetric(pContext, metric, db)) {
@@ -859,17 +857,17 @@ bool tgProcessQueryRequest(HttpContext *pContext, char *db) {
}
}
} else {
- httpDebug("context:%p, fd:%d, ip:%s, single metric", pContext, pContext->fd, pContext->ipstr);
+ httpDebug("context:%p, fd:%d, single metric", pContext, pContext->fd);
if (!httpMallocMultiCmds(pContext, 3, HTTP_BUFFER_SIZE)) {
- httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_ENOUGH_MEMORY);
cJSON_Delete(root);
return false;
}
HttpSqlCmd *cmd = httpNewSqlCmd(pContext);
if (cmd == NULL) {
- httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_ENOUGH_MEMORY);
cJSON_Delete(root);
return false;
}
@@ -894,7 +892,7 @@ bool tgProcessQueryRequest(HttpContext *pContext, char *db) {
bool tgProcessRquest(struct HttpContext *pContext) {
if (strlen(pContext->user) == 0 || strlen(pContext->pass) == 0) {
- httpSendErrorResp(pContext, HTTP_PARSE_USR_ERROR);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_AUTH_INFO);
return false;
}
diff --git a/src/plugins/http/src/tgJson.c b/src/plugins/http/src/httpTgJson.c
similarity index 73%
rename from src/plugins/http/src/tgJson.c
rename to src/plugins/http/src/httpTgJson.c
index ed4ee0d7de..603092f09d 100644
--- a/src/plugins/http/src/tgJson.c
+++ b/src/plugins/http/src/httpTgJson.c
@@ -19,8 +19,8 @@
#include "httpLog.h"
#include "httpJson.h"
#include "httpResp.h"
-#include "tgHandle.h"
-#include "tgJson.h"
+#include "httpTgHandle.h"
+#include "httpTgJson.h"
void tgInitQueryJson(HttpContext *pContext) {
JsonBuf *jsonBuf = httpMallocJsonBuf(pContext);
@@ -61,19 +61,19 @@ void tgStartQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result)
// data
httpJsonItemToken(jsonBuf);
httpJsonPair(jsonBuf, "metric", 6, httpGetCmdsString(pContext, cmd->stable),
- (int)strlen(httpGetCmdsString(pContext, cmd->metric)));
+ (int32_t)strlen(httpGetCmdsString(pContext, cmd->metric)));
httpJsonItemToken(jsonBuf);
httpJsonPair(jsonBuf, "stable", 6, httpGetCmdsString(pContext, cmd->stable),
- (int)strlen(httpGetCmdsString(pContext, cmd->stable)));
+ (int32_t)strlen(httpGetCmdsString(pContext, cmd->stable)));
httpJsonItemToken(jsonBuf);
httpJsonPair(jsonBuf, "table", 5, httpGetCmdsString(pContext, cmd->table),
- (int)strlen(httpGetCmdsString(pContext, cmd->table)));
+ (int32_t)strlen(httpGetCmdsString(pContext, cmd->table)));
httpJsonItemToken(jsonBuf);
httpJsonPair(jsonBuf, "timestamp", 9, httpGetCmdsString(pContext, cmd->timestamp),
- (int)strlen(httpGetCmdsString(pContext, cmd->timestamp))); // hack way
+ (int32_t)strlen(httpGetCmdsString(pContext, cmd->timestamp))); // hack way
}
void tgStopQueryJson(HttpContext *pContext, HttpSqlCmd *cmd) {
@@ -88,7 +88,7 @@ void tgStopQueryJson(HttpContext *pContext, HttpSqlCmd *cmd) {
httpJsonToken(jsonBuf, JsonObjEnd);
}
-void tgBuildSqlAffectRowsJson(HttpContext *pContext, HttpSqlCmd *cmd, int affect_rows) {
+void tgBuildSqlAffectRowsJson(HttpContext *pContext, HttpSqlCmd *cmd, int32_t affect_rows) {
JsonBuf *jsonBuf = httpMallocJsonBuf(pContext);
if (jsonBuf == NULL) return;
@@ -96,10 +96,10 @@ void tgBuildSqlAffectRowsJson(HttpContext *pContext, HttpSqlCmd *cmd, int affect
httpJsonPairIntVal(jsonBuf, "affected_rows", 13, affect_rows);
}
-bool tgCheckFinished(struct HttpContext *pContext, HttpSqlCmd *cmd, int code) {
+bool tgCheckFinished(struct HttpContext *pContext, HttpSqlCmd *cmd, int32_t code) {
HttpSqlCmds *multiCmds = pContext->multiCmds;
- httpDebug("context:%p, fd:%d, ip:%s, check telegraf command, code:%s, state:%d, type:%d, rettype:%d, tags:%d",
- pContext, pContext->fd, pContext->ipstr, tstrerror(code), cmd->cmdState, cmd->cmdType, cmd->cmdReturnType, cmd->tagNum);
+ httpDebug("context:%p, fd:%d, check telegraf command, code:%s, state:%d, type:%d, rettype:%d, tags:%d", pContext,
+ pContext->fd, tstrerror(code), cmd->cmdState, cmd->cmdType, cmd->cmdReturnType, cmd->tagNum);
if (cmd->cmdType == HTTP_CMD_TYPE_INSERT) {
if (cmd->cmdState == HTTP_CMD_STATE_NOT_RUN_YET) {
@@ -107,16 +107,14 @@ bool tgCheckFinished(struct HttpContext *pContext, HttpSqlCmd *cmd, int code) {
cmd->cmdState = HTTP_CMD_STATE_RUN_FINISHED;
if (multiCmds->cmds[0].cmdState == HTTP_CMD_STATE_NOT_RUN_YET) {
multiCmds->pos = (int16_t)-1;
- httpDebug("context:%p, fd:%d, ip:%s, import failed, try create database", pContext, pContext->fd,
- pContext->ipstr);
+ httpDebug("context:%p, fd:%d, import failed, try create database", pContext, pContext->fd);
return false;
}
} else if (code == TSDB_CODE_MND_INVALID_TABLE_NAME) {
cmd->cmdState = HTTP_CMD_STATE_RUN_FINISHED;
if (multiCmds->cmds[multiCmds->pos - 1].cmdState == HTTP_CMD_STATE_NOT_RUN_YET) {
multiCmds->pos = (int16_t)(multiCmds->pos - 2);
- httpDebug("context:%p, fd:%d, ip:%s, import failed, try create stable", pContext, pContext->fd,
- pContext->ipstr);
+ httpDebug("context:%p, fd:%d, import failed, try create stable", pContext, pContext->fd);
return false;
}
} else {
@@ -125,22 +123,21 @@ bool tgCheckFinished(struct HttpContext *pContext, HttpSqlCmd *cmd, int code) {
}
} else if (cmd->cmdType == HTTP_CMD_TYPE_CREATE_DB) {
cmd->cmdState = HTTP_CMD_STATE_RUN_FINISHED;
- httpDebug("context:%p, fd:%d, ip:%s, code:%s, create database failed", pContext, pContext->fd, pContext->ipstr,
- tstrerror(code));
+ httpDebug("context:%p, fd:%d, code:%s, create database failed", pContext, pContext->fd, tstrerror(code));
} else if (cmd->cmdType == HTTP_CMD_TYPE_CREATE_STBALE) {
cmd->cmdState = HTTP_CMD_STATE_RUN_FINISHED;
- httpDebug("context:%p, fd:%d, ip:%s, code:%s, create stable failed", pContext, pContext->fd, pContext->ipstr, tstrerror(code));
+ httpDebug("context:%p, fd:%d, code:%s, create stable failed", pContext, pContext->fd, tstrerror(code));
} else {
}
return true;
}
-void tgSetNextCmd(struct HttpContext *pContext, HttpSqlCmd *cmd, int code) {
+void tgSetNextCmd(struct HttpContext *pContext, HttpSqlCmd *cmd, int32_t code) {
HttpSqlCmds *multiCmds = pContext->multiCmds;
- httpDebug("context:%p, fd:%d, ip:%s, get telegraf next command, pos:%d, code:%s, state:%d, type:%d, rettype:%d, tags:%d",
- pContext, pContext->fd, pContext->ipstr, multiCmds->pos, tstrerror(code), cmd->cmdState, cmd->cmdType,
- cmd->cmdReturnType, cmd->tagNum);
+ httpDebug("context:%p, fd:%d, get telegraf next command, pos:%d, code:%s, state:%d, type:%d, rettype:%d, tags:%d",
+ pContext, pContext->fd, multiCmds->pos, tstrerror(code), cmd->cmdState, cmd->cmdType, cmd->cmdReturnType,
+ cmd->tagNum);
if (cmd->cmdType == HTTP_CMD_TYPE_INSERT) {
multiCmds->pos = (int16_t)(multiCmds->pos + 2);
diff --git a/src/plugins/http/src/httpUtil.c b/src/plugins/http/src/httpUtil.c
index d1a0eb90f0..39168ee96d 100644
--- a/src/plugins/http/src/httpUtil.c
+++ b/src/plugins/http/src/httpUtil.c
@@ -29,7 +29,7 @@ bool httpCheckUsedbSql(char *sql) {
return false;
}
-void httpTimeToString(time_t t, char *buf, int buflen) {
+void httpTimeToString(time_t t, char *buf, int32_t buflen) {
memset(buf, 0, (size_t)buflen);
char ts[32] = {0};
@@ -44,13 +44,13 @@ int32_t httpAddToSqlCmdBuffer(HttpContext *pContext, const char *const format, .
HttpSqlCmds *cmd = pContext->multiCmds;
if (cmd->buffer == NULL) return -1;
- int remainLength = cmd->bufferSize - cmd->bufferPos;
+ int32_t remainLength = cmd->bufferSize - cmd->bufferPos;
if (remainLength < 4096) {
if (!httpReMallocMultiCmdsBuffer(pContext, cmd->bufferSize * 2)) return -1;
}
- char *buffer = cmd->buffer + cmd->bufferPos;
- int len = 0;
+ char * buffer = cmd->buffer + cmd->bufferPos;
+ int32_t len = 0;
va_list argpointer;
va_start(argpointer, format);
@@ -76,13 +76,13 @@ int32_t httpAddToSqlCmdBufferNoTerminal(HttpContext *pContext, const char *const
HttpSqlCmds *cmd = pContext->multiCmds;
if (cmd->buffer == NULL) return -1;
- int remainLength = cmd->bufferSize - cmd->bufferPos;
+ int32_t remainLength = cmd->bufferSize - cmd->bufferPos;
if (remainLength < 4096) {
if (!httpReMallocMultiCmdsBuffer(pContext, cmd->bufferSize * 2)) return -1;
}
- char *buffer = cmd->buffer + cmd->bufferPos;
- int len = 0;
+ char * buffer = cmd->buffer + cmd->bufferPos;
+ int32_t len = 0;
va_list argpointer;
va_start(argpointer, format);
@@ -107,7 +107,7 @@ int32_t httpAddToSqlCmdBufferTerminal(HttpContext *pContext) {
HttpSqlCmds *cmd = pContext->multiCmds;
if (cmd->buffer == NULL) return -1;
- int remainLength = cmd->bufferSize - cmd->bufferPos;
+ int32_t remainLength = cmd->bufferSize - cmd->bufferPos;
if (remainLength < 4096) {
if (!httpReMallocMultiCmdsBuffer(pContext, cmd->bufferSize * 2)) return -1;
}
@@ -124,7 +124,7 @@ int32_t httpAddToSqlCmdBufferTerminal(HttpContext *pContext) {
return (int32_t)(buffer - cmd->buffer);
}
-int32_t httpAddToSqlCmdBufferWithSize(HttpContext *pContext, int mallocSize) {
+int32_t httpAddToSqlCmdBufferWithSize(HttpContext *pContext, int32_t mallocSize) {
HttpSqlCmds *cmd = pContext->multiCmds;
if (cmd->buffer == NULL) return -1;
@@ -139,18 +139,17 @@ int32_t httpAddToSqlCmdBufferWithSize(HttpContext *pContext, int mallocSize) {
return (int32_t)(buffer - cmd->buffer);
}
-bool httpMallocMultiCmds(HttpContext *pContext, int cmdSize, int bufferSize) {
+bool httpMallocMultiCmds(HttpContext *pContext, int32_t cmdSize, int32_t bufferSize) {
if (cmdSize > HTTP_MAX_CMD_SIZE) {
- httpError("context:%p, fd:%d, ip:%s, user:%s, mulitcmd size:%d large then %d", pContext, pContext->fd,
- pContext->ipstr, pContext->user, cmdSize, HTTP_MAX_CMD_SIZE);
+ httpError("context:%p, fd:%d, user:%s, mulitcmd size:%d large then %d", pContext, pContext->fd, pContext->user,
+ cmdSize, HTTP_MAX_CMD_SIZE);
return false;
}
if (pContext->multiCmds == NULL) {
pContext->multiCmds = (HttpSqlCmds *)malloc(sizeof(HttpSqlCmds));
if (pContext->multiCmds == NULL) {
- httpError("context:%p, fd:%d, ip:%s, user:%s, malloc multiCmds error", pContext, pContext->fd, pContext->ipstr,
- pContext->user);
+ httpError("context:%p, fd:%d, user:%s, malloc multiCmds error", pContext, pContext->fd, pContext->user);
return false;
}
memset(pContext->multiCmds, 0, sizeof(HttpSqlCmds));
@@ -161,7 +160,7 @@ bool httpMallocMultiCmds(HttpContext *pContext, int cmdSize, int bufferSize) {
free(multiCmds->cmds);
multiCmds->cmds = (HttpSqlCmd *)malloc((size_t)cmdSize * sizeof(HttpSqlCmd));
if (multiCmds->cmds == NULL) {
- httpError("context:%p, fd:%d, ip:%s, user:%s, malloc cmds:%d error", pContext, pContext->fd, pContext->ipstr,
+ httpError("context:%p, fd:%d, user:%s, malloc cmds:%d error", pContext, pContext->fd,
pContext->user, cmdSize);
return false;
}
@@ -172,8 +171,8 @@ bool httpMallocMultiCmds(HttpContext *pContext, int cmdSize, int bufferSize) {
free(multiCmds->buffer);
multiCmds->buffer = (char *)malloc((size_t)bufferSize);
if (multiCmds->buffer == NULL) {
- httpError("context:%p, fd:%d, ip:%s, user:%s, malloc buffer:%d error", pContext, pContext->fd, pContext->ipstr,
- pContext->user, bufferSize);
+ httpError("context:%p, fd:%d, user:%s, malloc buffer:%d error", pContext, pContext->fd, pContext->user,
+ bufferSize);
return false;
}
multiCmds->bufferSize = bufferSize;
@@ -187,19 +186,18 @@ bool httpMallocMultiCmds(HttpContext *pContext, int cmdSize, int bufferSize) {
return true;
}
-bool httpReMallocMultiCmdsSize(HttpContext *pContext, int cmdSize) {
+bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) {
HttpSqlCmds *multiCmds = pContext->multiCmds;
if (cmdSize > HTTP_MAX_CMD_SIZE) {
- httpError("context:%p, fd:%d, ip:%s, user:%s, mulitcmd size:%d large then %d", pContext, pContext->fd,
- pContext->ipstr, pContext->user, cmdSize, HTTP_MAX_CMD_SIZE);
+ httpError("context:%p, fd:%d, user:%s, mulitcmd size:%d large then %d", pContext, pContext->fd, pContext->user,
+ cmdSize, HTTP_MAX_CMD_SIZE);
return false;
}
multiCmds->cmds = (HttpSqlCmd *)realloc(multiCmds->cmds, (size_t)cmdSize * sizeof(HttpSqlCmd));
if (multiCmds->cmds == NULL) {
- httpError("context:%p, fd:%d, ip:%s, user:%s, malloc cmds:%d error", pContext, pContext->fd, pContext->ipstr,
- pContext->user, cmdSize);
+ httpError("context:%p, fd:%d, user:%s, malloc cmds:%d error", pContext, pContext->fd, pContext->user, cmdSize);
return false;
}
memset(multiCmds->cmds + multiCmds->maxSize, 0, (size_t)(cmdSize - multiCmds->maxSize) * sizeof(HttpSqlCmd));
@@ -208,19 +206,18 @@ bool httpReMallocMultiCmdsSize(HttpContext *pContext, int cmdSize) {
return true;
}
-bool httpReMallocMultiCmdsBuffer(HttpContext *pContext, int bufferSize) {
+bool httpReMallocMultiCmdsBuffer(HttpContext *pContext, int32_t bufferSize) {
HttpSqlCmds *multiCmds = pContext->multiCmds;
if (bufferSize > HTTP_MAX_BUFFER_SIZE) {
- httpError("context:%p, fd:%d, ip:%s, user:%s, mulitcmd buffer size:%d large then %d",
- pContext, pContext->fd, pContext->ipstr, pContext->user, bufferSize, HTTP_MAX_BUFFER_SIZE);
+ httpError("context:%p, fd:%d, user:%s, mulitcmd buffer size:%d large then %d", pContext, pContext->fd,
+ pContext->user, bufferSize, HTTP_MAX_BUFFER_SIZE);
return false;
}
multiCmds->buffer = (char *)realloc(multiCmds->buffer, (size_t)bufferSize);
if (multiCmds->buffer == NULL) {
- httpError("context:%p, fd:%d, ip:%s, user:%s, malloc buffer:%d error", pContext, pContext->fd, pContext->ipstr,
- pContext->user, bufferSize);
+ httpError("context:%p, fd:%d, user:%s, malloc buffer:%d error", pContext, pContext->fd, pContext->user, bufferSize);
return false;
}
memset(multiCmds->buffer + multiCmds->bufferSize, 0, (size_t)(bufferSize - multiCmds->bufferSize));
@@ -261,7 +258,7 @@ bool httpCompareMethod(HttpDecodeMethod *pSrc, HttpDecodeMethod *pCmp) {
}
void httpAddMethod(HttpServer *pServer, HttpDecodeMethod *pMethod) {
- int pos = 0;
+ int32_t pos = 0;
for (pos = 0; pos < pServer->methodScannerLen; ++pos) {
if (httpCompareMethod(pServer->methodScanner[pos], pMethod)) {
break;
@@ -296,13 +293,13 @@ HttpSqlCmd *httpCurrSqlCmd(HttpContext *pContext) {
return multiCmds->cmds + multiCmds->size - 1;
}
-int httpNextSqlCmdPos(HttpContext *pContext) {
+int32_t httpNextSqlCmdPos(HttpContext *pContext) {
HttpSqlCmds *multiCmds = pContext->multiCmds;
return multiCmds->size;
}
void httpTrimTableName(char *name) {
- for (int i = 0; name[i] != 0; i++) {
+ for (int32_t i = 0; name[i] != 0; i++) {
if (name[i] == ' ' || name[i] == ':' || name[i] == '.' || name[i] == '-' || name[i] == '/' || name[i] == '\'')
name[i] = '_';
if (i == TSDB_TABLE_NAME_LEN) {
@@ -312,9 +309,9 @@ void httpTrimTableName(char *name) {
}
}
-int httpShrinkTableName(HttpContext *pContext, int pos, char *name) {
- int len = 0;
- for (int i = 0; name[i] != 0; i++) {
+int32_t httpShrinkTableName(HttpContext *pContext, int32_t pos, char *name) {
+ int32_t len = 0;
+ for (int32_t i = 0; name[i] != 0; i++) {
if (name[i] == ' ' || name[i] == ':' || name[i] == '.' || name[i] == '-' || name[i] == '/' || name[i] == '\'' ||
name[i] == '\"')
name[i] = '_';
@@ -330,7 +327,7 @@ int httpShrinkTableName(HttpContext *pContext, int pos, char *name) {
MD5Update(&context, (uint8_t *)name, (uint32_t)len);
MD5Final(&context);
- int table_name = httpAddToSqlCmdBuffer(
+ int32_t table_name = httpAddToSqlCmdBuffer(
pContext, "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", context.digest[0],
context.digest[1], context.digest[2], context.digest[3], context.digest[4], context.digest[5], context.digest[6],
context.digest[7], context.digest[8], context.digest[9], context.digest[10], context.digest[11],
@@ -343,7 +340,7 @@ int httpShrinkTableName(HttpContext *pContext, int pos, char *name) {
return table_name;
}
-char *httpGetCmdsString(HttpContext *pContext, int pos) {
+char *httpGetCmdsString(HttpContext *pContext, int32_t pos) {
HttpSqlCmds *multiCmds = pContext->multiCmds;
if (pos < 0 || pos >= multiCmds->bufferSize) {
return "";
@@ -352,8 +349,8 @@ char *httpGetCmdsString(HttpContext *pContext, int pos) {
return multiCmds->buffer + pos;
}
-int httpGzipDeCompress(char *srcData, int32_t nSrcData, char *destData, int32_t *nDestData) {
- int err = 0;
+int32_t httpGzipDeCompress(char *srcData, int32_t nSrcData, char *destData, int32_t *nDestData) {
+ int32_t err = 0;
z_stream gzipStream = {0};
static char dummyHead[2] = {
@@ -396,7 +393,7 @@ int httpGzipDeCompress(char *srcData, int32_t nSrcData, char *destData, int32_t
return 0;
}
-int httpGzipCompressInit(HttpContext *pContext) {
+int32_t httpGzipCompressInit(HttpContext *pContext) {
pContext->gzipStream.zalloc = (alloc_func) 0;
pContext->gzipStream.zfree = (free_func) 0;
pContext->gzipStream.opaque = (voidpf) 0;
@@ -407,17 +404,23 @@ int httpGzipCompressInit(HttpContext *pContext) {
return 0;
}
-int httpGzipCompress(HttpContext *pContext, char *srcData, int32_t nSrcData, char *destData, int32_t *nDestData, bool isTheLast) {
- int err = 0;
+int32_t httpGzipCompress(HttpContext *pContext, char *srcData, int32_t nSrcData, char *destData, int32_t *nDestData, bool isTheLast) {
+ int32_t err = 0;
+ int32_t lastTotalLen = (int32_t) (pContext->gzipStream.total_out);
pContext->gzipStream.next_in = (Bytef *) srcData;
pContext->gzipStream.avail_in = (uLong) nSrcData;
pContext->gzipStream.next_out = (Bytef *) destData;
pContext->gzipStream.avail_out = (uLong) (*nDestData);
- while (pContext->gzipStream.avail_in != 0 && pContext->gzipStream.total_out < (uLong) (*nDestData)) {
+ while (pContext->gzipStream.avail_in != 0) {
if (deflate(&pContext->gzipStream, Z_FULL_FLUSH) != Z_OK) {
return -1;
}
+
+ int32_t cacheLen = pContext->gzipStream.total_out - lastTotalLen;
+ if (cacheLen >= *nDestData) {
+ return -2;
+ }
}
if (pContext->gzipStream.avail_in != 0) {
@@ -430,15 +433,33 @@ int httpGzipCompress(HttpContext *pContext, char *srcData, int32_t nSrcData, cha
break;
}
if (err != Z_OK) {
- return -2;
+ return -3;
}
}
if (deflateEnd(&pContext->gzipStream) != Z_OK) {
- return -3;
+ return -4;
}
}
- *nDestData = (int32_t) (pContext->gzipStream.total_out);
+ *nDestData = (int32_t) (pContext->gzipStream.total_out) - lastTotalLen;
return 0;
}
+
+bool httpUrlMatch(HttpContext* pContext, int32_t pos, char* cmp) {
+ HttpParser* pParser = pContext->parser;
+
+ if (pos < 0 || pos >= HTTP_MAX_URL) {
+ return false;
+ }
+
+ if (pParser->path[pos].pos <= 0) {
+ return false;
+ }
+
+ if (strcmp(pParser->path[pos].str, cmp) != 0) {
+ return false;
+ }
+
+ return true;
+}
diff --git a/src/query/inc/qAst.h b/src/query/inc/qAst.h
index 547616dee6..d3e60c21dc 100644
--- a/src/query/inc/qAst.h
+++ b/src/query/inc/qAst.h
@@ -32,6 +32,7 @@ struct tExprNode;
struct SSchema;
enum {
+ TSQL_NODE_DUMMY = 0x0,
TSQL_NODE_EXPR = 0x1,
TSQL_NODE_COL = 0x2,
TSQL_NODE_VALUE = 0x4,
diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h
index 25fb04fb9a..5d570821cb 100644
--- a/src/query/inc/qExecutor.h
+++ b/src/query/inc/qExecutor.h
@@ -57,7 +57,7 @@ typedef struct SWindowResult {
uint16_t numOfRows; // number of rows of current time window
bool closed; // this result status: closed or opened
SResultInfo* resultInfo; // For each result column, there is a resultInfo
- TSKEY skey; // start key of current time window
+ union {STimeWindow win; char* key;}; // start key of current time window
} SWindowResult;
/**
@@ -132,12 +132,9 @@ typedef struct SQueryCostInfo {
typedef struct SQuery {
int16_t numOfCols;
int16_t numOfTags;
- char intervalTimeUnit;
- char slidingTimeUnit; // interval data type, used for daytime revise
SOrderVal order;
STimeWindow window;
- int64_t intervalTime;
- int64_t slidingTime; // sliding time for sliding window query
+ SInterval interval;
int16_t precision;
int16_t numOfOutput;
int16_t fillType;
diff --git a/src/query/inc/qFill.h b/src/query/inc/qFill.h
index 6b8dcb0bf9..6d44fee095 100644
--- a/src/query/inc/qFill.h
+++ b/src/query/inc/qFill.h
@@ -51,12 +51,11 @@ typedef struct SFillInfo {
int32_t rowSize; // size of each row
// char ** pTags; // tags value for current interpolation
SFillTagColInfo* pTags; // tags value for filling gap
- int64_t slidingTime; // sliding value to determine the number of result for a given time window
+ SInterval interval;
char * prevValues; // previous row of data, to generate the interpolation results
char * nextValues; // next row of data
char** pData; // original result data block involved in filling data
int32_t capacityInRows; // data buffer size in rows
- int8_t slidingUnit; // sliding time unit
int8_t precision; // time resoluation
SFillColInfo* pFillCol; // column info for fill operations
} SFillInfo;
diff --git a/src/query/inc/qPercentile.h b/src/query/inc/qPercentile.h
index 0a52d4f205..c34c24c5b2 100644
--- a/src/query/inc/qPercentile.h
+++ b/src/query/inc/qPercentile.h
@@ -64,11 +64,11 @@ typedef struct tMemBucket {
__perc_hash_func_t hashFunc;
} tMemBucket;
-tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType);
+tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, double maxval);
void tMemBucketDestroy(tMemBucket *pBucket);
-void tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size);
+int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size);
double getPercentile(tMemBucket *pMemBucket, double percent);
diff --git a/src/query/inc/qResultbuf.h b/src/query/inc/qResultbuf.h
index 5303251d98..704df9f3f2 100644
--- a/src/query/inc/qResultbuf.h
+++ b/src/query/inc/qResultbuf.h
@@ -73,12 +73,11 @@ typedef struct SDiskbasedResultBuf {
bool comp; // compressed before flushed to disk
int32_t nextPos; // next page flush position
- const void* handle; // for debug purpose
+ const void* handle; // for debug purpose
SResultBufStatis statis;
} SDiskbasedResultBuf;
-#define DEFAULT_INTERN_BUF_PAGE_SIZE (4096L)
-#define DEFAULT_INMEM_BUF_PAGES 10
+#define DEFAULT_INTERN_BUF_PAGE_SIZE (256L) // in bytes
#define PAGE_INFO_INITIALIZER (SPageDiskInfo){-1, -1}
/**
diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h
index 69fc0bc7ef..d6664577a3 100644
--- a/src/query/inc/qSqlparser.h
+++ b/src/query/inc/qSqlparser.h
@@ -65,6 +65,11 @@ typedef struct tVariantList {
tVariantListItem *a; /* One entry for each expression */
} tVariantList;
+typedef struct SIntervalVal {
+ SStrToken interval;
+ SStrToken offset;
+} SIntervalVal;
+
typedef struct SQuerySQL {
struct tSQLExprList *pSelection; // select clause
tVariantList * from; // from clause
@@ -72,6 +77,7 @@ typedef struct SQuerySQL {
tVariantList * pGroupby; // groupby clause, only for tags[optional]
tVariantList * pSortOrder; // orderby [optional]
SStrToken interval; // interval [optional]
+ SStrToken offset; // offset window [optional]
SStrToken sliding; // sliding window [optional]
SLimitVal limit; // limit offset [optional]
SLimitVal slimit; // group limit offset [optional]
@@ -259,7 +265,7 @@ tSQLExprList *tSQLExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SStrToken
void tSQLExprListDestroy(tSQLExprList *pList);
SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, tVariantList *pFrom, tSQLExpr *pWhere,
- tVariantList *pGroupby, tVariantList *pSortOrder, SStrToken *pInterval,
+ tVariantList *pGroupby, tVariantList *pSortOrder, SIntervalVal *pInterval,
SStrToken *pSliding, tVariantList *pFill, SLimitVal *pLimit, SLimitVal *pGLimit);
SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SStrToken *pMetricName,
diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h
index 6de3c7c0e5..5320e5622e 100644
--- a/src/query/inc/qUtil.h
+++ b/src/query/inc/qUtil.h
@@ -39,7 +39,6 @@ static FORCE_INLINE SWindowResult *getWindowResult(SWindowResInfo *pWindowResInf
}
#define curTimeWindowIndex(_winres) ((_winres)->curIndex)
-#define GET_TIMEWINDOW(_winresInfo, _win) (STimeWindow) {(_win)->skey, ((_win)->skey + (_winresInfo)->interval - 1)}
#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!sq))? (_q)->pSelectExpr[1].base.arg->argValue.i64:1)
bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot);
@@ -50,14 +49,16 @@ static FORCE_INLINE char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int3
tFilePage* page) {
assert(pResult != NULL && pRuntimeEnv != NULL);
- SQuery *pQuery = pRuntimeEnv->pQuery;
-// tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pResult->pos.pageId);
+ SQuery *pQuery = pRuntimeEnv->pQuery;
int32_t realRowId = (int32_t)(pResult->pos.rowId * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery));
return ((char *)page->data) + pRuntimeEnv->offset[columnIndex] * pRuntimeEnv->numOfRowsPerPage +
pQuery->pSelectExpr[columnIndex].bytes * realRowId;
}
+bool isNull_filter(SColumnFilterElem *pFilter, char* minval, char* maxval);
+bool notNull_filter(SColumnFilterElem *pFilter, char* minval, char* maxval);
+
__filter_func_t *getRangeFilterFuncArray(int32_t type);
__filter_func_t *getValueFilterFuncArray(int32_t type);
diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y
index 79aec2f349..e5d1185330 100644
--- a/src/query/inc/sql.y
+++ b/src/query/inc/sql.y
@@ -80,6 +80,7 @@ cmd ::= SHOW GRANTS. { setShowOptions(pInfo, TSDB_MGMT_TABLE_GRANTS, 0, 0);
cmd ::= SHOW VNODES. { setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, 0, 0); }
cmd ::= SHOW VNODES IPTOKEN(X). { setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, &X, 0); }
+
%type dbPrefix {SStrToken}
dbPrefix(A) ::=. {A.n = 0; A.type = 0;}
dbPrefix(A) ::= ids(X) DOT. {A = X; }
@@ -88,6 +89,15 @@ dbPrefix(A) ::= ids(X) DOT. {A = X; }
cpxName(A) ::= . {A.n = 0; }
cpxName(A) ::= DOT ids(Y). {A = Y; A.n += 1; }
+cmd ::= SHOW CREATE TABLE ids(X) cpxName(Y). {
+ X.n += Y.n;
+ setDCLSQLElems(pInfo, TSDB_SQL_SHOW_CREATE_TABLE, 1, &X);
+}
+
+cmd ::= SHOW CREATE DATABASE ids(X). {
+ setDCLSQLElems(pInfo, TSDB_SQL_SHOW_CREATE_DATABASE, 1, &X);
+}
+
cmd ::= SHOW dbPrefix(X) TABLES. {
setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &X, 0);
}
@@ -458,9 +468,10 @@ tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z) ids(F). {
%type tmvar {SStrToken}
tmvar(A) ::= VARIABLE(X). {A = X;}
-%type interval_opt {SStrToken}
-interval_opt(N) ::= INTERVAL LP tmvar(E) RP. {N = E; }
-interval_opt(N) ::= . {N.n = 0; N.z = NULL; N.type = 0; }
+%type interval_opt {SIntervalVal}
+interval_opt(N) ::= INTERVAL LP tmvar(E) RP. {N.interval = E; N.offset.n = 0; N.offset.z = NULL; N.offset.type = 0;}
+interval_opt(N) ::= INTERVAL LP tmvar(E) COMMA tmvar(O) RP. {N.interval = E; N.offset = O;}
+interval_opt(N) ::= . {memset(&N, 0, sizeof(N));}
%type fill_opt {tVariantList*}
%destructor fill_opt {tVariantListDestroy($$);}
@@ -567,53 +578,53 @@ where_opt(A) ::= WHERE expr(X). {A = X;}
%type expr {tSQLExpr*}
%destructor expr {tSQLExprDestroy($$);}
-expr(A) ::= LP expr(X) RP. {A = X; }
+expr(A) ::= LP expr(X) RP. {A = X; }
-expr(A) ::= ID(X). {A = tSQLExprIdValueCreate(&X, TK_ID);}
-expr(A) ::= ID(X) DOT ID(Y). {X.n += (1+Y.n); A = tSQLExprIdValueCreate(&X, TK_ID);}
-expr(A) ::= ID(X) DOT STAR(Y). {X.n += (1+Y.n); A = tSQLExprIdValueCreate(&X, TK_ALL);}
+expr(A) ::= ID(X). {A = tSQLExprIdValueCreate(&X, TK_ID);}
+expr(A) ::= ID(X) DOT ID(Y). {X.n += (1+Y.n); A = tSQLExprIdValueCreate(&X, TK_ID);}
+expr(A) ::= ID(X) DOT STAR(Y). {X.n += (1+Y.n); A = tSQLExprIdValueCreate(&X, TK_ALL);}
-expr(A) ::= INTEGER(X). {A = tSQLExprIdValueCreate(&X, TK_INTEGER);}
+expr(A) ::= INTEGER(X). {A = tSQLExprIdValueCreate(&X, TK_INTEGER);}
expr(A) ::= MINUS(X) INTEGER(Y). {X.n += Y.n; X.type = TK_INTEGER; A = tSQLExprIdValueCreate(&X, TK_INTEGER);}
expr(A) ::= PLUS(X) INTEGER(Y). {X.n += Y.n; X.type = TK_INTEGER; A = tSQLExprIdValueCreate(&X, TK_INTEGER);}
-expr(A) ::= FLOAT(X). {A = tSQLExprIdValueCreate(&X, TK_FLOAT);}
-expr(A) ::= MINUS(X) FLOAT(Y). {X.n += Y.n; X.type = TK_FLOAT; A = tSQLExprIdValueCreate(&X, TK_FLOAT);}
-expr(A) ::= PLUS(X) FLOAT(Y). {X.n += Y.n; X.type = TK_FLOAT; A = tSQLExprIdValueCreate(&X, TK_FLOAT);}
-expr(A) ::= STRING(X). {A = tSQLExprIdValueCreate(&X, TK_STRING);}
-expr(A) ::= NOW(X). {A = tSQLExprIdValueCreate(&X, TK_NOW); }
-expr(A) ::= VARIABLE(X). {A = tSQLExprIdValueCreate(&X, TK_VARIABLE);}
-expr(A) ::= BOOL(X). {A = tSQLExprIdValueCreate(&X, TK_BOOL);}
-// normal functions: min(x)
-expr(A) ::= ID(X) LP exprlist(Y) RP(E). {
- A = tSQLExprCreateFunction(Y, &X, &E, X.type);
-}
+expr(A) ::= FLOAT(X). {A = tSQLExprIdValueCreate(&X, TK_FLOAT);}
+expr(A) ::= MINUS(X) FLOAT(Y). {X.n += Y.n; X.type = TK_FLOAT; A = tSQLExprIdValueCreate(&X, TK_FLOAT);}
+expr(A) ::= PLUS(X) FLOAT(Y). {X.n += Y.n; X.type = TK_FLOAT; A = tSQLExprIdValueCreate(&X, TK_FLOAT);}
+expr(A) ::= STRING(X). {A = tSQLExprIdValueCreate(&X, TK_STRING);}
+expr(A) ::= NOW(X). {A = tSQLExprIdValueCreate(&X, TK_NOW); }
+expr(A) ::= VARIABLE(X). {A = tSQLExprIdValueCreate(&X, TK_VARIABLE);}
+expr(A) ::= BOOL(X). {A = tSQLExprIdValueCreate(&X, TK_BOOL);}
-// this is for: count(*)/first(*)/last(*) operation
-expr(A) ::= ID(X) LP STAR RP(Y). {
- A = tSQLExprCreateFunction(NULL, &X, &Y, X.type);
-}
+// ordinary functions: min(x), max(x), top(k, 20)
+expr(A) ::= ID(X) LP exprlist(Y) RP(E). { A = tSQLExprCreateFunction(Y, &X, &E, X.type); }
-//binary expression: a+2, b+3
-expr(A) ::= expr(X) AND expr(Y). {A = tSQLExprCreate(X, Y, TK_AND);}
-expr(A) ::= expr(X) OR expr(Y). {A = tSQLExprCreate(X, Y, TK_OR); }
+// for parsing sql functions with wildcard for parameters. e.g., count(*)/first(*)/last(*) operation
+expr(A) ::= ID(X) LP STAR RP(Y). { A = tSQLExprCreateFunction(NULL, &X, &Y, X.type); }
-//binary relational expression
-expr(A) ::= expr(X) LT expr(Y). {A = tSQLExprCreate(X, Y, TK_LT);}
-expr(A) ::= expr(X) GT expr(Y). {A = tSQLExprCreate(X, Y, TK_GT);}
-expr(A) ::= expr(X) LE expr(Y). {A = tSQLExprCreate(X, Y, TK_LE);}
-expr(A) ::= expr(X) GE expr(Y). {A = tSQLExprCreate(X, Y, TK_GE);}
-expr(A) ::= expr(X) NE expr(Y). {A = tSQLExprCreate(X, Y, TK_NE);}
-expr(A) ::= expr(X) EQ expr(Y). {A = tSQLExprCreate(X, Y, TK_EQ);}
+// is (not) null expression
+expr(A) ::= expr(X) IS NULL. {A = tSQLExprCreate(X, NULL, TK_ISNULL);}
+expr(A) ::= expr(X) IS NOT NULL. {A = tSQLExprCreate(X, NULL, TK_NOTNULL);}
-//binary arithmetic expression
+// relational expression
+expr(A) ::= expr(X) LT expr(Y). {A = tSQLExprCreate(X, Y, TK_LT);}
+expr(A) ::= expr(X) GT expr(Y). {A = tSQLExprCreate(X, Y, TK_GT);}
+expr(A) ::= expr(X) LE expr(Y). {A = tSQLExprCreate(X, Y, TK_LE);}
+expr(A) ::= expr(X) GE expr(Y). {A = tSQLExprCreate(X, Y, TK_GE);}
+expr(A) ::= expr(X) NE expr(Y). {A = tSQLExprCreate(X, Y, TK_NE);}
+expr(A) ::= expr(X) EQ expr(Y). {A = tSQLExprCreate(X, Y, TK_EQ);}
+
+expr(A) ::= expr(X) AND expr(Y). {A = tSQLExprCreate(X, Y, TK_AND);}
+expr(A) ::= expr(X) OR expr(Y). {A = tSQLExprCreate(X, Y, TK_OR); }
+
+// binary arithmetic expression
expr(A) ::= expr(X) PLUS expr(Y). {A = tSQLExprCreate(X, Y, TK_PLUS); }
expr(A) ::= expr(X) MINUS expr(Y). {A = tSQLExprCreate(X, Y, TK_MINUS); }
expr(A) ::= expr(X) STAR expr(Y). {A = tSQLExprCreate(X, Y, TK_STAR); }
expr(A) ::= expr(X) SLASH expr(Y). {A = tSQLExprCreate(X, Y, TK_DIVIDE);}
expr(A) ::= expr(X) REM expr(Y). {A = tSQLExprCreate(X, Y, TK_REM); }
-//like expression
-expr(A) ::= expr(X) LIKE expr(Y). {A = tSQLExprCreate(X, Y, TK_LIKE); }
+// like expression
+expr(A) ::= expr(X) LIKE expr(Y). {A = tSQLExprCreate(X, Y, TK_LIKE); }
//in expression
expr(A) ::= expr(X) IN LP exprlist(Y) RP. {A = tSQLExprCreate(X, (tSQLExpr*)Y, TK_IN); }
@@ -625,9 +636,9 @@ expr(A) ::= expr(X) IN LP exprlist(Y) RP. {A = tSQLExprCreate(X, (tSQLExpr*)Y,
%destructor expritem {tSQLExprDestroy($$);}
exprlist(A) ::= exprlist(X) COMMA expritem(Y). {A = tSQLExprListAppend(X,Y,0);}
-exprlist(A) ::= expritem(X). {A = tSQLExprListAppend(0,X,0);}
-expritem(A) ::= expr(X). {A = X;}
-expritem(A) ::= . {A = 0;}
+exprlist(A) ::= expritem(X). {A = tSQLExprListAppend(0,X,0);}
+expritem(A) ::= expr(X). {A = X;}
+expritem(A) ::= . {A = 0;}
///////////////////////////////////reset query cache//////////////////////////////////////
cmd ::= RESET QUERY CACHE. { setDCLSQLElems(pInfo, TSDB_SQL_RESET_CACHE, 0);}
diff --git a/src/query/inc/tsqlfunction.h b/src/query/inc/tsqlfunction.h
index c314087179..28b9a60102 100644
--- a/src/query/inc/tsqlfunction.h
+++ b/src/query/inc/tsqlfunction.h
@@ -108,7 +108,7 @@ extern "C" {
#define GET_FORWARD_DIRECTION_FACTOR(ord) (((ord) == TSDB_ORDER_ASC) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP)
-#define MAX_INTERVAL_TIME_WINDOW 10000000
+#define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results
#define TOP_BOTTOM_QUERY_LIMIT 100
enum {
@@ -177,7 +177,7 @@ typedef struct SQLFunctionCtx {
int16_t outputType;
int16_t outputBytes; // size of results, determined by function and input column data type
bool hasNull; // null value exist in current block
- bool requireNull; // require null in some function
+ bool requireNull; // require null in some function
int16_t functionId; // function id
void * aInputElemBuf;
char * aOutputBuf; // final result output buffer, point to sdata->data
diff --git a/src/query/src/qAst.c b/src/query/src/qAst.c
index 634f014d97..63411aaf3f 100644
--- a/src/query/src/qAst.c
+++ b/src/query/src/qAst.c
@@ -188,6 +188,10 @@ uint8_t getBinaryExprOptr(SStrToken *pToken) {
return TSDB_BINARY_OP_REMAINDER;
case TK_LIKE:
return TSDB_RELATION_LIKE;
+ case TK_ISNULL:
+ return TSDB_RELATION_ISNULL;
+ case TK_NOTNULL:
+ return TSDB_RELATION_NOTNULL;
default: { return 0; }
}
}
@@ -486,29 +490,42 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr
} else {
int32_t optr = cond.end ? cond.end->optr : TSDB_RELATION_INVALID;
if (optr == TSDB_RELATION_LESS || optr == TSDB_RELATION_LESS_EQUAL) {
- bool comp = true;
+ bool comp = true;
int32_t ret = 0;
-
- while(tSkipListIterNext(iter)) {
- SSkipListNode* pNode = tSkipListIterGet(iter);
-
+
+ while (tSkipListIterNext(iter)) {
+ SSkipListNode *pNode = tSkipListIterGet(iter);
+
if (comp) {
ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.end->v);
assert(ret <= 0);
}
-
+
if (ret == 0 && optr == TSDB_RELATION_LESS) {
continue;
} else {
- STableKeyInfo info = {.pTable = *(void**)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
+ STableKeyInfo info = {.pTable = *(void **)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
taosArrayPush(result, &info);
comp = false; // no need to compare anymore
}
}
+ } else {
+ assert(pQueryInfo->optr == TSDB_RELATION_ISNULL || pQueryInfo->optr == TSDB_RELATION_NOTNULL);
+
+ while (tSkipListIterNext(iter)) {
+ SSkipListNode *pNode = tSkipListIterGet(iter);
+
+ bool isnull = isNull(SL_GET_NODE_KEY(pSkipList, pNode), pQueryInfo->sch.type);
+ if ((pQueryInfo->optr == TSDB_RELATION_ISNULL && isnull) ||
+ (pQueryInfo->optr == TSDB_RELATION_NOTNULL && (!isnull))) {
+ STableKeyInfo info = {.pTable = *(void **)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
+ taosArrayPush(result, &info);
+ }
+ }
}
}
- free(cond.start);
+ free(cond.start);
free(cond.end);
tSkipListDestroyIter(iter);
}
@@ -683,6 +700,7 @@ static void tQueryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo,
char * pData = SL_GET_NODE_DATA(pNode);
tstr *name = (tstr*) tsdbGetTableName(*(void**) pData);
+
// todo speed up by using hash
if (pQueryInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) {
if (pQueryInfo->optr == TSDB_RELATION_IN) {
@@ -714,7 +732,7 @@ void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, S
// column project
if (pLeft->nodeType != TSQL_NODE_EXPR && pRight->nodeType != TSQL_NODE_EXPR) {
- assert(pLeft->nodeType == TSQL_NODE_COL && pRight->nodeType == TSQL_NODE_VALUE);
+ assert(pLeft->nodeType == TSQL_NODE_COL && (pRight->nodeType == TSQL_NODE_VALUE || pRight->nodeType == TSQL_NODE_DUMMY));
param->setupInfoFn(pExpr, param->pExtInfo);
if (pSkipList == NULL) {
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index d48d7d5ea1..77a402c7be 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -131,21 +131,21 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
static void setQueryStatus(SQuery *pQuery, int8_t status);
static void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv);
-#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->intervalTime > 0)
+#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0)
static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) {
int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
- if (pQuery->intervalTimeUnit != 'n' && pQuery->intervalTimeUnit != 'y') {
- tw->skey += pQuery->slidingTime * factor;
- tw->ekey = tw->skey + pQuery->intervalTime - 1;
+ if (pQuery->interval.intervalUnit != 'n' && pQuery->interval.intervalUnit != 'y') {
+ tw->skey += pQuery->interval.sliding * factor;
+ tw->ekey = tw->skey + pQuery->interval.interval - 1;
return;
}
- int64_t key = tw->skey / 1000, interval = pQuery->intervalTime;
+ int64_t key = tw->skey / 1000, interval = pQuery->interval.interval;
if (pQuery->precision == TSDB_TIME_PRECISION_MICRO) {
key /= 1000;
}
- if (pQuery->intervalTimeUnit == 'y') {
+ if (pQuery->interval.intervalUnit == 'y') {
interval *= 12;
}
@@ -187,7 +187,7 @@ static void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData,
SDataStatis *pStatis, void *param, int32_t colIndex);
static void initCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv);
-static void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo);
+static void destroyTableQueryInfoImpl(STableQueryInfo *pTableQueryInfo);
static void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv);
static bool hasMainOutput(SQuery *pQuery);
static void buildTagQueryResult(SQInfo *pQInfo);
@@ -200,14 +200,28 @@ bool doFilterData(SQuery *pQuery, int32_t elemPos) {
SSingleColumnFilterInfo *pFilterInfo = &pQuery->pFilterInfo[k];
char *pElem = (char*)pFilterInfo->pData + pFilterInfo->info.bytes * elemPos;
- if (isNull(pElem, pFilterInfo->info.type)) {
- return false;
- }
bool qualified = false;
for (int32_t j = 0; j < pFilterInfo->numOfFilters; ++j) {
SColumnFilterElem *pFilterElem = &pFilterInfo->pFilters[j];
+ bool isnull = isNull(pElem, pFilterInfo->info.type);
+ if (isnull) {
+ if (pFilterElem->fp == isNull_filter) {
+ qualified = true;
+ break;
+ } else {
+ continue;
+ }
+ } else {
+ if (pFilterElem->fp == notNull_filter) {
+ qualified = true;
+ break;
+ } else if (pFilterElem->fp == isNull_filter) {
+ continue;
+ }
+ }
+
if (pFilterElem->fp(pFilterElem, pElem, pElem)) {
qualified = true;
break;
@@ -496,35 +510,35 @@ static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t t
if (pWindowResInfo->curIndex == -1) { // the first window, from the previous stored value
w.skey = pWindowResInfo->prevSKey;
- if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
- w.ekey = taosAddNatualInterval(w.skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
+ if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') {
+ w.ekey = taosTimeAdd(w.skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision);
} else {
- w.ekey = w.skey + pQuery->intervalTime - 1;
+ w.ekey = w.skey + pQuery->interval.interval - 1;
}
} else {
int32_t slot = curTimeWindowIndex(pWindowResInfo);
SWindowResult* pWindowRes = getWindowResult(pWindowResInfo, slot);
- w = GET_TIMEWINDOW(pWindowResInfo, pWindowRes);
+ w = pWindowRes->win;
}
if (w.skey > ts || w.ekey < ts) {
- if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
- w.skey = taosGetIntervalStartTimestamp(ts, pQuery->slidingTime, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision);
- w.ekey = taosAddNatualInterval(w.skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
+ if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') {
+ w.skey = taosTimeTruncate(ts, &pQuery->interval, pQuery->precision);
+ w.ekey = taosTimeAdd(w.skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1;
} else {
int64_t st = w.skey;
if (st > ts) {
- st -= ((st - ts + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
+ st -= ((st - ts + pQuery->interval.sliding - 1) / pQuery->interval.sliding) * pQuery->interval.sliding;
}
- int64_t et = st + pQuery->intervalTime - 1;
+ int64_t et = st + pQuery->interval.interval - 1;
if (et < ts) {
- st += ((ts - et + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
+ st += ((ts - et + pQuery->interval.sliding - 1) / pQuery->interval.sliding) * pQuery->interval.sliding;
}
w.skey = st;
- w.ekey = w.skey + pQuery->intervalTime - 1;
+ w.ekey = w.skey + pQuery->interval.interval - 1;
}
}
@@ -536,8 +550,6 @@ static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t t
w.ekey = pQuery->window.ekey;
}
- assert(ts >= w.skey && ts <= w.ekey);
-
return w;
}
@@ -610,7 +622,7 @@ static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowRes
}
// set time window for current result
- pWindowRes->skey = win->skey;
+ pWindowRes->win = (*win);
setWindowResOutputBufInitCtx(pRuntimeEnv, pWindowRes);
return TSDB_CODE_SUCCESS;
@@ -683,12 +695,12 @@ static int32_t doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKe
continue;
}
- TSKEY ekey = pResult->skey + pWindowResInfo->interval;
+ TSKEY ekey = pResult->win.ekey;
if ((ekey <= lastKey && QUERY_IS_ASC_QUERY(pQuery)) ||
- (pResult->skey >= lastKey && !QUERY_IS_ASC_QUERY(pQuery))) {
+ (pResult->win.skey >= lastKey && !QUERY_IS_ASC_QUERY(pQuery))) {
closeTimeWindow(pWindowResInfo, i);
} else {
- skey = pResult->skey;
+ skey = pResult->win.skey;
break;
}
}
@@ -701,7 +713,7 @@ static int32_t doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKe
pWindowResInfo->curIndex = i;
}
- pWindowResInfo->prevSKey = pWindowResInfo->pResult[pWindowResInfo->curIndex].skey;
+ pWindowResInfo->prevSKey = pWindowResInfo->pResult[pWindowResInfo->curIndex].win.skey;
// the number of completed slots are larger than the threshold, return current generated results to client.
if (numOfClosed > pWindowResInfo->threshold) {
@@ -729,9 +741,9 @@ static int32_t getNumOfRowsInTimeWindow(SQuery *pQuery, SDataBlockInfo *pDataBlo
int32_t startPos, TSKEY ekey, __block_search_fn_t searchFn, bool updateLastKey) {
assert(startPos >= 0 && startPos < pDataBlockInfo->rows);
- int32_t num = -1;
+ int32_t num = -1;
int32_t order = pQuery->order.order;
- int32_t step = GET_FORWARD_DIRECTION_FACTOR(order);
+ int32_t step = GET_FORWARD_DIRECTION_FACTOR(order);
STableQueryInfo* item = pQuery->current;
@@ -765,31 +777,36 @@ static int32_t getNumOfRowsInTimeWindow(SQuery *pQuery, SDataBlockInfo *pDataBlo
return num;
}
-static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, bool closed, STimeWindow *pWin,
- int32_t offset, int32_t forwardStep, TSKEY *tsBuf, int32_t numOfTotal) {
+static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, bool closed, STimeWindow *pWin, int32_t offset,
+ int32_t forwardStep, TSKEY *tsCol, int32_t numOfTotal) {
SQuery * pQuery = pRuntimeEnv->pQuery;
SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx;
+ bool hasPrev = pCtx[0].preAggVals.isSet;
+
if (IS_MASTER_SCAN(pRuntimeEnv) || closed) {
for (int32_t k = 0; k < pQuery->numOfOutput; ++k) {
- int32_t functionId = pQuery->pSelectExpr[k].base.functionId;
-
pCtx[k].nStartQueryTimestamp = pWin->skey;
pCtx[k].size = forwardStep;
pCtx[k].startOffset = (QUERY_IS_ASC_QUERY(pQuery)) ? offset : offset - (forwardStep - 1);
+ int32_t functionId = pQuery->pSelectExpr[k].base.functionId;
if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) {
- pCtx[k].ptsList = &tsBuf[offset];
+ pCtx[k].ptsList = &tsCol[pCtx[k].startOffset];
}
// not a whole block involved in query processing, statistics data can not be used
- if (forwardStep != numOfTotal) {
+ // NOTE: the original value of isSet have been changed here
+ if (pCtx[k].preAggVals.isSet && forwardStep < numOfTotal) {
pCtx[k].preAggVals.isSet = false;
}
if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
aAggs[functionId].xFunction(&pCtx[k]);
}
+
+ // restore it
+ pCtx[k].preAggVals.isSet = hasPrev;
}
}
}
@@ -837,7 +854,7 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow
int32_t startPos = 0;
// tumbling time window query, a special case of sliding time window query
- if (pQuery->slidingTime == pQuery->intervalTime && prevPosition != -1) {
+ if (pQuery->interval.sliding == pQuery->interval.interval && prevPosition != -1) {
int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
startPos = prevPosition + factor;
} else {
@@ -850,21 +867,21 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow
*/
if (QUERY_IS_ASC_QUERY(pQuery) && primaryKeys[startPos] > pNext->ekey) {
TSKEY next = primaryKeys[startPos];
- if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
- pNext->skey = taosGetIntervalStartTimestamp(next, pQuery->slidingTime, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision);
- pNext->ekey = taosAddNatualInterval(pNext->skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
+ if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') {
+ pNext->skey = taosTimeTruncate(next, &pQuery->interval, pQuery->precision);
+ pNext->ekey = taosTimeAdd(pNext->skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1;
} else {
- pNext->ekey += ((next - pNext->ekey + pQuery->slidingTime - 1)/pQuery->slidingTime) * pQuery->slidingTime;
- pNext->skey = pNext->ekey - pQuery->intervalTime + 1;
+ pNext->ekey += ((next - pNext->ekey + pQuery->interval.sliding - 1)/pQuery->interval.sliding) * pQuery->interval.sliding;
+ pNext->skey = pNext->ekey - pQuery->interval.interval + 1;
}
} else if ((!QUERY_IS_ASC_QUERY(pQuery)) && primaryKeys[startPos] < pNext->skey) {
TSKEY next = primaryKeys[startPos];
- if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
- pNext->skey = taosGetIntervalStartTimestamp(next, pQuery->slidingTime, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision);
- pNext->ekey = taosAddNatualInterval(pNext->skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
+ if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') {
+ pNext->skey = taosTimeTruncate(next, &pQuery->interval, pQuery->precision);
+ pNext->ekey = taosTimeAdd(pNext->skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1;
} else {
- pNext->skey -= ((pNext->skey - next + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
- pNext->ekey = pNext->skey + pQuery->intervalTime - 1;
+ pNext->skey -= ((pNext->skey - next + pQuery->interval.sliding - 1) / pQuery->interval.sliding) * pQuery->interval.sliding;
+ pNext->ekey = pNext->skey + pQuery->interval.interval - 1;
}
}
@@ -910,19 +927,11 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas
char *dataBlock = NULL;
SQuery *pQuery = pRuntimeEnv->pQuery;
- SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx;
int32_t functionId = pQuery->pSelectExpr[col].base.functionId;
if (functionId == TSDB_FUNC_ARITHM) {
sas->pArithExpr = &pQuery->pSelectExpr[col];
- // set the start offset to be the lowest start position, no matter asc/desc query order
- if (QUERY_IS_ASC_QUERY(pQuery)) {
- pCtx->startOffset = pQuery->pos;
- } else {
- pCtx->startOffset = pQuery->pos - (size - 1);
- }
-
sas->offset = 0;
sas->colList = pQuery->colList;
sas->numOfCols = pQuery->numOfCols;
@@ -1002,7 +1011,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
}
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
- if (QUERY_IS_INTERVAL_QUERY(pQuery)/* && tsCols != NULL*/) {
+ if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
TSKEY ts = TSKEY_INITIAL_VAL;
if (tsCols == NULL) {
@@ -1094,8 +1103,25 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pDat
SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf;
- int64_t v = -1;
// not assign result buffer yet, add new result buffer
+ char* d = pData;
+ int16_t len = bytes;
+ if (type == TSDB_DATA_TYPE_BINARY||type == TSDB_DATA_TYPE_NCHAR) {
+ d = varDataVal(pData);
+ len = varDataLen(pData);
+ } else if (type == TSDB_DATA_TYPE_FLOAT || type == TSDB_DATA_TYPE_DOUBLE) {
+ SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv);
+ qError("QInfo:%p group by not supported on double/float/binary/nchar columns, abort", pQInfo);
+
+ longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
+ }
+
+ SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, d, len, true);
+ if (pWindowRes == NULL) {
+ return -1;
+ }
+
+ int64_t v = -1;
switch(type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT: v = GET_INT8_VAL(pData); break;
@@ -1104,12 +1130,14 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pDat
case TSDB_DATA_TYPE_BIGINT: v = GET_INT64_VAL(pData); break;
}
- SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, pData, bytes, true);
- if (pWindowRes == NULL) {
- return -1;
+ if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
+ pWindowRes->key = malloc(varDataTLen(pData));
+ varDataCopy(pWindowRes->key, pData);
+ } else {
+ pWindowRes->win.skey = v;
+ pWindowRes->win.ekey = v;
}
- pWindowRes->skey = v;
assert(pRuntimeEnv->windowResInfo.interval == 0);
if (pWindowRes->pos.pageId == -1) {
@@ -1180,7 +1208,7 @@ static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, int32_t offset) {
#if defined(_DEBUG_VIEW)
printf("elem in comp ts file:%" PRId64 ", key:%" PRId64 ", tag:%"PRIu64", query order:%d, ts order:%d, traverse:%d, index:%d\n",
- elem.ts, key, elem.tag, pQuery->order.order, pRuntimeEnv->pTSBuf->tsOrder,
+ elem.ts, key, elem.tag.i64Key, pQuery->order.order, pRuntimeEnv->pTSBuf->tsOrder,
pRuntimeEnv->pTSBuf->cur.order, pRuntimeEnv->pTSBuf->cur.tsIndex);
#endif
@@ -1461,12 +1489,15 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY
pCtx->preAggVals.dataBlockLoaded = (inputData != NULL);
// limit/offset query will affect this value
- pCtx->startOffset = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->pos:0;
pCtx->size = QUERY_IS_ASC_QUERY(pQuery) ? pBlockInfo->rows - pQuery->pos : pQuery->pos + 1;
+ // minimum value no matter ascending/descending order query
+ pCtx->startOffset = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->pos: (pQuery->pos - pCtx->size + 1);
+ assert(pCtx->startOffset >= 0);
+
uint32_t status = aAggs[functionId].nStatus;
if (((status & (TSDB_FUNCSTATE_SELECTIVITY | TSDB_FUNCSTATE_NEED_TS)) != 0) && (tsCol != NULL)) {
- pCtx->ptsList = tsCol;
+ pCtx->ptsList = &tsCol[pCtx->startOffset];
}
if (functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_LAST_DST) {
@@ -1846,20 +1877,20 @@ static bool onlyQueryTags(SQuery* pQuery) {
/////////////////////////////////////////////////////////////////////////////////////////////
void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *win) {
- assert(key >= keyFirst && key <= keyLast && pQuery->slidingTime <= pQuery->intervalTime);
- win->skey = taosGetIntervalStartTimestamp(key, pQuery->slidingTime, pQuery->intervalTime, pQuery->slidingTimeUnit, pQuery->precision);
+ assert(key >= keyFirst && key <= keyLast && pQuery->interval.sliding <= pQuery->interval.interval);
+ win->skey = taosTimeTruncate(key, &pQuery->interval, pQuery->precision);
/*
- * if the realSkey > INT64_MAX - pQuery->intervalTime, the query duration between
+ * if the realSkey > INT64_MAX - pQuery->interval.interval, the query duration between
* realSkey and realEkey must be less than one interval.Therefore, no need to adjust the query ranges.
*/
- if (keyFirst > (INT64_MAX - pQuery->intervalTime)) {
- assert(keyLast - keyFirst < pQuery->intervalTime);
+ if (keyFirst > (INT64_MAX - pQuery->interval.interval)) {
+ assert(keyLast - keyFirst < pQuery->interval.interval);
win->ekey = INT64_MAX;
- } else if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
- win->ekey = taosAddNatualInterval(win->skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
+ } else if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') {
+ win->ekey = taosTimeAdd(win->skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1;
} else {
- win->ekey = win->skey + pQuery->intervalTime - 1;
+ win->ekey = win->skey + pQuery->interval.interval - 1;
}
}
@@ -1951,40 +1982,39 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo
// todo handle the case the the order irrelevant query type mixed up with order critical query type
// descending order query for last_row query
- if (isFirstLastRowQuery(pQuery) && !QUERY_IS_ASC_QUERY(pQuery)) {
- qDebug("QInfo:%p scan order changed for last_row query, old:%d, new:%d", GET_QINFO_ADDR(pQuery),
- pQuery->order.order, TSDB_ORDER_ASC);
+ if (isFirstLastRowQuery(pQuery)) {
+ qDebug("QInfo:%p scan order changed for last_row query, old:%d, new:%d", pQInfo, pQuery->order.order, TSDB_ORDER_ASC);
- SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
pQuery->order.order = TSDB_ORDER_ASC;
- assert (pQuery->window.skey <= pQuery->window.ekey);
+ if (pQuery->window.skey > pQuery->window.ekey) {
+ SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
+ }
+
+ return;
+ }
+
+ if (isGroupbyNormalCol(pQuery->pGroupbyExpr) && pQuery->order.order == TSDB_ORDER_DESC) {
+ pQuery->order.order = TSDB_ORDER_ASC;
+ if (pQuery->window.skey > pQuery->window.ekey) {
+ SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
+ }
doExchangeTimeWindow(pQInfo, &pQuery->window);
return;
}
- if (isGroupbyNormalCol(pQuery->pGroupbyExpr) && !QUERY_IS_ASC_QUERY(pQuery)) {
- pQuery->order.order = TSDB_ORDER_ASC;
- SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
- assert (pQuery->window.skey <= pQuery->window.ekey);
+ if (isPointInterpoQuery(pQuery) && pQuery->interval.interval == 0) {
+ if (!QUERY_IS_ASC_QUERY(pQuery)) {
+ qDebug(msg, GET_QINFO_ADDR(pQuery), "interp", pQuery->order.order, TSDB_ORDER_ASC, pQuery->window.skey,
+ pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
+ SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
+ }
- doExchangeTimeWindow(pQInfo, &pQuery->window);
+ pQuery->order.order = TSDB_ORDER_ASC;
return;
}
- if (isPointInterpoQuery(pQuery) && (pQuery->intervalTime == 0) && !QUERY_IS_ASC_QUERY(pQuery)) {
- qDebug(msg, GET_QINFO_ADDR(pQuery), "interp", pQuery->order.order, TSDB_ORDER_ASC, pQuery->window.skey,
- pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
- SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
-
- pQuery->order.order = TSDB_ORDER_ASC;
-
- assert (pQuery->window.skey <= pQuery->window.ekey);
- doExchangeTimeWindow(pQInfo, &pQuery->window);
- return;
- }
-
- if (pQuery->intervalTime == 0) {
+ if (pQuery->interval.interval == 0) {
if (onlyFirstQuery(pQuery)) {
if (!QUERY_IS_ASC_QUERY(pQuery)) {
qDebug(msg, GET_QINFO_ADDR(pQuery), "only-first", pQuery->order.order, TSDB_ORDER_ASC, pQuery->window.skey,
@@ -2055,13 +2085,14 @@ static int32_t getInitialPageNum(SQInfo *pQInfo) {
static void getIntermediateBufInfo(SQueryRuntimeEnv* pRuntimeEnv, int32_t* ps, int32_t* rowsize) {
SQuery* pQuery = pRuntimeEnv->pQuery;
+ int32_t MIN_ROWS_PER_PAGE = 4;
*rowsize = (int32_t)(pQuery->rowSize * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery));
int32_t overhead = sizeof(tFilePage);
// one page contains at least two rows
*ps = DEFAULT_INTERN_BUF_PAGE_SIZE;
- while(((*rowsize) * 2) > (*ps) - overhead) {
+ while(((*rowsize) * MIN_ROWS_PER_PAGE) > (*ps) - overhead) {
*ps = (*ps << 1u);
}
@@ -2089,7 +2120,7 @@ static bool needToLoadDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SDataStatis *pDat
}
}
- // no statistics data
+ // no statistics data, load the true data block
if (index == -1) {
return true;
}
@@ -2099,8 +2130,17 @@ static bool needToLoadDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SDataStatis *pDat
return true;
}
- // all points in current column are NULL, no need to check its boundary value
+ // all data in current column are NULL, no need to check its boundary value
if (pDataStatis[index].numOfNull == numOfRows) {
+
+ // if isNULL query exists, load the null data column
+ for (int32_t j = 0; j < pFilterInfo->numOfFilters; ++j) {
+ SColumnFilterElem *pFilterElem = &pFilterInfo->pFilters[j];
+ if (pFilterElem->fp == isNull_filter) {
+ return true;
+ }
+ }
+
continue;
}
@@ -2185,43 +2225,58 @@ static bool overlapWithTimeWindow(SQuery* pQuery, SDataBlockInfo* pBlockInfo) {
return false;
}
-int32_t loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle, SDataBlockInfo* pBlockInfo, SDataStatis **pStatis, SArray** pDataBlock) {
+int32_t loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, SWindowResInfo * pWindowResInfo, void* pQueryHandle, SDataBlockInfo* pBlockInfo, SDataStatis **pStatis, SArray** pDataBlock, uint32_t* status) {
SQuery *pQuery = pRuntimeEnv->pQuery;
- uint32_t status = 0;
+ *status = BLK_DATA_NO_NEEDED;
+
if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf > 0) {
- status = BLK_DATA_ALL_NEEDED;
+ *status = BLK_DATA_ALL_NEEDED;
} else { // check if this data block is required to load
// Calculate all time windows that are overlapping or contain current data block.
// If current data block is contained by all possible time window, do not load current data block.
if (QUERY_IS_INTERVAL_QUERY(pQuery) && overlapWithTimeWindow(pQuery, pBlockInfo)) {
- status = BLK_DATA_ALL_NEEDED;
+ *status = BLK_DATA_ALL_NEEDED;
}
- if (status != BLK_DATA_ALL_NEEDED) {
+ if ((*status) != BLK_DATA_ALL_NEEDED) {
+ // the pCtx[i] result belongs to the previous time window since the outputBuf has not been set yet,
+ // so the filter result may be incorrect. In case of an interval query, set the correct time output buffer first
+ if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
+ bool hasTimeWindow = false;
+ bool masterScan = IS_MASTER_SCAN(pRuntimeEnv);
+
+ TSKEY k = QUERY_IS_ASC_QUERY(pQuery)? pBlockInfo->window.skey:pBlockInfo->window.ekey;
+
+ STimeWindow win = getActiveTimeWindow(pWindowResInfo, k, pQuery);
+ if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pBlockInfo->tid, &win, masterScan, &hasTimeWindow) !=
+ TSDB_CODE_SUCCESS) {
+ // todo handle error in set result for timewindow
+ }
+ }
+
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
SSqlFuncMsg* pSqlFunc = &pQuery->pSelectExpr[i].base;
int32_t functionId = pSqlFunc->functionId;
int32_t colId = pSqlFunc->colInfo.colId;
-
- status |= aAggs[functionId].dataReqFunc(&pRuntimeEnv->pCtx[i], pBlockInfo->window.skey, pBlockInfo->window.ekey, colId);
- if ((status & BLK_DATA_ALL_NEEDED) == BLK_DATA_ALL_NEEDED) {
+ (*status) |= aAggs[functionId].dataReqFunc(&pRuntimeEnv->pCtx[i], pBlockInfo->window.skey, pBlockInfo->window.ekey, colId);
+ if (((*status) & BLK_DATA_ALL_NEEDED) == BLK_DATA_ALL_NEEDED) {
break;
}
}
}
}
- if (status == BLK_DATA_NO_NEEDED) {
+ if ((*status) == BLK_DATA_NO_NEEDED) {
qDebug("QInfo:%p data block discard, brange:%"PRId64 "-%"PRId64", rows:%d", GET_QINFO_ADDR(pRuntimeEnv),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
pRuntimeEnv->summary.discardBlocks += 1;
- } else if (status == BLK_DATA_STATIS_NEEDED) {
- if (tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis) != TSDB_CODE_SUCCESS) {
- // return DISK_DATA_LOAD_FAILED;
- }
+ } else if ((*status) == BLK_DATA_STATIS_NEEDED) {
+
+ // this function never returns error?
+ tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis);
pRuntimeEnv->summary.loadBlockStatis += 1;
@@ -2230,24 +2285,26 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle,
pRuntimeEnv->summary.totalCheckedRows += pBlockInfo->rows;
}
} else {
- assert(status == BLK_DATA_ALL_NEEDED);
+ assert((*status) == BLK_DATA_ALL_NEEDED);
// load the data block statistics to perform further filter
pRuntimeEnv->summary.loadBlockStatis += 1;
- if (tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis) != TSDB_CODE_SUCCESS) {
- }
+ tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis);
if (!needToLoadDataBlock(pRuntimeEnv, *pStatis, pRuntimeEnv->pCtx, pBlockInfo->rows)) {
// current block has been discard due to filter applied
pRuntimeEnv->summary.discardBlocks += 1;
qDebug("QInfo:%p data block discard, brange:%"PRId64 "-%"PRId64", rows:%d", GET_QINFO_ADDR(pRuntimeEnv),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
- return BLK_DATA_DISCARD;
+ (*status) = BLK_DATA_DISCARD;
}
pRuntimeEnv->summary.totalCheckedRows += pBlockInfo->rows;
pRuntimeEnv->summary.loadBlocks += 1;
*pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL);
+ if (*pDataBlock == NULL) {
+ return terrno;
+ }
}
return TSDB_CODE_SUCCESS;
@@ -2365,16 +2422,16 @@ static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pB
memset(tmp + sizeof(tFilePage) + bytes * pRec->rows, 0, (size_t)((newSize - pRec->rows) * bytes));
pQuery->sdata[i] = (tFilePage *)tmp;
}
-
+
// set the pCtx output buffer position
pRuntimeEnv->pCtx[i].aOutputBuf = pQuery->sdata[i]->data + pRec->rows * bytes;
-
+
int32_t functionId = pQuery->pSelectExpr[i].base.functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
pRuntimeEnv->pCtx[i].ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf;
}
}
-
+
qDebug("QInfo:%p realloc output buffer, new size: %d rows, old:%" PRId64 ", remain:%" PRId64, GET_QINFO_ADDR(pRuntimeEnv),
newSize, pRec->capacity, newSize - pRec->rows);
@@ -2431,15 +2488,18 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
ensureOutputBuffer(pRuntimeEnv, &blockInfo);
SDataStatis *pStatis = NULL;
- SArray *pDataBlock = NULL;
- if (loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis, &pDataBlock) == BLK_DATA_DISCARD) {
- pQuery->current->lastKey = QUERY_IS_ASC_QUERY(pQuery)? blockInfo.window.ekey + step:blockInfo.window.skey + step;
- continue;
+ SArray * pDataBlock = NULL;
+ uint32_t status = 0;
+
+ int32_t ret = loadDataBlockOnDemand(pRuntimeEnv, &pRuntimeEnv->windowResInfo, pQueryHandle, &blockInfo, &pStatis, &pDataBlock, &status);
+ if (ret != TSDB_CODE_SUCCESS) {
+ break;
}
- if (terrno != TSDB_CODE_SUCCESS) { // load data block failed, abort query
- longjmp(pRuntimeEnv->env, terrno);
- break;
+ if (status == BLK_DATA_DISCARD) {
+ pQuery->current->lastKey =
+ QUERY_IS_ASC_QUERY(pQuery) ? blockInfo.window.ekey + step : blockInfo.window.skey + step;
+ continue;
}
// query start position can not move into tableApplyFunctionsOnBlock due to limit/offset condition
@@ -2806,6 +2866,7 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) {
// all results have been return to client, try next group
if (pGroupResInfo->pos.pageId == pGroupResInfo->numOfDataPages) {
pGroupResInfo->numOfDataPages = 0;
+ pGroupResInfo->pos.pageId = 0;
pGroupResInfo->pos.rowId = 0;
// current results of group has been sent to client, try next group
@@ -2920,7 +2981,6 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
STableQueryInfo *item = taosArrayGetP(pGroup, i);
SIDList list = getDataBufPagesIdList(pRuntimeEnv->pResultBuf, TSDB_TABLEID(item->pTable)->tid);
-
if (taosArrayGetSize(list) > 0 && item->windowResInfo.size > 0) {
pTableList[numOfTables++] = item;
tid = TSDB_TABLEID(item->pTable)->tid;
@@ -2993,7 +3053,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
char *b = getPosInResultPage(pRuntimeEnv, PRIMARYKEY_TIMESTAMP_COL_INDEX, pWindowRes, page);
TSKEY ts = GET_INT64_VAL(b);
- assert(ts == pWindowRes->skey);
+ assert(ts == pWindowRes->win.skey);
int64_t num = getNumOfResultWindowRes(pQuery, pWindowRes);
if (num <= 0) {
cs.position[pos] += 1;
@@ -3357,7 +3417,7 @@ void skipResults(SQueryRuntimeEnv *pRuntimeEnv) {
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
int32_t functionId = pQuery->pSelectExpr[i].base.functionId;
int32_t bytes = pRuntimeEnv->pCtx[i].outputBytes;
-
+
memmove(pQuery->sdata[i]->data, (char*)pQuery->sdata[i]->data + bytes * numOfSkip, (size_t)(pQuery->rec.rows * bytes));
pRuntimeEnv->pCtx[i].aOutputBuf = ((char*) pQuery->sdata[i]->data) + pQuery->rec.rows * bytes;
@@ -3678,7 +3738,7 @@ static STableQueryInfo *createTableQueryInfo(SQueryRuntimeEnv *pRuntimeEnv, void
return pTableQueryInfo;
}
-void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo) {
+void destroyTableQueryInfoImpl(STableQueryInfo *pTableQueryInfo) {
if (pTableQueryInfo == NULL) {
return;
}
@@ -4238,8 +4298,8 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) {
}
/*
- * 1. for interval without interpolation query we forward pQuery->intervalTime at a time for
- * pQuery->limit.offset times. Since hole exists, pQuery->intervalTime*pQuery->limit.offset value is
+ * 1. for interval without interpolation query we forward pQuery->interval.interval at a time for
+ * pQuery->limit.offset times. Since hole exists, pQuery->interval.interval*pQuery->limit.offset value is
* not valid. otherwise, we only forward pQuery->limit.offset number of points
*/
assert(pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL);
@@ -4354,31 +4414,7 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) {
return true;
}
-static void freeTableQueryInfo(STableGroupInfo* pTableGroupInfo) {
- if (pTableGroupInfo->pGroupList == NULL) {
- assert(pTableGroupInfo->numOfTables == 0);
- } else {
- size_t numOfGroups = taosArrayGetSize(pTableGroupInfo->pGroupList);
- for (int32_t i = 0; i < numOfGroups; ++i) {
- SArray *p = taosArrayGetP(pTableGroupInfo->pGroupList, i);
-
- size_t num = taosArrayGetSize(p);
- for(int32_t j = 0; j < num; ++j) {
- STableQueryInfo* item = taosArrayGetP(p, j);
- destroyTableQueryInfo(item);
- }
-
- taosArrayDestroy(p);
- }
-
- taosArrayDestroy(pTableGroupInfo->pGroupList);
- pTableGroupInfo->pGroupList = NULL;
- pTableGroupInfo->numOfTables = 0;
- }
-
- taosHashCleanup(pTableGroupInfo->map);
- pTableGroupInfo->map = NULL;
-}
+static void doDestroyTableQueryInfo(STableGroupInfo* pTableqinfoGroupInfo);
static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
@@ -4415,13 +4451,15 @@ static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery)
terrno = TSDB_CODE_SUCCESS;
if (isFirstLastRowQuery(pQuery)) {
pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo);
- if (pRuntimeEnv->pQueryHandle == NULL) { // no data in current stable, clear all
- freeTableQueryInfo(&pQInfo->tableqinfoGroupInfo);
- } else { // update the query time window
- pQuery->window = cond.twindow;
+ // update the query time window
+ pQuery->window = cond.twindow;
+
+ if (pQInfo->tableGroupInfo.numOfTables == 0) {
+ pQInfo->tableqinfoGroupInfo.numOfTables = 0;
+ } else {
size_t numOfGroups = GET_NUM_OF_TABLEGROUP(pQInfo);
- for (int32_t i = 0; i < numOfGroups; ++i) {
+ for(int32_t i = 0; i < numOfGroups; ++i) {
SArray *group = GET_TABLEGROUP(pQInfo, i);
size_t t = taosArrayGetSize(group);
@@ -4484,12 +4522,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
return code;
}
- if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) {
- qDebug("QInfo:%p no table qualified for tag filter, abort query", pQInfo);
- setQueryStatus(pQuery, QUERY_COMPLETED);
- return TSDB_CODE_SUCCESS;
- }
-
pQInfo->tsdb = tsdb;
pQInfo->vgId = vgId;
@@ -4572,7 +4604,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
getAlignQueryTimeWindow(pQuery, pQuery->window.skey, sk, ek, &w);
pRuntimeEnv->pFillInfo = taosInitFillInfo(pQuery->order.order, w.skey, 0, (int32_t)pQuery->rec.capacity, pQuery->numOfOutput,
- pQuery->slidingTime, pQuery->slidingTimeUnit, (int8_t)pQuery->precision,
+ pQuery->interval.sliding, pQuery->interval.slidingUnit, (int8_t)pQuery->precision,
pQuery->fillType, pColInfo);
}
@@ -4612,7 +4644,7 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery* pQuery = pRuntimeEnv->pQuery;
SQueryCostInfo* summary = &pRuntimeEnv->summary;
-
+
int64_t st = taosGetTimestampMs();
TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle;
@@ -4622,7 +4654,7 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
while (tsdbNextDataBlock(pQueryHandle)) {
summary->totalBlocks += 1;
-
+
if (IS_QUERY_KILLED(pQInfo)) {
longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED);
}
@@ -4650,16 +4682,23 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
setEnvForEachBlock(pQInfo, *pTableQueryInfo, &blockInfo);
}
+ uint32_t status = 0;
SDataStatis *pStatis = NULL;
- SArray *pDataBlock = NULL;
- if (loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis, &pDataBlock) == BLK_DATA_DISCARD) {
- pQuery->current->lastKey = QUERY_IS_ASC_QUERY(pQuery)? blockInfo.window.ekey + step:blockInfo.window.skey + step;
+ SArray *pDataBlock = NULL;
+
+ int32_t ret = loadDataBlockOnDemand(pRuntimeEnv, &pQuery->current->windowResInfo, pQueryHandle, &blockInfo, &pStatis, &pDataBlock, &status);
+ if (ret != TSDB_CODE_SUCCESS) {
+ break;
+ }
+
+ if (status == BLK_DATA_DISCARD) {
+ pQuery->current->lastKey = QUERY_IS_ASC_QUERY(pQuery)? blockInfo.window.ekey + step : blockInfo.window.skey + step;
continue;
}
summary->totalRows += blockInfo.rows;
stableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, pDataBlock, binarySearchForKey);
-
+
qDebug("QInfo:%p check data block completed, uid:%"PRId64", tid:%d, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, "
"lastKey:%" PRId64,
pQInfo, blockInfo.uid, blockInfo.tid, blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows,
@@ -5025,8 +5064,8 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
}
qDebug(
- "QInfo %p numOfTables:%"PRIu64", index:%d, numOfGroups:%" PRIzu ", %"PRId64" points returned, total:%"PRId64", offset:%" PRId64,
- pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQInfo->tableIndex, numOfGroups, pQuery->rec.rows, pQuery->rec.total,
+ "QInfo %p numOfTables:%" PRIu64 ", index:%d, numOfGroups:%" PRIzu ", %" PRId64 " points returned, total:%" PRId64 ", offset:%" PRId64,
+ pQInfo, (uint64_t)pQInfo->tableqinfoGroupInfo.numOfTables, pQInfo->tableIndex, numOfGroups, pQuery->rec.rows, pQuery->rec.total,
pQuery->limit.offset);
}
@@ -5425,7 +5464,7 @@ static void stableQueryImpl(SQInfo *pQInfo) {
(isFixedOutputQuery(pRuntimeEnv) && (!isPointInterpoQuery(pQuery)) && (!pRuntimeEnv->groupbyNormalCol))) {
multiTableQueryProcess(pQInfo);
} else {
- assert((pQuery->checkBuffer == 1 && pQuery->intervalTime == 0) || isPointInterpoQuery(pQuery) ||
+ assert((pQuery->checkBuffer == 1 && pQuery->interval.interval == 0) || isPointInterpoQuery(pQuery) ||
isFirstLastRowQuery(pQuery) || pRuntimeEnv->groupbyNormalCol);
sequentialTableProcess(pQInfo);
@@ -5463,6 +5502,7 @@ static int32_t getColumnIndexInSource(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pE
}
}
assert(0);
+ return -1;
}
bool validateExprColumnInfo(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pExprMsg, SColumnInfo* pTagCols) {
@@ -5471,8 +5511,8 @@ bool validateExprColumnInfo(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pExprMsg, SC
}
static bool validateQueryMsg(SQueryTableMsg *pQueryMsg) {
- if (pQueryMsg->intervalTime < 0) {
- qError("qmsg:%p illegal value of interval time %" PRId64, pQueryMsg, pQueryMsg->intervalTime);
+ if (pQueryMsg->interval.interval < 0) {
+ qError("qmsg:%p illegal value of interval time %" PRId64, pQueryMsg, pQueryMsg->interval.interval);
return false;
}
@@ -5551,8 +5591,12 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
pQueryMsg->window.skey = htobe64(pQueryMsg->window.skey);
pQueryMsg->window.ekey = htobe64(pQueryMsg->window.ekey);
- pQueryMsg->intervalTime = htobe64(pQueryMsg->intervalTime);
- pQueryMsg->slidingTime = htobe64(pQueryMsg->slidingTime);
+ pQueryMsg->interval.interval = htobe64(pQueryMsg->interval.interval);
+ pQueryMsg->interval.sliding = htobe64(pQueryMsg->interval.sliding);
+ pQueryMsg->interval.offset = htobe64(pQueryMsg->interval.offset);
+ pQueryMsg->interval.intervalUnit = pQueryMsg->interval.intervalUnit;
+ pQueryMsg->interval.slidingUnit = pQueryMsg->interval.slidingUnit;
+ pQueryMsg->interval.offsetUnit = pQueryMsg->interval.offsetUnit;
pQueryMsg->limit = htobe64(pQueryMsg->limit);
pQueryMsg->offset = htobe64(pQueryMsg->offset);
@@ -5765,7 +5809,7 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
qDebug("qmsg:%p query %d tables, type:%d, qrange:%" PRId64 "-%" PRId64 ", numOfGroupbyTagCols:%d, order:%d, "
"outputCols:%d, numOfCols:%d, interval:%" PRId64 ", fillType:%d, comptsLen:%d, compNumOfBlocks:%d, limit:%" PRId64 ", offset:%" PRId64,
pQueryMsg, pQueryMsg->numOfTables, pQueryMsg->queryType, pQueryMsg->window.skey, pQueryMsg->window.ekey, pQueryMsg->numOfGroupCols,
- pQueryMsg->order, pQueryMsg->numOfOutput, pQueryMsg->numOfCols, pQueryMsg->intervalTime,
+ pQueryMsg->order, pQueryMsg->numOfOutput, pQueryMsg->numOfCols, pQueryMsg->interval.interval,
pQueryMsg->fillType, pQueryMsg->tsLen, pQueryMsg->tsNumOfBlocks, pQueryMsg->limit, pQueryMsg->offset);
return TSDB_CODE_SUCCESS;
@@ -6104,10 +6148,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou
pQuery->order.orderColId = pQueryMsg->orderColId;
pQuery->pSelectExpr = pExprs;
pQuery->pGroupbyExpr = pGroupbyExpr;
- pQuery->intervalTime = pQueryMsg->intervalTime;
- pQuery->slidingTime = pQueryMsg->slidingTime;
- pQuery->intervalTimeUnit = pQueryMsg->intervalTimeUnit;
- pQuery->slidingTimeUnit = pQueryMsg->slidingTimeUnit;
+ memcpy(&pQuery->interval, &pQueryMsg->interval, sizeof(pQuery->interval));
pQuery->fillType = pQueryMsg->fillType;
pQuery->numOfTags = pQueryMsg->numOfTags;
pQuery->tagColList = pTagCols;
@@ -6312,17 +6353,43 @@ _error:
}
static void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) {
- if (pFilter == NULL) {
+ if (pFilter == NULL || numOfFilters == 0) {
return;
}
+
for (int32_t i = 0; i < numOfFilters; i++) {
if (pFilter[i].filterstr) {
free((void*)(pFilter[i].pz));
}
}
+
free(pFilter);
}
+static void doDestroyTableQueryInfo(STableGroupInfo* pTableqinfoGroupInfo) {
+ if (pTableqinfoGroupInfo->pGroupList != NULL) {
+ int32_t numOfGroups = (int32_t) taosArrayGetSize(pTableqinfoGroupInfo->pGroupList);
+ for (int32_t i = 0; i < numOfGroups; ++i) {
+ SArray *p = taosArrayGetP(pTableqinfoGroupInfo->pGroupList, i);
+
+ size_t num = taosArrayGetSize(p);
+ for(int32_t j = 0; j < num; ++j) {
+ STableQueryInfo* item = taosArrayGetP(p, j);
+ destroyTableQueryInfoImpl(item);
+ }
+
+ taosArrayDestroy(p);
+ }
+ }
+
+ taosArrayDestroy(pTableqinfoGroupInfo->pGroupList);
+ taosHashCleanup(pTableqinfoGroupInfo->map);
+
+ pTableqinfoGroupInfo->pGroupList = NULL;
+ pTableqinfoGroupInfo->map = NULL;
+ pTableqinfoGroupInfo->numOfTables = 0;
+}
+
static void freeQInfo(SQInfo *pQInfo) {
if (!isValidQInfo(pQInfo)) {
return;
@@ -6383,10 +6450,9 @@ static void freeQInfo(SQInfo *pQInfo) {
taosTFree(pQuery);
}
- freeTableQueryInfo(&pQInfo->tableqinfoGroupInfo);
+ doDestroyTableQueryInfo(&pQInfo->tableqinfoGroupInfo);
taosTFree(pQInfo->pBuf);
-
tsdbDestroyTableGroup(&pQInfo->tableGroupInfo);
taosArrayDestroy(pQInfo->arrTableIdInfo);
@@ -6980,7 +7046,7 @@ void* qOpenQueryMgmt(int32_t vgId) {
return NULL;
}
- pQueryMgmt->qinfoPool = taosCacheInit(TSDB_DATA_TYPE_BIGINT, REFRESH_HANDLE_INTERVAL, true, freeqinfoFn, cacheName);
+ pQueryMgmt->qinfoPool = taosCacheInit(TSDB_CACHE_PTR_KEY, REFRESH_HANDLE_INTERVAL, true, freeqinfoFn, cacheName);
pQueryMgmt->closed = false;
pQueryMgmt->vgId = vgId;
@@ -7049,23 +7115,23 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) {
qError("QInfo:%p failed to add qhandle into cache, since qMgmt is colsing", (void *)qInfo);
return NULL;
} else {
- uint64_t handleVal = (uint64_t) qInfo;
-
- void** handle = taosCachePut(pQueryMgmt->qinfoPool, &handleVal, sizeof(int64_t), &qInfo, POINTER_BYTES, DEFAULT_QHANDLE_LIFE_SPAN);
+ TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE) qInfo;
+ void** handle = taosCachePut(pQueryMgmt->qinfoPool, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &qInfo, sizeof(TSDB_CACHE_PTR_TYPE), DEFAULT_QHANDLE_LIFE_SPAN);
// pthread_mutex_unlock(&pQueryMgmt->lock);
return handle;
}
}
-void** qAcquireQInfo(void* pMgmt, uint64_t key) {
+void** qAcquireQInfo(void* pMgmt, uint64_t _key) {
SQueryMgmt *pQueryMgmt = pMgmt;
if (pQueryMgmt->qinfoPool == NULL || pQueryMgmt->closed) {
return NULL;
}
- void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &key, sizeof(uint64_t));
+ TSDB_CACHE_PTR_TYPE key = (TSDB_CACHE_PTR_TYPE)_key;
+ void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &key, sizeof(TSDB_CACHE_PTR_TYPE));
if (handle == NULL || *handle == NULL) {
return NULL;
} else {
diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c
index 192a31ebf5..1d3120ead4 100644
--- a/src/query/src/qExtbuffer.c
+++ b/src/query/src/qExtbuffer.c
@@ -502,22 +502,22 @@ FORCE_INLINE int32_t compare_sd(tOrderDescriptor *pDescriptor, int32_t numOfRows
return compare_d(pDescriptor, numOfRows, idx1, data, numOfRows, idx2, data);
}
-static void swap(SColumnModel *pColumnModel, int32_t count, int32_t s1, char *data1, int32_t s2) {
+static void swap(SColumnModel *pColumnModel, int32_t count, int32_t s1, char *data1, int32_t s2, void* buf) {
for (int32_t i = 0; i < pColumnModel->numOfCols; ++i) {
void *first = COLMODEL_GET_VAL(data1, pColumnModel, count, s1, i);
void *second = COLMODEL_GET_VAL(data1, pColumnModel, count, s2, i);
SSchema* pSchema = &pColumnModel->pFields[i].field;
- tsDataSwap(first, second, pSchema->type, pSchema->bytes);
+ tsDataSwap(first, second, pSchema->type, pSchema->bytes, buf);
}
}
static void tColDataInsertSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data,
- __col_compar_fn_t compareFn) {
+ __col_compar_fn_t compareFn, void* buf) {
for (int32_t i = start + 1; i <= end; ++i) {
for (int32_t j = i; j > start; --j) {
if (compareFn(pDescriptor, numOfRows, j, j - 1, data) == -1) {
- swap(pDescriptor->pColumnModel, numOfRows, j - 1, data, j);
+ swap(pDescriptor->pColumnModel, numOfRows, j - 1, data, j, buf);
} else {
break;
}
@@ -553,7 +553,7 @@ static void UNUSED_FUNC tSortDataPrint(int32_t type, char *prefix, char *startx,
}
static void median(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data,
- __col_compar_fn_t compareFn) {
+ __col_compar_fn_t compareFn, void* buf) {
int32_t midIdx = ((end - start) >> 1) + start;
#if defined(_DEBUG_VIEW)
@@ -567,15 +567,16 @@ static void median(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta
tSortDataPrint(pDescriptor->pColumnModel->pFields[colIdx].field.type, "before", startx, midx, endx);
#endif
+ SColumnModel* pModel = pDescriptor->pColumnModel;
if (compareFn(pDescriptor, numOfRows, midIdx, start, data) == 1) {
- swap(pDescriptor->pColumnModel, numOfRows, start, data, midIdx);
+ swap(pModel, numOfRows, start, data, midIdx, buf);
}
if (compareFn(pDescriptor, numOfRows, midIdx, end, data) == 1) {
- swap(pDescriptor->pColumnModel, numOfRows, midIdx, data, start);
- swap(pDescriptor->pColumnModel, numOfRows, midIdx, data, end);
+ swap(pModel, numOfRows, midIdx, data, start, buf);
+ swap(pModel, numOfRows, midIdx, data, end, buf);
} else if (compareFn(pDescriptor, numOfRows, start, end, data) == 1) {
- swap(pDescriptor->pColumnModel, numOfRows, start, data, end);
+ swap(pModel, numOfRows, start, data, end, buf);
}
assert(compareFn(pDescriptor, numOfRows, midIdx, start, data) <= 0 &&
@@ -626,32 +627,20 @@ static UNUSED_FUNC void tRowModelDisplay(tOrderDescriptor *pDescriptor, int32_t
printf("\n");
}
-static int32_t qsort_call = 0;
-
-void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data,
- int32_t orderType) {
- // short array sort, incur another sort procedure instead of quick sort process
- __col_compar_fn_t compareFn = (orderType == TSDB_ORDER_ASC) ? compare_sa : compare_sd;
-
- if (end - start + 1 <= 8) {
- tColDataInsertSort(pDescriptor, numOfRows, start, end, data, compareFn);
- return;
- }
-
+static void columnwiseQSortImpl(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data,
+ int32_t orderType, __col_compar_fn_t compareFn, void* buf) {
#ifdef _DEBUG_VIEW
-// printf("before sort:\n");
-// tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1);
+ printf("before sort:\n");
+ tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1);
#endif
int32_t s = start, e = end;
- median(pDescriptor, numOfRows, start, end, data, compareFn);
+ median(pDescriptor, numOfRows, start, end, data, compareFn, buf);
#ifdef _DEBUG_VIEW
-// printf("%s called: %d\n", __FUNCTION__, qsort_call++);
+ // printf("%s called: %d\n", __FUNCTION__, qsort_call++);
#endif
- UNUSED(qsort_call);
-
int32_t end_same = end;
int32_t start_same = start;
@@ -663,17 +652,17 @@ void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta
}
if (ret == 0 && e != end_same) {
- swap(pDescriptor->pColumnModel, numOfRows, e, data, end_same--);
+ swap(pDescriptor->pColumnModel, numOfRows, e, data, end_same--, buf);
}
e--;
}
if (e != s) {
- swap(pDescriptor->pColumnModel, numOfRows, s, data, e);
+ swap(pDescriptor->pColumnModel, numOfRows, s, data, e, buf);
}
#ifdef _DEBUG_VIEW
-// tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1);
+ // tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1);
#endif
while (s < e) {
@@ -683,16 +672,16 @@ void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta
}
if (ret == 0 && s != start_same) {
- swap(pDescriptor->pColumnModel, numOfRows, s, data, start_same++);
+ swap(pDescriptor->pColumnModel, numOfRows, s, data, start_same++, buf);
}
s++;
}
if (s != e) {
- swap(pDescriptor->pColumnModel, numOfRows, s, data, e);
+ swap(pDescriptor->pColumnModel, numOfRows, s, data, e, buf);
}
#ifdef _DEBUG_VIEW
-// tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1);
+ // tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1);
#endif
}
@@ -702,14 +691,14 @@ void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta
int32_t right = end;
while (right > end_same && left <= end_same) {
- swap(pDescriptor->pColumnModel, numOfRows, left++, data, right--);
+ swap(pDescriptor->pColumnModel, numOfRows, left++, data, right--, buf);
}
// (pivotal+1) + steps of number that are identical pivotal
rightx += (end - end_same);
#ifdef _DEBUG_VIEW
-// tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1);
+ // tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1);
#endif
}
@@ -719,26 +708,52 @@ void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta
int32_t right = e - 1;
while (left < start_same && right >= start_same) {
- swap(pDescriptor->pColumnModel, numOfRows, left++, data, right--);
+ swap(pDescriptor->pColumnModel, numOfRows, left++, data, right--, buf);
}
// (pivotal-1) - steps of number that are identical pivotal
leftx -= (start_same - start);
#ifdef _DEBUG_VIEW
-// tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1);
+ // tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1);
#endif
}
if (leftx > start) {
- tColDataQSort(pDescriptor, numOfRows, start, leftx, data, orderType);
+ columnwiseQSortImpl(pDescriptor, numOfRows, start, leftx, data, orderType, compareFn, buf);
}
if (rightx < end) {
- tColDataQSort(pDescriptor, numOfRows, rightx, end, data, orderType);
+ columnwiseQSortImpl(pDescriptor, numOfRows, rightx, end, data, orderType, compareFn, buf);
}
}
+void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data, int32_t order) {
+ // short array sort, incur another sort procedure instead of quick sort process
+ __col_compar_fn_t compareFn = (order == TSDB_ORDER_ASC) ? compare_sa : compare_sd;
+
+ SColumnModel* pModel = pDescriptor->pColumnModel;
+
+ size_t width = 0;
+ for(int32_t i = 0; i < pModel->numOfCols; ++i) {
+ SSchema* pSchema = &pModel->pFields[i].field;
+ if (width < pSchema->bytes) {
+ width = pSchema->bytes;
+ }
+ }
+
+ char* buf = malloc(width);
+ assert(width > 0 && buf != NULL);
+
+ if (end - start + 1 <= 8) {
+ tColDataInsertSort(pDescriptor, numOfRows, start, end, data, compareFn, buf);
+ } else {
+ columnwiseQSortImpl(pDescriptor, numOfRows, start, end, data, order, compareFn, buf);
+ }
+
+ free(buf);
+}
+
/*
* deep copy of sschema
*/
diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c
index ddb63c5012..f186726c01 100644
--- a/src/query/src/qFill.c
+++ b/src/query/src/qFill.c
@@ -38,8 +38,11 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_
pFillInfo->numOfTags = numOfTags;
pFillInfo->numOfCols = numOfCols;
pFillInfo->precision = precision;
- pFillInfo->slidingTime = slidingTime;
- pFillInfo->slidingUnit = slidingUnit;
+
+ pFillInfo->interval.interval = slidingTime;
+ pFillInfo->interval.intervalUnit = slidingUnit;
+ pFillInfo->interval.sliding = slidingTime;
+ pFillInfo->interval.slidingUnit = slidingUnit;
pFillInfo->pData = malloc(POINTER_BYTES * numOfCols);
if (numOfTags > 0) {
@@ -108,21 +111,15 @@ void* taosDestoryFillInfo(SFillInfo* pFillInfo) {
return NULL;
}
-static TSKEY taosGetRevisedEndKey(TSKEY ekey, int32_t order, int64_t timeInterval, int8_t slidingTimeUnit, int8_t precision) {
- if (order == TSDB_ORDER_ASC) {
- return ekey;
- } else {
- return taosGetIntervalStartTimestamp(ekey, timeInterval, timeInterval, slidingTimeUnit, precision);
- }
-}
-
void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey) {
if (pFillInfo->fillType == TSDB_FILL_NONE) {
return;
}
- pFillInfo->endKey = taosGetRevisedEndKey(endKey, pFillInfo->order, pFillInfo->slidingTime, pFillInfo->slidingUnit,
- pFillInfo->precision);
+ pFillInfo->endKey = endKey;
+ if (pFillInfo->order != TSDB_ORDER_ASC) {
+ pFillInfo->endKey = taosTimeTruncate(endKey, &pFillInfo->interval, pFillInfo->precision);
+ }
pFillInfo->rowIdx = 0;
pFillInfo->numOfRows = numOfRows;
@@ -172,30 +169,34 @@ int64_t getFilledNumOfRes(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows
int32_t numOfRows = taosNumOfRemainRows(pFillInfo);
- TSKEY ekey1 = taosGetRevisedEndKey(ekey, pFillInfo->order, pFillInfo->slidingTime, pFillInfo->slidingUnit,
- pFillInfo->precision);
+ TSKEY ekey1 = ekey;
+ if (pFillInfo->order != TSDB_ORDER_ASC) {
+ ekey1 = taosTimeTruncate(ekey, &pFillInfo->interval, pFillInfo->precision);
+ }
int64_t numOfRes = -1;
if (numOfRows > 0) { // still fill gap within current data block, not generating data after the result set.
TSKEY lastKey = tsList[pFillInfo->numOfRows - 1];
-
- if (pFillInfo->slidingUnit != 'y' && pFillInfo->slidingUnit != 'n') {
- numOfRes = (int64_t)(ABS(lastKey - pFillInfo->start) / pFillInfo->slidingTime) + 1;
- } else {
- numOfRes = taosCountNatualInterval(lastKey, pFillInfo->start, pFillInfo->slidingTime, pFillInfo->slidingUnit, pFillInfo->precision) + 1;
- }
+ numOfRes = taosTimeCountInterval(
+ lastKey,
+ pFillInfo->start,
+ pFillInfo->interval.sliding,
+ pFillInfo->interval.slidingUnit,
+ pFillInfo->precision);
+ numOfRes += 1;
assert(numOfRes >= numOfRows);
} else { // reach the end of data
if ((ekey1 < pFillInfo->start && FILL_IS_ASC_FILL(pFillInfo)) ||
(ekey1 > pFillInfo->start && !FILL_IS_ASC_FILL(pFillInfo))) {
return 0;
}
- // the numOfRes rows are all filled with specified policy
- if (pFillInfo->slidingUnit != 'y' && pFillInfo->slidingUnit != 'n') {
- numOfRes = (ABS(ekey1 - pFillInfo->start) / pFillInfo->slidingTime) + 1;
- } else {
- numOfRes = taosCountNatualInterval(ekey1, pFillInfo->start, pFillInfo->slidingTime, pFillInfo->slidingUnit, pFillInfo->precision) + 1;
- }
+ numOfRes = taosTimeCountInterval(
+ ekey1,
+ pFillInfo->start,
+ pFillInfo->interval.sliding,
+ pFillInfo->interval.slidingUnit,
+ pFillInfo->precision);
+ numOfRes += 1;
}
return (numOfRes > maxNumOfRows) ? maxNumOfRows : numOfRes;
@@ -374,12 +375,7 @@ static void doFillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* nu
setTagsValue(pFillInfo, data, *num);
}
-// TODO natual sliding time
- if (pFillInfo->slidingUnit != 'n' && pFillInfo->slidingUnit != 'y') {
- pFillInfo->start += (pFillInfo->slidingTime * step);
- } else {
- pFillInfo->start = taosAddNatualInterval(pFillInfo->start, pFillInfo->slidingTime*step, pFillInfo->slidingUnit, pFillInfo->precision);
- }
+ pFillInfo->start = taosTimeAdd(pFillInfo->start, pFillInfo->interval.sliding * step, pFillInfo->interval.slidingUnit, pFillInfo->precision);
pFillInfo->numOfCurrent++;
(*num) += 1;
@@ -486,12 +482,7 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu
// set the tag value for final result
setTagsValue(pFillInfo, data, num);
- // TODO natual sliding time
- if (pFillInfo->slidingUnit != 'n' && pFillInfo->slidingUnit != 'y') {
- pFillInfo->start += (pFillInfo->slidingTime * step);
- } else {
- pFillInfo->start = taosAddNatualInterval(pFillInfo->start, pFillInfo->slidingTime*step, pFillInfo->slidingUnit, pFillInfo->precision);
- }
+ pFillInfo->start = taosTimeAdd(pFillInfo->start, pFillInfo->interval.sliding*step, pFillInfo->interval.slidingUnit, pFillInfo->precision);
pFillInfo->rowIdx += 1;
pFillInfo->numOfCurrent +=1;
diff --git a/src/query/src/qFilterfunc.c b/src/query/src/qFilterfunc.c
index 7e9f5c7da5..b6050dddd8 100644
--- a/src/query/src/qFilterfunc.c
+++ b/src/query/src/qFilterfunc.c
@@ -284,6 +284,14 @@ bool nequal_nchar(SColumnFilterElem *pFilter, char* minval, char *maxval) {
return wcsncmp((wchar_t *)pFilter->filterInfo.pz, varDataVal(minval), varDataLen(minval)/TSDB_NCHAR_SIZE) != 0;
}
+////////////////////////////////////////////////////////////////
+bool isNull_filter(SColumnFilterElem *pFilter, char* minval, char* maxval) {
+ return true;
+}
+
+bool notNull_filter(SColumnFilterElem *pFilter, char* minval, char* maxval) {
+ return true;
+}
////////////////////////////////////////////////////////////////
@@ -398,6 +406,8 @@ bool (*filterFunc_i8[])(SColumnFilterElem *pFilter, char *minval, char *maxval)
largeEqual_i8,
nequal_i8,
NULL,
+ isNull_filter,
+ notNull_filter,
};
bool (*filterFunc_i16[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = {
@@ -409,6 +419,8 @@ bool (*filterFunc_i16[])(SColumnFilterElem *pFilter, char *minval, char *maxval)
largeEqual_i16,
nequal_i16,
NULL,
+ isNull_filter,
+ notNull_filter,
};
bool (*filterFunc_i32[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = {
@@ -420,6 +432,8 @@ bool (*filterFunc_i32[])(SColumnFilterElem *pFilter, char *minval, char *maxval)
largeEqual_i32,
nequal_i32,
NULL,
+ isNull_filter,
+ notNull_filter,
};
bool (*filterFunc_i64[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = {
@@ -431,6 +445,8 @@ bool (*filterFunc_i64[])(SColumnFilterElem *pFilter, char *minval, char *maxval)
largeEqual_i64,
nequal_i64,
NULL,
+ isNull_filter,
+ notNull_filter,
};
bool (*filterFunc_ds[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = {
@@ -442,6 +458,8 @@ bool (*filterFunc_ds[])(SColumnFilterElem *pFilter, char *minval, char *maxval)
largeEqual_ds,
nequal_ds,
NULL,
+ isNull_filter,
+ notNull_filter,
};
bool (*filterFunc_dd[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = {
@@ -453,6 +471,8 @@ bool (*filterFunc_dd[])(SColumnFilterElem *pFilter, char *minval, char *maxval)
largeEqual_dd,
nequal_dd,
NULL,
+ isNull_filter,
+ notNull_filter,
};
bool (*filterFunc_str[])(SColumnFilterElem* pFilter, char* minval, char *maxval) = {
@@ -464,6 +484,8 @@ bool (*filterFunc_str[])(SColumnFilterElem* pFilter, char* minval, char *maxval)
NULL,
nequal_str,
like_str,
+ isNull_filter,
+ notNull_filter,
};
bool (*filterFunc_nchar[])(SColumnFilterElem* pFitler, char* minval, char* maxval) = {
@@ -475,6 +497,8 @@ bool (*filterFunc_nchar[])(SColumnFilterElem* pFitler, char* minval, char* maxva
NULL,
nequal_nchar,
like_nchar,
+ isNull_filter,
+ notNull_filter,
};
bool (*rangeFilterFunc_i8[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = {
diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c
index 9629f24cc2..33237a58c2 100644
--- a/src/query/src/qParserImpl.c
+++ b/src/query/src/qParserImpl.c
@@ -135,7 +135,7 @@ tSQLExpr *tSQLExprIdValueCreate(SStrToken *pToken, int32_t optrType) {
pSQLExpr->val.nType = TSDB_DATA_TYPE_BIGINT;
pSQLExpr->nSQLOptr = TK_TIMESTAMP; // TK_TIMESTAMP used to denote the time value is in microsecond
} else if (optrType == TK_VARIABLE) {
- int32_t ret = getTimestampInUsFromStr(pToken->z, pToken->n, &pSQLExpr->val.i64Key);
+ int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSQLExpr->val.i64Key);
UNUSED(ret);
pSQLExpr->val.nType = TSDB_DATA_TYPE_BIGINT;
@@ -179,7 +179,7 @@ tSQLExpr *tSQLExprCreateFunction(tSQLExprList *pList, SStrToken *pFuncToken, SSt
tSQLExpr *tSQLExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) {
tSQLExpr *pExpr = calloc(1, sizeof(tSQLExpr));
- if (pRight != NULL && pLeft != NULL) {
+ if (pLeft != NULL && pRight != NULL && (optrType != TK_IN)) {
char* endPos = pRight->token.z + pRight->token.n;
pExpr->token.z = pLeft->token.z;
pExpr->token.n = (uint32_t)(endPos - pExpr->token.z);
@@ -275,6 +275,11 @@ tSQLExpr *tSQLExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) {
} else {
pExpr->nSQLOptr = optrType;
pExpr->pLeft = pLeft;
+
+ if (pRight == NULL) {
+ pRight = calloc(1, sizeof(tSQLExpr));
+ }
+
pExpr->pRight = pRight;
}
@@ -438,44 +443,6 @@ void setDBName(SStrToken *pCpxName, SStrToken *pDB) {
pCpxName->n = pDB->n;
}
-int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t *result) {
- *result = val;
-
- switch (unit) {
- case 's':
- (*result) *= MILLISECOND_PER_SECOND;
- break;
- case 'm':
- (*result) *= MILLISECOND_PER_MINUTE;
- break;
- case 'h':
- (*result) *= MILLISECOND_PER_HOUR;
- break;
- case 'd':
- (*result) *= MILLISECOND_PER_DAY;
- break;
- case 'w':
- (*result) *= MILLISECOND_PER_WEEK;
- break;
- case 'n':
- (*result) *= MILLISECOND_PER_MONTH;
- break;
- case 'y':
- (*result) *= MILLISECOND_PER_YEAR;
- break;
- case 'a':
- break;
- default: {
- ;
- return -1;
- }
- }
-
- /* get the value in microsecond */
- (*result) *= 1000L;
- return 0;
-}
-
void tSQLSetColumnInfo(TAOS_FIELD *pField, SStrToken *pName, TAOS_FIELD *pType) {
int32_t maxLen = sizeof(pField->name) / sizeof(pField->name[0]);
@@ -530,7 +497,7 @@ void tSQLSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
* extract the select info out of sql string
*/
SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, tVariantList *pFrom, tSQLExpr *pWhere,
- tVariantList *pGroupby, tVariantList *pSortOrder, SStrToken *pInterval,
+ tVariantList *pGroupby, tVariantList *pSortOrder, SIntervalVal *pInterval,
SStrToken *pSliding, tVariantList *pFill, SLimitVal *pLimit, SLimitVal *pGLimit) {
assert(pSelection != NULL);
@@ -553,7 +520,8 @@ SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection,
}
if (pInterval != NULL) {
- pQuery->interval = *pInterval;
+ pQuery->interval = pInterval->interval;
+ pQuery->offset = pInterval->offset;
}
if (pSliding != NULL) {
diff --git a/src/query/src/qPercentile.c b/src/query/src/qPercentile.c
index 1ce5861e52..3a8be781d5 100644
--- a/src/query/src/qPercentile.c
+++ b/src/query/src/qPercentile.c
@@ -70,6 +70,33 @@ static void resetBoundingBox(MinMaxEntry* range, int32_t type) {
}
}
+static int32_t setBoundingBox(MinMaxEntry* range, int16_t type, double minval, double maxval) {
+ if (minval > maxval) {
+ return -1;
+ }
+
+ switch(type) {
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_INT:
+ range->iMinVal = (int32_t) minval;
+ range->iMaxVal = (int32_t) maxval;
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ range->i64MinVal = (int64_t) minval;
+ range->i64MaxVal = (int64_t) maxval;
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ case TSDB_DATA_TYPE_DOUBLE:
+ range->dMinVal = minval;
+ range->dMaxVal = maxval;
+ break;
+ }
+
+ return 0;
+}
+
static void resetPosInfo(SSlotInfo* pInfo) {
pInfo->size = 0;
pInfo->pageId = -1;
@@ -135,6 +162,11 @@ int32_t tBucketBigIntHash(tMemBucket *pBucket, const void *value) {
return index;
} else {
+ // out of range
+ if (v < pBucket->range.i64MinVal || v > pBucket->range.i64MaxVal) {
+ return -1;
+ }
+
// todo hash for bigint and float and double
int64_t span = pBucket->range.i64MaxVal - pBucket->range.i64MinVal;
if (span < pBucket->numOfSlots) {
@@ -179,6 +211,11 @@ int32_t tBucketIntHash(tMemBucket *pBucket, const void *value) {
return index;
} else {
+ // out of range
+ if (v < pBucket->range.iMinVal || v > pBucket->range.iMaxVal) {
+ return -1;
+ }
+
// divide a range of [iMinVal, iMaxVal] into 1024 buckets
int32_t span = pBucket->range.iMaxVal - pBucket->range.iMinVal;
if (span < pBucket->numOfSlots) {
@@ -209,6 +246,12 @@ int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value) {
double posx = (v + DBL_MAX) / x;
return ((int32_t)posx) % pBucket->numOfSlots;
} else {
+
+ // out of range
+ if (v < pBucket->range.dMinVal || v > pBucket->range.dMaxVal) {
+ return -1;
+ }
+
// divide a range of [dMinVal, dMaxVal] into 1024 buckets
double span = pBucket->range.dMaxVal - pBucket->range.dMinVal;
if (span < pBucket->numOfSlots) {
@@ -262,7 +305,7 @@ static void resetSlotInfo(tMemBucket* pBucket) {
}
}
-tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType) {
+tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, double maxval) {
tMemBucket *pBucket = (tMemBucket *)calloc(1, sizeof(tMemBucket));
if (pBucket == NULL) {
return NULL;
@@ -278,9 +321,14 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType) {
pBucket->maxCapacity = 200000;
+ if (setBoundingBox(&pBucket->range, pBucket->type, minval, maxval) != 0) {
+ uError("MemBucket:%p, invalid value range: %f-%f", pBucket, minval, maxval);
+ free(pBucket);
+ return NULL;
+ }
+
pBucket->elemPerPage = (pBucket->bufPageSize - sizeof(tFilePage))/pBucket->bytes;
pBucket->comparFn = getKeyComparFunc(pBucket->type);
- resetBoundingBox(&pBucket->range, pBucket->type);
pBucket->hashFunc = getHashFunc(pBucket->type);
if (pBucket->hashFunc == NULL) {
@@ -395,23 +443,25 @@ void tMemBucketUpdateBoundingBox(MinMaxEntry *r, char *data, int32_t dataType) {
/*
* in memory bucket, we only accept data array list
*/
-void tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
+int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
assert(pBucket != NULL && data != NULL && size > 0);
+
pBucket->total += (int32_t)size;
int32_t bytes = pBucket->bytes;
-
for (int32_t i = 0; i < size; ++i) {
char *d = (char *) data + i * bytes;
- int32_t slotIdx = (pBucket->hashFunc)(pBucket, d);
- assert(slotIdx >= 0);
+ int32_t index = (pBucket->hashFunc)(pBucket, d);
+ if (index == -1) { // the value is out of range, do not add it into bucket
+ return -1;
+ }
- tMemBucketSlot *pSlot = &pBucket->pSlots[slotIdx];
+ tMemBucketSlot *pSlot = &pBucket->pSlots[index];
tMemBucketUpdateBoundingBox(&pSlot->range, d, pBucket->type);
// ensure available memory pages to allocate
- int32_t groupId = getGroupId(pBucket->numOfSlots, slotIdx, pBucket->times);
+ int32_t groupId = getGroupId(pBucket->numOfSlots, index, pBucket->times);
int32_t pageId = -1;
if (pSlot->info.data == NULL || pSlot->info.data->num >= pBucket->elemPerPage) {
@@ -432,10 +482,12 @@ void tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
pSlot->info.data->num += 1;
pSlot->info.size += 1;
}
+
+ return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////
-static void findMaxMinValue(tMemBucket *pMemBucket, double *maxVal, double *minVal) {
+static UNUSED_FUNC void findMaxMinValue(tMemBucket *pMemBucket, double *maxVal, double *minVal) {
*minVal = DBL_MAX;
*maxVal = -DBL_MAX;
@@ -681,16 +733,29 @@ double getPercentile(tMemBucket *pMemBucket, double percent) {
// find the min/max value, no need to scan all data in bucket
if (fabs(percent - 100.0) < DBL_EPSILON || (percent < DBL_EPSILON)) {
- double minx = 0, maxx = 0;
- findMaxMinValue(pMemBucket, &maxx, &minx);
+ MinMaxEntry* pRange = &pMemBucket->range;
- return fabs(percent - 100) < DBL_EPSILON ? maxx : minx;
+ switch(pMemBucket->type) {
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_INT:
+ return fabs(percent - 100) < DBL_EPSILON? pRange->iMaxVal:pRange->iMinVal;
+ case TSDB_DATA_TYPE_BIGINT: {
+ double v = (double)(fabs(percent - 100) < DBL_EPSILON ? pRange->i64MaxVal : pRange->i64MinVal);
+ return v;
+ }
+ case TSDB_DATA_TYPE_FLOAT:
+ case TSDB_DATA_TYPE_DOUBLE:
+ return fabs(percent - 100) < DBL_EPSILON? pRange->dMaxVal:pRange->dMinVal;
+ default:
+ return -1;
+ }
}
double percentVal = (percent * (pMemBucket->total - 1)) / ((double)100.0);
- int32_t orderIdx = (int32_t)percentVal;
// do put data by using buckets
+ int32_t orderIdx = (int32_t)percentVal;
return getPercentileImpl(pMemBucket, orderIdx, percentVal - orderIdx);
}
diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c
index 509362863c..2bd92c74a4 100644
--- a/src/query/src/qUtil.c
+++ b/src/query/src/qUtil.c
@@ -54,7 +54,7 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
- pWindowResInfo->interval = pRuntimeEnv->pQuery->intervalTime;
+ pWindowResInfo->interval = pRuntimeEnv->pQuery->interval.interval;
pSummary->internalSupSize += sizeof(SWindowResult) * threshold;
pSummary->internalSupSize += (pRuntimeEnv->pQuery->numOfOutput * sizeof(SResultInfo) + pRuntimeEnv->interBufSize) * pWindowResInfo->capacity;
@@ -126,11 +126,26 @@ void clearFirstNTimeWindow(SQueryRuntimeEnv *pRuntimeEnv, int32_t num) {
int32_t numOfClosed = numOfClosedTimeWindow(pWindowResInfo);
assert(num >= 0 && num <= numOfClosed);
-
+
+ int16_t type = pWindowResInfo->type;
+
+ char *key = NULL;
+ int16_t bytes = -1;
+
for (int32_t i = 0; i < num; ++i) {
SWindowResult *pResult = &pWindowResInfo->pResult[i];
if (pResult->closed) { // remove the window slot from hash table
- taosHashRemove(pWindowResInfo->hashList, (const char *)&pResult->skey, pWindowResInfo->type);
+
+ // todo refactor
+ if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
+ key = varDataVal(pResult->key);
+ bytes = varDataLen(pResult->key);
+ } else {
+ key = (char*) &pResult->win.skey;
+ bytes = tDataTypeDesc[pWindowResInfo->type].nSize;
+ }
+
+ taosHashRemove(pWindowResInfo->hashList, (const char *)key, bytes);
} else {
break;
}
@@ -150,15 +165,24 @@ void clearFirstNTimeWindow(SQueryRuntimeEnv *pRuntimeEnv, int32_t num) {
}
pWindowResInfo->size = remain;
+
for (int32_t k = 0; k < pWindowResInfo->size; ++k) {
SWindowResult *pResult = &pWindowResInfo->pResult[k];
- int32_t *p = (int32_t *)taosHashGet(pWindowResInfo->hashList, (const char *)&pResult->skey,
- tDataTypeDesc[pWindowResInfo->type].nSize);
+
+ if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
+ key = varDataVal(pResult->key);
+ bytes = varDataLen(pResult->key);
+ } else {
+ key = (char*) &pResult->win.skey;
+ bytes = tDataTypeDesc[pWindowResInfo->type].nSize;
+ }
+
+ int32_t *p = (int32_t *)taosHashGet(pWindowResInfo->hashList, (const char *)key, bytes);
assert(p != NULL);
+
int32_t v = (*p - num);
assert(v >= 0 && v <= pWindowResInfo->size);
- taosHashPut(pWindowResInfo->hashList, (char *)&pResult->skey, tDataTypeDesc[pWindowResInfo->type].nSize,
- (char *)&v, sizeof(int32_t));
+ taosHashPut(pWindowResInfo->hashList, (char *)key, bytes, (char *)&v, sizeof(int32_t));
}
pWindowResInfo->curIndex = -1;
@@ -207,20 +231,19 @@ void removeRedundantWindow(SWindowResInfo *pWindowResInfo, TSKEY lastKey, int32_
}
// get the result order
- int32_t resultOrder = (pWindowResInfo->pResult[0].skey < pWindowResInfo->pResult[1].skey)? 1:-1;
-
+ int32_t resultOrder = (pWindowResInfo->pResult[0].win.skey < pWindowResInfo->pResult[1].win.skey)? 1:-1;
if (order != resultOrder) {
return;
}
int32_t i = 0;
if (order == QUERY_ASC_FORWARD_STEP) {
- TSKEY ekey = pWindowResInfo->pResult[i].skey + pWindowResInfo->interval;
+ TSKEY ekey = pWindowResInfo->pResult[i].win.ekey;
while (i < pWindowResInfo->size && (ekey < lastKey)) {
++i;
}
} else if (order == QUERY_DESC_FORWARD_STEP) {
- while (i < pWindowResInfo->size && (pWindowResInfo->pResult[i].skey > lastKey)) {
+ while (i < pWindowResInfo->size && (pWindowResInfo->pResult[i].win.skey > lastKey)) {
++i;
}
}
@@ -258,7 +281,7 @@ void clearTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pWindow
pWindowRes->numOfRows = 0;
pWindowRes->pos = (SPosInfo){-1, -1};
pWindowRes->closed = false;
- pWindowRes->skey = TSKEY_INITIAL_VAL;
+ pWindowRes->win = TSWINDOW_INITIALIZER;
}
/**
@@ -268,7 +291,7 @@ void clearTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pWindow
*/
void copyTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *dst, const SWindowResult *src) {
dst->numOfRows = src->numOfRows;
- dst->skey = src->skey;
+ dst->win = src->win;
dst->closed = src->closed;
int32_t nOutputCols = pRuntimeEnv->pQuery->numOfOutput;
diff --git a/src/query/src/sql.c b/src/query/src/sql.c
index 373e57963c..a18efdeb74 100644
--- a/src/query/src/sql.c
+++ b/src/query/src/sql.c
@@ -114,6 +114,7 @@ typedef union {
tSQLExprList* yy224;
int64_t yy271;
tVariant yy312;
+ SIntervalVal yy314;
SCreateTableSQL* yy374;
tFieldList* yy449;
tVariantList* yy494;
@@ -126,17 +127,17 @@ typedef union {
#define ParseARG_FETCH SSqlInfo* pInfo = yypParser->pInfo
#define ParseARG_STORE yypParser->pInfo = pInfo
#define YYFALLBACK 1
-#define YYNSTATE 244
-#define YYNRULE 225
+#define YYNSTATE 252
+#define YYNRULE 230
#define YYNTOKEN 206
-#define YY_MAX_SHIFT 243
-#define YY_MIN_SHIFTREDUCE 403
-#define YY_MAX_SHIFTREDUCE 627
-#define YY_ERROR_ACTION 628
-#define YY_ACCEPT_ACTION 629
-#define YY_NO_ACTION 630
-#define YY_MIN_REDUCE 631
-#define YY_MAX_REDUCE 855
+#define YY_MAX_SHIFT 251
+#define YY_MIN_SHIFTREDUCE 416
+#define YY_MAX_SHIFTREDUCE 645
+#define YY_ERROR_ACTION 646
+#define YY_ACCEPT_ACTION 647
+#define YY_NO_ACTION 648
+#define YY_MIN_REDUCE 649
+#define YY_MAX_REDUCE 878
/************* End control #defines *******************************************/
/* Define the yytestcase() macro to be a no-op if is not already defined
@@ -202,122 +203,124 @@ typedef union {
** yy_default[] Default action for each state.
**
*********** Begin parsing tables **********************************************/
-#define YY_ACTTAB_COUNT (549)
+#define YY_ACTTAB_COUNT (566)
static const YYACTIONTYPE yy_action[] = {
- /* 0 */ 731, 444, 221, 729, 730, 629, 243, 510, 732, 445,
- /* 10 */ 734, 735, 733, 41, 43, 526, 35, 36, 523, 11,
- /* 20 */ 524, 29, 525, 444, 199, 39, 37, 40, 38, 155,
- /* 30 */ 241, 445, 748, 34, 33, 219, 218, 32, 31, 30,
- /* 40 */ 41, 43, 761, 35, 36, 136, 172, 173, 29, 137,
- /* 50 */ 21, 199, 39, 37, 40, 38, 184, 141, 160, 843,
- /* 60 */ 34, 33, 839, 772, 32, 31, 30, 404, 405, 406,
- /* 70 */ 407, 408, 409, 410, 411, 412, 413, 414, 415, 242,
- /* 80 */ 41, 43, 230, 35, 36, 746, 62, 137, 29, 137,
- /* 90 */ 21, 199, 39, 37, 40, 38, 159, 843, 27, 842,
- /* 100 */ 34, 33, 56, 838, 32, 31, 30, 105, 43, 8,
- /* 110 */ 35, 36, 63, 115, 769, 29, 761, 527, 199, 39,
- /* 120 */ 37, 40, 38, 168, 539, 747, 583, 34, 33, 18,
- /* 130 */ 156, 32, 31, 30, 16, 210, 236, 235, 209, 208,
- /* 140 */ 207, 234, 206, 233, 232, 231, 205, 727, 105, 715,
- /* 150 */ 716, 717, 718, 719, 720, 721, 722, 723, 724, 725,
- /* 160 */ 726, 35, 36, 798, 837, 194, 29, 177, 157, 199,
- /* 170 */ 39, 37, 40, 38, 181, 180, 21, 21, 34, 33,
- /* 180 */ 444, 12, 32, 31, 30, 164, 596, 750, 445, 587,
- /* 190 */ 153, 590, 154, 593, 105, 164, 596, 21, 17, 587,
- /* 200 */ 150, 590, 196, 593, 60, 26, 90, 89, 144, 169,
- /* 210 */ 217, 747, 747, 16, 149, 236, 235, 161, 162, 167,
- /* 220 */ 234, 198, 233, 232, 231, 142, 670, 161, 162, 128,
- /* 230 */ 222, 542, 747, 164, 596, 17, 143, 587, 750, 590,
- /* 240 */ 105, 593, 26, 39, 37, 40, 38, 100, 170, 145,
- /* 250 */ 797, 34, 33, 101, 26, 32, 31, 30, 32, 31,
- /* 260 */ 30, 78, 183, 564, 565, 161, 162, 230, 589, 152,
- /* 270 */ 592, 76, 80, 85, 88, 79, 240, 239, 97, 34,
- /* 280 */ 33, 82, 42, 32, 31, 30, 118, 119, 70, 66,
- /* 290 */ 69, 237, 42, 595, 679, 163, 61, 128, 132, 130,
- /* 300 */ 93, 92, 91, 595, 671, 187, 585, 128, 594, 588,
- /* 310 */ 750, 591, 171, 534, 47, 216, 215, 146, 594, 555,
- /* 320 */ 186, 147, 556, 46, 613, 148, 14, 597, 13, 139,
- /* 330 */ 42, 13, 50, 48, 3, 135, 75, 74, 140, 516,
- /* 340 */ 515, 595, 586, 46, 22, 138, 203, 10, 9, 51,
- /* 350 */ 22, 852, 530, 528, 531, 529, 594, 87, 86, 749,
- /* 360 */ 808, 807, 165, 804, 803, 166, 771, 741, 220, 776,
- /* 370 */ 763, 778, 102, 790, 789, 116, 117, 114, 681, 204,
- /* 380 */ 133, 24, 213, 678, 214, 851, 72, 850, 848, 26,
- /* 390 */ 120, 699, 25, 23, 185, 95, 134, 668, 81, 551,
- /* 400 */ 666, 83, 84, 664, 188, 663, 174, 129, 661, 660,
- /* 410 */ 659, 658, 657, 649, 131, 655, 653, 192, 52, 651,
- /* 420 */ 760, 57, 49, 58, 791, 44, 197, 195, 193, 191,
- /* 430 */ 189, 28, 212, 77, 223, 224, 225, 226, 227, 228,
- /* 440 */ 229, 238, 627, 176, 175, 626, 201, 178, 179, 53,
- /* 450 */ 625, 618, 182, 536, 64, 151, 186, 67, 552, 55,
- /* 460 */ 103, 158, 662, 59, 200, 94, 96, 123, 700, 121,
- /* 470 */ 126, 106, 107, 122, 124, 125, 127, 112, 108, 109,
- /* 480 */ 113, 745, 110, 656, 111, 1, 2, 190, 5, 557,
- /* 490 */ 104, 19, 6, 598, 20, 4, 15, 7, 65, 485,
- /* 500 */ 202, 481, 479, 478, 477, 474, 448, 211, 68, 45,
- /* 510 */ 71, 73, 22, 512, 511, 509, 54, 469, 467, 459,
- /* 520 */ 465, 461, 463, 457, 455, 484, 483, 482, 480, 476,
- /* 530 */ 475, 46, 446, 419, 417, 631, 630, 630, 630, 630,
- /* 540 */ 630, 630, 630, 630, 630, 630, 630, 98, 99,
+ /* 0 */ 751, 459, 11, 749, 750, 647, 251, 459, 752, 460,
+ /* 10 */ 754, 755, 753, 35, 36, 460, 37, 38, 159, 249,
+ /* 20 */ 170, 29, 141, 459, 206, 41, 39, 43, 40, 140,
+ /* 30 */ 145, 460, 865, 34, 33, 862, 141, 32, 31, 30,
+ /* 40 */ 35, 36, 781, 37, 38, 165, 866, 170, 29, 141,
+ /* 50 */ 62, 206, 41, 39, 43, 40, 191, 525, 164, 866,
+ /* 60 */ 34, 33, 27, 21, 32, 31, 30, 417, 418, 419,
+ /* 70 */ 420, 421, 422, 423, 424, 425, 426, 427, 428, 250,
+ /* 80 */ 35, 36, 181, 37, 38, 227, 226, 170, 29, 781,
+ /* 90 */ 176, 206, 41, 39, 43, 40, 174, 162, 767, 792,
+ /* 100 */ 34, 33, 56, 160, 32, 31, 30, 21, 36, 8,
+ /* 110 */ 37, 38, 63, 118, 170, 29, 770, 108, 206, 41,
+ /* 120 */ 39, 43, 40, 32, 31, 30, 599, 34, 33, 78,
+ /* 130 */ 875, 32, 31, 30, 238, 37, 38, 108, 238, 170,
+ /* 140 */ 29, 184, 766, 206, 41, 39, 43, 40, 188, 187,
+ /* 150 */ 789, 177, 34, 33, 224, 223, 32, 31, 30, 16,
+ /* 160 */ 218, 244, 243, 217, 216, 215, 242, 214, 241, 240,
+ /* 170 */ 239, 213, 747, 818, 735, 736, 737, 738, 739, 740,
+ /* 180 */ 741, 742, 743, 744, 745, 746, 169, 612, 103, 12,
+ /* 190 */ 603, 17, 606, 819, 609, 201, 169, 612, 26, 108,
+ /* 200 */ 603, 108, 606, 861, 609, 153, 169, 612, 173, 567,
+ /* 210 */ 603, 154, 606, 105, 609, 90, 89, 148, 166, 167,
+ /* 220 */ 34, 33, 205, 102, 32, 31, 30, 770, 166, 167,
+ /* 230 */ 26, 21, 557, 41, 39, 43, 40, 549, 166, 167,
+ /* 240 */ 194, 34, 33, 17, 193, 32, 31, 30, 860, 16,
+ /* 250 */ 26, 244, 243, 203, 21, 60, 242, 61, 241, 240,
+ /* 260 */ 239, 248, 247, 96, 175, 229, 767, 76, 80, 245,
+ /* 270 */ 190, 554, 21, 85, 88, 79, 18, 156, 121, 122,
+ /* 280 */ 605, 82, 608, 42, 70, 66, 69, 225, 770, 767,
+ /* 290 */ 135, 133, 601, 42, 611, 768, 93, 92, 91, 690,
+ /* 300 */ 168, 207, 131, 42, 611, 230, 545, 767, 546, 610,
+ /* 310 */ 699, 157, 691, 131, 611, 131, 604, 541, 607, 610,
+ /* 320 */ 538, 571, 539, 47, 540, 46, 580, 581, 602, 610,
+ /* 330 */ 572, 631, 613, 50, 14, 13, 13, 531, 543, 3,
+ /* 340 */ 544, 46, 48, 530, 75, 74, 811, 22, 178, 179,
+ /* 350 */ 51, 211, 10, 9, 829, 22, 87, 86, 101, 99,
+ /* 360 */ 158, 143, 144, 146, 147, 151, 152, 150, 139, 149,
+ /* 370 */ 769, 142, 828, 171, 825, 824, 172, 791, 761, 796,
+ /* 380 */ 228, 783, 798, 104, 810, 119, 120, 701, 117, 212,
+ /* 390 */ 615, 137, 24, 221, 698, 26, 222, 192, 874, 72,
+ /* 400 */ 873, 871, 123, 719, 25, 100, 23, 138, 566, 688,
+ /* 410 */ 81, 686, 83, 84, 684, 195, 780, 683, 161, 542,
+ /* 420 */ 180, 199, 132, 681, 680, 679, 52, 49, 678, 677,
+ /* 430 */ 109, 134, 44, 675, 204, 673, 671, 669, 667, 202,
+ /* 440 */ 200, 198, 196, 28, 136, 220, 57, 58, 812, 77,
+ /* 450 */ 231, 232, 233, 234, 235, 236, 237, 246, 209, 645,
+ /* 460 */ 53, 182, 183, 644, 110, 64, 67, 155, 186, 185,
+ /* 470 */ 682, 643, 94, 636, 676, 189, 126, 125, 720, 124,
+ /* 480 */ 127, 128, 130, 129, 95, 668, 1, 551, 193, 765,
+ /* 490 */ 2, 55, 113, 111, 114, 112, 115, 116, 59, 568,
+ /* 500 */ 163, 106, 197, 5, 573, 107, 6, 65, 614, 19,
+ /* 510 */ 4, 20, 15, 208, 616, 7, 210, 500, 496, 494,
+ /* 520 */ 493, 492, 489, 463, 219, 68, 45, 71, 73, 22,
+ /* 530 */ 527, 526, 524, 54, 484, 482, 474, 480, 476, 478,
+ /* 540 */ 472, 470, 499, 498, 497, 495, 491, 490, 46, 461,
+ /* 550 */ 432, 430, 649, 648, 648, 648, 648, 648, 648, 648,
+ /* 560 */ 648, 648, 648, 648, 97, 98,
};
static const YYCODETYPE yy_lookahead[] = {
- /* 0 */ 226, 1, 210, 229, 230, 207, 208, 5, 234, 9,
- /* 10 */ 236, 237, 238, 13, 14, 2, 16, 17, 5, 260,
- /* 20 */ 7, 21, 9, 1, 24, 25, 26, 27, 28, 209,
- /* 30 */ 210, 9, 240, 33, 34, 33, 34, 37, 38, 39,
- /* 40 */ 13, 14, 244, 16, 17, 260, 33, 34, 21, 260,
- /* 50 */ 210, 24, 25, 26, 27, 28, 258, 260, 269, 270,
- /* 60 */ 33, 34, 260, 210, 37, 38, 39, 45, 46, 47,
+ /* 0 */ 226, 1, 260, 229, 230, 207, 208, 1, 234, 9,
+ /* 10 */ 236, 237, 238, 13, 14, 9, 16, 17, 209, 210,
+ /* 20 */ 20, 21, 260, 1, 24, 25, 26, 27, 28, 260,
+ /* 30 */ 260, 9, 270, 33, 34, 260, 260, 37, 38, 39,
+ /* 40 */ 13, 14, 244, 16, 17, 269, 270, 20, 21, 260,
+ /* 50 */ 247, 24, 25, 26, 27, 28, 258, 5, 269, 270,
+ /* 60 */ 33, 34, 259, 210, 37, 38, 39, 45, 46, 47,
/* 70 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
- /* 80 */ 13, 14, 78, 16, 17, 245, 247, 260, 21, 260,
- /* 90 */ 210, 24, 25, 26, 27, 28, 269, 270, 259, 270,
- /* 100 */ 33, 34, 102, 260, 37, 38, 39, 210, 14, 98,
- /* 110 */ 16, 17, 101, 102, 261, 21, 244, 104, 24, 25,
- /* 120 */ 26, 27, 28, 243, 103, 245, 99, 33, 34, 108,
- /* 130 */ 258, 37, 38, 39, 85, 86, 87, 88, 89, 90,
- /* 140 */ 91, 92, 93, 94, 95, 96, 97, 226, 210, 228,
- /* 150 */ 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
- /* 160 */ 239, 16, 17, 266, 260, 268, 21, 126, 227, 24,
- /* 170 */ 25, 26, 27, 28, 133, 134, 210, 210, 33, 34,
- /* 180 */ 1, 44, 37, 38, 39, 1, 2, 246, 9, 5,
- /* 190 */ 260, 7, 260, 9, 210, 1, 2, 210, 98, 5,
- /* 200 */ 63, 7, 264, 9, 266, 105, 69, 70, 71, 243,
- /* 210 */ 243, 245, 245, 85, 77, 87, 88, 33, 34, 227,
- /* 220 */ 92, 37, 94, 95, 96, 260, 214, 33, 34, 217,
- /* 230 */ 243, 37, 245, 1, 2, 98, 260, 5, 246, 7,
- /* 240 */ 210, 9, 105, 25, 26, 27, 28, 98, 63, 260,
- /* 250 */ 266, 33, 34, 210, 105, 37, 38, 39, 37, 38,
- /* 260 */ 39, 72, 125, 115, 116, 33, 34, 78, 5, 132,
- /* 270 */ 7, 64, 65, 66, 67, 68, 60, 61, 62, 33,
- /* 280 */ 34, 74, 98, 37, 38, 39, 64, 65, 66, 67,
- /* 290 */ 68, 227, 98, 109, 214, 59, 266, 217, 64, 65,
- /* 300 */ 66, 67, 68, 109, 214, 262, 1, 217, 124, 5,
- /* 310 */ 246, 7, 127, 99, 103, 130, 131, 260, 124, 99,
- /* 320 */ 106, 260, 99, 103, 99, 260, 103, 99, 103, 260,
- /* 330 */ 98, 103, 103, 122, 98, 260, 128, 129, 260, 99,
- /* 340 */ 99, 109, 37, 103, 103, 260, 99, 128, 129, 120,
- /* 350 */ 103, 246, 5, 5, 7, 7, 124, 72, 73, 246,
- /* 360 */ 241, 241, 241, 241, 241, 241, 210, 242, 241, 210,
- /* 370 */ 244, 210, 210, 267, 267, 210, 210, 248, 210, 210,
- /* 380 */ 210, 210, 210, 210, 210, 210, 210, 210, 210, 105,
- /* 390 */ 210, 210, 210, 210, 244, 59, 210, 210, 210, 109,
- /* 400 */ 210, 210, 210, 210, 263, 210, 210, 210, 210, 210,
- /* 410 */ 210, 210, 210, 210, 210, 210, 210, 263, 119, 210,
- /* 420 */ 257, 211, 121, 211, 211, 118, 113, 117, 112, 111,
- /* 430 */ 110, 123, 75, 84, 83, 49, 80, 82, 53, 81,
- /* 440 */ 79, 75, 5, 5, 135, 5, 211, 135, 5, 211,
- /* 450 */ 5, 86, 126, 99, 215, 211, 106, 215, 99, 107,
- /* 460 */ 98, 1, 211, 103, 100, 212, 212, 219, 225, 224,
- /* 470 */ 221, 256, 255, 223, 222, 220, 218, 250, 254, 253,
- /* 480 */ 249, 244, 252, 211, 251, 216, 213, 98, 114, 99,
- /* 490 */ 98, 103, 114, 99, 103, 98, 98, 98, 72, 9,
- /* 500 */ 100, 5, 5, 5, 5, 5, 76, 15, 72, 16,
- /* 510 */ 129, 129, 103, 5, 5, 99, 98, 5, 5, 5,
- /* 520 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- /* 530 */ 5, 103, 76, 59, 58, 0, 271, 271, 271, 271,
- /* 540 */ 271, 271, 271, 271, 271, 271, 271, 21, 21, 271,
- /* 550 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271,
- /* 560 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271,
+ /* 80 */ 13, 14, 60, 16, 17, 33, 34, 20, 21, 244,
+ /* 90 */ 66, 24, 25, 26, 27, 28, 243, 227, 245, 210,
+ /* 100 */ 33, 34, 102, 258, 37, 38, 39, 210, 14, 98,
+ /* 110 */ 16, 17, 101, 102, 20, 21, 246, 210, 24, 25,
+ /* 120 */ 26, 27, 28, 37, 38, 39, 99, 33, 34, 73,
+ /* 130 */ 246, 37, 38, 39, 78, 16, 17, 210, 78, 20,
+ /* 140 */ 21, 126, 245, 24, 25, 26, 27, 28, 133, 134,
+ /* 150 */ 261, 127, 33, 34, 130, 131, 37, 38, 39, 85,
+ /* 160 */ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ /* 170 */ 96, 97, 226, 266, 228, 229, 230, 231, 232, 233,
+ /* 180 */ 234, 235, 236, 237, 238, 239, 1, 2, 210, 44,
+ /* 190 */ 5, 98, 7, 266, 9, 268, 1, 2, 105, 210,
+ /* 200 */ 5, 210, 7, 260, 9, 60, 1, 2, 227, 99,
+ /* 210 */ 5, 66, 7, 103, 9, 70, 71, 72, 33, 34,
+ /* 220 */ 33, 34, 37, 98, 37, 38, 39, 246, 33, 34,
+ /* 230 */ 105, 210, 37, 25, 26, 27, 28, 99, 33, 34,
+ /* 240 */ 262, 33, 34, 98, 106, 37, 38, 39, 260, 85,
+ /* 250 */ 105, 87, 88, 264, 210, 266, 92, 266, 94, 95,
+ /* 260 */ 96, 63, 64, 65, 243, 210, 245, 61, 62, 227,
+ /* 270 */ 125, 103, 210, 67, 68, 69, 108, 132, 61, 62,
+ /* 280 */ 5, 75, 7, 98, 67, 68, 69, 243, 246, 245,
+ /* 290 */ 61, 62, 1, 98, 109, 240, 67, 68, 69, 214,
+ /* 300 */ 59, 15, 217, 98, 109, 243, 5, 245, 7, 124,
+ /* 310 */ 214, 260, 214, 217, 109, 217, 5, 2, 7, 124,
+ /* 320 */ 5, 99, 7, 103, 9, 103, 115, 116, 37, 124,
+ /* 330 */ 99, 99, 99, 103, 103, 103, 103, 99, 5, 98,
+ /* 340 */ 7, 103, 122, 99, 128, 129, 267, 103, 33, 34,
+ /* 350 */ 120, 99, 128, 129, 241, 103, 73, 74, 61, 62,
+ /* 360 */ 260, 260, 260, 260, 260, 260, 260, 260, 260, 260,
+ /* 370 */ 246, 260, 241, 241, 241, 241, 241, 210, 242, 210,
+ /* 380 */ 241, 244, 210, 210, 267, 210, 210, 210, 248, 210,
+ /* 390 */ 104, 210, 210, 210, 210, 105, 210, 244, 210, 210,
+ /* 400 */ 210, 210, 210, 210, 210, 59, 210, 210, 109, 210,
+ /* 410 */ 210, 210, 210, 210, 210, 263, 257, 210, 263, 104,
+ /* 420 */ 210, 263, 210, 210, 210, 210, 119, 121, 210, 210,
+ /* 430 */ 256, 210, 118, 210, 113, 210, 210, 210, 210, 117,
+ /* 440 */ 112, 111, 110, 123, 210, 76, 211, 211, 211, 84,
+ /* 450 */ 83, 49, 80, 82, 53, 81, 79, 76, 211, 5,
+ /* 460 */ 211, 135, 5, 5, 255, 215, 215, 211, 5, 135,
+ /* 470 */ 211, 5, 212, 86, 211, 126, 219, 223, 225, 224,
+ /* 480 */ 222, 220, 218, 221, 212, 211, 216, 99, 106, 244,
+ /* 490 */ 213, 107, 252, 254, 251, 253, 250, 249, 103, 99,
+ /* 500 */ 1, 98, 98, 114, 99, 98, 114, 73, 99, 103,
+ /* 510 */ 98, 103, 98, 100, 104, 98, 100, 9, 5, 5,
+ /* 520 */ 5, 5, 5, 77, 15, 73, 16, 129, 129, 103,
+ /* 530 */ 5, 5, 99, 98, 5, 5, 5, 5, 5, 5,
+ /* 540 */ 5, 5, 5, 5, 5, 5, 5, 5, 103, 77,
+ /* 550 */ 59, 58, 0, 271, 271, 271, 271, 271, 271, 271,
+ /* 560 */ 271, 271, 271, 271, 21, 21, 271, 271, 271, 271,
/* 570 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271,
/* 580 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271,
/* 590 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271,
@@ -336,83 +339,87 @@ static const YYCODETYPE yy_lookahead[] = {
/* 720 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271,
/* 730 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271,
/* 740 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271,
- /* 750 */ 271, 271, 271, 271, 271,
+ /* 750 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271,
+ /* 760 */ 271, 271, 271, 271, 271, 271, 271, 271, 271, 271,
+ /* 770 */ 271, 271,
};
-#define YY_SHIFT_COUNT (243)
+#define YY_SHIFT_COUNT (251)
#define YY_SHIFT_MIN (0)
-#define YY_SHIFT_MAX (535)
+#define YY_SHIFT_MAX (552)
static const unsigned short int yy_shift_ofst[] = {
- /* 0 */ 137, 49, 128, 184, 232, 179, 179, 179, 179, 179,
- /* 10 */ 179, 0, 22, 232, 13, 13, 13, 100, 179, 179,
- /* 20 */ 179, 179, 179, 189, 4, 4, 549, 194, 232, 232,
- /* 30 */ 232, 232, 232, 232, 232, 232, 232, 232, 232, 232,
- /* 40 */ 232, 232, 232, 232, 232, 13, 13, 2, 2, 2,
- /* 50 */ 2, 2, 2, 11, 2, 149, 179, 179, 179, 179,
- /* 60 */ 148, 148, 21, 179, 179, 179, 179, 179, 179, 179,
- /* 70 */ 179, 179, 179, 179, 179, 179, 179, 179, 179, 179,
- /* 80 */ 179, 179, 179, 179, 179, 179, 179, 179, 179, 179,
- /* 90 */ 179, 179, 179, 179, 179, 179, 179, 179, 179, 179,
- /* 100 */ 284, 336, 336, 290, 290, 336, 299, 301, 307, 313,
- /* 110 */ 310, 316, 318, 320, 308, 284, 336, 336, 357, 357,
- /* 120 */ 336, 349, 351, 386, 356, 355, 385, 358, 361, 336,
- /* 130 */ 366, 336, 366, 549, 549, 27, 67, 67, 67, 94,
- /* 140 */ 145, 218, 218, 218, 207, 246, 246, 246, 246, 222,
- /* 150 */ 234, 185, 41, 221, 221, 216, 214, 220, 223, 225,
- /* 160 */ 228, 263, 304, 305, 236, 211, 229, 240, 241, 247,
- /* 170 */ 208, 219, 347, 348, 285, 437, 309, 438, 440, 312,
- /* 180 */ 443, 445, 365, 326, 350, 354, 352, 360, 359, 362,
- /* 190 */ 460, 389, 390, 392, 388, 374, 391, 378, 394, 397,
- /* 200 */ 398, 364, 399, 400, 426, 490, 496, 497, 498, 499,
- /* 210 */ 500, 430, 492, 436, 493, 381, 382, 409, 508, 509,
- /* 220 */ 416, 418, 409, 512, 513, 514, 515, 516, 517, 518,
- /* 230 */ 519, 520, 521, 522, 523, 524, 525, 428, 456, 526,
- /* 240 */ 527, 474, 476, 535,
+ /* 0 */ 145, 74, 164, 185, 205, 6, 6, 6, 6, 6,
+ /* 10 */ 6, 0, 22, 205, 315, 315, 315, 93, 6, 6,
+ /* 20 */ 6, 6, 6, 56, 60, 60, 566, 195, 205, 205,
+ /* 30 */ 205, 205, 205, 205, 205, 205, 205, 205, 205, 205,
+ /* 40 */ 205, 205, 205, 205, 205, 315, 315, 52, 52, 52,
+ /* 50 */ 52, 52, 52, 11, 52, 125, 6, 6, 6, 6,
+ /* 60 */ 211, 211, 168, 6, 6, 6, 6, 6, 6, 6,
+ /* 70 */ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ /* 80 */ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ /* 90 */ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ /* 100 */ 6, 6, 290, 346, 346, 299, 299, 299, 346, 307,
+ /* 110 */ 306, 314, 321, 322, 328, 330, 332, 320, 290, 346,
+ /* 120 */ 346, 369, 369, 346, 365, 367, 402, 372, 371, 401,
+ /* 130 */ 374, 377, 346, 381, 346, 381, 346, 566, 566, 27,
+ /* 140 */ 67, 67, 67, 94, 119, 208, 208, 208, 206, 187,
+ /* 150 */ 187, 187, 187, 217, 229, 24, 15, 86, 86, 198,
+ /* 160 */ 138, 110, 222, 231, 232, 233, 275, 311, 291, 241,
+ /* 170 */ 286, 220, 230, 238, 244, 252, 216, 224, 301, 333,
+ /* 180 */ 283, 297, 454, 326, 457, 458, 334, 463, 466, 387,
+ /* 190 */ 349, 382, 388, 384, 395, 400, 403, 499, 404, 405,
+ /* 200 */ 407, 406, 389, 408, 392, 409, 412, 410, 414, 413,
+ /* 210 */ 417, 416, 434, 508, 513, 514, 515, 516, 517, 446,
+ /* 220 */ 509, 452, 510, 398, 399, 426, 525, 526, 433, 435,
+ /* 230 */ 426, 529, 530, 531, 532, 533, 534, 535, 536, 537,
+ /* 240 */ 538, 539, 540, 541, 542, 445, 472, 543, 544, 491,
+ /* 250 */ 493, 552,
};
-#define YY_REDUCE_COUNT (134)
-#define YY_REDUCE_MIN (-241)
-#define YY_REDUCE_MAX (273)
+#define YY_REDUCE_COUNT (138)
+#define YY_REDUCE_MIN (-258)
+#define YY_REDUCE_MAX (277)
static const short yy_reduce_ofst[] = {
- /* 0 */ -202, -79, -226, -211, -173, -103, -62, -120, -34, -33,
- /* 10 */ -13, -147, -180, -171, -59, -8, 64, -128, 43, -16,
- /* 20 */ 30, -208, -160, 12, 80, 90, -161, -241, -215, -203,
- /* 30 */ -198, -157, -96, -70, -68, -35, -24, -11, 57, 61,
- /* 40 */ 65, 69, 75, 78, 85, 105, 113, 119, 120, 121,
- /* 50 */ 122, 123, 124, 125, 127, 126, 156, 159, 161, 162,
- /* 60 */ 106, 107, 129, 165, 166, 168, 169, 170, 171, 172,
- /* 70 */ 173, 174, 175, 176, 177, 178, 180, 181, 182, 183,
- /* 80 */ 186, 187, 188, 190, 191, 192, 193, 195, 196, 197,
- /* 90 */ 198, 199, 200, 201, 202, 203, 204, 205, 206, 209,
- /* 100 */ 150, 210, 212, 141, 154, 213, 163, 215, 217, 224,
- /* 110 */ 226, 230, 233, 227, 231, 237, 235, 238, 239, 242,
- /* 120 */ 244, 243, 245, 250, 248, 252, 255, 249, 258, 251,
- /* 130 */ 253, 272, 254, 269, 273,
+ /* 0 */ -202, -54, -226, -224, -211, -73, -11, -147, 21, 44,
+ /* 10 */ 62, -111, -191, -238, -130, -19, 42, -155, -22, -93,
+ /* 20 */ -9, 55, -103, 85, 96, 98, -197, -258, -231, -230,
+ /* 30 */ -225, -57, -12, 51, 100, 101, 102, 103, 104, 105,
+ /* 40 */ 106, 107, 108, 109, 111, -116, 124, 113, 131, 132,
+ /* 50 */ 133, 134, 135, 136, 139, 137, 167, 169, 172, 173,
+ /* 60 */ 79, 117, 140, 175, 176, 177, 179, 181, 182, 183,
+ /* 70 */ 184, 186, 188, 189, 190, 191, 192, 193, 194, 196,
+ /* 80 */ 197, 199, 200, 201, 202, 203, 204, 207, 210, 212,
+ /* 90 */ 213, 214, 215, 218, 219, 221, 223, 225, 226, 227,
+ /* 100 */ 228, 234, 153, 235, 236, 152, 155, 158, 237, 159,
+ /* 110 */ 174, 209, 239, 242, 240, 243, 246, 248, 245, 247,
+ /* 120 */ 249, 250, 251, 256, 253, 255, 254, 257, 258, 261,
+ /* 130 */ 262, 264, 259, 260, 263, 272, 274, 270, 277,
};
static const YYACTIONTYPE yy_default[] = {
- /* 0 */ 628, 680, 669, 845, 845, 628, 628, 628, 628, 628,
- /* 10 */ 628, 773, 646, 845, 628, 628, 628, 628, 628, 628,
- /* 20 */ 628, 628, 628, 682, 682, 682, 768, 628, 628, 628,
- /* 30 */ 628, 628, 628, 628, 628, 628, 628, 628, 628, 628,
- /* 40 */ 628, 628, 628, 628, 628, 628, 628, 628, 628, 628,
- /* 50 */ 628, 628, 628, 628, 628, 628, 628, 775, 777, 628,
- /* 60 */ 794, 794, 766, 628, 628, 628, 628, 628, 628, 628,
- /* 70 */ 628, 628, 628, 628, 628, 628, 628, 628, 628, 628,
- /* 80 */ 628, 667, 628, 665, 628, 628, 628, 628, 628, 628,
- /* 90 */ 628, 628, 628, 628, 628, 628, 628, 654, 628, 628,
- /* 100 */ 628, 648, 648, 628, 628, 648, 801, 805, 799, 787,
- /* 110 */ 795, 786, 782, 781, 809, 628, 648, 648, 677, 677,
- /* 120 */ 648, 698, 696, 694, 686, 692, 688, 690, 684, 648,
- /* 130 */ 675, 648, 675, 714, 728, 628, 810, 844, 800, 828,
- /* 140 */ 827, 840, 834, 833, 628, 832, 831, 830, 829, 628,
- /* 150 */ 628, 628, 628, 836, 835, 628, 628, 628, 628, 628,
- /* 160 */ 628, 628, 628, 628, 812, 806, 802, 628, 628, 628,
- /* 170 */ 628, 628, 628, 628, 628, 628, 628, 628, 628, 628,
- /* 180 */ 628, 628, 628, 628, 765, 628, 628, 774, 628, 628,
- /* 190 */ 628, 628, 628, 628, 796, 628, 788, 628, 628, 628,
- /* 200 */ 628, 628, 628, 742, 628, 628, 628, 628, 628, 628,
- /* 210 */ 628, 628, 628, 628, 628, 628, 628, 849, 628, 628,
- /* 220 */ 628, 736, 847, 628, 628, 628, 628, 628, 628, 628,
- /* 230 */ 628, 628, 628, 628, 628, 628, 628, 701, 628, 652,
- /* 240 */ 650, 628, 644, 628,
+ /* 0 */ 646, 700, 689, 868, 868, 646, 646, 646, 646, 646,
+ /* 10 */ 646, 793, 664, 868, 646, 646, 646, 646, 646, 646,
+ /* 20 */ 646, 646, 646, 702, 702, 702, 788, 646, 646, 646,
+ /* 30 */ 646, 646, 646, 646, 646, 646, 646, 646, 646, 646,
+ /* 40 */ 646, 646, 646, 646, 646, 646, 646, 646, 646, 646,
+ /* 50 */ 646, 646, 646, 646, 646, 646, 646, 795, 797, 646,
+ /* 60 */ 815, 815, 786, 646, 646, 646, 646, 646, 646, 646,
+ /* 70 */ 646, 646, 646, 646, 646, 646, 646, 646, 646, 646,
+ /* 80 */ 646, 687, 646, 685, 646, 646, 646, 646, 646, 646,
+ /* 90 */ 646, 646, 646, 646, 646, 646, 674, 646, 646, 646,
+ /* 100 */ 646, 646, 646, 666, 666, 646, 646, 646, 666, 822,
+ /* 110 */ 826, 820, 808, 816, 807, 803, 802, 830, 646, 666,
+ /* 120 */ 666, 697, 697, 666, 718, 716, 714, 706, 712, 708,
+ /* 130 */ 710, 704, 666, 695, 666, 695, 666, 734, 748, 646,
+ /* 140 */ 831, 867, 821, 857, 856, 863, 855, 854, 646, 850,
+ /* 150 */ 851, 853, 852, 646, 646, 646, 646, 859, 858, 646,
+ /* 160 */ 646, 646, 646, 646, 646, 646, 646, 646, 646, 833,
+ /* 170 */ 646, 827, 823, 646, 646, 646, 646, 646, 646, 646,
+ /* 180 */ 646, 646, 646, 646, 646, 646, 646, 646, 646, 646,
+ /* 190 */ 646, 785, 646, 646, 794, 646, 646, 646, 646, 646,
+ /* 200 */ 646, 817, 646, 809, 646, 646, 646, 646, 646, 646,
+ /* 210 */ 646, 762, 646, 646, 646, 646, 646, 646, 646, 646,
+ /* 220 */ 646, 646, 646, 646, 646, 872, 646, 646, 646, 756,
+ /* 230 */ 870, 646, 646, 646, 646, 646, 646, 646, 646, 646,
+ /* 240 */ 646, 646, 646, 646, 646, 721, 646, 672, 670, 646,
+ /* 250 */ 662, 646,
};
/********** End of lemon-generated parsing tables *****************************/
@@ -492,12 +499,13 @@ static const YYCODETYPE yyFallback[] = {
0, /* VNODES => nothing */
1, /* IPTOKEN => ID */
0, /* DOT => nothing */
+ 0, /* CREATE => nothing */
+ 0, /* TABLE => nothing */
+ 1, /* DATABASE => ID */
0, /* TABLES => nothing */
0, /* STABLES => nothing */
0, /* VGROUPS => nothing */
0, /* DROP => nothing */
- 0, /* TABLE => nothing */
- 1, /* DATABASE => ID */
0, /* DNODE => nothing */
0, /* USER => nothing */
0, /* ACCOUNT => nothing */
@@ -509,7 +517,6 @@ static const YYCODETYPE yyFallback[] = {
0, /* LOCAL => nothing */
0, /* IF => nothing */
0, /* EXISTS => nothing */
- 0, /* CREATE => nothing */
0, /* PPS => nothing */
0, /* TSERIES => nothing */
0, /* DBS => nothing */
@@ -784,24 +791,24 @@ static const char *const yyTokenName[] = {
/* 57 */ "VNODES",
/* 58 */ "IPTOKEN",
/* 59 */ "DOT",
- /* 60 */ "TABLES",
- /* 61 */ "STABLES",
- /* 62 */ "VGROUPS",
- /* 63 */ "DROP",
- /* 64 */ "TABLE",
- /* 65 */ "DATABASE",
- /* 66 */ "DNODE",
- /* 67 */ "USER",
- /* 68 */ "ACCOUNT",
- /* 69 */ "USE",
- /* 70 */ "DESCRIBE",
- /* 71 */ "ALTER",
- /* 72 */ "PASS",
- /* 73 */ "PRIVILEGE",
- /* 74 */ "LOCAL",
- /* 75 */ "IF",
- /* 76 */ "EXISTS",
- /* 77 */ "CREATE",
+ /* 60 */ "CREATE",
+ /* 61 */ "TABLE",
+ /* 62 */ "DATABASE",
+ /* 63 */ "TABLES",
+ /* 64 */ "STABLES",
+ /* 65 */ "VGROUPS",
+ /* 66 */ "DROP",
+ /* 67 */ "DNODE",
+ /* 68 */ "USER",
+ /* 69 */ "ACCOUNT",
+ /* 70 */ "USE",
+ /* 71 */ "DESCRIBE",
+ /* 72 */ "ALTER",
+ /* 73 */ "PASS",
+ /* 74 */ "PRIVILEGE",
+ /* 75 */ "LOCAL",
+ /* 76 */ "IF",
+ /* 77 */ "EXISTS",
/* 78 */ "PPS",
/* 79 */ "TSERIES",
/* 80 */ "DBS",
@@ -1021,212 +1028,217 @@ static const char *const yyRuleName[] = {
/* 16 */ "dbPrefix ::= ids DOT",
/* 17 */ "cpxName ::=",
/* 18 */ "cpxName ::= DOT ids",
- /* 19 */ "cmd ::= SHOW dbPrefix TABLES",
- /* 20 */ "cmd ::= SHOW dbPrefix TABLES LIKE ids",
- /* 21 */ "cmd ::= SHOW dbPrefix STABLES",
- /* 22 */ "cmd ::= SHOW dbPrefix STABLES LIKE ids",
- /* 23 */ "cmd ::= SHOW dbPrefix VGROUPS",
- /* 24 */ "cmd ::= SHOW dbPrefix VGROUPS ids",
- /* 25 */ "cmd ::= DROP TABLE ifexists ids cpxName",
- /* 26 */ "cmd ::= DROP DATABASE ifexists ids",
- /* 27 */ "cmd ::= DROP DNODE ids",
- /* 28 */ "cmd ::= DROP USER ids",
- /* 29 */ "cmd ::= DROP ACCOUNT ids",
- /* 30 */ "cmd ::= USE ids",
- /* 31 */ "cmd ::= DESCRIBE ids cpxName",
- /* 32 */ "cmd ::= ALTER USER ids PASS ids",
- /* 33 */ "cmd ::= ALTER USER ids PRIVILEGE ids",
- /* 34 */ "cmd ::= ALTER DNODE ids ids",
- /* 35 */ "cmd ::= ALTER DNODE ids ids ids",
- /* 36 */ "cmd ::= ALTER LOCAL ids",
- /* 37 */ "cmd ::= ALTER LOCAL ids ids",
- /* 38 */ "cmd ::= ALTER DATABASE ids alter_db_optr",
- /* 39 */ "cmd ::= ALTER ACCOUNT ids acct_optr",
- /* 40 */ "cmd ::= ALTER ACCOUNT ids PASS ids acct_optr",
- /* 41 */ "ids ::= ID",
- /* 42 */ "ids ::= STRING",
- /* 43 */ "ifexists ::= IF EXISTS",
- /* 44 */ "ifexists ::=",
- /* 45 */ "ifnotexists ::= IF NOT EXISTS",
- /* 46 */ "ifnotexists ::=",
- /* 47 */ "cmd ::= CREATE DNODE ids",
- /* 48 */ "cmd ::= CREATE ACCOUNT ids PASS ids acct_optr",
- /* 49 */ "cmd ::= CREATE DATABASE ifnotexists ids db_optr",
- /* 50 */ "cmd ::= CREATE USER ids PASS ids",
- /* 51 */ "pps ::=",
- /* 52 */ "pps ::= PPS INTEGER",
- /* 53 */ "tseries ::=",
- /* 54 */ "tseries ::= TSERIES INTEGER",
- /* 55 */ "dbs ::=",
- /* 56 */ "dbs ::= DBS INTEGER",
- /* 57 */ "streams ::=",
- /* 58 */ "streams ::= STREAMS INTEGER",
- /* 59 */ "storage ::=",
- /* 60 */ "storage ::= STORAGE INTEGER",
- /* 61 */ "qtime ::=",
- /* 62 */ "qtime ::= QTIME INTEGER",
- /* 63 */ "users ::=",
- /* 64 */ "users ::= USERS INTEGER",
- /* 65 */ "conns ::=",
- /* 66 */ "conns ::= CONNS INTEGER",
- /* 67 */ "state ::=",
- /* 68 */ "state ::= STATE ids",
- /* 69 */ "acct_optr ::= pps tseries storage streams qtime dbs users conns state",
- /* 70 */ "keep ::= KEEP tagitemlist",
- /* 71 */ "cache ::= CACHE INTEGER",
- /* 72 */ "replica ::= REPLICA INTEGER",
- /* 73 */ "quorum ::= QUORUM INTEGER",
- /* 74 */ "days ::= DAYS INTEGER",
- /* 75 */ "minrows ::= MINROWS INTEGER",
- /* 76 */ "maxrows ::= MAXROWS INTEGER",
- /* 77 */ "blocks ::= BLOCKS INTEGER",
- /* 78 */ "ctime ::= CTIME INTEGER",
- /* 79 */ "wal ::= WAL INTEGER",
- /* 80 */ "fsync ::= FSYNC INTEGER",
- /* 81 */ "comp ::= COMP INTEGER",
- /* 82 */ "prec ::= PRECISION STRING",
- /* 83 */ "db_optr ::=",
- /* 84 */ "db_optr ::= db_optr cache",
- /* 85 */ "db_optr ::= db_optr replica",
- /* 86 */ "db_optr ::= db_optr quorum",
- /* 87 */ "db_optr ::= db_optr days",
- /* 88 */ "db_optr ::= db_optr minrows",
- /* 89 */ "db_optr ::= db_optr maxrows",
- /* 90 */ "db_optr ::= db_optr blocks",
- /* 91 */ "db_optr ::= db_optr ctime",
- /* 92 */ "db_optr ::= db_optr wal",
- /* 93 */ "db_optr ::= db_optr fsync",
- /* 94 */ "db_optr ::= db_optr comp",
- /* 95 */ "db_optr ::= db_optr prec",
- /* 96 */ "db_optr ::= db_optr keep",
- /* 97 */ "alter_db_optr ::=",
- /* 98 */ "alter_db_optr ::= alter_db_optr replica",
- /* 99 */ "alter_db_optr ::= alter_db_optr quorum",
- /* 100 */ "alter_db_optr ::= alter_db_optr keep",
- /* 101 */ "alter_db_optr ::= alter_db_optr blocks",
- /* 102 */ "alter_db_optr ::= alter_db_optr comp",
- /* 103 */ "alter_db_optr ::= alter_db_optr wal",
- /* 104 */ "alter_db_optr ::= alter_db_optr fsync",
- /* 105 */ "typename ::= ids",
- /* 106 */ "typename ::= ids LP signed RP",
- /* 107 */ "signed ::= INTEGER",
- /* 108 */ "signed ::= PLUS INTEGER",
- /* 109 */ "signed ::= MINUS INTEGER",
- /* 110 */ "cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args",
- /* 111 */ "create_table_args ::= LP columnlist RP",
- /* 112 */ "create_table_args ::= LP columnlist RP TAGS LP columnlist RP",
- /* 113 */ "create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP",
- /* 114 */ "create_table_args ::= AS select",
- /* 115 */ "columnlist ::= columnlist COMMA column",
- /* 116 */ "columnlist ::= column",
- /* 117 */ "column ::= ids typename",
- /* 118 */ "tagitemlist ::= tagitemlist COMMA tagitem",
- /* 119 */ "tagitemlist ::= tagitem",
- /* 120 */ "tagitem ::= INTEGER",
- /* 121 */ "tagitem ::= FLOAT",
- /* 122 */ "tagitem ::= STRING",
- /* 123 */ "tagitem ::= BOOL",
- /* 124 */ "tagitem ::= NULL",
- /* 125 */ "tagitem ::= MINUS INTEGER",
- /* 126 */ "tagitem ::= MINUS FLOAT",
- /* 127 */ "tagitem ::= PLUS INTEGER",
- /* 128 */ "tagitem ::= PLUS FLOAT",
- /* 129 */ "select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt",
- /* 130 */ "union ::= select",
- /* 131 */ "union ::= LP union RP",
- /* 132 */ "union ::= union UNION ALL select",
- /* 133 */ "union ::= union UNION ALL LP select RP",
- /* 134 */ "cmd ::= union",
- /* 135 */ "select ::= SELECT selcollist",
- /* 136 */ "sclp ::= selcollist COMMA",
- /* 137 */ "sclp ::=",
- /* 138 */ "selcollist ::= sclp expr as",
- /* 139 */ "selcollist ::= sclp STAR",
- /* 140 */ "as ::= AS ids",
- /* 141 */ "as ::= ids",
- /* 142 */ "as ::=",
- /* 143 */ "from ::= FROM tablelist",
- /* 144 */ "tablelist ::= ids cpxName",
- /* 145 */ "tablelist ::= ids cpxName ids",
- /* 146 */ "tablelist ::= tablelist COMMA ids cpxName",
- /* 147 */ "tablelist ::= tablelist COMMA ids cpxName ids",
- /* 148 */ "tmvar ::= VARIABLE",
- /* 149 */ "interval_opt ::= INTERVAL LP tmvar RP",
- /* 150 */ "interval_opt ::=",
- /* 151 */ "fill_opt ::=",
- /* 152 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP",
- /* 153 */ "fill_opt ::= FILL LP ID RP",
- /* 154 */ "sliding_opt ::= SLIDING LP tmvar RP",
- /* 155 */ "sliding_opt ::=",
- /* 156 */ "orderby_opt ::=",
- /* 157 */ "orderby_opt ::= ORDER BY sortlist",
- /* 158 */ "sortlist ::= sortlist COMMA item sortorder",
- /* 159 */ "sortlist ::= item sortorder",
- /* 160 */ "item ::= ids cpxName",
- /* 161 */ "sortorder ::= ASC",
- /* 162 */ "sortorder ::= DESC",
- /* 163 */ "sortorder ::=",
- /* 164 */ "groupby_opt ::=",
- /* 165 */ "groupby_opt ::= GROUP BY grouplist",
- /* 166 */ "grouplist ::= grouplist COMMA item",
- /* 167 */ "grouplist ::= item",
- /* 168 */ "having_opt ::=",
- /* 169 */ "having_opt ::= HAVING expr",
- /* 170 */ "limit_opt ::=",
- /* 171 */ "limit_opt ::= LIMIT signed",
- /* 172 */ "limit_opt ::= LIMIT signed OFFSET signed",
- /* 173 */ "limit_opt ::= LIMIT signed COMMA signed",
- /* 174 */ "slimit_opt ::=",
- /* 175 */ "slimit_opt ::= SLIMIT signed",
- /* 176 */ "slimit_opt ::= SLIMIT signed SOFFSET signed",
- /* 177 */ "slimit_opt ::= SLIMIT signed COMMA signed",
- /* 178 */ "where_opt ::=",
- /* 179 */ "where_opt ::= WHERE expr",
- /* 180 */ "expr ::= LP expr RP",
- /* 181 */ "expr ::= ID",
- /* 182 */ "expr ::= ID DOT ID",
- /* 183 */ "expr ::= ID DOT STAR",
- /* 184 */ "expr ::= INTEGER",
- /* 185 */ "expr ::= MINUS INTEGER",
- /* 186 */ "expr ::= PLUS INTEGER",
- /* 187 */ "expr ::= FLOAT",
- /* 188 */ "expr ::= MINUS FLOAT",
- /* 189 */ "expr ::= PLUS FLOAT",
- /* 190 */ "expr ::= STRING",
- /* 191 */ "expr ::= NOW",
- /* 192 */ "expr ::= VARIABLE",
- /* 193 */ "expr ::= BOOL",
- /* 194 */ "expr ::= ID LP exprlist RP",
- /* 195 */ "expr ::= ID LP STAR RP",
- /* 196 */ "expr ::= expr AND expr",
- /* 197 */ "expr ::= expr OR expr",
- /* 198 */ "expr ::= expr LT expr",
- /* 199 */ "expr ::= expr GT expr",
- /* 200 */ "expr ::= expr LE expr",
- /* 201 */ "expr ::= expr GE expr",
- /* 202 */ "expr ::= expr NE expr",
- /* 203 */ "expr ::= expr EQ expr",
- /* 204 */ "expr ::= expr PLUS expr",
- /* 205 */ "expr ::= expr MINUS expr",
- /* 206 */ "expr ::= expr STAR expr",
- /* 207 */ "expr ::= expr SLASH expr",
- /* 208 */ "expr ::= expr REM expr",
- /* 209 */ "expr ::= expr LIKE expr",
- /* 210 */ "expr ::= expr IN LP exprlist RP",
- /* 211 */ "exprlist ::= exprlist COMMA expritem",
- /* 212 */ "exprlist ::= expritem",
- /* 213 */ "expritem ::= expr",
- /* 214 */ "expritem ::=",
- /* 215 */ "cmd ::= RESET QUERY CACHE",
- /* 216 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist",
- /* 217 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids",
- /* 218 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist",
- /* 219 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids",
- /* 220 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids",
- /* 221 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem",
- /* 222 */ "cmd ::= KILL CONNECTION INTEGER",
- /* 223 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER",
- /* 224 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER",
+ /* 19 */ "cmd ::= SHOW CREATE TABLE ids cpxName",
+ /* 20 */ "cmd ::= SHOW CREATE DATABASE ids",
+ /* 21 */ "cmd ::= SHOW dbPrefix TABLES",
+ /* 22 */ "cmd ::= SHOW dbPrefix TABLES LIKE ids",
+ /* 23 */ "cmd ::= SHOW dbPrefix STABLES",
+ /* 24 */ "cmd ::= SHOW dbPrefix STABLES LIKE ids",
+ /* 25 */ "cmd ::= SHOW dbPrefix VGROUPS",
+ /* 26 */ "cmd ::= SHOW dbPrefix VGROUPS ids",
+ /* 27 */ "cmd ::= DROP TABLE ifexists ids cpxName",
+ /* 28 */ "cmd ::= DROP DATABASE ifexists ids",
+ /* 29 */ "cmd ::= DROP DNODE ids",
+ /* 30 */ "cmd ::= DROP USER ids",
+ /* 31 */ "cmd ::= DROP ACCOUNT ids",
+ /* 32 */ "cmd ::= USE ids",
+ /* 33 */ "cmd ::= DESCRIBE ids cpxName",
+ /* 34 */ "cmd ::= ALTER USER ids PASS ids",
+ /* 35 */ "cmd ::= ALTER USER ids PRIVILEGE ids",
+ /* 36 */ "cmd ::= ALTER DNODE ids ids",
+ /* 37 */ "cmd ::= ALTER DNODE ids ids ids",
+ /* 38 */ "cmd ::= ALTER LOCAL ids",
+ /* 39 */ "cmd ::= ALTER LOCAL ids ids",
+ /* 40 */ "cmd ::= ALTER DATABASE ids alter_db_optr",
+ /* 41 */ "cmd ::= ALTER ACCOUNT ids acct_optr",
+ /* 42 */ "cmd ::= ALTER ACCOUNT ids PASS ids acct_optr",
+ /* 43 */ "ids ::= ID",
+ /* 44 */ "ids ::= STRING",
+ /* 45 */ "ifexists ::= IF EXISTS",
+ /* 46 */ "ifexists ::=",
+ /* 47 */ "ifnotexists ::= IF NOT EXISTS",
+ /* 48 */ "ifnotexists ::=",
+ /* 49 */ "cmd ::= CREATE DNODE ids",
+ /* 50 */ "cmd ::= CREATE ACCOUNT ids PASS ids acct_optr",
+ /* 51 */ "cmd ::= CREATE DATABASE ifnotexists ids db_optr",
+ /* 52 */ "cmd ::= CREATE USER ids PASS ids",
+ /* 53 */ "pps ::=",
+ /* 54 */ "pps ::= PPS INTEGER",
+ /* 55 */ "tseries ::=",
+ /* 56 */ "tseries ::= TSERIES INTEGER",
+ /* 57 */ "dbs ::=",
+ /* 58 */ "dbs ::= DBS INTEGER",
+ /* 59 */ "streams ::=",
+ /* 60 */ "streams ::= STREAMS INTEGER",
+ /* 61 */ "storage ::=",
+ /* 62 */ "storage ::= STORAGE INTEGER",
+ /* 63 */ "qtime ::=",
+ /* 64 */ "qtime ::= QTIME INTEGER",
+ /* 65 */ "users ::=",
+ /* 66 */ "users ::= USERS INTEGER",
+ /* 67 */ "conns ::=",
+ /* 68 */ "conns ::= CONNS INTEGER",
+ /* 69 */ "state ::=",
+ /* 70 */ "state ::= STATE ids",
+ /* 71 */ "acct_optr ::= pps tseries storage streams qtime dbs users conns state",
+ /* 72 */ "keep ::= KEEP tagitemlist",
+ /* 73 */ "cache ::= CACHE INTEGER",
+ /* 74 */ "replica ::= REPLICA INTEGER",
+ /* 75 */ "quorum ::= QUORUM INTEGER",
+ /* 76 */ "days ::= DAYS INTEGER",
+ /* 77 */ "minrows ::= MINROWS INTEGER",
+ /* 78 */ "maxrows ::= MAXROWS INTEGER",
+ /* 79 */ "blocks ::= BLOCKS INTEGER",
+ /* 80 */ "ctime ::= CTIME INTEGER",
+ /* 81 */ "wal ::= WAL INTEGER",
+ /* 82 */ "fsync ::= FSYNC INTEGER",
+ /* 83 */ "comp ::= COMP INTEGER",
+ /* 84 */ "prec ::= PRECISION STRING",
+ /* 85 */ "db_optr ::=",
+ /* 86 */ "db_optr ::= db_optr cache",
+ /* 87 */ "db_optr ::= db_optr replica",
+ /* 88 */ "db_optr ::= db_optr quorum",
+ /* 89 */ "db_optr ::= db_optr days",
+ /* 90 */ "db_optr ::= db_optr minrows",
+ /* 91 */ "db_optr ::= db_optr maxrows",
+ /* 92 */ "db_optr ::= db_optr blocks",
+ /* 93 */ "db_optr ::= db_optr ctime",
+ /* 94 */ "db_optr ::= db_optr wal",
+ /* 95 */ "db_optr ::= db_optr fsync",
+ /* 96 */ "db_optr ::= db_optr comp",
+ /* 97 */ "db_optr ::= db_optr prec",
+ /* 98 */ "db_optr ::= db_optr keep",
+ /* 99 */ "alter_db_optr ::=",
+ /* 100 */ "alter_db_optr ::= alter_db_optr replica",
+ /* 101 */ "alter_db_optr ::= alter_db_optr quorum",
+ /* 102 */ "alter_db_optr ::= alter_db_optr keep",
+ /* 103 */ "alter_db_optr ::= alter_db_optr blocks",
+ /* 104 */ "alter_db_optr ::= alter_db_optr comp",
+ /* 105 */ "alter_db_optr ::= alter_db_optr wal",
+ /* 106 */ "alter_db_optr ::= alter_db_optr fsync",
+ /* 107 */ "typename ::= ids",
+ /* 108 */ "typename ::= ids LP signed RP",
+ /* 109 */ "signed ::= INTEGER",
+ /* 110 */ "signed ::= PLUS INTEGER",
+ /* 111 */ "signed ::= MINUS INTEGER",
+ /* 112 */ "cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args",
+ /* 113 */ "create_table_args ::= LP columnlist RP",
+ /* 114 */ "create_table_args ::= LP columnlist RP TAGS LP columnlist RP",
+ /* 115 */ "create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP",
+ /* 116 */ "create_table_args ::= AS select",
+ /* 117 */ "columnlist ::= columnlist COMMA column",
+ /* 118 */ "columnlist ::= column",
+ /* 119 */ "column ::= ids typename",
+ /* 120 */ "tagitemlist ::= tagitemlist COMMA tagitem",
+ /* 121 */ "tagitemlist ::= tagitem",
+ /* 122 */ "tagitem ::= INTEGER",
+ /* 123 */ "tagitem ::= FLOAT",
+ /* 124 */ "tagitem ::= STRING",
+ /* 125 */ "tagitem ::= BOOL",
+ /* 126 */ "tagitem ::= NULL",
+ /* 127 */ "tagitem ::= MINUS INTEGER",
+ /* 128 */ "tagitem ::= MINUS FLOAT",
+ /* 129 */ "tagitem ::= PLUS INTEGER",
+ /* 130 */ "tagitem ::= PLUS FLOAT",
+ /* 131 */ "select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt",
+ /* 132 */ "union ::= select",
+ /* 133 */ "union ::= LP union RP",
+ /* 134 */ "union ::= union UNION ALL select",
+ /* 135 */ "union ::= union UNION ALL LP select RP",
+ /* 136 */ "cmd ::= union",
+ /* 137 */ "select ::= SELECT selcollist",
+ /* 138 */ "sclp ::= selcollist COMMA",
+ /* 139 */ "sclp ::=",
+ /* 140 */ "selcollist ::= sclp expr as",
+ /* 141 */ "selcollist ::= sclp STAR",
+ /* 142 */ "as ::= AS ids",
+ /* 143 */ "as ::= ids",
+ /* 144 */ "as ::=",
+ /* 145 */ "from ::= FROM tablelist",
+ /* 146 */ "tablelist ::= ids cpxName",
+ /* 147 */ "tablelist ::= ids cpxName ids",
+ /* 148 */ "tablelist ::= tablelist COMMA ids cpxName",
+ /* 149 */ "tablelist ::= tablelist COMMA ids cpxName ids",
+ /* 150 */ "tmvar ::= VARIABLE",
+ /* 151 */ "interval_opt ::= INTERVAL LP tmvar RP",
+ /* 152 */ "interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP",
+ /* 153 */ "interval_opt ::=",
+ /* 154 */ "fill_opt ::=",
+ /* 155 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP",
+ /* 156 */ "fill_opt ::= FILL LP ID RP",
+ /* 157 */ "sliding_opt ::= SLIDING LP tmvar RP",
+ /* 158 */ "sliding_opt ::=",
+ /* 159 */ "orderby_opt ::=",
+ /* 160 */ "orderby_opt ::= ORDER BY sortlist",
+ /* 161 */ "sortlist ::= sortlist COMMA item sortorder",
+ /* 162 */ "sortlist ::= item sortorder",
+ /* 163 */ "item ::= ids cpxName",
+ /* 164 */ "sortorder ::= ASC",
+ /* 165 */ "sortorder ::= DESC",
+ /* 166 */ "sortorder ::=",
+ /* 167 */ "groupby_opt ::=",
+ /* 168 */ "groupby_opt ::= GROUP BY grouplist",
+ /* 169 */ "grouplist ::= grouplist COMMA item",
+ /* 170 */ "grouplist ::= item",
+ /* 171 */ "having_opt ::=",
+ /* 172 */ "having_opt ::= HAVING expr",
+ /* 173 */ "limit_opt ::=",
+ /* 174 */ "limit_opt ::= LIMIT signed",
+ /* 175 */ "limit_opt ::= LIMIT signed OFFSET signed",
+ /* 176 */ "limit_opt ::= LIMIT signed COMMA signed",
+ /* 177 */ "slimit_opt ::=",
+ /* 178 */ "slimit_opt ::= SLIMIT signed",
+ /* 179 */ "slimit_opt ::= SLIMIT signed SOFFSET signed",
+ /* 180 */ "slimit_opt ::= SLIMIT signed COMMA signed",
+ /* 181 */ "where_opt ::=",
+ /* 182 */ "where_opt ::= WHERE expr",
+ /* 183 */ "expr ::= LP expr RP",
+ /* 184 */ "expr ::= ID",
+ /* 185 */ "expr ::= ID DOT ID",
+ /* 186 */ "expr ::= ID DOT STAR",
+ /* 187 */ "expr ::= INTEGER",
+ /* 188 */ "expr ::= MINUS INTEGER",
+ /* 189 */ "expr ::= PLUS INTEGER",
+ /* 190 */ "expr ::= FLOAT",
+ /* 191 */ "expr ::= MINUS FLOAT",
+ /* 192 */ "expr ::= PLUS FLOAT",
+ /* 193 */ "expr ::= STRING",
+ /* 194 */ "expr ::= NOW",
+ /* 195 */ "expr ::= VARIABLE",
+ /* 196 */ "expr ::= BOOL",
+ /* 197 */ "expr ::= ID LP exprlist RP",
+ /* 198 */ "expr ::= ID LP STAR RP",
+ /* 199 */ "expr ::= expr IS NULL",
+ /* 200 */ "expr ::= expr IS NOT NULL",
+ /* 201 */ "expr ::= expr LT expr",
+ /* 202 */ "expr ::= expr GT expr",
+ /* 203 */ "expr ::= expr LE expr",
+ /* 204 */ "expr ::= expr GE expr",
+ /* 205 */ "expr ::= expr NE expr",
+ /* 206 */ "expr ::= expr EQ expr",
+ /* 207 */ "expr ::= expr AND expr",
+ /* 208 */ "expr ::= expr OR expr",
+ /* 209 */ "expr ::= expr PLUS expr",
+ /* 210 */ "expr ::= expr MINUS expr",
+ /* 211 */ "expr ::= expr STAR expr",
+ /* 212 */ "expr ::= expr SLASH expr",
+ /* 213 */ "expr ::= expr REM expr",
+ /* 214 */ "expr ::= expr LIKE expr",
+ /* 215 */ "expr ::= expr IN LP exprlist RP",
+ /* 216 */ "exprlist ::= exprlist COMMA expritem",
+ /* 217 */ "exprlist ::= expritem",
+ /* 218 */ "expritem ::= expr",
+ /* 219 */ "expritem ::=",
+ /* 220 */ "cmd ::= RESET QUERY CACHE",
+ /* 221 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist",
+ /* 222 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids",
+ /* 223 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist",
+ /* 224 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids",
+ /* 225 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids",
+ /* 226 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem",
+ /* 227 */ "cmd ::= KILL CONNECTION INTEGER",
+ /* 228 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER",
+ /* 229 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER",
};
#endif /* NDEBUG */
@@ -1703,212 +1715,217 @@ static const struct {
{ 209, -2 }, /* (16) dbPrefix ::= ids DOT */
{ 211, 0 }, /* (17) cpxName ::= */
{ 211, -2 }, /* (18) cpxName ::= DOT ids */
- { 208, -3 }, /* (19) cmd ::= SHOW dbPrefix TABLES */
- { 208, -5 }, /* (20) cmd ::= SHOW dbPrefix TABLES LIKE ids */
- { 208, -3 }, /* (21) cmd ::= SHOW dbPrefix STABLES */
- { 208, -5 }, /* (22) cmd ::= SHOW dbPrefix STABLES LIKE ids */
- { 208, -3 }, /* (23) cmd ::= SHOW dbPrefix VGROUPS */
- { 208, -4 }, /* (24) cmd ::= SHOW dbPrefix VGROUPS ids */
- { 208, -5 }, /* (25) cmd ::= DROP TABLE ifexists ids cpxName */
- { 208, -4 }, /* (26) cmd ::= DROP DATABASE ifexists ids */
- { 208, -3 }, /* (27) cmd ::= DROP DNODE ids */
- { 208, -3 }, /* (28) cmd ::= DROP USER ids */
- { 208, -3 }, /* (29) cmd ::= DROP ACCOUNT ids */
- { 208, -2 }, /* (30) cmd ::= USE ids */
- { 208, -3 }, /* (31) cmd ::= DESCRIBE ids cpxName */
- { 208, -5 }, /* (32) cmd ::= ALTER USER ids PASS ids */
- { 208, -5 }, /* (33) cmd ::= ALTER USER ids PRIVILEGE ids */
- { 208, -4 }, /* (34) cmd ::= ALTER DNODE ids ids */
- { 208, -5 }, /* (35) cmd ::= ALTER DNODE ids ids ids */
- { 208, -3 }, /* (36) cmd ::= ALTER LOCAL ids */
- { 208, -4 }, /* (37) cmd ::= ALTER LOCAL ids ids */
- { 208, -4 }, /* (38) cmd ::= ALTER DATABASE ids alter_db_optr */
- { 208, -4 }, /* (39) cmd ::= ALTER ACCOUNT ids acct_optr */
- { 208, -6 }, /* (40) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */
- { 210, -1 }, /* (41) ids ::= ID */
- { 210, -1 }, /* (42) ids ::= STRING */
- { 212, -2 }, /* (43) ifexists ::= IF EXISTS */
- { 212, 0 }, /* (44) ifexists ::= */
- { 215, -3 }, /* (45) ifnotexists ::= IF NOT EXISTS */
- { 215, 0 }, /* (46) ifnotexists ::= */
- { 208, -3 }, /* (47) cmd ::= CREATE DNODE ids */
- { 208, -6 }, /* (48) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */
- { 208, -5 }, /* (49) cmd ::= CREATE DATABASE ifnotexists ids db_optr */
- { 208, -5 }, /* (50) cmd ::= CREATE USER ids PASS ids */
- { 217, 0 }, /* (51) pps ::= */
- { 217, -2 }, /* (52) pps ::= PPS INTEGER */
- { 218, 0 }, /* (53) tseries ::= */
- { 218, -2 }, /* (54) tseries ::= TSERIES INTEGER */
- { 219, 0 }, /* (55) dbs ::= */
- { 219, -2 }, /* (56) dbs ::= DBS INTEGER */
- { 220, 0 }, /* (57) streams ::= */
- { 220, -2 }, /* (58) streams ::= STREAMS INTEGER */
- { 221, 0 }, /* (59) storage ::= */
- { 221, -2 }, /* (60) storage ::= STORAGE INTEGER */
- { 222, 0 }, /* (61) qtime ::= */
- { 222, -2 }, /* (62) qtime ::= QTIME INTEGER */
- { 223, 0 }, /* (63) users ::= */
- { 223, -2 }, /* (64) users ::= USERS INTEGER */
- { 224, 0 }, /* (65) conns ::= */
- { 224, -2 }, /* (66) conns ::= CONNS INTEGER */
- { 225, 0 }, /* (67) state ::= */
- { 225, -2 }, /* (68) state ::= STATE ids */
- { 214, -9 }, /* (69) acct_optr ::= pps tseries storage streams qtime dbs users conns state */
- { 226, -2 }, /* (70) keep ::= KEEP tagitemlist */
- { 228, -2 }, /* (71) cache ::= CACHE INTEGER */
- { 229, -2 }, /* (72) replica ::= REPLICA INTEGER */
- { 230, -2 }, /* (73) quorum ::= QUORUM INTEGER */
- { 231, -2 }, /* (74) days ::= DAYS INTEGER */
- { 232, -2 }, /* (75) minrows ::= MINROWS INTEGER */
- { 233, -2 }, /* (76) maxrows ::= MAXROWS INTEGER */
- { 234, -2 }, /* (77) blocks ::= BLOCKS INTEGER */
- { 235, -2 }, /* (78) ctime ::= CTIME INTEGER */
- { 236, -2 }, /* (79) wal ::= WAL INTEGER */
- { 237, -2 }, /* (80) fsync ::= FSYNC INTEGER */
- { 238, -2 }, /* (81) comp ::= COMP INTEGER */
- { 239, -2 }, /* (82) prec ::= PRECISION STRING */
- { 216, 0 }, /* (83) db_optr ::= */
- { 216, -2 }, /* (84) db_optr ::= db_optr cache */
- { 216, -2 }, /* (85) db_optr ::= db_optr replica */
- { 216, -2 }, /* (86) db_optr ::= db_optr quorum */
- { 216, -2 }, /* (87) db_optr ::= db_optr days */
- { 216, -2 }, /* (88) db_optr ::= db_optr minrows */
- { 216, -2 }, /* (89) db_optr ::= db_optr maxrows */
- { 216, -2 }, /* (90) db_optr ::= db_optr blocks */
- { 216, -2 }, /* (91) db_optr ::= db_optr ctime */
- { 216, -2 }, /* (92) db_optr ::= db_optr wal */
- { 216, -2 }, /* (93) db_optr ::= db_optr fsync */
- { 216, -2 }, /* (94) db_optr ::= db_optr comp */
- { 216, -2 }, /* (95) db_optr ::= db_optr prec */
- { 216, -2 }, /* (96) db_optr ::= db_optr keep */
- { 213, 0 }, /* (97) alter_db_optr ::= */
- { 213, -2 }, /* (98) alter_db_optr ::= alter_db_optr replica */
- { 213, -2 }, /* (99) alter_db_optr ::= alter_db_optr quorum */
- { 213, -2 }, /* (100) alter_db_optr ::= alter_db_optr keep */
- { 213, -2 }, /* (101) alter_db_optr ::= alter_db_optr blocks */
- { 213, -2 }, /* (102) alter_db_optr ::= alter_db_optr comp */
- { 213, -2 }, /* (103) alter_db_optr ::= alter_db_optr wal */
- { 213, -2 }, /* (104) alter_db_optr ::= alter_db_optr fsync */
- { 240, -1 }, /* (105) typename ::= ids */
- { 240, -4 }, /* (106) typename ::= ids LP signed RP */
- { 241, -1 }, /* (107) signed ::= INTEGER */
- { 241, -2 }, /* (108) signed ::= PLUS INTEGER */
- { 241, -2 }, /* (109) signed ::= MINUS INTEGER */
- { 208, -6 }, /* (110) cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args */
- { 242, -3 }, /* (111) create_table_args ::= LP columnlist RP */
- { 242, -7 }, /* (112) create_table_args ::= LP columnlist RP TAGS LP columnlist RP */
- { 242, -7 }, /* (113) create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP */
- { 242, -2 }, /* (114) create_table_args ::= AS select */
- { 243, -3 }, /* (115) columnlist ::= columnlist COMMA column */
- { 243, -1 }, /* (116) columnlist ::= column */
- { 245, -2 }, /* (117) column ::= ids typename */
- { 227, -3 }, /* (118) tagitemlist ::= tagitemlist COMMA tagitem */
- { 227, -1 }, /* (119) tagitemlist ::= tagitem */
- { 246, -1 }, /* (120) tagitem ::= INTEGER */
- { 246, -1 }, /* (121) tagitem ::= FLOAT */
- { 246, -1 }, /* (122) tagitem ::= STRING */
- { 246, -1 }, /* (123) tagitem ::= BOOL */
- { 246, -1 }, /* (124) tagitem ::= NULL */
- { 246, -2 }, /* (125) tagitem ::= MINUS INTEGER */
- { 246, -2 }, /* (126) tagitem ::= MINUS FLOAT */
- { 246, -2 }, /* (127) tagitem ::= PLUS INTEGER */
- { 246, -2 }, /* (128) tagitem ::= PLUS FLOAT */
- { 244, -12 }, /* (129) select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */
- { 258, -1 }, /* (130) union ::= select */
- { 258, -3 }, /* (131) union ::= LP union RP */
- { 258, -4 }, /* (132) union ::= union UNION ALL select */
- { 258, -6 }, /* (133) union ::= union UNION ALL LP select RP */
- { 208, -1 }, /* (134) cmd ::= union */
- { 244, -2 }, /* (135) select ::= SELECT selcollist */
- { 259, -2 }, /* (136) sclp ::= selcollist COMMA */
- { 259, 0 }, /* (137) sclp ::= */
- { 247, -3 }, /* (138) selcollist ::= sclp expr as */
- { 247, -2 }, /* (139) selcollist ::= sclp STAR */
- { 261, -2 }, /* (140) as ::= AS ids */
- { 261, -1 }, /* (141) as ::= ids */
- { 261, 0 }, /* (142) as ::= */
- { 248, -2 }, /* (143) from ::= FROM tablelist */
- { 262, -2 }, /* (144) tablelist ::= ids cpxName */
- { 262, -3 }, /* (145) tablelist ::= ids cpxName ids */
- { 262, -4 }, /* (146) tablelist ::= tablelist COMMA ids cpxName */
- { 262, -5 }, /* (147) tablelist ::= tablelist COMMA ids cpxName ids */
- { 263, -1 }, /* (148) tmvar ::= VARIABLE */
- { 250, -4 }, /* (149) interval_opt ::= INTERVAL LP tmvar RP */
- { 250, 0 }, /* (150) interval_opt ::= */
- { 251, 0 }, /* (151) fill_opt ::= */
- { 251, -6 }, /* (152) fill_opt ::= FILL LP ID COMMA tagitemlist RP */
- { 251, -4 }, /* (153) fill_opt ::= FILL LP ID RP */
- { 252, -4 }, /* (154) sliding_opt ::= SLIDING LP tmvar RP */
- { 252, 0 }, /* (155) sliding_opt ::= */
- { 254, 0 }, /* (156) orderby_opt ::= */
- { 254, -3 }, /* (157) orderby_opt ::= ORDER BY sortlist */
- { 264, -4 }, /* (158) sortlist ::= sortlist COMMA item sortorder */
- { 264, -2 }, /* (159) sortlist ::= item sortorder */
- { 266, -2 }, /* (160) item ::= ids cpxName */
- { 267, -1 }, /* (161) sortorder ::= ASC */
- { 267, -1 }, /* (162) sortorder ::= DESC */
- { 267, 0 }, /* (163) sortorder ::= */
- { 253, 0 }, /* (164) groupby_opt ::= */
- { 253, -3 }, /* (165) groupby_opt ::= GROUP BY grouplist */
- { 268, -3 }, /* (166) grouplist ::= grouplist COMMA item */
- { 268, -1 }, /* (167) grouplist ::= item */
- { 255, 0 }, /* (168) having_opt ::= */
- { 255, -2 }, /* (169) having_opt ::= HAVING expr */
- { 257, 0 }, /* (170) limit_opt ::= */
- { 257, -2 }, /* (171) limit_opt ::= LIMIT signed */
- { 257, -4 }, /* (172) limit_opt ::= LIMIT signed OFFSET signed */
- { 257, -4 }, /* (173) limit_opt ::= LIMIT signed COMMA signed */
- { 256, 0 }, /* (174) slimit_opt ::= */
- { 256, -2 }, /* (175) slimit_opt ::= SLIMIT signed */
- { 256, -4 }, /* (176) slimit_opt ::= SLIMIT signed SOFFSET signed */
- { 256, -4 }, /* (177) slimit_opt ::= SLIMIT signed COMMA signed */
- { 249, 0 }, /* (178) where_opt ::= */
- { 249, -2 }, /* (179) where_opt ::= WHERE expr */
- { 260, -3 }, /* (180) expr ::= LP expr RP */
- { 260, -1 }, /* (181) expr ::= ID */
- { 260, -3 }, /* (182) expr ::= ID DOT ID */
- { 260, -3 }, /* (183) expr ::= ID DOT STAR */
- { 260, -1 }, /* (184) expr ::= INTEGER */
- { 260, -2 }, /* (185) expr ::= MINUS INTEGER */
- { 260, -2 }, /* (186) expr ::= PLUS INTEGER */
- { 260, -1 }, /* (187) expr ::= FLOAT */
- { 260, -2 }, /* (188) expr ::= MINUS FLOAT */
- { 260, -2 }, /* (189) expr ::= PLUS FLOAT */
- { 260, -1 }, /* (190) expr ::= STRING */
- { 260, -1 }, /* (191) expr ::= NOW */
- { 260, -1 }, /* (192) expr ::= VARIABLE */
- { 260, -1 }, /* (193) expr ::= BOOL */
- { 260, -4 }, /* (194) expr ::= ID LP exprlist RP */
- { 260, -4 }, /* (195) expr ::= ID LP STAR RP */
- { 260, -3 }, /* (196) expr ::= expr AND expr */
- { 260, -3 }, /* (197) expr ::= expr OR expr */
- { 260, -3 }, /* (198) expr ::= expr LT expr */
- { 260, -3 }, /* (199) expr ::= expr GT expr */
- { 260, -3 }, /* (200) expr ::= expr LE expr */
- { 260, -3 }, /* (201) expr ::= expr GE expr */
- { 260, -3 }, /* (202) expr ::= expr NE expr */
- { 260, -3 }, /* (203) expr ::= expr EQ expr */
- { 260, -3 }, /* (204) expr ::= expr PLUS expr */
- { 260, -3 }, /* (205) expr ::= expr MINUS expr */
- { 260, -3 }, /* (206) expr ::= expr STAR expr */
- { 260, -3 }, /* (207) expr ::= expr SLASH expr */
- { 260, -3 }, /* (208) expr ::= expr REM expr */
- { 260, -3 }, /* (209) expr ::= expr LIKE expr */
- { 260, -5 }, /* (210) expr ::= expr IN LP exprlist RP */
- { 269, -3 }, /* (211) exprlist ::= exprlist COMMA expritem */
- { 269, -1 }, /* (212) exprlist ::= expritem */
- { 270, -1 }, /* (213) expritem ::= expr */
- { 270, 0 }, /* (214) expritem ::= */
- { 208, -3 }, /* (215) cmd ::= RESET QUERY CACHE */
- { 208, -7 }, /* (216) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
- { 208, -7 }, /* (217) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
- { 208, -7 }, /* (218) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
- { 208, -7 }, /* (219) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
- { 208, -8 }, /* (220) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
- { 208, -9 }, /* (221) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
- { 208, -3 }, /* (222) cmd ::= KILL CONNECTION INTEGER */
- { 208, -5 }, /* (223) cmd ::= KILL STREAM INTEGER COLON INTEGER */
- { 208, -5 }, /* (224) cmd ::= KILL QUERY INTEGER COLON INTEGER */
+ { 208, -5 }, /* (19) cmd ::= SHOW CREATE TABLE ids cpxName */
+ { 208, -4 }, /* (20) cmd ::= SHOW CREATE DATABASE ids */
+ { 208, -3 }, /* (21) cmd ::= SHOW dbPrefix TABLES */
+ { 208, -5 }, /* (22) cmd ::= SHOW dbPrefix TABLES LIKE ids */
+ { 208, -3 }, /* (23) cmd ::= SHOW dbPrefix STABLES */
+ { 208, -5 }, /* (24) cmd ::= SHOW dbPrefix STABLES LIKE ids */
+ { 208, -3 }, /* (25) cmd ::= SHOW dbPrefix VGROUPS */
+ { 208, -4 }, /* (26) cmd ::= SHOW dbPrefix VGROUPS ids */
+ { 208, -5 }, /* (27) cmd ::= DROP TABLE ifexists ids cpxName */
+ { 208, -4 }, /* (28) cmd ::= DROP DATABASE ifexists ids */
+ { 208, -3 }, /* (29) cmd ::= DROP DNODE ids */
+ { 208, -3 }, /* (30) cmd ::= DROP USER ids */
+ { 208, -3 }, /* (31) cmd ::= DROP ACCOUNT ids */
+ { 208, -2 }, /* (32) cmd ::= USE ids */
+ { 208, -3 }, /* (33) cmd ::= DESCRIBE ids cpxName */
+ { 208, -5 }, /* (34) cmd ::= ALTER USER ids PASS ids */
+ { 208, -5 }, /* (35) cmd ::= ALTER USER ids PRIVILEGE ids */
+ { 208, -4 }, /* (36) cmd ::= ALTER DNODE ids ids */
+ { 208, -5 }, /* (37) cmd ::= ALTER DNODE ids ids ids */
+ { 208, -3 }, /* (38) cmd ::= ALTER LOCAL ids */
+ { 208, -4 }, /* (39) cmd ::= ALTER LOCAL ids ids */
+ { 208, -4 }, /* (40) cmd ::= ALTER DATABASE ids alter_db_optr */
+ { 208, -4 }, /* (41) cmd ::= ALTER ACCOUNT ids acct_optr */
+ { 208, -6 }, /* (42) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */
+ { 210, -1 }, /* (43) ids ::= ID */
+ { 210, -1 }, /* (44) ids ::= STRING */
+ { 212, -2 }, /* (45) ifexists ::= IF EXISTS */
+ { 212, 0 }, /* (46) ifexists ::= */
+ { 215, -3 }, /* (47) ifnotexists ::= IF NOT EXISTS */
+ { 215, 0 }, /* (48) ifnotexists ::= */
+ { 208, -3 }, /* (49) cmd ::= CREATE DNODE ids */
+ { 208, -6 }, /* (50) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */
+ { 208, -5 }, /* (51) cmd ::= CREATE DATABASE ifnotexists ids db_optr */
+ { 208, -5 }, /* (52) cmd ::= CREATE USER ids PASS ids */
+ { 217, 0 }, /* (53) pps ::= */
+ { 217, -2 }, /* (54) pps ::= PPS INTEGER */
+ { 218, 0 }, /* (55) tseries ::= */
+ { 218, -2 }, /* (56) tseries ::= TSERIES INTEGER */
+ { 219, 0 }, /* (57) dbs ::= */
+ { 219, -2 }, /* (58) dbs ::= DBS INTEGER */
+ { 220, 0 }, /* (59) streams ::= */
+ { 220, -2 }, /* (60) streams ::= STREAMS INTEGER */
+ { 221, 0 }, /* (61) storage ::= */
+ { 221, -2 }, /* (62) storage ::= STORAGE INTEGER */
+ { 222, 0 }, /* (63) qtime ::= */
+ { 222, -2 }, /* (64) qtime ::= QTIME INTEGER */
+ { 223, 0 }, /* (65) users ::= */
+ { 223, -2 }, /* (66) users ::= USERS INTEGER */
+ { 224, 0 }, /* (67) conns ::= */
+ { 224, -2 }, /* (68) conns ::= CONNS INTEGER */
+ { 225, 0 }, /* (69) state ::= */
+ { 225, -2 }, /* (70) state ::= STATE ids */
+ { 214, -9 }, /* (71) acct_optr ::= pps tseries storage streams qtime dbs users conns state */
+ { 226, -2 }, /* (72) keep ::= KEEP tagitemlist */
+ { 228, -2 }, /* (73) cache ::= CACHE INTEGER */
+ { 229, -2 }, /* (74) replica ::= REPLICA INTEGER */
+ { 230, -2 }, /* (75) quorum ::= QUORUM INTEGER */
+ { 231, -2 }, /* (76) days ::= DAYS INTEGER */
+ { 232, -2 }, /* (77) minrows ::= MINROWS INTEGER */
+ { 233, -2 }, /* (78) maxrows ::= MAXROWS INTEGER */
+ { 234, -2 }, /* (79) blocks ::= BLOCKS INTEGER */
+ { 235, -2 }, /* (80) ctime ::= CTIME INTEGER */
+ { 236, -2 }, /* (81) wal ::= WAL INTEGER */
+ { 237, -2 }, /* (82) fsync ::= FSYNC INTEGER */
+ { 238, -2 }, /* (83) comp ::= COMP INTEGER */
+ { 239, -2 }, /* (84) prec ::= PRECISION STRING */
+ { 216, 0 }, /* (85) db_optr ::= */
+ { 216, -2 }, /* (86) db_optr ::= db_optr cache */
+ { 216, -2 }, /* (87) db_optr ::= db_optr replica */
+ { 216, -2 }, /* (88) db_optr ::= db_optr quorum */
+ { 216, -2 }, /* (89) db_optr ::= db_optr days */
+ { 216, -2 }, /* (90) db_optr ::= db_optr minrows */
+ { 216, -2 }, /* (91) db_optr ::= db_optr maxrows */
+ { 216, -2 }, /* (92) db_optr ::= db_optr blocks */
+ { 216, -2 }, /* (93) db_optr ::= db_optr ctime */
+ { 216, -2 }, /* (94) db_optr ::= db_optr wal */
+ { 216, -2 }, /* (95) db_optr ::= db_optr fsync */
+ { 216, -2 }, /* (96) db_optr ::= db_optr comp */
+ { 216, -2 }, /* (97) db_optr ::= db_optr prec */
+ { 216, -2 }, /* (98) db_optr ::= db_optr keep */
+ { 213, 0 }, /* (99) alter_db_optr ::= */
+ { 213, -2 }, /* (100) alter_db_optr ::= alter_db_optr replica */
+ { 213, -2 }, /* (101) alter_db_optr ::= alter_db_optr quorum */
+ { 213, -2 }, /* (102) alter_db_optr ::= alter_db_optr keep */
+ { 213, -2 }, /* (103) alter_db_optr ::= alter_db_optr blocks */
+ { 213, -2 }, /* (104) alter_db_optr ::= alter_db_optr comp */
+ { 213, -2 }, /* (105) alter_db_optr ::= alter_db_optr wal */
+ { 213, -2 }, /* (106) alter_db_optr ::= alter_db_optr fsync */
+ { 240, -1 }, /* (107) typename ::= ids */
+ { 240, -4 }, /* (108) typename ::= ids LP signed RP */
+ { 241, -1 }, /* (109) signed ::= INTEGER */
+ { 241, -2 }, /* (110) signed ::= PLUS INTEGER */
+ { 241, -2 }, /* (111) signed ::= MINUS INTEGER */
+ { 208, -6 }, /* (112) cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args */
+ { 242, -3 }, /* (113) create_table_args ::= LP columnlist RP */
+ { 242, -7 }, /* (114) create_table_args ::= LP columnlist RP TAGS LP columnlist RP */
+ { 242, -7 }, /* (115) create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP */
+ { 242, -2 }, /* (116) create_table_args ::= AS select */
+ { 243, -3 }, /* (117) columnlist ::= columnlist COMMA column */
+ { 243, -1 }, /* (118) columnlist ::= column */
+ { 245, -2 }, /* (119) column ::= ids typename */
+ { 227, -3 }, /* (120) tagitemlist ::= tagitemlist COMMA tagitem */
+ { 227, -1 }, /* (121) tagitemlist ::= tagitem */
+ { 246, -1 }, /* (122) tagitem ::= INTEGER */
+ { 246, -1 }, /* (123) tagitem ::= FLOAT */
+ { 246, -1 }, /* (124) tagitem ::= STRING */
+ { 246, -1 }, /* (125) tagitem ::= BOOL */
+ { 246, -1 }, /* (126) tagitem ::= NULL */
+ { 246, -2 }, /* (127) tagitem ::= MINUS INTEGER */
+ { 246, -2 }, /* (128) tagitem ::= MINUS FLOAT */
+ { 246, -2 }, /* (129) tagitem ::= PLUS INTEGER */
+ { 246, -2 }, /* (130) tagitem ::= PLUS FLOAT */
+ { 244, -12 }, /* (131) select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */
+ { 258, -1 }, /* (132) union ::= select */
+ { 258, -3 }, /* (133) union ::= LP union RP */
+ { 258, -4 }, /* (134) union ::= union UNION ALL select */
+ { 258, -6 }, /* (135) union ::= union UNION ALL LP select RP */
+ { 208, -1 }, /* (136) cmd ::= union */
+ { 244, -2 }, /* (137) select ::= SELECT selcollist */
+ { 259, -2 }, /* (138) sclp ::= selcollist COMMA */
+ { 259, 0 }, /* (139) sclp ::= */
+ { 247, -3 }, /* (140) selcollist ::= sclp expr as */
+ { 247, -2 }, /* (141) selcollist ::= sclp STAR */
+ { 261, -2 }, /* (142) as ::= AS ids */
+ { 261, -1 }, /* (143) as ::= ids */
+ { 261, 0 }, /* (144) as ::= */
+ { 248, -2 }, /* (145) from ::= FROM tablelist */
+ { 262, -2 }, /* (146) tablelist ::= ids cpxName */
+ { 262, -3 }, /* (147) tablelist ::= ids cpxName ids */
+ { 262, -4 }, /* (148) tablelist ::= tablelist COMMA ids cpxName */
+ { 262, -5 }, /* (149) tablelist ::= tablelist COMMA ids cpxName ids */
+ { 263, -1 }, /* (150) tmvar ::= VARIABLE */
+ { 250, -4 }, /* (151) interval_opt ::= INTERVAL LP tmvar RP */
+ { 250, -6 }, /* (152) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */
+ { 250, 0 }, /* (153) interval_opt ::= */
+ { 251, 0 }, /* (154) fill_opt ::= */
+ { 251, -6 }, /* (155) fill_opt ::= FILL LP ID COMMA tagitemlist RP */
+ { 251, -4 }, /* (156) fill_opt ::= FILL LP ID RP */
+ { 252, -4 }, /* (157) sliding_opt ::= SLIDING LP tmvar RP */
+ { 252, 0 }, /* (158) sliding_opt ::= */
+ { 254, 0 }, /* (159) orderby_opt ::= */
+ { 254, -3 }, /* (160) orderby_opt ::= ORDER BY sortlist */
+ { 264, -4 }, /* (161) sortlist ::= sortlist COMMA item sortorder */
+ { 264, -2 }, /* (162) sortlist ::= item sortorder */
+ { 266, -2 }, /* (163) item ::= ids cpxName */
+ { 267, -1 }, /* (164) sortorder ::= ASC */
+ { 267, -1 }, /* (165) sortorder ::= DESC */
+ { 267, 0 }, /* (166) sortorder ::= */
+ { 253, 0 }, /* (167) groupby_opt ::= */
+ { 253, -3 }, /* (168) groupby_opt ::= GROUP BY grouplist */
+ { 268, -3 }, /* (169) grouplist ::= grouplist COMMA item */
+ { 268, -1 }, /* (170) grouplist ::= item */
+ { 255, 0 }, /* (171) having_opt ::= */
+ { 255, -2 }, /* (172) having_opt ::= HAVING expr */
+ { 257, 0 }, /* (173) limit_opt ::= */
+ { 257, -2 }, /* (174) limit_opt ::= LIMIT signed */
+ { 257, -4 }, /* (175) limit_opt ::= LIMIT signed OFFSET signed */
+ { 257, -4 }, /* (176) limit_opt ::= LIMIT signed COMMA signed */
+ { 256, 0 }, /* (177) slimit_opt ::= */
+ { 256, -2 }, /* (178) slimit_opt ::= SLIMIT signed */
+ { 256, -4 }, /* (179) slimit_opt ::= SLIMIT signed SOFFSET signed */
+ { 256, -4 }, /* (180) slimit_opt ::= SLIMIT signed COMMA signed */
+ { 249, 0 }, /* (181) where_opt ::= */
+ { 249, -2 }, /* (182) where_opt ::= WHERE expr */
+ { 260, -3 }, /* (183) expr ::= LP expr RP */
+ { 260, -1 }, /* (184) expr ::= ID */
+ { 260, -3 }, /* (185) expr ::= ID DOT ID */
+ { 260, -3 }, /* (186) expr ::= ID DOT STAR */
+ { 260, -1 }, /* (187) expr ::= INTEGER */
+ { 260, -2 }, /* (188) expr ::= MINUS INTEGER */
+ { 260, -2 }, /* (189) expr ::= PLUS INTEGER */
+ { 260, -1 }, /* (190) expr ::= FLOAT */
+ { 260, -2 }, /* (191) expr ::= MINUS FLOAT */
+ { 260, -2 }, /* (192) expr ::= PLUS FLOAT */
+ { 260, -1 }, /* (193) expr ::= STRING */
+ { 260, -1 }, /* (194) expr ::= NOW */
+ { 260, -1 }, /* (195) expr ::= VARIABLE */
+ { 260, -1 }, /* (196) expr ::= BOOL */
+ { 260, -4 }, /* (197) expr ::= ID LP exprlist RP */
+ { 260, -4 }, /* (198) expr ::= ID LP STAR RP */
+ { 260, -3 }, /* (199) expr ::= expr IS NULL */
+ { 260, -4 }, /* (200) expr ::= expr IS NOT NULL */
+ { 260, -3 }, /* (201) expr ::= expr LT expr */
+ { 260, -3 }, /* (202) expr ::= expr GT expr */
+ { 260, -3 }, /* (203) expr ::= expr LE expr */
+ { 260, -3 }, /* (204) expr ::= expr GE expr */
+ { 260, -3 }, /* (205) expr ::= expr NE expr */
+ { 260, -3 }, /* (206) expr ::= expr EQ expr */
+ { 260, -3 }, /* (207) expr ::= expr AND expr */
+ { 260, -3 }, /* (208) expr ::= expr OR expr */
+ { 260, -3 }, /* (209) expr ::= expr PLUS expr */
+ { 260, -3 }, /* (210) expr ::= expr MINUS expr */
+ { 260, -3 }, /* (211) expr ::= expr STAR expr */
+ { 260, -3 }, /* (212) expr ::= expr SLASH expr */
+ { 260, -3 }, /* (213) expr ::= expr REM expr */
+ { 260, -3 }, /* (214) expr ::= expr LIKE expr */
+ { 260, -5 }, /* (215) expr ::= expr IN LP exprlist RP */
+ { 269, -3 }, /* (216) exprlist ::= exprlist COMMA expritem */
+ { 269, -1 }, /* (217) exprlist ::= expritem */
+ { 270, -1 }, /* (218) expritem ::= expr */
+ { 270, 0 }, /* (219) expritem ::= */
+ { 208, -3 }, /* (220) cmd ::= RESET QUERY CACHE */
+ { 208, -7 }, /* (221) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
+ { 208, -7 }, /* (222) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
+ { 208, -7 }, /* (223) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
+ { 208, -7 }, /* (224) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
+ { 208, -8 }, /* (225) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
+ { 208, -9 }, /* (226) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
+ { 208, -3 }, /* (227) cmd ::= KILL CONNECTION INTEGER */
+ { 208, -5 }, /* (228) cmd ::= KILL STREAM INTEGER COLON INTEGER */
+ { 208, -5 }, /* (229) cmd ::= KILL QUERY INTEGER COLON INTEGER */
};
static void yy_accept(yyParser*); /* Forward Declaration */
@@ -2046,146 +2063,157 @@ static void yy_reduce(
case 18: /* cpxName ::= DOT ids */
{yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n += 1; }
break;
- case 19: /* cmd ::= SHOW dbPrefix TABLES */
+ case 19: /* cmd ::= SHOW CREATE TABLE ids cpxName */
+{
+ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
+ setDCLSQLElems(pInfo, TSDB_SQL_SHOW_CREATE_TABLE, 1, &yymsp[-1].minor.yy0);
+}
+ break;
+ case 20: /* cmd ::= SHOW CREATE DATABASE ids */
+{
+ setDCLSQLElems(pInfo, TSDB_SQL_SHOW_CREATE_DATABASE, 1, &yymsp[0].minor.yy0);
+}
+ break;
+ case 21: /* cmd ::= SHOW dbPrefix TABLES */
{
setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &yymsp[-1].minor.yy0, 0);
}
break;
- case 20: /* cmd ::= SHOW dbPrefix TABLES LIKE ids */
+ case 22: /* cmd ::= SHOW dbPrefix TABLES LIKE ids */
{
setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0);
}
break;
- case 21: /* cmd ::= SHOW dbPrefix STABLES */
+ case 23: /* cmd ::= SHOW dbPrefix STABLES */
{
setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &yymsp[-1].minor.yy0, 0);
}
break;
- case 22: /* cmd ::= SHOW dbPrefix STABLES LIKE ids */
+ case 24: /* cmd ::= SHOW dbPrefix STABLES LIKE ids */
{
SStrToken token;
setDBName(&token, &yymsp[-3].minor.yy0);
setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &token, &yymsp[0].minor.yy0);
}
break;
- case 23: /* cmd ::= SHOW dbPrefix VGROUPS */
+ case 25: /* cmd ::= SHOW dbPrefix VGROUPS */
{
SStrToken token;
setDBName(&token, &yymsp[-1].minor.yy0);
setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0);
}
break;
- case 24: /* cmd ::= SHOW dbPrefix VGROUPS ids */
+ case 26: /* cmd ::= SHOW dbPrefix VGROUPS ids */
{
SStrToken token;
setDBName(&token, &yymsp[-2].minor.yy0);
setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &yymsp[0].minor.yy0);
}
break;
- case 25: /* cmd ::= DROP TABLE ifexists ids cpxName */
+ case 27: /* cmd ::= DROP TABLE ifexists ids cpxName */
{
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
setDropDBTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0);
}
break;
- case 26: /* cmd ::= DROP DATABASE ifexists ids */
+ case 28: /* cmd ::= DROP DATABASE ifexists ids */
{ setDropDBTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0); }
break;
- case 27: /* cmd ::= DROP DNODE ids */
+ case 29: /* cmd ::= DROP DNODE ids */
{ setDCLSQLElems(pInfo, TSDB_SQL_DROP_DNODE, 1, &yymsp[0].minor.yy0); }
break;
- case 28: /* cmd ::= DROP USER ids */
+ case 30: /* cmd ::= DROP USER ids */
{ setDCLSQLElems(pInfo, TSDB_SQL_DROP_USER, 1, &yymsp[0].minor.yy0); }
break;
- case 29: /* cmd ::= DROP ACCOUNT ids */
+ case 31: /* cmd ::= DROP ACCOUNT ids */
{ setDCLSQLElems(pInfo, TSDB_SQL_DROP_ACCT, 1, &yymsp[0].minor.yy0); }
break;
- case 30: /* cmd ::= USE ids */
+ case 32: /* cmd ::= USE ids */
{ setDCLSQLElems(pInfo, TSDB_SQL_USE_DB, 1, &yymsp[0].minor.yy0);}
break;
- case 31: /* cmd ::= DESCRIBE ids cpxName */
+ case 33: /* cmd ::= DESCRIBE ids cpxName */
{
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
setDCLSQLElems(pInfo, TSDB_SQL_DESCRIBE_TABLE, 1, &yymsp[-1].minor.yy0);
}
break;
- case 32: /* cmd ::= ALTER USER ids PASS ids */
+ case 34: /* cmd ::= ALTER USER ids PASS ids */
{ setAlterUserSQL(pInfo, TSDB_ALTER_USER_PASSWD, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, NULL); }
break;
- case 33: /* cmd ::= ALTER USER ids PRIVILEGE ids */
+ case 35: /* cmd ::= ALTER USER ids PRIVILEGE ids */
{ setAlterUserSQL(pInfo, TSDB_ALTER_USER_PRIVILEGES, &yymsp[-2].minor.yy0, NULL, &yymsp[0].minor.yy0);}
break;
- case 34: /* cmd ::= ALTER DNODE ids ids */
+ case 36: /* cmd ::= ALTER DNODE ids ids */
{ setDCLSQLElems(pInfo, TSDB_SQL_CFG_DNODE, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); }
break;
- case 35: /* cmd ::= ALTER DNODE ids ids ids */
+ case 37: /* cmd ::= ALTER DNODE ids ids ids */
{ setDCLSQLElems(pInfo, TSDB_SQL_CFG_DNODE, 3, &yymsp[-2].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); }
break;
- case 36: /* cmd ::= ALTER LOCAL ids */
+ case 38: /* cmd ::= ALTER LOCAL ids */
{ setDCLSQLElems(pInfo, TSDB_SQL_CFG_LOCAL, 1, &yymsp[0].minor.yy0); }
break;
- case 37: /* cmd ::= ALTER LOCAL ids ids */
+ case 39: /* cmd ::= ALTER LOCAL ids ids */
{ setDCLSQLElems(pInfo, TSDB_SQL_CFG_LOCAL, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); }
break;
- case 38: /* cmd ::= ALTER DATABASE ids alter_db_optr */
+ case 40: /* cmd ::= ALTER DATABASE ids alter_db_optr */
{ SStrToken t = {0}; setCreateDBSQL(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy158, &t);}
break;
- case 39: /* cmd ::= ALTER ACCOUNT ids acct_optr */
+ case 41: /* cmd ::= ALTER ACCOUNT ids acct_optr */
{ setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy73);}
break;
- case 40: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */
+ case 42: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */
{ setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy73);}
break;
- case 41: /* ids ::= ID */
- case 42: /* ids ::= STRING */ yytestcase(yyruleno==42);
+ case 43: /* ids ::= ID */
+ case 44: /* ids ::= STRING */ yytestcase(yyruleno==44);
{yylhsminor.yy0 = yymsp[0].minor.yy0; }
yymsp[0].minor.yy0 = yylhsminor.yy0;
break;
- case 43: /* ifexists ::= IF EXISTS */
+ case 45: /* ifexists ::= IF EXISTS */
{yymsp[-1].minor.yy0.n = 1;}
break;
- case 44: /* ifexists ::= */
- case 46: /* ifnotexists ::= */ yytestcase(yyruleno==46);
+ case 46: /* ifexists ::= */
+ case 48: /* ifnotexists ::= */ yytestcase(yyruleno==48);
{yymsp[1].minor.yy0.n = 0;}
break;
- case 45: /* ifnotexists ::= IF NOT EXISTS */
+ case 47: /* ifnotexists ::= IF NOT EXISTS */
{yymsp[-2].minor.yy0.n = 1;}
break;
- case 47: /* cmd ::= CREATE DNODE ids */
+ case 49: /* cmd ::= CREATE DNODE ids */
{ setDCLSQLElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &yymsp[0].minor.yy0);}
break;
- case 48: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */
+ case 50: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */
{ setCreateAcctSQL(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy73);}
break;
- case 49: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */
+ case 51: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */
{ setCreateDBSQL(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy158, &yymsp[-2].minor.yy0);}
break;
- case 50: /* cmd ::= CREATE USER ids PASS ids */
+ case 52: /* cmd ::= CREATE USER ids PASS ids */
{ setCreateUserSQL(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);}
break;
- case 51: /* pps ::= */
- case 53: /* tseries ::= */ yytestcase(yyruleno==53);
- case 55: /* dbs ::= */ yytestcase(yyruleno==55);
- case 57: /* streams ::= */ yytestcase(yyruleno==57);
- case 59: /* storage ::= */ yytestcase(yyruleno==59);
- case 61: /* qtime ::= */ yytestcase(yyruleno==61);
- case 63: /* users ::= */ yytestcase(yyruleno==63);
- case 65: /* conns ::= */ yytestcase(yyruleno==65);
- case 67: /* state ::= */ yytestcase(yyruleno==67);
+ case 53: /* pps ::= */
+ case 55: /* tseries ::= */ yytestcase(yyruleno==55);
+ case 57: /* dbs ::= */ yytestcase(yyruleno==57);
+ case 59: /* streams ::= */ yytestcase(yyruleno==59);
+ case 61: /* storage ::= */ yytestcase(yyruleno==61);
+ case 63: /* qtime ::= */ yytestcase(yyruleno==63);
+ case 65: /* users ::= */ yytestcase(yyruleno==65);
+ case 67: /* conns ::= */ yytestcase(yyruleno==67);
+ case 69: /* state ::= */ yytestcase(yyruleno==69);
{yymsp[1].minor.yy0.n = 0; }
break;
- case 52: /* pps ::= PPS INTEGER */
- case 54: /* tseries ::= TSERIES INTEGER */ yytestcase(yyruleno==54);
- case 56: /* dbs ::= DBS INTEGER */ yytestcase(yyruleno==56);
- case 58: /* streams ::= STREAMS INTEGER */ yytestcase(yyruleno==58);
- case 60: /* storage ::= STORAGE INTEGER */ yytestcase(yyruleno==60);
- case 62: /* qtime ::= QTIME INTEGER */ yytestcase(yyruleno==62);
- case 64: /* users ::= USERS INTEGER */ yytestcase(yyruleno==64);
- case 66: /* conns ::= CONNS INTEGER */ yytestcase(yyruleno==66);
- case 68: /* state ::= STATE ids */ yytestcase(yyruleno==68);
+ case 54: /* pps ::= PPS INTEGER */
+ case 56: /* tseries ::= TSERIES INTEGER */ yytestcase(yyruleno==56);
+ case 58: /* dbs ::= DBS INTEGER */ yytestcase(yyruleno==58);
+ case 60: /* streams ::= STREAMS INTEGER */ yytestcase(yyruleno==60);
+ case 62: /* storage ::= STORAGE INTEGER */ yytestcase(yyruleno==62);
+ case 64: /* qtime ::= QTIME INTEGER */ yytestcase(yyruleno==64);
+ case 66: /* users ::= USERS INTEGER */ yytestcase(yyruleno==66);
+ case 68: /* conns ::= CONNS INTEGER */ yytestcase(yyruleno==68);
+ case 70: /* state ::= STATE ids */ yytestcase(yyruleno==70);
{yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; }
break;
- case 69: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */
+ case 71: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */
{
yylhsminor.yy73.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1;
yylhsminor.yy73.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1;
@@ -2199,96 +2227,96 @@ static void yy_reduce(
}
yymsp[-8].minor.yy73 = yylhsminor.yy73;
break;
- case 70: /* keep ::= KEEP tagitemlist */
+ case 72: /* keep ::= KEEP tagitemlist */
{ yymsp[-1].minor.yy494 = yymsp[0].minor.yy494; }
break;
- case 71: /* cache ::= CACHE INTEGER */
- case 72: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==72);
- case 73: /* quorum ::= QUORUM INTEGER */ yytestcase(yyruleno==73);
- case 74: /* days ::= DAYS INTEGER */ yytestcase(yyruleno==74);
- case 75: /* minrows ::= MINROWS INTEGER */ yytestcase(yyruleno==75);
- case 76: /* maxrows ::= MAXROWS INTEGER */ yytestcase(yyruleno==76);
- case 77: /* blocks ::= BLOCKS INTEGER */ yytestcase(yyruleno==77);
- case 78: /* ctime ::= CTIME INTEGER */ yytestcase(yyruleno==78);
- case 79: /* wal ::= WAL INTEGER */ yytestcase(yyruleno==79);
- case 80: /* fsync ::= FSYNC INTEGER */ yytestcase(yyruleno==80);
- case 81: /* comp ::= COMP INTEGER */ yytestcase(yyruleno==81);
- case 82: /* prec ::= PRECISION STRING */ yytestcase(yyruleno==82);
+ case 73: /* cache ::= CACHE INTEGER */
+ case 74: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==74);
+ case 75: /* quorum ::= QUORUM INTEGER */ yytestcase(yyruleno==75);
+ case 76: /* days ::= DAYS INTEGER */ yytestcase(yyruleno==76);
+ case 77: /* minrows ::= MINROWS INTEGER */ yytestcase(yyruleno==77);
+ case 78: /* maxrows ::= MAXROWS INTEGER */ yytestcase(yyruleno==78);
+ case 79: /* blocks ::= BLOCKS INTEGER */ yytestcase(yyruleno==79);
+ case 80: /* ctime ::= CTIME INTEGER */ yytestcase(yyruleno==80);
+ case 81: /* wal ::= WAL INTEGER */ yytestcase(yyruleno==81);
+ case 82: /* fsync ::= FSYNC INTEGER */ yytestcase(yyruleno==82);
+ case 83: /* comp ::= COMP INTEGER */ yytestcase(yyruleno==83);
+ case 84: /* prec ::= PRECISION STRING */ yytestcase(yyruleno==84);
{ yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; }
break;
- case 83: /* db_optr ::= */
+ case 85: /* db_optr ::= */
{setDefaultCreateDbOption(&yymsp[1].minor.yy158);}
break;
- case 84: /* db_optr ::= db_optr cache */
+ case 86: /* db_optr ::= db_optr cache */
{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
yymsp[-1].minor.yy158 = yylhsminor.yy158;
break;
- case 85: /* db_optr ::= db_optr replica */
- case 98: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==98);
+ case 87: /* db_optr ::= db_optr replica */
+ case 100: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==100);
{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
yymsp[-1].minor.yy158 = yylhsminor.yy158;
break;
- case 86: /* db_optr ::= db_optr quorum */
- case 99: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==99);
+ case 88: /* db_optr ::= db_optr quorum */
+ case 101: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==101);
{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
yymsp[-1].minor.yy158 = yylhsminor.yy158;
break;
- case 87: /* db_optr ::= db_optr days */
+ case 89: /* db_optr ::= db_optr days */
{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
yymsp[-1].minor.yy158 = yylhsminor.yy158;
break;
- case 88: /* db_optr ::= db_optr minrows */
+ case 90: /* db_optr ::= db_optr minrows */
{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); }
yymsp[-1].minor.yy158 = yylhsminor.yy158;
break;
- case 89: /* db_optr ::= db_optr maxrows */
+ case 91: /* db_optr ::= db_optr maxrows */
{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); }
yymsp[-1].minor.yy158 = yylhsminor.yy158;
break;
- case 90: /* db_optr ::= db_optr blocks */
- case 101: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==101);
+ case 92: /* db_optr ::= db_optr blocks */
+ case 103: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==103);
{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
yymsp[-1].minor.yy158 = yylhsminor.yy158;
break;
- case 91: /* db_optr ::= db_optr ctime */
+ case 93: /* db_optr ::= db_optr ctime */
{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
yymsp[-1].minor.yy158 = yylhsminor.yy158;
break;
- case 92: /* db_optr ::= db_optr wal */
- case 103: /* alter_db_optr ::= alter_db_optr wal */ yytestcase(yyruleno==103);
+ case 94: /* db_optr ::= db_optr wal */
+ case 105: /* alter_db_optr ::= alter_db_optr wal */ yytestcase(yyruleno==105);
{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
yymsp[-1].minor.yy158 = yylhsminor.yy158;
break;
- case 93: /* db_optr ::= db_optr fsync */
- case 104: /* alter_db_optr ::= alter_db_optr fsync */ yytestcase(yyruleno==104);
+ case 95: /* db_optr ::= db_optr fsync */
+ case 106: /* alter_db_optr ::= alter_db_optr fsync */ yytestcase(yyruleno==106);
{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
yymsp[-1].minor.yy158 = yylhsminor.yy158;
break;
- case 94: /* db_optr ::= db_optr comp */
- case 102: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==102);
+ case 96: /* db_optr ::= db_optr comp */
+ case 104: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==104);
{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
yymsp[-1].minor.yy158 = yylhsminor.yy158;
break;
- case 95: /* db_optr ::= db_optr prec */
+ case 97: /* db_optr ::= db_optr prec */
{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.precision = yymsp[0].minor.yy0; }
yymsp[-1].minor.yy158 = yylhsminor.yy158;
break;
- case 96: /* db_optr ::= db_optr keep */
- case 100: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==100);
+ case 98: /* db_optr ::= db_optr keep */
+ case 102: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==102);
{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.keep = yymsp[0].minor.yy494; }
yymsp[-1].minor.yy158 = yylhsminor.yy158;
break;
- case 97: /* alter_db_optr ::= */
+ case 99: /* alter_db_optr ::= */
{ setDefaultCreateDbOption(&yymsp[1].minor.yy158);}
break;
- case 105: /* typename ::= ids */
+ case 107: /* typename ::= ids */
{
yymsp[0].minor.yy0.type = 0;
tSQLSetColumnType (&yylhsminor.yy181, &yymsp[0].minor.yy0);
}
yymsp[0].minor.yy181 = yylhsminor.yy181;
break;
- case 106: /* typename ::= ids LP signed RP */
+ case 108: /* typename ::= ids LP signed RP */
{
if (yymsp[-1].minor.yy271 <= 0) {
yymsp[-3].minor.yy0.type = 0;
@@ -2300,84 +2328,84 @@ static void yy_reduce(
}
yymsp[-3].minor.yy181 = yylhsminor.yy181;
break;
- case 107: /* signed ::= INTEGER */
+ case 109: /* signed ::= INTEGER */
{ yylhsminor.yy271 = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
yymsp[0].minor.yy271 = yylhsminor.yy271;
break;
- case 108: /* signed ::= PLUS INTEGER */
+ case 110: /* signed ::= PLUS INTEGER */
{ yymsp[-1].minor.yy271 = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
break;
- case 109: /* signed ::= MINUS INTEGER */
+ case 111: /* signed ::= MINUS INTEGER */
{ yymsp[-1].minor.yy271 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);}
break;
- case 110: /* cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args */
+ case 112: /* cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args */
{
yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n;
setCreatedTableName(pInfo, &yymsp[-2].minor.yy0, &yymsp[-3].minor.yy0);
}
break;
- case 111: /* create_table_args ::= LP columnlist RP */
+ case 113: /* create_table_args ::= LP columnlist RP */
{
yymsp[-2].minor.yy374 = tSetCreateSQLElems(yymsp[-1].minor.yy449, NULL, NULL, NULL, NULL, TSQL_CREATE_TABLE);
setSQLInfo(pInfo, yymsp[-2].minor.yy374, NULL, TSDB_SQL_CREATE_TABLE);
}
break;
- case 112: /* create_table_args ::= LP columnlist RP TAGS LP columnlist RP */
+ case 114: /* create_table_args ::= LP columnlist RP TAGS LP columnlist RP */
{
yymsp[-6].minor.yy374 = tSetCreateSQLElems(yymsp[-5].minor.yy449, yymsp[-1].minor.yy449, NULL, NULL, NULL, TSQL_CREATE_STABLE);
setSQLInfo(pInfo, yymsp[-6].minor.yy374, NULL, TSDB_SQL_CREATE_TABLE);
}
break;
- case 113: /* create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP */
+ case 115: /* create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP */
{
yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n;
yymsp[-6].minor.yy374 = tSetCreateSQLElems(NULL, NULL, &yymsp[-5].minor.yy0, yymsp[-1].minor.yy494, NULL, TSQL_CREATE_TABLE_FROM_STABLE);
setSQLInfo(pInfo, yymsp[-6].minor.yy374, NULL, TSDB_SQL_CREATE_TABLE);
}
break;
- case 114: /* create_table_args ::= AS select */
+ case 116: /* create_table_args ::= AS select */
{
yymsp[-1].minor.yy374 = tSetCreateSQLElems(NULL, NULL, NULL, NULL, yymsp[0].minor.yy150, TSQL_CREATE_STREAM);
setSQLInfo(pInfo, yymsp[-1].minor.yy374, NULL, TSDB_SQL_CREATE_TABLE);
}
break;
- case 115: /* columnlist ::= columnlist COMMA column */
+ case 117: /* columnlist ::= columnlist COMMA column */
{yylhsminor.yy449 = tFieldListAppend(yymsp[-2].minor.yy449, &yymsp[0].minor.yy181); }
yymsp[-2].minor.yy449 = yylhsminor.yy449;
break;
- case 116: /* columnlist ::= column */
+ case 118: /* columnlist ::= column */
{yylhsminor.yy449 = tFieldListAppend(NULL, &yymsp[0].minor.yy181);}
yymsp[0].minor.yy449 = yylhsminor.yy449;
break;
- case 117: /* column ::= ids typename */
+ case 119: /* column ::= ids typename */
{
tSQLSetColumnInfo(&yylhsminor.yy181, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy181);
}
yymsp[-1].minor.yy181 = yylhsminor.yy181;
break;
- case 118: /* tagitemlist ::= tagitemlist COMMA tagitem */
+ case 120: /* tagitemlist ::= tagitemlist COMMA tagitem */
{ yylhsminor.yy494 = tVariantListAppend(yymsp[-2].minor.yy494, &yymsp[0].minor.yy312, -1); }
yymsp[-2].minor.yy494 = yylhsminor.yy494;
break;
- case 119: /* tagitemlist ::= tagitem */
+ case 121: /* tagitemlist ::= tagitem */
{ yylhsminor.yy494 = tVariantListAppend(NULL, &yymsp[0].minor.yy312, -1); }
yymsp[0].minor.yy494 = yylhsminor.yy494;
break;
- case 120: /* tagitem ::= INTEGER */
- case 121: /* tagitem ::= FLOAT */ yytestcase(yyruleno==121);
- case 122: /* tagitem ::= STRING */ yytestcase(yyruleno==122);
- case 123: /* tagitem ::= BOOL */ yytestcase(yyruleno==123);
+ case 122: /* tagitem ::= INTEGER */
+ case 123: /* tagitem ::= FLOAT */ yytestcase(yyruleno==123);
+ case 124: /* tagitem ::= STRING */ yytestcase(yyruleno==124);
+ case 125: /* tagitem ::= BOOL */ yytestcase(yyruleno==125);
{toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy312, &yymsp[0].minor.yy0); }
yymsp[0].minor.yy312 = yylhsminor.yy312;
break;
- case 124: /* tagitem ::= NULL */
+ case 126: /* tagitem ::= NULL */
{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy312, &yymsp[0].minor.yy0); }
yymsp[0].minor.yy312 = yylhsminor.yy312;
break;
- case 125: /* tagitem ::= MINUS INTEGER */
- case 126: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==126);
- case 127: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==127);
- case 128: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==128);
+ case 127: /* tagitem ::= MINUS INTEGER */
+ case 128: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==128);
+ case 129: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==129);
+ case 130: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==130);
{
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type;
@@ -2386,70 +2414,70 @@ static void yy_reduce(
}
yymsp[-1].minor.yy312 = yylhsminor.yy312;
break;
- case 129: /* select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */
+ case 131: /* select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */
{
- yylhsminor.yy150 = tSetQuerySQLElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy224, yymsp[-9].minor.yy494, yymsp[-8].minor.yy66, yymsp[-4].minor.yy494, yymsp[-3].minor.yy494, &yymsp[-7].minor.yy0, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy494, &yymsp[0].minor.yy188, &yymsp[-1].minor.yy188);
+ yylhsminor.yy150 = tSetQuerySQLElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy224, yymsp[-9].minor.yy494, yymsp[-8].minor.yy66, yymsp[-4].minor.yy494, yymsp[-3].minor.yy494, &yymsp[-7].minor.yy314, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy494, &yymsp[0].minor.yy188, &yymsp[-1].minor.yy188);
}
yymsp[-11].minor.yy150 = yylhsminor.yy150;
break;
- case 130: /* union ::= select */
+ case 132: /* union ::= select */
{ yylhsminor.yy25 = setSubclause(NULL, yymsp[0].minor.yy150); }
yymsp[0].minor.yy25 = yylhsminor.yy25;
break;
- case 131: /* union ::= LP union RP */
+ case 133: /* union ::= LP union RP */
{ yymsp[-2].minor.yy25 = yymsp[-1].minor.yy25; }
break;
- case 132: /* union ::= union UNION ALL select */
+ case 134: /* union ::= union UNION ALL select */
{ yylhsminor.yy25 = appendSelectClause(yymsp[-3].minor.yy25, yymsp[0].minor.yy150); }
yymsp[-3].minor.yy25 = yylhsminor.yy25;
break;
- case 133: /* union ::= union UNION ALL LP select RP */
+ case 135: /* union ::= union UNION ALL LP select RP */
{ yylhsminor.yy25 = appendSelectClause(yymsp[-5].minor.yy25, yymsp[-1].minor.yy150); }
yymsp[-5].minor.yy25 = yylhsminor.yy25;
break;
- case 134: /* cmd ::= union */
+ case 136: /* cmd ::= union */
{ setSQLInfo(pInfo, yymsp[0].minor.yy25, NULL, TSDB_SQL_SELECT); }
break;
- case 135: /* select ::= SELECT selcollist */
+ case 137: /* select ::= SELECT selcollist */
{
yylhsminor.yy150 = tSetQuerySQLElems(&yymsp[-1].minor.yy0, yymsp[0].minor.yy224, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}
yymsp[-1].minor.yy150 = yylhsminor.yy150;
break;
- case 136: /* sclp ::= selcollist COMMA */
+ case 138: /* sclp ::= selcollist COMMA */
{yylhsminor.yy224 = yymsp[-1].minor.yy224;}
yymsp[-1].minor.yy224 = yylhsminor.yy224;
break;
- case 137: /* sclp ::= */
+ case 139: /* sclp ::= */
{yymsp[1].minor.yy224 = 0;}
break;
- case 138: /* selcollist ::= sclp expr as */
+ case 140: /* selcollist ::= sclp expr as */
{
yylhsminor.yy224 = tSQLExprListAppend(yymsp[-2].minor.yy224, yymsp[-1].minor.yy66, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0);
}
yymsp[-2].minor.yy224 = yylhsminor.yy224;
break;
- case 139: /* selcollist ::= sclp STAR */
+ case 141: /* selcollist ::= sclp STAR */
{
tSQLExpr *pNode = tSQLExprIdValueCreate(NULL, TK_ALL);
yylhsminor.yy224 = tSQLExprListAppend(yymsp[-1].minor.yy224, pNode, 0);
}
yymsp[-1].minor.yy224 = yylhsminor.yy224;
break;
- case 140: /* as ::= AS ids */
+ case 142: /* as ::= AS ids */
{ yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; }
break;
- case 141: /* as ::= ids */
+ case 143: /* as ::= ids */
{ yylhsminor.yy0 = yymsp[0].minor.yy0; }
yymsp[0].minor.yy0 = yylhsminor.yy0;
break;
- case 142: /* as ::= */
+ case 144: /* as ::= */
{ yymsp[1].minor.yy0.n = 0; }
break;
- case 143: /* from ::= FROM tablelist */
+ case 145: /* from ::= FROM tablelist */
{yymsp[-1].minor.yy494 = yymsp[0].minor.yy494;}
break;
- case 144: /* tablelist ::= ids cpxName */
+ case 146: /* tablelist ::= ids cpxName */
{
toTSDBType(yymsp[-1].minor.yy0.type);
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
@@ -2458,7 +2486,7 @@ static void yy_reduce(
}
yymsp[-1].minor.yy494 = yylhsminor.yy494;
break;
- case 145: /* tablelist ::= ids cpxName ids */
+ case 147: /* tablelist ::= ids cpxName ids */
{
toTSDBType(yymsp[-2].minor.yy0.type);
toTSDBType(yymsp[0].minor.yy0.type);
@@ -2468,7 +2496,7 @@ static void yy_reduce(
}
yymsp[-2].minor.yy494 = yylhsminor.yy494;
break;
- case 146: /* tablelist ::= tablelist COMMA ids cpxName */
+ case 148: /* tablelist ::= tablelist COMMA ids cpxName */
{
toTSDBType(yymsp[-1].minor.yy0.type);
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
@@ -2477,7 +2505,7 @@ static void yy_reduce(
}
yymsp[-3].minor.yy494 = yylhsminor.yy494;
break;
- case 147: /* tablelist ::= tablelist COMMA ids cpxName ids */
+ case 149: /* tablelist ::= tablelist COMMA ids cpxName ids */
{
toTSDBType(yymsp[-2].minor.yy0.type);
toTSDBType(yymsp[0].minor.yy0.type);
@@ -2487,22 +2515,23 @@ static void yy_reduce(
}
yymsp[-4].minor.yy494 = yylhsminor.yy494;
break;
- case 148: /* tmvar ::= VARIABLE */
+ case 150: /* tmvar ::= VARIABLE */
{yylhsminor.yy0 = yymsp[0].minor.yy0;}
yymsp[0].minor.yy0 = yylhsminor.yy0;
break;
- case 149: /* interval_opt ::= INTERVAL LP tmvar RP */
- case 154: /* sliding_opt ::= SLIDING LP tmvar RP */ yytestcase(yyruleno==154);
-{yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; }
+ case 151: /* interval_opt ::= INTERVAL LP tmvar RP */
+{yymsp[-3].minor.yy314.interval = yymsp[-1].minor.yy0; yymsp[-3].minor.yy314.offset.n = 0; yymsp[-3].minor.yy314.offset.z = NULL; yymsp[-3].minor.yy314.offset.type = 0;}
break;
- case 150: /* interval_opt ::= */
- case 155: /* sliding_opt ::= */ yytestcase(yyruleno==155);
-{yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; }
+ case 152: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */
+{yymsp[-5].minor.yy314.interval = yymsp[-3].minor.yy0; yymsp[-5].minor.yy314.offset = yymsp[-1].minor.yy0;}
break;
- case 151: /* fill_opt ::= */
+ case 153: /* interval_opt ::= */
+{memset(&yymsp[1].minor.yy314, 0, sizeof(yymsp[1].minor.yy314));}
+ break;
+ case 154: /* fill_opt ::= */
{yymsp[1].minor.yy494 = 0; }
break;
- case 152: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */
+ case 155: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */
{
tVariant A = {0};
toTSDBType(yymsp[-3].minor.yy0.type);
@@ -2512,33 +2541,39 @@ static void yy_reduce(
yymsp[-5].minor.yy494 = yymsp[-1].minor.yy494;
}
break;
- case 153: /* fill_opt ::= FILL LP ID RP */
+ case 156: /* fill_opt ::= FILL LP ID RP */
{
toTSDBType(yymsp[-1].minor.yy0.type);
yymsp[-3].minor.yy494 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1);
}
break;
- case 156: /* orderby_opt ::= */
- case 164: /* groupby_opt ::= */ yytestcase(yyruleno==164);
+ case 157: /* sliding_opt ::= SLIDING LP tmvar RP */
+{yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; }
+ break;
+ case 158: /* sliding_opt ::= */
+{yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; }
+ break;
+ case 159: /* orderby_opt ::= */
+ case 167: /* groupby_opt ::= */ yytestcase(yyruleno==167);
{yymsp[1].minor.yy494 = 0;}
break;
- case 157: /* orderby_opt ::= ORDER BY sortlist */
- case 165: /* groupby_opt ::= GROUP BY grouplist */ yytestcase(yyruleno==165);
+ case 160: /* orderby_opt ::= ORDER BY sortlist */
+ case 168: /* groupby_opt ::= GROUP BY grouplist */ yytestcase(yyruleno==168);
{yymsp[-2].minor.yy494 = yymsp[0].minor.yy494;}
break;
- case 158: /* sortlist ::= sortlist COMMA item sortorder */
+ case 161: /* sortlist ::= sortlist COMMA item sortorder */
{
yylhsminor.yy494 = tVariantListAppend(yymsp[-3].minor.yy494, &yymsp[-1].minor.yy312, yymsp[0].minor.yy82);
}
yymsp[-3].minor.yy494 = yylhsminor.yy494;
break;
- case 159: /* sortlist ::= item sortorder */
+ case 162: /* sortlist ::= item sortorder */
{
yylhsminor.yy494 = tVariantListAppend(NULL, &yymsp[-1].minor.yy312, yymsp[0].minor.yy82);
}
yymsp[-1].minor.yy494 = yylhsminor.yy494;
break;
- case 160: /* item ::= ids cpxName */
+ case 163: /* item ::= ids cpxName */
{
toTSDBType(yymsp[-1].minor.yy0.type);
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
@@ -2547,196 +2582,200 @@ static void yy_reduce(
}
yymsp[-1].minor.yy312 = yylhsminor.yy312;
break;
- case 161: /* sortorder ::= ASC */
+ case 164: /* sortorder ::= ASC */
{yymsp[0].minor.yy82 = TSDB_ORDER_ASC; }
break;
- case 162: /* sortorder ::= DESC */
+ case 165: /* sortorder ::= DESC */
{yymsp[0].minor.yy82 = TSDB_ORDER_DESC;}
break;
- case 163: /* sortorder ::= */
+ case 166: /* sortorder ::= */
{yymsp[1].minor.yy82 = TSDB_ORDER_ASC;}
break;
- case 166: /* grouplist ::= grouplist COMMA item */
+ case 169: /* grouplist ::= grouplist COMMA item */
{
yylhsminor.yy494 = tVariantListAppend(yymsp[-2].minor.yy494, &yymsp[0].minor.yy312, -1);
}
yymsp[-2].minor.yy494 = yylhsminor.yy494;
break;
- case 167: /* grouplist ::= item */
+ case 170: /* grouplist ::= item */
{
yylhsminor.yy494 = tVariantListAppend(NULL, &yymsp[0].minor.yy312, -1);
}
yymsp[0].minor.yy494 = yylhsminor.yy494;
break;
- case 168: /* having_opt ::= */
- case 178: /* where_opt ::= */ yytestcase(yyruleno==178);
- case 214: /* expritem ::= */ yytestcase(yyruleno==214);
+ case 171: /* having_opt ::= */
+ case 181: /* where_opt ::= */ yytestcase(yyruleno==181);
+ case 219: /* expritem ::= */ yytestcase(yyruleno==219);
{yymsp[1].minor.yy66 = 0;}
break;
- case 169: /* having_opt ::= HAVING expr */
- case 179: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==179);
+ case 172: /* having_opt ::= HAVING expr */
+ case 182: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==182);
{yymsp[-1].minor.yy66 = yymsp[0].minor.yy66;}
break;
- case 170: /* limit_opt ::= */
- case 174: /* slimit_opt ::= */ yytestcase(yyruleno==174);
+ case 173: /* limit_opt ::= */
+ case 177: /* slimit_opt ::= */ yytestcase(yyruleno==177);
{yymsp[1].minor.yy188.limit = -1; yymsp[1].minor.yy188.offset = 0;}
break;
- case 171: /* limit_opt ::= LIMIT signed */
- case 175: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==175);
+ case 174: /* limit_opt ::= LIMIT signed */
+ case 178: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==178);
{yymsp[-1].minor.yy188.limit = yymsp[0].minor.yy271; yymsp[-1].minor.yy188.offset = 0;}
break;
- case 172: /* limit_opt ::= LIMIT signed OFFSET signed */
- case 176: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ yytestcase(yyruleno==176);
+ case 175: /* limit_opt ::= LIMIT signed OFFSET signed */
+ case 179: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ yytestcase(yyruleno==179);
{yymsp[-3].minor.yy188.limit = yymsp[-2].minor.yy271; yymsp[-3].minor.yy188.offset = yymsp[0].minor.yy271;}
break;
- case 173: /* limit_opt ::= LIMIT signed COMMA signed */
- case 177: /* slimit_opt ::= SLIMIT signed COMMA signed */ yytestcase(yyruleno==177);
+ case 176: /* limit_opt ::= LIMIT signed COMMA signed */
+ case 180: /* slimit_opt ::= SLIMIT signed COMMA signed */ yytestcase(yyruleno==180);
{yymsp[-3].minor.yy188.limit = yymsp[0].minor.yy271; yymsp[-3].minor.yy188.offset = yymsp[-2].minor.yy271;}
break;
- case 180: /* expr ::= LP expr RP */
+ case 183: /* expr ::= LP expr RP */
{yymsp[-2].minor.yy66 = yymsp[-1].minor.yy66; }
break;
- case 181: /* expr ::= ID */
+ case 184: /* expr ::= ID */
{yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_ID);}
yymsp[0].minor.yy66 = yylhsminor.yy66;
break;
- case 182: /* expr ::= ID DOT ID */
+ case 185: /* expr ::= ID DOT ID */
{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ID);}
yymsp[-2].minor.yy66 = yylhsminor.yy66;
break;
- case 183: /* expr ::= ID DOT STAR */
+ case 186: /* expr ::= ID DOT STAR */
{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ALL);}
yymsp[-2].minor.yy66 = yylhsminor.yy66;
break;
- case 184: /* expr ::= INTEGER */
+ case 187: /* expr ::= INTEGER */
{yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_INTEGER);}
yymsp[0].minor.yy66 = yylhsminor.yy66;
break;
- case 185: /* expr ::= MINUS INTEGER */
- case 186: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==186);
+ case 188: /* expr ::= MINUS INTEGER */
+ case 189: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==189);
{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_INTEGER);}
yymsp[-1].minor.yy66 = yylhsminor.yy66;
break;
- case 187: /* expr ::= FLOAT */
+ case 190: /* expr ::= FLOAT */
{yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_FLOAT);}
yymsp[0].minor.yy66 = yylhsminor.yy66;
break;
- case 188: /* expr ::= MINUS FLOAT */
- case 189: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==189);
+ case 191: /* expr ::= MINUS FLOAT */
+ case 192: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==192);
{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_FLOAT);}
yymsp[-1].minor.yy66 = yylhsminor.yy66;
break;
- case 190: /* expr ::= STRING */
+ case 193: /* expr ::= STRING */
{yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_STRING);}
yymsp[0].minor.yy66 = yylhsminor.yy66;
break;
- case 191: /* expr ::= NOW */
+ case 194: /* expr ::= NOW */
{yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_NOW); }
yymsp[0].minor.yy66 = yylhsminor.yy66;
break;
- case 192: /* expr ::= VARIABLE */
+ case 195: /* expr ::= VARIABLE */
{yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_VARIABLE);}
yymsp[0].minor.yy66 = yylhsminor.yy66;
break;
- case 193: /* expr ::= BOOL */
+ case 196: /* expr ::= BOOL */
{yylhsminor.yy66 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_BOOL);}
yymsp[0].minor.yy66 = yylhsminor.yy66;
break;
- case 194: /* expr ::= ID LP exprlist RP */
-{
- yylhsminor.yy66 = tSQLExprCreateFunction(yymsp[-1].minor.yy224, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type);
-}
+ case 197: /* expr ::= ID LP exprlist RP */
+{ yylhsminor.yy66 = tSQLExprCreateFunction(yymsp[-1].minor.yy224, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); }
yymsp[-3].minor.yy66 = yylhsminor.yy66;
break;
- case 195: /* expr ::= ID LP STAR RP */
-{
- yylhsminor.yy66 = tSQLExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type);
-}
+ case 198: /* expr ::= ID LP STAR RP */
+{ yylhsminor.yy66 = tSQLExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); }
yymsp[-3].minor.yy66 = yylhsminor.yy66;
break;
- case 196: /* expr ::= expr AND expr */
-{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_AND);}
+ case 199: /* expr ::= expr IS NULL */
+{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, NULL, TK_ISNULL);}
yymsp[-2].minor.yy66 = yylhsminor.yy66;
break;
- case 197: /* expr ::= expr OR expr */
-{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_OR); }
- yymsp[-2].minor.yy66 = yylhsminor.yy66;
+ case 200: /* expr ::= expr IS NOT NULL */
+{yylhsminor.yy66 = tSQLExprCreate(yymsp[-3].minor.yy66, NULL, TK_NOTNULL);}
+ yymsp[-3].minor.yy66 = yylhsminor.yy66;
break;
- case 198: /* expr ::= expr LT expr */
+ case 201: /* expr ::= expr LT expr */
{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_LT);}
yymsp[-2].minor.yy66 = yylhsminor.yy66;
break;
- case 199: /* expr ::= expr GT expr */
+ case 202: /* expr ::= expr GT expr */
{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_GT);}
yymsp[-2].minor.yy66 = yylhsminor.yy66;
break;
- case 200: /* expr ::= expr LE expr */
+ case 203: /* expr ::= expr LE expr */
{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_LE);}
yymsp[-2].minor.yy66 = yylhsminor.yy66;
break;
- case 201: /* expr ::= expr GE expr */
+ case 204: /* expr ::= expr GE expr */
{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_GE);}
yymsp[-2].minor.yy66 = yylhsminor.yy66;
break;
- case 202: /* expr ::= expr NE expr */
+ case 205: /* expr ::= expr NE expr */
{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_NE);}
yymsp[-2].minor.yy66 = yylhsminor.yy66;
break;
- case 203: /* expr ::= expr EQ expr */
+ case 206: /* expr ::= expr EQ expr */
{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_EQ);}
yymsp[-2].minor.yy66 = yylhsminor.yy66;
break;
- case 204: /* expr ::= expr PLUS expr */
+ case 207: /* expr ::= expr AND expr */
+{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_AND);}
+ yymsp[-2].minor.yy66 = yylhsminor.yy66;
+ break;
+ case 208: /* expr ::= expr OR expr */
+{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_OR); }
+ yymsp[-2].minor.yy66 = yylhsminor.yy66;
+ break;
+ case 209: /* expr ::= expr PLUS expr */
{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_PLUS); }
yymsp[-2].minor.yy66 = yylhsminor.yy66;
break;
- case 205: /* expr ::= expr MINUS expr */
+ case 210: /* expr ::= expr MINUS expr */
{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_MINUS); }
yymsp[-2].minor.yy66 = yylhsminor.yy66;
break;
- case 206: /* expr ::= expr STAR expr */
+ case 211: /* expr ::= expr STAR expr */
{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_STAR); }
yymsp[-2].minor.yy66 = yylhsminor.yy66;
break;
- case 207: /* expr ::= expr SLASH expr */
+ case 212: /* expr ::= expr SLASH expr */
{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_DIVIDE);}
yymsp[-2].minor.yy66 = yylhsminor.yy66;
break;
- case 208: /* expr ::= expr REM expr */
+ case 213: /* expr ::= expr REM expr */
{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_REM); }
yymsp[-2].minor.yy66 = yylhsminor.yy66;
break;
- case 209: /* expr ::= expr LIKE expr */
+ case 214: /* expr ::= expr LIKE expr */
{yylhsminor.yy66 = tSQLExprCreate(yymsp[-2].minor.yy66, yymsp[0].minor.yy66, TK_LIKE); }
yymsp[-2].minor.yy66 = yylhsminor.yy66;
break;
- case 210: /* expr ::= expr IN LP exprlist RP */
+ case 215: /* expr ::= expr IN LP exprlist RP */
{yylhsminor.yy66 = tSQLExprCreate(yymsp[-4].minor.yy66, (tSQLExpr*)yymsp[-1].minor.yy224, TK_IN); }
yymsp[-4].minor.yy66 = yylhsminor.yy66;
break;
- case 211: /* exprlist ::= exprlist COMMA expritem */
+ case 216: /* exprlist ::= exprlist COMMA expritem */
{yylhsminor.yy224 = tSQLExprListAppend(yymsp[-2].minor.yy224,yymsp[0].minor.yy66,0);}
yymsp[-2].minor.yy224 = yylhsminor.yy224;
break;
- case 212: /* exprlist ::= expritem */
+ case 217: /* exprlist ::= expritem */
{yylhsminor.yy224 = tSQLExprListAppend(0,yymsp[0].minor.yy66,0);}
yymsp[0].minor.yy224 = yylhsminor.yy224;
break;
- case 213: /* expritem ::= expr */
+ case 218: /* expritem ::= expr */
{yylhsminor.yy66 = yymsp[0].minor.yy66;}
yymsp[0].minor.yy66 = yylhsminor.yy66;
break;
- case 215: /* cmd ::= RESET QUERY CACHE */
+ case 220: /* cmd ::= RESET QUERY CACHE */
{ setDCLSQLElems(pInfo, TSDB_SQL_RESET_CACHE, 0);}
break;
- case 216: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
+ case 221: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy449, NULL, TSDB_ALTER_TABLE_ADD_COLUMN);
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 217: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
+ case 222: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
@@ -2747,14 +2786,14 @@ static void yy_reduce(
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 218: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
+ case 223: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy449, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN);
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 219: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
+ case 224: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
@@ -2765,7 +2804,7 @@ static void yy_reduce(
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 220: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
+ case 225: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
{
yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n;
@@ -2779,7 +2818,7 @@ static void yy_reduce(
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 221: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
+ case 226: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
{
yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n;
@@ -2791,13 +2830,13 @@ static void yy_reduce(
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
- case 222: /* cmd ::= KILL CONNECTION INTEGER */
+ case 227: /* cmd ::= KILL CONNECTION INTEGER */
{setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);}
break;
- case 223: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */
+ case 228: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */
{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);}
break;
- case 224: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */
+ case 229: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */
{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);}
break;
default:
diff --git a/src/rpc/src/rpcCache.c b/src/rpc/src/rpcCache.c
index 751f03e52a..46b0d4e3bb 100644
--- a/src/rpc/src/rpcCache.c
+++ b/src/rpc/src/rpcCache.c
@@ -216,6 +216,7 @@ static void rpcCleanConnCache(void *handle, void *tmrId) {
if (pCache == NULL || pCache->maxSessions == 0) return;
if (pCache->pTimer != tmrId) return;
+ pthread_mutex_lock(&pCache->mutex);
uint64_t time = taosGetTimestampMs();
for (hash = 0; hash < pCache->maxSessions; ++hash) {
@@ -227,6 +228,7 @@ static void rpcCleanConnCache(void *handle, void *tmrId) {
// tTrace("timer, total connections in cache:%d", pCache->total);
taosTmrReset(rpcCleanConnCache, (int32_t)(pCache->keepTimer * 2), pCache, pCache->tmrCtrl, &pCache->pTimer);
+ pthread_mutex_unlock(&pCache->mutex);
}
static void rpcRemoveExpiredNodes(SConnCache *pCache, SConnHash *pNode, int hash, uint64_t time) {
diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c
index f59bf62ec5..414d37d8b8 100644
--- a/src/rpc/src/rpcMain.c
+++ b/src/rpc/src/rpcMain.c
@@ -195,7 +195,7 @@ static void rpcSendMsgToPeer(SRpcConn *pConn, void *data, int dataLen);
static void rpcSendReqHead(SRpcConn *pConn);
static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv);
-static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead);
+static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead, SRpcReqContext *pContext);
static void rpcProcessConnError(void *param, void *id);
static void rpcProcessRetryTimer(void *, void *);
static void rpcProcessIdleTimer(void *param, void *tmrId);
@@ -240,7 +240,7 @@ void *rpcOpen(const SRpcInit *pInit) {
size_t size = sizeof(SRpcConn) * pRpc->sessions;
pRpc->connList = (SRpcConn *)calloc(1, size);
if (pRpc->connList == NULL) {
- tError("%s failed to allocate memory for taos connections, size:%ld", pRpc->label, size);
+ tError("%s failed to allocate memory for taos connections, size:%" PRId64, pRpc->label, (int64_t)size);
rpcClose(pRpc);
return NULL;
}
@@ -323,17 +323,17 @@ void *rpcMallocCont(int contLen) {
tError("failed to malloc msg, size:%d", size);
return NULL;
} else {
- tDebug("malloc mem: %p", start);
+ tTrace("malloc mem: %p", start);
}
return start + sizeof(SRpcReqContext) + sizeof(SRpcHead);
}
void rpcFreeCont(void *cont) {
- if ( cont ) {
+ if (cont) {
char *temp = ((char *)cont) - sizeof(SRpcHead) - sizeof(SRpcReqContext);
free(temp);
- tDebug("free mem: %p", temp);
+ tTrace("free mem: %p", temp);
}
}
@@ -553,7 +553,7 @@ static void rpcFreeMsg(void *msg) {
if ( msg ) {
char *temp = (char *)msg - sizeof(SRpcReqContext);
free(temp);
- tDebug("free mem: %p", temp);
+ tTrace("free mem: %p", temp);
}
}
@@ -580,6 +580,8 @@ static SRpcConn *rpcOpenConn(SRpcInfo *pRpc, char *peerFqdn, uint16_t peerPort,
void *shandle = (connType & RPC_CONN_TCP)? pRpc->tcphandle:pRpc->udphandle;
pConn->chandle = (*taosOpenConn[connType])(shandle, pConn, pConn->peerIp, pConn->peerPort);
if (pConn->chandle == NULL) {
+ tError("failed to connect to:0x%x:%d", pConn->peerIp, pConn->peerPort);
+
terrno = TSDB_CODE_RPC_NETWORK_UNAVAIL;
rpcCloseConn(pConn);
pConn = NULL;
@@ -817,9 +819,18 @@ static int rpcProcessReqHead(SRpcConn *pConn, SRpcHead *pHead) {
return TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED;
}
+ if (rpcContLenFromMsg(pHead->msgLen) <= 0) {
+ tDebug("%s, message body is empty, ignore", pConn->info);
+ return TSDB_CODE_RPC_APP_ERROR;
+ }
+
pConn->inTranId = pHead->tranId;
pConn->inType = pHead->msgType;
+ // start the progress timer to monitor the response from server app
+ if (pConn->connType != RPC_CONN_TCPS)
+ pConn->pTimer = taosTmrStart(rpcProcessProgressTimer, tsProgressTimer, pConn, pConn->pRpc->tmrCtrl);
+
return 0;
}
@@ -879,17 +890,32 @@ static int rpcProcessRspHead(SRpcConn *pConn, SRpcHead *pHead) {
pConn->outType = 0;
pConn->pReqMsg = NULL;
pConn->reqMsgLen = 0;
+ SRpcReqContext *pContext = pConn->pContext;
+
+ if (pHead->code == TSDB_CODE_RPC_REDIRECT) {
+ if (rpcContLenFromMsg(pHead->msgLen) < sizeof(SRpcEpSet)) {
+ // if EpSet is not included in the msg, treat it as NOT_READY
+ pHead->code = TSDB_CODE_RPC_NOT_READY;
+ } else {
+ pContext->redirect++;
+ if (pContext->redirect > TSDB_MAX_REPLICA) {
+ pHead->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
+ tWarn("%s, too many redirects, quit", pConn->info);
+ }
+ }
+ }
return TSDB_CODE_SUCCESS;
}
-static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv) {
+static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv, SRpcReqContext **ppContext) {
int32_t sid;
SRpcConn *pConn = NULL;
SRpcHead *pHead = (SRpcHead *)pRecv->msg;
sid = htonl(pHead->destId);
+ *ppContext = NULL;
if (pHead->msgType >= TSDB_MSG_TYPE_MAX || pHead->msgType <= 0) {
tDebug("%s sid:%d, invalid message type:%d", pRpc->label, sid, pHead->msgType);
@@ -943,6 +969,7 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv) {
pConn->pIdleTimer = taosTmrStart(rpcProcessIdleTimer, tsRpcTimer*2, pConn, pRpc->tmrCtrl);
} else {
terrno = rpcProcessRspHead(pConn, pHead);
+ *ppContext = pConn->pContext;
}
}
@@ -1007,7 +1034,8 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) {
}
terrno = 0;
- pConn = rpcProcessMsgHead(pRpc, pRecv);
+ SRpcReqContext *pContext;
+ pConn = rpcProcessMsgHead(pRpc, pRecv, &pContext);
if (pHead->msgType >= 1 && pHead->msgType < TSDB_MSG_TYPE_MAX) {
tDebug("%s %p %p, %s received from 0x%x:%hu, parse code:0x%x len:%d sig:0x%08x:0x%08x:%d code:0x%x", pRpc->label,
@@ -1027,7 +1055,7 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) {
tDebug("%s %p %p, %s is sent with error code:0x%x", pRpc->label, pConn, (void *)pHead->ahandle, taosMsg[pHead->msgType+1], code);
}
} else { // msg is passed to app only parsing is ok
- rpcProcessIncomingMsg(pConn, pHead);
+ rpcProcessIncomingMsg(pConn, pHead, pContext);
}
}
@@ -1058,7 +1086,7 @@ static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) {
rpcFreeCont(pContext->pCont);
}
-static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead) {
+static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead, SRpcReqContext *pContext) {
SRpcInfo *pRpc = pConn->pRpc;
SRpcMsg rpcMsg;
@@ -1068,29 +1096,18 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead) {
rpcMsg.pCont = pHead->content;
rpcMsg.msgType = pHead->msgType;
rpcMsg.code = pHead->code;
- rpcMsg.ahandle = pConn->ahandle;
if ( rpcIsReq(pHead->msgType) ) {
- if (rpcMsg.contLen > 0) {
- rpcMsg.handle = pConn;
- rpcAddRef(pRpc); // add the refCount for requests
+ rpcMsg.ahandle = pConn->ahandle;
+ rpcMsg.handle = pConn;
+ rpcAddRef(pRpc); // add the refCount for requests
- // start the progress timer to monitor the response from server app
- if (pConn->connType != RPC_CONN_TCPS)
- pConn->pTimer = taosTmrStart(rpcProcessProgressTimer, tsProgressTimer, pConn, pRpc->tmrCtrl);
-
- // notify the server app
- (*(pRpc->cfp))(&rpcMsg, NULL);
- } else {
- tDebug("%s, message body is empty, ignore", pConn->info);
- rpcFreeCont(rpcMsg.pCont);
- }
+ // notify the server app
+ (*(pRpc->cfp))(&rpcMsg, NULL);
} else {
// it's a response
- SRpcReqContext *pContext = pConn->pContext;
rpcMsg.handle = pContext;
- pConn->pContext = NULL;
- pConn->pReqMsg = NULL;
+ rpcMsg.ahandle = pContext->ahandle;
// for UDP, port may be changed by server, the port in epSet shall be used for cache
if (pHead->code != TSDB_CODE_RPC_TOO_SLOW) {
@@ -1099,19 +1116,6 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead) {
rpcCloseConn(pConn);
}
- if (pHead->code == TSDB_CODE_RPC_REDIRECT) {
- if (rpcMsg.contLen < sizeof(SRpcEpSet)) {
- // if EpSet is not included in the msg, treat it as NOT_READY
- pHead->code = TSDB_CODE_RPC_NOT_READY;
- } else {
- pContext->redirect++;
- if (pContext->redirect > TSDB_MAX_REPLICA) {
- pHead->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
- tWarn("%s, too many redirects, quit", pConn->info);
- }
- }
- }
-
if (pHead->code == TSDB_CODE_RPC_REDIRECT) {
pContext->numOfTry = 0;
SRpcEpSet *pEpSet = (SRpcEpSet*)pHead->content;
@@ -1443,7 +1447,7 @@ static SRpcHead *rpcDecompressRpcMsg(SRpcHead *pHead) {
pNewHead->msgLen = rpcMsgLenFromCont(origLen);
rpcFreeMsg(pHead); // free the compressed message buffer
pHead = pNewHead;
- //tTrace("decompress rpc msg, compLen:%d, after:%d", compLen, contLen);
+ tTrace("decomp malloc mem: %p", temp);
} else {
tError("failed to allocate memory to decompress msg, contLen:%d", contLen);
}
diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c
index 3475e0f317..dd9e7684e0 100644
--- a/src/rpc/src/rpcTcp.c
+++ b/src/rpc/src/rpcTcp.c
@@ -62,7 +62,7 @@ typedef struct {
char label[TSDB_LABEL_LEN];
int numOfThreads;
void * shandle;
- SThreadObj *pThreadObj;
+ SThreadObj **pThreadObj;
pthread_t thread;
} SServerObj;
@@ -90,7 +90,7 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
tstrncpy(pServerObj->label, label, sizeof(pServerObj->label));
pServerObj->numOfThreads = numOfThreads;
- pServerObj->pThreadObj = (SThreadObj *)calloc(sizeof(SThreadObj), numOfThreads);
+ pServerObj->pThreadObj = (SThreadObj **)calloc(sizeof(SThreadObj *), numOfThreads);
if (pServerObj->pThreadObj == NULL) {
tError("TCP:%s no enough memory", label);
terrno = TAOS_SYSTEM_ERROR(errno);
@@ -104,19 +104,28 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
// initialize parameters in case it may encounter error later
- pThreadObj = pServerObj->pThreadObj;
for (int i = 0; i < numOfThreads; ++i) {
+ pThreadObj = (SThreadObj *)calloc(sizeof(SThreadObj), 1);
+ if (pThreadObj == NULL) {
+ tError("TCP:%s no enough memory", label);
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ for (int j=0; j<i; j++) free(pServerObj->pThreadObj[j]);
+ free(pServerObj->pThreadObj);
+ free(pServerObj);
+ return NULL;
+ }
+
+ pServerObj->pThreadObj[i] = pThreadObj;
pThreadObj->pollFd = -1;
taosResetPthread(&pThreadObj->thread);
pThreadObj->processData = fp;
tstrncpy(pThreadObj->label, label, sizeof(pThreadObj->label));
pThreadObj->shandle = shandle;
- pThreadObj++;
}
// initialize mutex, thread, fd which may fail
- pThreadObj = pServerObj->pThreadObj;
for (int i = 0; i < numOfThreads; ++i) {
+ pThreadObj = pServerObj->pThreadObj[i];
code = pthread_mutex_init(&(pThreadObj->mutex), NULL);
if (code < 0) {
tError("%s failed to init TCP process data mutex(%s)", label, strerror(errno));
@@ -137,7 +146,6 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
}
pThreadObj->threadId = i;
- pThreadObj++;
}
pServerObj->fd = taosOpenTcpServerSocket(pServerObj->ip, pServerObj->port);
@@ -166,6 +174,11 @@ static void taosStopTcpThread(SThreadObj* pThreadObj) {
pThreadObj->stop = true;
eventfd_t fd = -1;
+ if (taosComparePthread(pThreadObj->thread, pthread_self())) {
+ pthread_detach(pthread_self());
+ return;
+ }
+
if (taosCheckPthreadValid(pThreadObj->thread) && pThreadObj->pollFd >= 0) {
// signal the thread to stop, try graceful method first,
// and use pthread_cancel when failed
@@ -183,15 +196,11 @@ static void taosStopTcpThread(SThreadObj* pThreadObj) {
}
}
- if (taosCheckPthreadValid(pThreadObj->thread)) pthread_join(pThreadObj->thread, NULL);
- if (pThreadObj->pollFd >=0) taosCloseSocket(pThreadObj->pollFd);
- if (fd != -1) taosCloseSocket(fd);
-
- while (pThreadObj->pHead) {
- SFdObj *pFdObj = pThreadObj->pHead;
- pThreadObj->pHead = pFdObj->next;
- taosFreeFdObj(pFdObj);
+ if (taosCheckPthreadValid(pThreadObj->thread) && pThreadObj->pollFd >= 0) {
+ pthread_join(pThreadObj->thread, NULL);
}
+
+ if (fd != -1) taosCloseSocket(fd);
}
void taosStopTcpServer(void *handle) {
@@ -199,7 +208,14 @@ void taosStopTcpServer(void *handle) {
if (pServerObj == NULL) return;
if(pServerObj->fd >=0) shutdown(pServerObj->fd, SHUT_RD);
- if (taosCheckPthreadValid(pServerObj->thread)) pthread_join(pServerObj->thread, NULL);
+
+ if (taosCheckPthreadValid(pServerObj->thread)) {
+ if (taosComparePthread(pServerObj->thread, pthread_self())) {
+ pthread_detach(pthread_self());
+ } else {
+ pthread_join(pServerObj->thread, NULL);
+ }
+ }
tDebug("%s TCP server is stopped", pServerObj->label);
}
@@ -210,9 +226,8 @@ void taosCleanUpTcpServer(void *handle) {
if (pServerObj == NULL) return;
for (int i = 0; i < pServerObj->numOfThreads; ++i) {
- pThreadObj = pServerObj->pThreadObj + i;
+ pThreadObj = pServerObj->pThreadObj[i];
taosStopTcpThread(pThreadObj);
- pthread_mutex_destroy(&(pThreadObj->mutex));
}
tDebug("%s TCP server is cleaned up", pServerObj->label);
@@ -245,9 +260,11 @@ static void *taosAcceptTcpConnection(void *arg) {
}
taosKeepTcpAlive(connFd);
+ struct timeval to={1, 0};
+ taosSetSockOpt(connFd, SOL_SOCKET, SO_RCVTIMEO, &to, sizeof(to));
// pick up the thread to handle this connection
- pThreadObj = pServerObj->pThreadObj + threadId;
+ pThreadObj = pServerObj->pThreadObj[threadId];
SFdObj *pFdObj = taosMallocFdObj(pThreadObj, connFd);
if (pFdObj) {
@@ -325,10 +342,8 @@ void taosCleanUpTcpClient(void *chandle) {
SThreadObj *pThreadObj = chandle;
if (pThreadObj == NULL) return;
+ tDebug ("%s TCP client will be cleaned up", pThreadObj->label);
taosStopTcpThread(pThreadObj);
- tDebug ("%s TCP client is cleaned up", pThreadObj->label);
-
- taosTFree(pThreadObj);
}
void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uint16_t port) {
@@ -363,7 +378,7 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin
void taosCloseTcpConnection(void *chandle) {
SFdObj *pFdObj = chandle;
- if (pFdObj == NULL) return;
+ if (pFdObj == NULL || pFdObj->signature != pFdObj) return;
SThreadObj *pThreadObj = pFdObj->pThreadObj;
tDebug("%s %p TCP connection will be closed, FD:%p", pThreadObj->label, pFdObj->thandle, pFdObj);
@@ -376,7 +391,7 @@ void taosCloseTcpConnection(void *chandle) {
int taosSendTcpData(uint32_t ip, uint16_t port, void *data, int len, void *chandle) {
SFdObj *pFdObj = chandle;
- if (chandle == NULL) return -1;
+ if (pFdObj == NULL || pFdObj->signature != pFdObj) return -1;
return taosWriteMsg(pFdObj->fd, data, len);
}
@@ -419,11 +434,11 @@ static int taosReadTcpData(SFdObj *pFdObj, SRecvInfo *pInfo) {
msgLen = (int32_t)htonl((uint32_t)rpcHead.msgLen);
buffer = malloc(msgLen + tsRpcOverhead);
- if ( NULL == buffer) {
+ if (NULL == buffer) {
tError("%s %p TCP malloc(size:%d) fail", pThreadObj->label, pFdObj->thandle, msgLen);
return -1;
} else {
- tDebug("TCP malloc mem: %p", buffer);
+ tTrace("TCP malloc mem: %p", buffer);
}
msg = buffer + tsRpcOverhead;
@@ -501,8 +516,22 @@ static void *taosProcessTcpData(void *param) {
pFdObj->thandle = (*(pThreadObj->processData))(&recvInfo);
if (pFdObj->thandle == NULL) taosFreeFdObj(pFdObj);
}
+
+ if (pThreadObj->stop) break;
}
+ if (pThreadObj->pollFd >=0) taosCloseSocket(pThreadObj->pollFd);
+
+ while (pThreadObj->pHead) {
+ SFdObj *pFdObj = pThreadObj->pHead;
+ pThreadObj->pHead = pFdObj->next;
+ taosFreeFdObj(pFdObj);
+ }
+
+ pthread_mutex_destroy(&(pThreadObj->mutex));
+ tDebug("%s TCP thread exits ...", pThreadObj->label);
+ taosTFree(pThreadObj);
+
return NULL;
}
diff --git a/src/rpc/src/rpcUdp.c b/src/rpc/src/rpcUdp.c
index 8e24aed8f7..6f65304661 100644
--- a/src/rpc/src/rpcUdp.c
+++ b/src/rpc/src/rpcUdp.c
@@ -211,10 +211,10 @@ static void *taosRecvUdpData(void *param) {
char *tmsg = malloc(dataLen + tsRpcOverhead);
if (NULL == tmsg) {
- tError("%s failed to allocate memory, size:%ld", pConn->label, dataLen);
+ tError("%s failed to allocate memory, size:%" PRId64, pConn->label, (int64_t)dataLen);
continue;
} else {
- tDebug("UDP malloc mem: %p", tmsg);
+ tTrace("UDP malloc mem: %p", tmsg);
}
tmsg += tsRpcOverhead; // overhead for SRpcReqContext
diff --git a/src/sync/inc/syncInt.h b/src/sync/inc/syncInt.h
index cd1252f4b4..f681810646 100644
--- a/src/sync/inc/syncInt.h
+++ b/src/sync/inc/syncInt.h
@@ -114,26 +114,26 @@ typedef struct {
} SSyncFwds;
typedef struct SsyncPeer {
- int32_t nodeId;
- uint32_t ip;
- uint16_t port;
- char fqdn[TSDB_FQDN_LEN]; // peer ip string
- char id[TSDB_EP_LEN+16]; // peer vgId + end point
- int8_t role;
- int8_t sstatus; // sync status
- uint64_t version;
- uint64_t sversion; // track the peer version in retrieve process
- int syncFd;
- int peerFd; // forward FD
- int numOfRetrieves; // number of retrieves tried
- int fileChanged; // a flag to indicate file is changed during retrieving process
- void *timer;
- void *pConn;
- int notifyFd;
- int watchNum;
- int *watchFd;
- int8_t refCount; // reference count
- struct SSyncNode *pSyncNode;
+ int32_t nodeId;
+ uint32_t ip;
+ uint16_t port;
+ char fqdn[TSDB_FQDN_LEN]; // peer ip string
+ char id[TSDB_EP_LEN + 32]; // peer vgId + end point
+ int8_t role;
+ int8_t sstatus; // sync status
+ uint64_t version;
+ uint64_t sversion; // track the peer version in retrieve process
+ int syncFd;
+ int peerFd; // forward FD
+ int numOfRetrieves; // number of retrieves tried
+ int fileChanged; // a flag to indicate file is changed during retrieving process
+ void * timer;
+ void * pConn;
+ int notifyFd;
+ int watchNum;
+ int * watchFd;
+ int8_t refCount; // reference count
+ struct SSyncNode *pSyncNode;
} SSyncPeer;
typedef struct SSyncNode {
@@ -171,7 +171,6 @@ void syncBroadcastStatus(SSyncNode *pNode);
void syncAddPeerRef(SSyncPeer *pPeer);
int syncDecPeerRef(SSyncPeer *pPeer);
-
#ifdef __cplusplus
}
#endif
diff --git a/src/sync/inc/taosTcpPool.h b/src/sync/inc/taosTcpPool.h
index 5f7ca9ede5..261d190ad3 100644
--- a/src/sync/inc/taosTcpPool.h
+++ b/src/sync/inc/taosTcpPool.h
@@ -38,7 +38,6 @@ void taosCloseTcpThreadPool(ttpool_h);
void *taosAllocateTcpConn(void *, void *ahandle, int connFd);
void taosFreeTcpConn(void *);
-
#ifdef __cplusplus
}
#endif
diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c
index f96b902efd..0daf0b9620 100644
--- a/src/sync/src/syncMain.c
+++ b/src/sync/src/syncMain.c
@@ -134,7 +134,7 @@ void syncCleanUp() {
void *syncStart(const SSyncInfo *pInfo) {
const SSyncCfg *pCfg = &pInfo->syncCfg;
- SSyncNode *pNode = (SSyncNode *) calloc(sizeof(SSyncNode), 1);
+ SSyncNode *pNode = (SSyncNode *)calloc(sizeof(SSyncNode), 1);
if (pNode == NULL) {
sError("no memory to allocate syncNode");
terrno = TAOS_SYSTEM_ERROR(errno);
@@ -167,6 +167,8 @@ void *syncStart(const SSyncInfo *pInfo) {
}
}
+ syncAddNodeRef(pNode);
+
if (pNode->selfIndex < 0) {
sInfo("vgId:%d, this node is not configured", pNode->vgId);
terrno = TSDB_CODE_SYN_INVALID_CONFIG;
@@ -174,11 +176,12 @@ void *syncStart(const SSyncInfo *pInfo) {
return NULL;
}
- nodeVersion = pInfo->version; // set the initial version
+ nodeVersion = pInfo->version; // set the initial version
nodeRole = (pNode->replica > 1) ? TAOS_SYNC_ROLE_UNSYNCED : TAOS_SYNC_ROLE_MASTER;
- sInfo("vgId:%d, %d replicas are configured, quorum:%d role:%s", pNode->vgId, pNode->replica, pNode->quorum, syncRole[nodeRole]);
+ sInfo("vgId:%d, %d replicas are configured, quorum:%d role:%s", pNode->vgId, pNode->replica, pNode->quorum,
+ syncRole[nodeRole]);
- pNode->pSyncFwds = calloc(sizeof(SSyncFwds) + tsMaxFwdInfo*sizeof(SFwdInfo), 1);
+ pNode->pSyncFwds = calloc(sizeof(SSyncFwds) + tsMaxFwdInfo * sizeof(SFwdInfo), 1);
if (pNode->pSyncFwds == NULL) {
sError("vgId:%d, no memory to allocate syncFwds", pNode->vgId);
terrno = TAOS_SYSTEM_ERROR(errno);
@@ -194,7 +197,6 @@ void *syncStart(const SSyncInfo *pInfo) {
}
syncAddArbitrator(pNode);
- syncAddNodeRef(pNode);
taosHashPut(vgIdHash, (const char *)&pNode->vgId, sizeof(int32_t), (char *)(&pNode), sizeof(SSyncNode *));
if (pNode->notifyRole) {
@@ -442,9 +444,7 @@ static void syncAddArbitrator(SSyncNode *pNode) {
pNode->peerInfo[TAOS_SYNC_MAX_REPLICA] = syncAddPeer(pNode, &nodeInfo);
}
-static void syncAddNodeRef(SSyncNode *pNode) {
- atomic_add_fetch_8(&pNode->refCount, 1);
-}
+static void syncAddNodeRef(SSyncNode *pNode) { atomic_add_fetch_8(&pNode->refCount, 1); }
static void syncDecNodeRef(SSyncNode *pNode) {
if (atomic_sub_fetch_8(&pNode->refCount, 1) == 0) {
@@ -455,9 +455,7 @@ static void syncDecNodeRef(SSyncNode *pNode) {
}
}
-void syncAddPeerRef(SSyncPeer *pPeer) {
- atomic_add_fetch_8(&pPeer->refCount, 1);
-}
+void syncAddPeerRef(SSyncPeer *pPeer) { atomic_add_fetch_8(&pPeer->refCount, 1); }
int syncDecPeerRef(SSyncPeer *pPeer) {
if (atomic_sub_fetch_8(&pPeer->refCount, 1) == 0) {
@@ -493,14 +491,15 @@ static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) {
uint32_t ip = taosGetIpFromFqdn(pInfo->nodeFqdn);
if (ip == -1) return NULL;
- SSyncPeer *pPeer = (SSyncPeer *)calloc(1, sizeof(SSyncPeer));
+ SSyncPeer *pPeer = calloc(1, sizeof(SSyncPeer));
if (pPeer == NULL) return NULL;
pPeer->nodeId = pInfo->nodeId;
tstrncpy(pPeer->fqdn, pInfo->nodeFqdn, sizeof(pPeer->fqdn));
pPeer->ip = ip;
pPeer->port = pInfo->nodePort;
- snprintf(pPeer->id, sizeof(pPeer->id), "vgId:%d peer:%s:%d", pNode->vgId, pPeer->fqdn, pPeer->port);
+ pPeer->fqdn[sizeof(pPeer->fqdn) - 1] = 0;
+ snprintf(pPeer->id, sizeof(pPeer->id), "vgId:%d peer:%s:%u", pNode->vgId, pPeer->fqdn, pPeer->port);
pPeer->peerFd = -1;
pPeer->syncFd = -1;
@@ -511,9 +510,9 @@ static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) {
sInfo("%s, it is configured", pPeer->id);
int ret = strcmp(pPeer->fqdn, tsNodeFqdn);
if (pPeer->nodeId == 0 || (ret > 0) || (ret == 0 && pPeer->port > tsSyncPort)) {
- sDebug("%s, start to check peer connection", pPeer->id);
int32_t checkMs = 100 + (pNode->vgId * 10) % 100;
- if (pNode->vgId) checkMs = tsStatusInterval * 2000 + 100;
+ if (pNode->vgId > 1) checkMs = tsStatusInterval * 2000 + checkMs;
+ sDebug("%s, start to check peer connection after %d ms", pPeer->id, checkMs);
taosTmrReset(syncCheckPeerConnection, checkMs, pPeer, syncTmrCtrl, &pPeer->timer);
}
@@ -572,10 +571,10 @@ static void syncChooseMaster(SSyncNode *pNode) {
replica = pNode->replica + 1;
}
- if (index < 0 && onlineNum > replica/2.0) {
+ if (index < 0 && onlineNum > replica / 2.0) {
// over half of nodes are online
for (int i = 0; i < pNode->replica; ++i) {
- //slave with highest version shall be master
+ // slave with highest version shall be master
pPeer = pNode->peerInfo[i];
if (pPeer->role == TAOS_SYNC_ROLE_SLAVE || pPeer->role == TAOS_SYNC_ROLE_MASTER) {
if (index < 0 || pPeer->version > pNode->peerInfo[index]->version) {
@@ -621,7 +620,7 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) {
if (onlineNum <= replica * 0.5) {
if (nodeRole != TAOS_SYNC_ROLE_UNSYNCED) {
nodeRole = TAOS_SYNC_ROLE_UNSYNCED;
- pNode->peerInfo[pNode->selfIndex]->role = nodeRole;
+ // pNode->peerInfo[pNode->selfIndex]->role = nodeRole;
(*pNode->notifyRole)(pNode->ahandle, nodeRole);
sInfo("vgId:%d, change to unsynced state, online:%d replica:%d", pNode->vgId, onlineNum, replica);
}
@@ -647,7 +646,7 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) {
static int syncValidateMaster(SSyncPeer *pPeer) {
SSyncNode *pNode = pPeer->pSyncNode;
- int code = 0;
+ int code = 0;
if (nodeRole == TAOS_SYNC_ROLE_MASTER && nodeVersion < pPeer->version) {
sDebug("%s, slave has higher version, restart all connections!!!", pPeer->id);
@@ -670,7 +669,7 @@ static void syncCheckRole(SSyncPeer *pPeer, SPeerStatus peersStatus[], int8_t ne
int8_t selfOldRole = nodeRole;
int8_t i, syncRequired = 0;
- pNode->peerInfo[pNode->selfIndex]->version = nodeVersion;
+ // pNode->peerInfo[pNode->selfIndex]->version = nodeVersion;
pPeer->role = newRole;
sDebug("%s, own role:%s, new peer role:%s", pPeer->id, syncRole[nodeRole], syncRole[pPeer->role]);
@@ -876,8 +875,6 @@ static void syncProcessForwardFromPeer(char *cont, SSyncPeer *pPeer) {
sError("%s, forward discarded, ver:%" PRIu64, pPeer->id, pHead->version);
}
}
-
- return;
}
static void syncProcessPeersStatusMsg(char *cont, SSyncPeer *pPeer) {
@@ -922,7 +919,7 @@ static int syncReadPeerMsg(SSyncPeer *pPeer, SSyncHead *pHead, char *cont) {
static int syncProcessPeerMsg(void *param, void *buffer) {
SSyncPeer *pPeer = param;
SSyncHead head;
- char * cont = (char *)buffer;
+ char * cont = buffer;
SSyncNode *pNode = pPeer->pSyncNode;
pthread_mutex_lock(&(pNode->mutex));
@@ -1065,7 +1062,7 @@ static void syncProcessIncommingConnection(int connFd, uint32_t sourceIp) {
return;
}
- int32_t vgId = firstPkt.syncHead.vgId;
+ int32_t vgId = firstPkt.syncHead.vgId;
SSyncNode **ppNode = (SSyncNode **)taosHashGet(vgIdHash, (const char *)&vgId, sizeof(int32_t));
if (ppNode == NULL || *ppNode == NULL) {
sError("vgId:%d, vgId could not be found", vgId);
diff --git a/src/sync/src/syncRestore.c b/src/sync/src/syncRestore.c
index 2a0bee3726..ebb6c3a0a9 100644
--- a/src/sync/src/syncRestore.c
+++ b/src/sync/src/syncRestore.c
@@ -23,10 +23,10 @@
#include "tsync.h"
#include "syncInt.h"
-static void syncRemoveExtraFile(SSyncPeer *pPeer, uint32_t sindex, uint32_t eindex) {
- char name[TSDB_FILENAME_LEN*2] = {0};
- char fname[TSDB_FILENAME_LEN*3] = {0};
- uint32_t magic;
+static void syncRemoveExtraFile(SSyncPeer *pPeer, int32_t sindex, int32_t eindex) {
+ char name[TSDB_FILENAME_LEN * 2] = {0};
+ char fname[TSDB_FILENAME_LEN * 3] = {0};
+ uint32_t magic;
uint64_t fversion;
int64_t size;
uint32_t index = sindex;
@@ -40,12 +40,12 @@ static void syncRemoveExtraFile(SSyncPeer *pPeer, uint32_t sindex, uint32_t eind
if (magic == 0) break;
snprintf(fname, sizeof(fname), "%s/%s", pNode->path, name);
- remove(fname);
+ (void)remove(fname);
sDebug("%s, %s is removed", pPeer->id, fname);
index++;
if (index > eindex) break;
- }
+ }
}
static int syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) {
@@ -62,35 +62,36 @@ static int syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) {
while (1) {
// read file info
int ret = taosReadMsg(pPeer->syncFd, &(minfo), sizeof(minfo));
- if (ret < 0 ) break;
+ if (ret < 0) break;
// if no more file from master, break;
if (minfo.name[0] == 0 || minfo.magic == 0) {
sDebug("%s, no more files to restore", pPeer->id);
// remove extra files after the current index
- syncRemoveExtraFile(pPeer, sinfo.index+1, TAOS_SYNC_MAX_INDEX);
- code = 0;
+ syncRemoveExtraFile(pPeer, sinfo.index + 1, TAOS_SYNC_MAX_INDEX);
+ code = 0;
break;
}
-
+
// remove extra files on slave between the current and last index
- syncRemoveExtraFile(pPeer, pindex+1, minfo.index-1);
+ syncRemoveExtraFile(pPeer, pindex + 1, minfo.index - 1);
pindex = minfo.index;
// check the file info
sinfo = minfo;
sDebug("%s, get file info:%s", pPeer->id, minfo.name);
- sinfo.magic = (*pNode->getFileInfo)(pNode->ahandle, sinfo.name, &sinfo.index, TAOS_SYNC_MAX_INDEX, &sinfo.size, &sinfo.fversion);
+ sinfo.magic = (*pNode->getFileInfo)(pNode->ahandle, sinfo.name, &sinfo.index, TAOS_SYNC_MAX_INDEX, &sinfo.size,
+ &sinfo.fversion);
// if file not there or magic is not the same, file shall be synced
memset(&fileAck, 0, sizeof(fileAck));
- fileAck.sync = (sinfo.magic != minfo.magic || sinfo.name[0] == 0) ? 1:0;
+ fileAck.sync = (sinfo.magic != minfo.magic || sinfo.name[0] == 0) ? 1 : 0;
// send file ack
ret = taosWriteMsg(pPeer->syncFd, &(fileAck), sizeof(fileAck));
- if (ret <0) break;
-
+ if (ret < 0) break;
+
// if sync is not required, continue
if (fileAck.sync == 0) {
sDebug("%s, %s is the same", pPeer->id, minfo.name);
@@ -99,10 +100,11 @@ static int syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) {
// if sync is required, open file, receive from master, and write to file
// get the full path to file
+ minfo.name[sizeof(minfo.name) - 1] = 0;
snprintf(name, sizeof(name), "%s/%s", pNode->path, minfo.name);
int dfd = open(name, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO);
- if ( dfd < 0 ) {
+ if (dfd < 0) {
sError("%s, failed to open file:%s", pPeer->id, name);
break;
}
@@ -110,16 +112,15 @@ static int syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) {
ret = taosCopyFds(pPeer->syncFd, dfd, minfo.size);
fsync(dfd);
close(dfd);
- if (ret<0) break;
+ if (ret < 0) break;
sDebug("%s, %s is received, size:%" PRId64, pPeer->id, minfo.name, minfo.size);
-
}
if (code == 0 && (minfo.fversion != sinfo.fversion)) {
- // data file is changed, code shall be set to 1
+ // data file is changed, code shall be set to 1
*fversion = minfo.fversion;
- code = 1;
+ code = 1;
}
if (code < 0) {
@@ -130,8 +131,8 @@ static int syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) {
}
static int syncRestoreWal(SSyncPeer *pPeer) {
- SSyncNode *pNode = pPeer->pSyncNode;
- int ret, code = -1;
+ SSyncNode *pNode = pPeer->pSyncNode;
+ int ret, code = -1;
void *buffer = calloc(1024000, 1); // size for one record
if (buffer == NULL) return -1;
@@ -140,18 +141,21 @@ static int syncRestoreWal(SSyncPeer *pPeer) {
while (1) {
ret = taosReadMsg(pPeer->syncFd, pHead, sizeof(SWalHead));
- if (ret <0) break;
+ if (ret < 0) break;
+
+ if (pHead->len == 0) {
+ code = 0;
+ break;
+ } // wal sync over
- if (pHead->len == 0) {code = 0; break;} // wal sync over
-
ret = taosReadMsg(pPeer->syncFd, pHead->cont, pHead->len);
- if (ret <0) break;
+ if (ret < 0) break;
sDebug("%s, restore a record, ver:%" PRIu64, pPeer->id, pHead->version);
(*pNode->writeToCache)(pNode->ahandle, pHead, TAOS_QTYPE_WAL);
}
- if (code<0) {
+ if (code < 0) {
sError("%s, failed to restore wal(%s)", pPeer->id, strerror(errno));
}
@@ -159,10 +163,9 @@ static int syncRestoreWal(SSyncPeer *pPeer) {
return code;
}
-static char *syncProcessOneBufferedFwd(SSyncPeer *pPeer, char *offset)
-{
+static char *syncProcessOneBufferedFwd(SSyncPeer *pPeer, char *offset) {
SSyncNode *pNode = pPeer->pSyncNode;
- SWalHead *pHead = (SWalHead *) offset;
+ SWalHead * pHead = (SWalHead *)offset;
(*pNode->writeToCache)(pNode->ahandle, pHead, TAOS_QTYPE_FWD);
offset += pHead->len + sizeof(SWalHead);
@@ -171,7 +174,7 @@ static char *syncProcessOneBufferedFwd(SSyncPeer *pPeer, char *offset)
}
static int syncProcessBufferedFwd(SSyncPeer *pPeer) {
- SSyncNode *pNode = pPeer->pSyncNode;
+ SSyncNode * pNode = pPeer->pSyncNode;
SRecvBuffer *pRecv = pNode->pRecv;
int forwards = 0;
@@ -182,7 +185,7 @@ static int syncProcessBufferedFwd(SSyncPeer *pPeer) {
offset = syncProcessOneBufferedFwd(pPeer, offset);
forwards++;
}
-
+
pthread_mutex_lock(&pNode->mutex);
while (forwards < pRecv->forwards && pRecv->code == 0) {
@@ -199,7 +202,7 @@ static int syncProcessBufferedFwd(SSyncPeer *pPeer) {
}
int syncSaveIntoBuffer(SSyncPeer *pPeer, SWalHead *pHead) {
- SSyncNode *pNode = pPeer->pSyncNode;
+ SSyncNode * pNode = pPeer->pSyncNode;
SRecvBuffer *pRecv = pNode->pRecv;
if (pRecv == NULL) return -1;
@@ -259,9 +262,9 @@ static int syncRestoreDataStepByStep(SSyncPeer *pPeer) {
return -1;
}
- // if code > 0, data file is changed, notify app, and pass the version
+ // if code > 0, data file is changed, notify app, and pass the version
if (code > 0 && pNode->notifyFileSynced) {
- if ( (*pNode->notifyFileSynced)(pNode->ahandle, fversion) < 0 ) {
+ if ((*pNode->notifyFileSynced)(pNode->ahandle, fversion) < 0) {
sError("%s, app not in ready state", pPeer->id);
return -1;
}
@@ -296,8 +299,8 @@ void *syncRestoreData(void *param) {
if (syncOpenRecvBuffer(pNode) < 0) {
sError("%s, failed to allocate recv buffer", pPeer->id);
- } else {
- if ( syncRestoreDataStepByStep(pPeer) == 0) {
+ } else {
+ if (syncRestoreDataStepByStep(pPeer) == 0) {
sInfo("%s, it is synced successfully", pPeer->id);
nodeRole = TAOS_SYNC_ROLE_SLAVE;
syncBroadcastStatus(pNode);
@@ -311,7 +314,7 @@ void *syncRestoreData(void *param) {
(*pNode->notifyRole)(pNode->ahandle, nodeRole);
nodeSStatus = TAOS_SYNC_STATUS_INIT;
- taosClose(pPeer->syncFd)
+ taosClose(pPeer->syncFd);
syncCloseRecvBuffer(pNode);
__sync_fetch_and_sub(&tsSyncNum, 1);
syncDecPeerRef(pPeer);
diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c
index 8aa317b1ac..60625d75ec 100644
--- a/src/sync/src/syncRetrieve.c
+++ b/src/sync/src/syncRetrieve.c
@@ -38,13 +38,13 @@ static int syncAddIntoWatchList(SSyncPeer *pPeer, char *name) {
return -1;
}
- if (pPeer->watchFd == NULL) pPeer->watchFd = malloc(sizeof(int)*tsMaxWatchFiles);
+ if (pPeer->watchFd == NULL) pPeer->watchFd = malloc(sizeof(int) * tsMaxWatchFiles);
if (pPeer->watchFd == NULL) {
sError("%s, failed to allocate watchFd", pPeer->id);
return -1;
}
- memset(pPeer->watchFd, -1, sizeof(int)*tsMaxWatchFiles);
+ memset(pPeer->watchFd, -1, sizeof(int) * tsMaxWatchFiles);
}
int *wd = pPeer->watchFd + pPeer->watchNum;
@@ -64,7 +64,7 @@ static int syncAddIntoWatchList(SSyncPeer *pPeer, char *name) {
sDebug("%s, monitor %s, wd:%d watchNum:%d", pPeer->id, name, *wd, pPeer->watchNum);
}
- pPeer->watchNum = (pPeer->watchNum +1) % tsMaxWatchFiles;
+ pPeer->watchNum = (pPeer->watchNum + 1) % tsMaxWatchFiles;
return 0;
}
@@ -72,20 +72,20 @@ static int syncAddIntoWatchList(SSyncPeer *pPeer, char *name) {
static int syncAreFilesModified(SSyncPeer *pPeer) {
if (pPeer->notifyFd <= 0) return 0;
- char buf[2048];
- int len = read(pPeer->notifyFd, buf, sizeof(buf));
+ char buf[2048];
+ int len = read(pPeer->notifyFd, buf, sizeof(buf));
if (len < 0 && errno != EAGAIN) {
- sError("%s, failed to read notify FD(%s)", pPeer->id, strerror(errno));
+ sError("%s, failed to read notify FD(%s)", pPeer->id, strerror(errno));
return -1;
}
-
- int code = 0;
- if (len > 0) {
+
+ int code = 0;
+ if (len > 0) {
const struct inotify_event *event;
char *ptr;
for (ptr = buf; ptr < buf + len; ptr += sizeof(struct inotify_event) + event->len) {
- event = (const struct inotify_event *) ptr;
- if ((event->mask & IN_MODIFY) || (event->mask & IN_DELETE)) {
+ event = (const struct inotify_event *)ptr;
+ if ((event->mask & IN_MODIFY) || (event->mask & IN_DELETE)) {
sDebug("%s, processed file is changed", pPeer->id);
pPeer->fileChanged = 1;
code = 1;
@@ -98,11 +98,11 @@ static int syncAreFilesModified(SSyncPeer *pPeer) {
}
static int syncRetrieveFile(SSyncPeer *pPeer) {
- SSyncNode * pNode = pPeer->pSyncNode;
- SFileInfo fileInfo;
- SFileAck fileAck;
- int code = -1;
- char name[TSDB_FILENAME_LEN * 2] = {0};
+ SSyncNode *pNode = pPeer->pSyncNode;
+ SFileInfo fileInfo;
+ SFileAck fileAck;
+ int code = -1;
+ char name[TSDB_FILENAME_LEN * 2] = {0};
memset(&fileInfo, 0, sizeof(fileInfo));
memset(&fileAck, 0, sizeof(fileAck));
@@ -110,17 +110,19 @@ static int syncRetrieveFile(SSyncPeer *pPeer) {
while (1) {
// retrieve file info
fileInfo.name[0] = 0;
- fileInfo.magic = (*pNode->getFileInfo)(pNode->ahandle, fileInfo.name, &fileInfo.index, TAOS_SYNC_MAX_INDEX, &fileInfo.size, &fileInfo.fversion);
- //fileInfo.size = htonl(size);
+ fileInfo.magic = (*pNode->getFileInfo)(pNode->ahandle, fileInfo.name, &fileInfo.index, TAOS_SYNC_MAX_INDEX,
+ &fileInfo.size, &fileInfo.fversion);
+ // fileInfo.size = htonl(size);
// send the file info
int32_t ret = taosWriteMsg(pPeer->syncFd, &(fileInfo), sizeof(fileInfo));
- if (ret < 0 ) break;
+ if (ret < 0) break;
// if no file anymore, break
- if (fileInfo.magic == 0 || fileInfo.name[0] == 0) {
- sDebug("%s, no more files to sync", pPeer->id);
- code = 0; break;
+ if (fileInfo.magic == 0 || fileInfo.name[0] == 0) {
+ sDebug("%s, no more files to sync", pPeer->id);
+ code = 0;
+ break;
}
// wait for the ack from peer
@@ -132,29 +134,29 @@ static int syncRetrieveFile(SSyncPeer *pPeer) {
// get the full path to file
snprintf(name, sizeof(name), "%s/%s", pNode->path, fileInfo.name);
-
+
// add the file into watch list
- if ( syncAddIntoWatchList(pPeer, name) <0) break;
+ if (syncAddIntoWatchList(pPeer, name) < 0) break;
// if sync is not required, continue
if (fileAck.sync == 0) {
- fileInfo.index++;
- sDebug("%s, %s is the same", pPeer->id, fileInfo.name);
- continue;
+ fileInfo.index++;
+ sDebug("%s, %s is the same", pPeer->id, fileInfo.name);
+ continue;
}
// send the file to peer
int sfd = open(name, O_RDONLY);
if (sfd < 0) break;
- ret = taosTSendFile(pPeer->syncFd, sfd, NULL, fileInfo.size);
+ ret = taosTSendFile(pPeer->syncFd, sfd, NULL, fileInfo.size);
close(sfd);
if (ret < 0) break;
- sDebug("%s, %s is sent, size:%" PRId64, pPeer->id, name, fileInfo.size);
- fileInfo.index++;
+ sDebug("%s, %s is sent, size:%" PRId64, pPeer->id, name, fileInfo.size);
+ fileInfo.index++;
- // check if processed files are modified
+ // check if processed files are modified
if (syncAreFilesModified(pPeer) != 0) break;
}
@@ -201,15 +203,15 @@ static int syncMonitorLastWal(SSyncPeer *pPeer, char *name) {
return -1;
}
- if (pPeer->watchFd == NULL) pPeer->watchFd = malloc(sizeof(int)*tsMaxWatchFiles);
+ if (pPeer->watchFd == NULL) pPeer->watchFd = malloc(sizeof(int) * tsMaxWatchFiles);
if (pPeer->watchFd == NULL) {
sError("%s, failed to allocate watchFd", pPeer->id);
return -1;
}
- memset(pPeer->watchFd, -1, sizeof(int)*tsMaxWatchFiles);
+ memset(pPeer->watchFd, -1, sizeof(int) * tsMaxWatchFiles);
int *wd = pPeer->watchFd;
-
+
*wd = inotify_add_watch(pPeer->notifyFd, name, IN_MODIFY | IN_CLOSE_WRITE);
if (*wd == -1) {
sError("%s, failed to watch last wal(%s)", pPeer->id, strerror(errno));
@@ -219,8 +221,8 @@ static int syncMonitorLastWal(SSyncPeer *pPeer, char *name) {
return 0;
}
-static uint32_t syncCheckLastWalChanges(SSyncPeer *pPeer, uint32_t *pEvent) {
- char buf[2048];
+static int32_t syncCheckLastWalChanges(SSyncPeer *pPeer, uint32_t *pEvent) {
+ char buf[2048];
int len = read(pPeer->notifyFd, buf, sizeof(buf));
if (len < 0 && errno != EAGAIN) {
sError("%s, failed to read notify FD(%s)", pPeer->id, strerror(errno));
@@ -231,26 +233,29 @@ static uint32_t syncCheckLastWalChanges(SSyncPeer *pPeer, uint32_t *pEvent) {
struct inotify_event *event;
for (char *ptr = buf; ptr < buf + len; ptr += sizeof(struct inotify_event) + event->len) {
- event = (struct inotify_event *) ptr;
+ event = (struct inotify_event *)ptr;
if (event->mask & IN_MODIFY) *pEvent = *pEvent | IN_MODIFY;
if (event->mask & IN_CLOSE_WRITE) *pEvent = *pEvent | IN_CLOSE_WRITE;
}
- if (pEvent != 0)
- sDebug("%s, last wal event:0x%x", pPeer->id, *pEvent);
+ if (pEvent != 0) sDebug("%s, last wal event:0x%x", pPeer->id, *pEvent);
return 0;
}
static int syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversion, int64_t offset, uint32_t *pEvent) {
- SWalHead *pHead = (SWalHead *) malloc(640000);
- int code = -1;
- int32_t bytes = 0;
- int sfd;
+ SWalHead *pHead = malloc(640000);
+ int code = -1;
+ int32_t bytes = 0;
+ int sfd;
sfd = open(name, O_RDONLY);
- if (sfd < 0) return -1;
- lseek(sfd, offset, SEEK_SET);
+ if (sfd < 0) {
+ free(pHead);
+ return -1;
+ }
+
+ (void)lseek(sfd, offset, SEEK_SET);
sDebug("%s, retrieve last wal, offset:%" PRId64 " fversion:%" PRIu64, pPeer->id, offset, fversion);
while (1) {
@@ -263,34 +268,34 @@ static int syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversion,
sDebug("%s, last wal is forwarded, ver:%" PRIu64, pPeer->id, pHead->version);
int ret = taosWriteMsg(pPeer->syncFd, pHead, wsize);
- if ( ret != wsize ) break;
+ if (ret != wsize) break;
pPeer->sversion = pHead->version;
bytes += wsize;
-
+
if (pHead->version >= fversion && fversion > 0) {
- code = 0;
- bytes = 0;
+ code = 0;
+ bytes = 0;
break;
}
}
free(pHead);
- taosClose(sfd);
+ close(sfd);
if (code == 0) return bytes;
return -1;
}
static int syncProcessLastWal(SSyncPeer *pPeer, char *wname, uint32_t index) {
- SSyncNode *pNode = pPeer->pSyncNode;
- int code = -1;
- char fname[TSDB_FILENAME_LEN * 2]; // full path to wal file
+ SSyncNode *pNode = pPeer->pSyncNode;
+ int code = -1;
+ char fname[TSDB_FILENAME_LEN * 2]; // full path to wal file
if (syncAreFilesModified(pPeer) != 0) return -1;
while (1) {
- int32_t once = 0; // last WAL has once ever been processed
+ int32_t once = 0; // last WAL has once ever been processed
int64_t offset = 0;
uint64_t fversion = 0;
uint32_t event = 0;
@@ -300,48 +305,48 @@ static int syncProcessLastWal(SSyncPeer *pPeer, char *wname, uint32_t index) {
sDebug("%s, start to retrieve last wal:%s", pPeer->id, fname);
// monitor last wal
- if (syncMonitorLastWal(pPeer, fname) <0) break;
+ if (syncMonitorLastWal(pPeer, fname) < 0) break;
while (1) {
int32_t bytes = syncRetrieveLastWal(pPeer, fname, fversion, offset, &event);
if (bytes < 0) break;
// check file changes
- if (syncCheckLastWalChanges(pPeer, &event) <0) break;
+ if (syncCheckLastWalChanges(pPeer, &event) < 0) break;
// if file is not updated or updated once, set the fversion and sstatus
if (((event & IN_MODIFY) == 0) || once) {
if (fversion == 0) {
pPeer->sstatus = TAOS_SYNC_STATUS_CACHE; // start to forward pkt
- fversion = nodeVersion; // must read data to fversion
+ fversion = nodeVersion; // must read data to fversion
}
}
// if all data up to fversion is read out, it is over
if (pPeer->sversion >= fversion && fversion > 0) {
- code = 0;
- sDebug("%s, data up to fversion:%ld has been read out, bytes:%d", pPeer->id, fversion, bytes);
+ code = 0;
+ sDebug("%s, data up to fversion:%" PRId64 " has been read out, bytes:%d", pPeer->id, fversion, bytes);
break;
- }
+ }
// if all data are read out, and no update
if ((bytes == 0) && ((event & IN_MODIFY) == 0)) {
// wal file is closed, break
- if (event & IN_CLOSE_WRITE) {
- code = 0;
+ if (event & IN_CLOSE_WRITE) {
+ code = 0;
sDebug("%s, current wal is closed", pPeer->id);
break;
}
-
+
// wal not closed, it means some data not flushed to disk, wait for a while
usleep(10000);
}
- // if bytes>0, file is updated, or fversion is not reached but file still open, read again
+ // if bytes>0, file is updated, or fversion is not reached but file still open, read again
once = 1;
- offset += bytes;
+ offset += bytes;
sDebug("%s, retrieve last wal, bytes:%d", pPeer->id, bytes);
- event = event & (~IN_MODIFY); // clear IN_MODIFY flag
+ event = event & (~IN_MODIFY); // clear IN_MODIFY flag
}
if (code < 0) break;
@@ -356,7 +361,7 @@ static int syncProcessLastWal(SSyncPeer *pPeer, char *wname, uint32_t index) {
break;
}
- // current last wal is closed, there is a new one
+ // current last wal is closed, there is a new one
sDebug("%s, last wal is closed, try new one", pPeer->id);
}
@@ -377,14 +382,14 @@ static int syncRetrieveWal(SSyncPeer *pPeer) {
while (1) {
// retrieve wal info
wname[0] = 0;
- code = (*pNode->getWalInfo)(pNode->ahandle, wname, &index);
+ code = (*pNode->getWalInfo)(pNode->ahandle, wname, &index);
if (code < 0) break; // error
if (wname[0] == 0) { // no wal file
sDebug("%s, no wal file", pPeer->id);
break;
- }
-
- if (code == 0) { // last wal
+ }
+
+ if (code == 0) { // last wal
code = syncProcessLastWal(pPeer, wname, index);
break;
}
@@ -392,26 +397,26 @@ static int syncRetrieveWal(SSyncPeer *pPeer) {
// get the full path to wal file
snprintf(fname, sizeof(fname), "%s/%s", pNode->path, wname);
- // send wal file,
+ // send wal file,
// inotify is not required, old wal file won't be modified, even remove is ok
if (stat(fname, &fstat) < 0) break;
size = fstat.st_size;
- sDebug("%s, retrieve wal:%s size:%d", pPeer->id, fname, size);
+ sDebug("%s, retrieve wal:%s size:%d", pPeer->id, fname, size);
int sfd = open(fname, O_RDONLY);
if (sfd < 0) break;
- code = taosTSendFile(pPeer->syncFd, sfd, NULL, size);
- close(sfd);
- if (code <0) break;
+ code = taosTSendFile(pPeer->syncFd, sfd, NULL, size);
+ close(sfd);
+ if (code < 0) break;
- index++;
+ index++;
- if (syncAreFilesModified(pPeer) != 0) break;
+ if (syncAreFilesModified(pPeer) != 0) break;
}
if (code == 0) {
- sDebug("%s, wal retrieve is finished", pPeer->id);
+ sDebug("%s, wal retrieve is finished", pPeer->id);
pPeer->sstatus = TAOS_SYNC_STATUS_CACHE;
SWalHead walHead;
memset(&walHead, 0, sizeof(walHead));
@@ -433,12 +438,12 @@ static int syncRetrieveDataStepByStep(SSyncPeer *pPeer) {
tstrncpy(firstPkt.fqdn, tsNodeFqdn, sizeof(firstPkt.fqdn));
firstPkt.port = tsSyncPort;
- if (write(pPeer->syncFd, (char *) &firstPkt, sizeof(firstPkt)) < 0) {
+ if (write(pPeer->syncFd, (char *)&firstPkt, sizeof(firstPkt)) < 0) {
sError("%s, failed to send syncCmd", pPeer->id);
return -1;
}
- pPeer->sversion = 0;
+ pPeer->sversion = 0;
pPeer->sstatus = TAOS_SYNC_STATUS_FILE;
sDebug("%s, start to retrieve file", pPeer->id);
if (syncRetrieveFile(pPeer) < 0) {
@@ -447,8 +452,7 @@ static int syncRetrieveDataStepByStep(SSyncPeer *pPeer) {
}
// if no files are synced, there must be wal to sync, sversion must be larger than one
- if (pPeer->sversion == 0)
- pPeer->sversion = 1;
+ if (pPeer->sversion == 0) pPeer->sversion = 1;
sDebug("%s, start to retrieve wal", pPeer->id);
if (syncRetrieveWal(pPeer) < 0) {
@@ -460,8 +464,8 @@ static int syncRetrieveDataStepByStep(SSyncPeer *pPeer) {
}
void *syncRetrieveData(void *param) {
- SSyncPeer * pPeer = (SSyncPeer *)param;
- SSyncNode *pNode = pPeer->pSyncNode;
+ SSyncPeer *pPeer = (SSyncPeer *)param;
+ SSyncNode *pNode = pPeer->pSyncNode;
taosBlockSIGPIPE();
pPeer->fileChanged = 0;
@@ -470,7 +474,7 @@ void *syncRetrieveData(void *param) {
sError("%s, failed to open socket to sync", pPeer->id);
} else {
sInfo("%s, sync tcp is setup", pPeer->id);
-
+
if (syncRetrieveDataStepByStep(pPeer) == 0) {
sDebug("%s, sync retrieve process is successful", pPeer->id);
} else {
@@ -482,12 +486,11 @@ void *syncRetrieveData(void *param) {
if (pPeer->fileChanged) {
// if file is changed 3 times continuously, start flow control
pPeer->numOfRetrieves++;
- if (pPeer->numOfRetrieves >= 2 && pNode->notifyFlowCtrl)
+ if (pPeer->numOfRetrieves >= 2 && pNode->notifyFlowCtrl)
(*pNode->notifyFlowCtrl)(pNode->ahandle, 4 << (pPeer->numOfRetrieves - 2));
} else {
pPeer->numOfRetrieves = 0;
- if (pNode->notifyFlowCtrl)
- (*pNode->notifyFlowCtrl)(pNode->ahandle, 0);
+ if (pNode->notifyFlowCtrl) (*pNode->notifyFlowCtrl)(pNode->ahandle, 0);
}
pPeer->fileChanged = 0;
diff --git a/src/sync/src/taosTcpPool.c b/src/sync/src/taosTcpPool.c
index 2f064ceb36..6a210a136f 100644
--- a/src/sync/src/taosTcpPool.c
+++ b/src/sync/src/taosTcpPool.c
@@ -45,8 +45,8 @@ typedef struct {
static void *taosAcceptPeerTcpConnection(void *argv);
static void *taosProcessTcpData(void *param);
+static void taosStopPoolThread(SThreadObj *pThread);
static SThreadObj *taosGetTcpThread(SPoolObj *pPool);
-static void taosStopPoolThread(SThreadObj* pThread);
void *taosOpenTcpThreadPool(SPoolInfo *pInfo) {
pthread_attr_t thattr;
@@ -58,8 +58,8 @@ void *taosOpenTcpThreadPool(SPoolInfo *pInfo) {
}
pPool->info = *pInfo;
-
- pPool->pThread = (SThreadObj **) calloc(sizeof(SThreadObj *), pInfo->numOfThreads);
+
+ pPool->pThread = (SThreadObj **)calloc(sizeof(SThreadObj *), pInfo->numOfThreads);
if (pPool->pThread == NULL) {
uError("TCP server, no enough memory");
free(pPool);
@@ -68,17 +68,19 @@ void *taosOpenTcpThreadPool(SPoolInfo *pInfo) {
pPool->acceptFd = taosOpenTcpServerSocket(pInfo->serverIp, pInfo->port);
if (pPool->acceptFd < 0) {
- free(pPool->pThread); free(pPool);
+ free(pPool->pThread);
+ free(pPool);
uError("failed to create TCP server socket, port:%d (%s)", pInfo->port, strerror(errno));
return NULL;
}
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
- if (pthread_create(&(pPool->thread), &thattr, (void *) taosAcceptPeerTcpConnection, pPool) != 0) {
+ if (pthread_create(&(pPool->thread), &thattr, (void *)taosAcceptPeerTcpConnection, pPool) != 0) {
uError("TCP server, failed to create accept thread, reason:%s", strerror(errno));
close(pPool->acceptFd);
- free(pPool->pThread); free(pPool);
+ free(pPool->pThread);
+ free(pPool);
return NULL;
}
@@ -89,29 +91,30 @@ void *taosOpenTcpThreadPool(SPoolInfo *pInfo) {
}
void taosCloseTcpThreadPool(void *param) {
- SPoolObj *pPool = (SPoolObj *)param;
- SThreadObj *pThread;
+ SPoolObj * pPool = (SPoolObj *)param;
+ SThreadObj *pThread;
- shutdown(pPool->acceptFd, SHUT_RD);
+ shutdown(pPool->acceptFd, SHUT_RD);
pthread_join(pPool->thread, NULL);
for (int i = 0; i < pPool->info.numOfThreads; ++i) {
pThread = pPool->pThread[i];
- if (pThread) taosStopPoolThread(pThread);
+ if (pThread) taosStopPoolThread(pThread);
}
+ uDebug("%p TCP pool is closed", pPool);
+
taosTFree(pPool->pThread);
free(pPool);
- uDebug("%p TCP pool is closed", pPool);
}
void *taosAllocateTcpConn(void *param, void *pPeer, int connFd) {
struct epoll_event event;
SPoolObj *pPool = (SPoolObj *)param;
- SConnObj *pConn = (SConnObj *) calloc(sizeof(SConnObj), 1);
+ SConnObj *pConn = (SConnObj *)calloc(sizeof(SConnObj), 1);
if (pConn == NULL) {
- terrno = TAOS_SYSTEM_ERROR(errno);
+ terrno = TAOS_SYSTEM_ERROR(errno);
return NULL;
}
@@ -131,7 +134,7 @@ void *taosAllocateTcpConn(void *param, void *pPeer, int connFd) {
if (epoll_ctl(pThread->pollFd, EPOLL_CTL_ADD, connFd, &event) < 0) {
uError("failed to add fd:%d(%s)", connFd, strerror(errno));
- terrno = TAOS_SYSTEM_ERROR(errno);
+ terrno = TAOS_SYSTEM_ERROR(errno);
free(pConn);
pConn = NULL;
} else {
@@ -143,8 +146,8 @@ void *taosAllocateTcpConn(void *param, void *pPeer, int connFd) {
}
void taosFreeTcpConn(void *param) {
- SConnObj * pConn = (SConnObj *)param;
- SThreadObj *pThread = pConn->pThread;
+ SConnObj * pConn = (SConnObj *)param;
+ SThreadObj *pThread = pConn->pThread;
uDebug("%p TCP connection will be closed, fd:%d", pThread, pConn->fd);
pConn->closedByApp = 1;
@@ -153,9 +156,9 @@ void taosFreeTcpConn(void *param) {
static void taosProcessBrokenLink(SConnObj *pConn) {
SThreadObj *pThread = pConn->pThread;
- SPoolObj *pPool = pThread->pPool;
- SPoolInfo *pInfo = &pPool->info;
-
+ SPoolObj * pPool = pThread->pPool;
+ SPoolInfo * pInfo = &pPool->info;
+
if (pConn->closedByApp == 0) shutdown(pConn->fd, SHUT_WR);
(*pInfo->processBrokenLink)(pConn->ahandle);
@@ -169,24 +172,24 @@ static void taosProcessBrokenLink(SConnObj *pConn) {
#define maxEvents 10
static void *taosProcessTcpData(void *param) {
- SThreadObj *pThread = (SThreadObj *) param;
- SPoolObj *pPool = pThread->pPool;
- SPoolInfo *pInfo = &pPool->info;
- SConnObj *pConn = NULL;
+ SThreadObj *pThread = (SThreadObj *)param;
+ SPoolObj * pPool = pThread->pPool;
+ SPoolInfo * pInfo = &pPool->info;
+ SConnObj * pConn = NULL;
struct epoll_event events[maxEvents];
void *buffer = malloc(pInfo->bufferSize);
taosBlockSIGPIPE();
while (1) {
- if (pThread->stop) break;
+ if (pThread->stop) break;
int fdNum = epoll_wait(pThread->pollFd, events, maxEvents, TAOS_EPOLL_WAIT_TIME);
if (pThread->stop) {
uDebug("%p TCP epoll thread is exiting...", pThread);
break;
}
- if (fdNum < 0) {
+ if (fdNum < 0) {
uError("epoll_wait failed (%s)", strerror(errno));
continue;
}
@@ -215,27 +218,31 @@ static void *taosProcessTcpData(void *param) {
taosFreeTcpConn(pConn);
continue;
}
- }
+ }
+
}
+
+ if (pThread->stop) break;
}
+ uDebug("%p TCP epoll thread exits", pThread);
+
close(pThread->pollFd);
free(pThread);
free(buffer);
- uDebug("%p TCP epoll thread exits", pThread);
- return NULL;
+ return NULL;
}
static void *taosAcceptPeerTcpConnection(void *argv) {
- SPoolObj *pPool = (SPoolObj *)argv;
- SPoolInfo *pInfo = &pPool->info;
+ SPoolObj * pPool = (SPoolObj *)argv;
+ SPoolInfo *pInfo = &pPool->info;
taosBlockSIGPIPE();
while (1) {
struct sockaddr_in clientAddr;
socklen_t addrlen = sizeof(clientAddr);
- int connFd = accept(pPool->acceptFd, (struct sockaddr *) &clientAddr, &addrlen);
+ int connFd = accept(pPool->acceptFd, (struct sockaddr *)&clientAddr, &addrlen);
if (connFd < 0) {
if (errno == EINVAL) {
uDebug("%p TCP server accept is exiting...", pPool);
@@ -246,7 +253,7 @@ static void *taosAcceptPeerTcpConnection(void *argv) {
}
}
- //uDebug("TCP connection from: 0x%x:%d", clientAddr.sin_addr.s_addr, clientAddr.sin_port);
+ // uDebug("TCP connection from: 0x%x:%d", clientAddr.sin_addr.s_addr, clientAddr.sin_port);
taosKeepTcpAlive(connFd);
(*pInfo->processIncomingConn)(connFd, clientAddr.sin_addr.s_addr);
}
@@ -260,7 +267,7 @@ static SThreadObj *taosGetTcpThread(SPoolObj *pPool) {
if (pThread) return pThread;
- pThread = (SThreadObj *) calloc(1, sizeof(SThreadObj));
+ pThread = (SThreadObj *)calloc(1, sizeof(SThreadObj));
if (pThread == NULL) return NULL;
pThread->pPool = pPool;
@@ -273,7 +280,7 @@ static SThreadObj *taosGetTcpThread(SPoolObj *pPool) {
pthread_attr_t thattr;
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
- int ret = pthread_create(&(pThread->thread), &thattr, (void *) taosProcessTcpData, pThread);
+ int ret = pthread_create(&(pThread->thread), &thattr, (void *)taosProcessTcpData, pThread);
pthread_attr_destroy(&thattr);
if (ret != 0) {
@@ -290,20 +297,20 @@ static SThreadObj *taosGetTcpThread(SPoolObj *pPool) {
return pThread;
}
-static void taosStopPoolThread(SThreadObj* pThread) {
+static void taosStopPoolThread(SThreadObj *pThread) {
pThread->stop = true;
-
+
if (pThread->thread == pthread_self()) {
pthread_detach(pthread_self());
return;
}
- // save thread ID into a local variable, since pThread is freed when the thread exits
+ // save thread ID into a local variable, since pThread is freed when the thread exits
pthread_t thread = pThread->thread;
// signal the thread to stop, try graceful method first,
// and use pthread_cancel when failed
- struct epoll_event event = { .events = EPOLLIN };
+ struct epoll_event event = {.events = EPOLLIN};
eventfd_t fd = eventfd(1, 0);
if (fd == -1) {
// failed to create eventfd, call pthread_cancel instead, which may result in data corruption
@@ -317,6 +324,5 @@ static void taosStopPoolThread(SThreadObj* pThread) {
}
pthread_join(thread, NULL);
- taosClose(fd);
+ if (fd >= 0) taosClose(fd);
}
-
diff --git a/src/sync/src/tarbitrator.c b/src/sync/src/tarbitrator.c
index b704b1ecae..360ea93f6c 100644
--- a/src/sync/src/tarbitrator.c
+++ b/src/sync/src/tarbitrator.c
@@ -27,29 +27,29 @@
#include "tsync.h"
#include "syncInt.h"
-static void arbSignalHandler(int32_t signum, siginfo_t *sigInfo, void *context);
-static void arbProcessIncommingConnection(int connFd, uint32_t sourceIp);
-static void arbProcessBrokenLink(void *param);
-static int arbProcessPeerMsg(void *param, void *buffer);
+static void arbSignalHandler(int32_t signum, siginfo_t *sigInfo, void *context);
+static void arbProcessIncommingConnection(int connFd, uint32_t sourceIp);
+static void arbProcessBrokenLink(void *param);
+static int arbProcessPeerMsg(void *param, void *buffer);
static tsem_t tsArbSem;
static ttpool_h tsArbTcpPool;
typedef struct {
- char id[TSDB_EP_LEN+24];
+ char id[TSDB_EP_LEN + 24];
int nodeFd;
void *pConn;
} SNodeConn;
int main(int argc, char *argv[]) {
- char arbLogPath[TSDB_FILENAME_LEN + 16] = {0};
+ char arbLogPath[TSDB_FILENAME_LEN + 16] = {0};
- for (int i=1; i TSDB_FILENAME_LEN) continue;
+ } else if (strcmp(argv[i], "-g") == 0 && i < argc - 1) {
+ if (strlen(argv[++i]) > TSDB_FILENAME_LEN) continue;
tstrncpy(arbLogPath, argv[i], sizeof(arbLogPath));
} else {
printf("\nusage: %s [options] \n", argv[0]);
@@ -62,8 +62,8 @@ int main(int argc, char *argv[]) {
}
sDebugFlag = debugFlag;
-
- if (tsem_init(&tsArbSem, 0, 0) != 0) {
+
+ if (tsem_init(&tsArbSem, 0, 0) != 0) {
printf("failed to create exit semphore\n");
exit(EXIT_FAILURE);
}
@@ -91,10 +91,10 @@ int main(int argc, char *argv[]) {
info.processIncomingMsg = arbProcessPeerMsg;
info.processIncomingConn = arbProcessIncommingConnection;
tsArbTcpPool = taosOpenTcpThreadPool(&info);
-
+
if (tsArbTcpPool == NULL) {
- sDebug("failed to open TCP thread pool, exit...");
- return -1;
+ sDebug("failed to open TCP thread pool, exit...");
+ return -1;
}
sInfo("TAOS arbitrator: %s:%d is running", tsNodeFqdn, tsArbitratorPort);
@@ -108,9 +108,8 @@ int main(int argc, char *argv[]) {
return 0;
}
-static void arbProcessIncommingConnection(int connFd, uint32_t sourceIp)
-{
- char ipstr[24];
+static void arbProcessIncommingConnection(int connFd, uint32_t sourceIp) {
+ char ipstr[24];
tinet_ntoa(ipstr, sourceIp);
sDebug("peer TCP connection from ip:%s", ipstr);
@@ -121,15 +120,16 @@ static void arbProcessIncommingConnection(int connFd, uint32_t sourceIp)
return;
}
- SNodeConn *pNode = (SNodeConn *) calloc(sizeof(SNodeConn), 1);
+ SNodeConn *pNode = (SNodeConn *)calloc(sizeof(SNodeConn), 1);
if (pNode == NULL) {
sError("failed to allocate memory(%s)", strerror(errno));
taosCloseSocket(connFd);
return;
}
- snprintf(pNode->id, sizeof(pNode->id), "vgId:%d peer:%s:%d", firstPkt.sourceId, firstPkt.fqdn, firstPkt.port);
- if (firstPkt.syncHead.vgId) {
+ firstPkt.fqdn[sizeof(firstPkt.fqdn) - 1] = 0;
+ snprintf(pNode->id, sizeof(pNode->id), "vgId:%d peer:%s:%d", firstPkt.sourceId, firstPkt.fqdn, firstPkt.port);
+ if (firstPkt.syncHead.vgId) {
sDebug("%s, vgId in head is not zero, close the connection", pNode->id);
taosTFree(pNode);
taosCloseSocket(connFd);
@@ -151,10 +151,10 @@ static void arbProcessBrokenLink(void *param) {
}
static int arbProcessPeerMsg(void *param, void *buffer) {
- SNodeConn * pNode = param;
- SSyncHead head;
- int bytes = 0;
- char *cont = (char *)buffer;
+ SNodeConn *pNode = param;
+ SSyncHead head;
+ int bytes = 0;
+ char * cont = (char *)buffer;
int hlen = taosReadMsg(pNode->nodeFd, &head, sizeof(head));
if (hlen != sizeof(head)) {
diff --git a/src/sync/test/syncClient.c b/src/sync/test/syncClient.c
index 16053d1088..23264dc8a0 100644
--- a/src/sync/test/syncClient.c
+++ b/src/sync/test/syncClient.c
@@ -25,31 +25,32 @@ typedef struct {
int num;
int numOfReqs;
int msgSize;
- tsem_t rspSem;
- tsem_t *pOverSem;
+ tsem_t rspSem;
+ tsem_t * pOverSem;
pthread_t thread;
- void *pRpc;
+ void * pRpc;
} SInfo;
void processResponse(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
SInfo *pInfo = (SInfo *)pMsg->ahandle;
- uDebug("thread:%d, response is received, type:%d contLen:%d code:0x%x", pInfo->index, pMsg->msgType, pMsg->contLen, pMsg->code);
+ uDebug("thread:%d, response is received, type:%d contLen:%d code:0x%x", pInfo->index, pMsg->msgType, pMsg->contLen,
+ pMsg->code);
if (pEpSet) pInfo->epSet = *pEpSet;
rpcFreeCont(pMsg->pCont);
- tsem_post(&pInfo->rspSem);
+ tsem_post(&pInfo->rspSem);
}
int tcount = 0;
void *sendRequest(void *param) {
- SInfo *pInfo = (SInfo *)param;
- SRpcMsg rpcMsg = {0};
-
+ SInfo * pInfo = (SInfo *)param;
+ SRpcMsg rpcMsg = {0};
+
uDebug("thread:%d, start to send request", pInfo->index);
- while ( pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) {
+ while (pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) {
pInfo->num++;
rpcMsg.pCont = rpcMallocCont(pInfo->msgSize);
rpcMsg.contLen = pInfo->msgSize;
@@ -57,8 +58,9 @@ void *sendRequest(void *param) {
rpcMsg.msgType = 1;
uDebug("thread:%d, send request, contLen:%d num:%d", pInfo->index, pInfo->msgSize, pInfo->num);
rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg);
- if ( pInfo->num % 20000 == 0 )
+ if (pInfo->num % 20000 == 0) {
uInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num);
+ }
tsem_wait(&pInfo->rspSem);
}
@@ -72,12 +74,12 @@ int main(int argc, char *argv[]) {
SRpcInit rpcInit;
SRpcEpSet epSet;
char secret[TSDB_KEY_LEN] = "mypassword";
- int msgSize = 128;
- int numOfReqs = 0;
- int appThreads = 1;
- char serverIp[40] = "127.0.0.1";
- struct timeval systemTime;
- int64_t startTime, endTime;
+ int msgSize = 128;
+ int numOfReqs = 0;
+ int appThreads = 1;
+ char serverIp[40] = "127.0.0.1";
+ struct timeval systemTime;
+ int64_t startTime, endTime;
pthread_attr_t thattr;
// server info
@@ -102,30 +104,30 @@ int main(int argc, char *argv[]) {
rpcInit.spi = 1;
rpcInit.connType = TAOS_CONN_CLIENT;
- for (int i=1; iindex = i;
pInfo->epSet = epSet;
pInfo->numOfReqs = numOfReqs;
@@ -177,18 +179,16 @@ int main(int argc, char *argv[]) {
do {
usleep(1);
- } while ( tcount < appThreads);
+ } while (tcount < appThreads);
gettimeofday(&systemTime, NULL);
- endTime = systemTime.tv_sec*1000000 + systemTime.tv_usec;
- float usedTime = (endTime - startTime)/1000.0; // mseconds
+ endTime = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
+ float usedTime = (endTime - startTime) / 1000.0; // mseconds
- uInfo("it takes %.3f mseconds to send %d requests to server", usedTime, numOfReqs*appThreads);
- uInfo("Performance: %.3f requests per second, msgSize:%d bytes", 1000.0*numOfReqs*appThreads/usedTime, msgSize);
+ uInfo("it takes %.3f mseconds to send %d requests to server", usedTime, numOfReqs * appThreads);
+ uInfo("Performance: %.3f requests per second, msgSize:%d bytes", 1000.0 * numOfReqs * appThreads / usedTime, msgSize);
taosCloseLog();
return 0;
}
-
-
diff --git a/src/sync/test/syncServer.c b/src/sync/test/syncServer.c
index b802905038..380b971fa8 100644
--- a/src/sync/test/syncServer.c
+++ b/src/sync/test/syncServer.c
@@ -24,28 +24,27 @@
#include "twal.h"
#include "tsync.h"
-int msgSize = 128;
-int commit = 0;
-int dataFd = -1;
-void *qhandle = NULL;
-int walNum = 0;
+int msgSize = 128;
+int commit = 0;
+int dataFd = -1;
+void * qhandle = NULL;
+int walNum = 0;
uint64_t tversion = 0;
-void *syncHandle;
-int role;
-int nodeId;
-char path[256];
-int numOfWrites ;
+void * syncHandle;
+int role;
+int nodeId;
+char path[256];
+int numOfWrites;
SSyncInfo syncInfo;
SSyncCfg *pCfg;
-int writeIntoWal(SWalHead *pHead)
-{
+int writeIntoWal(SWalHead *pHead) {
if (dataFd < 0) {
- char walName[280];
+ char walName[280];
snprintf(walName, sizeof(walName), "%s/wal/wal.%d", path, walNum);
- remove(walName);
- dataFd = open(walName, O_CREAT | O_WRONLY, S_IRWXU | S_IRWXG | S_IRWXO);
- if (dataFd < 0) {
+ (void)remove(walName);
+ dataFd = open(walName, O_CREAT | O_WRONLY, S_IRWXU | S_IRWXG | S_IRWXO);
+ if (dataFd < 0) {
uInfo("failed to open wal file:%s(%s)", walName, strerror(errno));
return -1;
} else {
@@ -67,66 +66,63 @@ int writeIntoWal(SWalHead *pHead)
dataFd = -1;
numOfWrites = 0;
}
-
+
return 0;
}
-void confirmForward(void *ahandle, void *mhandle, int32_t code)
-{
- SRpcMsg *pMsg = (SRpcMsg *)mhandle;
+void confirmForward(void *ahandle, void *mhandle, int32_t code) {
+ SRpcMsg * pMsg = (SRpcMsg *)mhandle;
SWalHead *pHead = (SWalHead *)(((char *)pMsg->pCont) - sizeof(SWalHead));
uDebug("ver:%" PRIu64 ", confirm is received", pHead->version);
rpcFreeCont(pMsg->pCont);
- SRpcMsg rpcMsg;
+ SRpcMsg rpcMsg = {0};
rpcMsg.pCont = rpcMallocCont(msgSize);
rpcMsg.contLen = msgSize;
rpcMsg.handle = pMsg->handle;
rpcMsg.code = code;
rpcSendResponse(&rpcMsg);
- taosFreeQitem(mhandle);
+ taosFreeQitem(mhandle);
}
int processRpcMsg(void *item) {
- SRpcMsg *pMsg = (SRpcMsg *)item;
- SWalHead *pHead = (SWalHead *)(((char *)pMsg->pCont) - sizeof(SWalHead));
- int code = -1;
+ SRpcMsg * pMsg = (SRpcMsg *)item;
+ SWalHead *pHead = (SWalHead *)(((char *)pMsg->pCont) - sizeof(SWalHead));
+ int code = -1;
if (role != TAOS_SYNC_ROLE_MASTER) {
uError("not master, write failed, role:%s", syncRole[role]);
} else {
-
pHead->version = ++tversion;
pHead->msgType = pMsg->msgType;
pHead->len = pMsg->contLen;
uDebug("ver:%" PRIu64 ", pkt from client processed", pHead->version);
- writeIntoWal(pHead);
+ writeIntoWal(pHead);
syncForwardToPeer(syncHandle, pHead, item, TAOS_QTYPE_RPC);
code = 0;
}
- if (pCfg->quorum <= 1) {
- taosFreeQitem(item);
+ if (pCfg->quorum <= 1) {
rpcFreeCont(pMsg->pCont);
- SRpcMsg rpcMsg;
+ SRpcMsg rpcMsg = {0};
rpcMsg.pCont = rpcMallocCont(msgSize);
rpcMsg.contLen = msgSize;
rpcMsg.handle = pMsg->handle;
rpcMsg.code = code;
rpcSendResponse(&rpcMsg);
+ taosFreeQitem(item);
}
return code;
}
int processFwdMsg(void *item) {
-
SWalHead *pHead = (SWalHead *)item;
if (pHead->version <= tversion) {
@@ -142,11 +138,11 @@ int processFwdMsg(void *item) {
// write into cache
-/*
- if (pHead->handle) {
- syncSendFwdAck(syncHandle, pHead->handle, 0);
- }
-*/
+ /*
+ if (pHead->handle) {
+ syncSendFwdAck(syncHandle, pHead->handle, 0);
+ }
+ */
taosFreeQitem(item);
@@ -154,7 +150,6 @@ int processFwdMsg(void *item) {
}
int processWalMsg(void *item) {
-
SWalHead *pHead = (SWalHead *)item;
if (pHead->version <= tversion) {
@@ -168,11 +163,11 @@ int processWalMsg(void *item) {
// write into cache
-/*
- if (pHead->handle) {
- syncSendFwdAck(syncHandle, pHead->handle, 0);
- }
-*/
+ /*
+ if (pHead->handle) {
+ syncSendFwdAck(syncHandle, pHead->handle, 0);
+ }
+ */
taosFreeQitem(item);
@@ -180,15 +175,15 @@ int processWalMsg(void *item) {
}
void *processWriteQueue(void *param) {
- int type;
- void *item;
+ int type;
+ void *item;
while (1) {
int ret = taosReadQitem(qhandle, &type, &item);
if (ret <= 0) {
usleep(1000);
continue;
- }
+ }
if (type == TAOS_QTYPE_RPC) {
processRpcMsg(item);
@@ -196,8 +191,7 @@ void *processWriteQueue(void *param) {
processWalMsg(item);
} else if (type == TAOS_QTYPE_FWD) {
processFwdMsg(item);
- }
-
+ }
}
return NULL;
@@ -224,21 +218,19 @@ int retrieveAuthInfo(char *meterId, char *spi, char *encrypt, char *secret, char
}
void processRequestMsg(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
-
SRpcMsg *pTemp;
pTemp = taosAllocateQitem(sizeof(SRpcMsg));
memcpy(pTemp, pMsg, sizeof(SRpcMsg));
-
+
uDebug("request is received, type:%d, len:%d", pMsg->msgType, pMsg->contLen);
- taosWriteQitem(qhandle, TAOS_QTYPE_RPC, pTemp);
+ taosWriteQitem(qhandle, TAOS_QTYPE_RPC, pTemp);
}
-uint32_t getFileInfo(void *ahandle, char *name, uint32_t *index, uint32_t eindex, int64_t *size, uint64_t *fversion)
-{
- uint32_t magic;
- struct stat fstat;
- char aname[280];
+uint32_t getFileInfo(void *ahandle, char *name, uint32_t *index, uint32_t eindex, int64_t *size, uint64_t *fversion) {
+ uint32_t magic;
+ struct stat fstat;
+ char aname[280];
if (*index == 2) {
uInfo("wait for a while .....");
@@ -246,15 +238,15 @@ uint32_t getFileInfo(void *ahandle, char *name, uint32_t *index, uint32_t eindex
}
if (name[0] == 0) {
- // find the file
+ // find the file
snprintf(aname, sizeof(aname), "%s/data/data.%d", path, *index);
- sprintf(name, "data/data.%d", *index);
+ sprintf(name, "data/data.%d", *index);
} else {
snprintf(aname, sizeof(aname), "%s/%s", path, name);
}
uInfo("get file info:%s", aname);
- if ( stat(aname, &fstat) < 0 ) return 0;
+ if (stat(aname, &fstat) < 0) return 0;
*size = fstat.st_size;
magic = fstat.st_size;
@@ -262,24 +254,22 @@ uint32_t getFileInfo(void *ahandle, char *name, uint32_t *index, uint32_t eindex
return magic;
}
-int getWalInfo(void *ahandle, char *name, uint32_t *index) {
-
- struct stat fstat;
- char aname[280];
+int getWalInfo(void *ahandle, char *name, uint32_t *index) {
+ struct stat fstat;
+ char aname[280];
name[0] = 0;
- if (*index + 1> walNum) return 0;
+ if (*index + 1 > walNum) return 0;
snprintf(aname, sizeof(aname), "%s/wal/wal.%d", path, *index);
- sprintf(name, "wal/wal.%d", *index);
+ sprintf(name, "wal/wal.%d", *index);
uInfo("get wal info:%s", aname);
- if ( stat(aname, &fstat) < 0 ) return -1;
+ if (stat(aname, &fstat) < 0) return -1;
- if (*index >= walNum-1) return 0; // no more
+ if (*index >= walNum - 1) return 0; // no more
return 1;
-
}
int writeToCache(void *ahandle, void *data, int type) {
@@ -290,24 +280,19 @@ int writeToCache(void *ahandle, void *data, int type) {
int msgSize = pHead->len + sizeof(SWalHead);
void *pMsg = taosAllocateQitem(msgSize);
memcpy(pMsg, pHead, msgSize);
- taosWriteQitem(qhandle, type, pMsg);
+ taosWriteQitem(qhandle, type, pMsg);
return 0;
}
-void confirmFwd(void *ahandle, int64_t version) {
-
- return;
-}
+void confirmFwd(void *ahandle, int64_t version) { return; }
void notifyRole(void *ahandle, int8_t r) {
role = r;
printf("current role:%s\n", syncRole[role]);
}
-
void initSync() {
-
pCfg->replica = 1;
pCfg->quorum = 1;
syncInfo.vgId = 1;
@@ -339,20 +324,18 @@ void initSync() {
taosGetFqdn(pCfg->nodeInfo[4].nodeFqdn);
}
-void doSync()
-{
- for (int i=0; i<5; ++i) {
- if (tsSyncPort == pCfg->nodeInfo[i].nodePort)
- nodeId = pCfg->nodeInfo[i].nodeId;
+void doSync() {
+ for (int i = 0; i < 5; ++i) {
+ if (tsSyncPort == pCfg->nodeInfo[i].nodePort) nodeId = pCfg->nodeInfo[i].nodeId;
}
snprintf(path, sizeof(path), "/root/test/d%d", nodeId);
- strcpy(syncInfo.path, path);
+ tstrncpy(syncInfo.path, path, sizeof(syncInfo.path));
- if ( syncHandle == NULL) {
- syncHandle = syncStart(&syncInfo);
+ if (syncHandle == NULL) {
+ syncHandle = syncStart(&syncInfo);
} else {
- if (syncReconfig(syncHandle, pCfg) < 0) syncHandle = NULL;
+ if (syncReconfig(syncHandle, pCfg) < 0) syncHandle = NULL;
}
uInfo("nodeId:%d path:%s syncPort:%d", nodeId, path, tsSyncPort);
@@ -361,39 +344,39 @@ void doSync()
int main(int argc, char *argv[]) {
SRpcInit rpcInit;
char dataName[20] = "server.data";
- pCfg = &syncInfo.syncCfg;
+ pCfg = &syncInfo.syncCfg;
initSync();
memset(&rpcInit, 0, sizeof(rpcInit));
- rpcInit.localPort = 7000;
- rpcInit.label = "SER";
+ rpcInit.localPort = 7000;
+ rpcInit.label = "SER";
rpcInit.numOfThreads = 1;
- rpcInit.cfp = processRequestMsg;
- rpcInit.sessions = 1000;
- rpcInit.idleTime = tsShellActivityTimer*1500;
- rpcInit.afp = retrieveAuthInfo;
+ rpcInit.cfp = processRequestMsg;
+ rpcInit.sessions = 1000;
+ rpcInit.idleTime = tsShellActivityTimer * 1500;
+ rpcInit.afp = retrieveAuthInfo;
- for (int i=1; ireplica = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-q")==0 && i < argc-1) {
+ } else if (strcmp(argv[i], "-q") == 0 && i < argc - 1) {
pCfg->quorum = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-d")==0 && i < argc-1) {
+ } else if (strcmp(argv[i], "-d") == 0 && i < argc - 1) {
rpcDebugFlag = atoi(argv[++i]);
} else {
printf("\nusage: %s [options] \n", argv[0]);
@@ -403,7 +386,7 @@ int main(int argc, char *argv[]) {
printf(" [-m msgSize]: message body size, default is:%d\n", msgSize);
printf(" [-o compSize]: compression message size, default is:%d\n", tsCompressMsgSize);
printf(" [-w write]: write received data to file(0, 1, 2), default is:%d\n", commit);
- printf(" [-v version]: initial node version, default is:%ld\n", syncInfo.version);
+ printf(" [-v version]: initial node version, default is:%" PRId64 "\n", syncInfo.version);
printf(" [-r replica]: replicacation number, default is:%d\n", pCfg->replica);
printf(" [-q quorum]: quorum, default is:%d\n", pCfg->quorum);
printf(" [-d debugFlag]: debug flag, default:%d\n", rpcDebugFlag);
@@ -411,10 +394,10 @@ int main(int argc, char *argv[]) {
exit(0);
}
}
-
+
uDebugFlag = rpcDebugFlag;
- dDebugFlag = rpcDebugFlag;
- //tmrDebugFlag = rpcDebugFlag;
+ dDebugFlag = rpcDebugFlag;
+ // tmrDebugFlag = rpcDebugFlag;
tsAsyncLog = 0;
taosInitLog("server.log", 1000000, 10);
@@ -443,35 +426,39 @@ int main(int argc, char *argv[]) {
SNodesRole nroles;
while (1) {
- char c = getchar();
+ int c = getchar();
- switch(c) {
+ switch (c) {
case '1':
- pCfg->replica = 1; doSync();
- break;
+ pCfg->replica = 1;
+ doSync();
+ break;
case '2':
- pCfg->replica = 2; doSync();
+ pCfg->replica = 2;
+ doSync();
break;
case '3':
- pCfg->replica = 3; doSync();
+ pCfg->replica = 3;
+ doSync();
break;
case '4':
- pCfg->replica = 4; doSync();
+ pCfg->replica = 4;
+ doSync();
break;
case '5':
- pCfg->replica = 5; doSync();
+ pCfg->replica = 5;
+ doSync();
break;
case 's':
syncGetNodesRole(syncHandle, &nroles);
- for (int i=0; ireplica; ++i)
+ for (int i = 0; i < pCfg->replica; ++i)
printf("=== nodeId:%d role:%s\n", nroles.nodeId[i], syncRole[nroles.role[i]]);
break;
default:
break;
}
- if (c=='q') break;
-
+ if (c == 'q') break;
}
syncStop(syncHandle);
@@ -483,5 +470,3 @@ int main(int argc, char *argv[]) {
return 0;
}
-
-
diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c
index f6a7f1b35c..4cf8ddd4bd 100644
--- a/src/tsdb/src/tsdbMemTable.c
+++ b/src/tsdb/src/tsdbMemTable.c
@@ -262,7 +262,9 @@ int tsdbAsyncCommit(STsdbRepo *pRepo) {
if (pIMem != NULL) {
ASSERT(pRepo->commit);
+ tsdbDebug("vgId:%d waiting for the commit thread", REPO_ID(pRepo));
code = pthread_join(pRepo->commitThread, NULL);
+ tsdbDebug("vgId:%d commit thread is finished", REPO_ID(pRepo));
if (code != 0) {
tsdbError("vgId:%d failed to thread join since %s", REPO_ID(pRepo), strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c
index a84bb69777..564d7f5db5 100644
--- a/src/tsdb/src/tsdbMeta.c
+++ b/src/tsdb/src/tsdbMeta.c
@@ -27,7 +27,8 @@ static int tsdbCompareSchemaVersion(const void *key1, const void *key2);
static int tsdbRestoreTable(void *pHandle, void *cont, int contLen);
static void tsdbOrgMeta(void *pHandle);
static char * getTagIndexKey(const void *pData);
-static STable *tsdbNewTable(STableCfg *pCfg, bool isSuper);
+static STable *tsdbNewTable();
+static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper);
static void tsdbFreeTable(STable *pTable);
static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, bool lock);
static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFromIdx, bool lock);
@@ -92,7 +93,7 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
super = tsdbGetTableByUid(pMeta, pCfg->superUid);
if (super == NULL) { // super table not exists, try to create it
newSuper = 1;
- super = tsdbNewTable(pCfg, true);
+ super = tsdbCreateTableFromCfg(pCfg, true);
if (super == NULL) goto _err;
} else {
if (TABLE_TYPE(super) != TSDB_SUPER_TABLE || TABLE_UID(super) != pCfg->superUid) {
@@ -102,7 +103,7 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
}
}
- table = tsdbNewTable(pCfg, false);
+ table = tsdbCreateTableFromCfg(pCfg, false);
if (table == NULL) goto _err;
// Register to meta
@@ -654,15 +655,24 @@ static char *getTagIndexKey(const void *pData) {
return res;
}
-static STable *tsdbNewTable(STableCfg *pCfg, bool isSuper) {
+static STable *tsdbNewTable() {
+ STable *pTable = (STable *)calloc(1, sizeof(*pTable));
+ if (pTable == NULL) {
+ terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
+ return NULL;
+ }
+
+ pTable->lastKey = TSKEY_INITIAL_VAL;
+
+ return pTable;
+}
+
+static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper) {
STable *pTable = NULL;
size_t tsize = 0;
- pTable = (STable *)calloc(1, sizeof(STable));
- if (pTable == NULL) {
- terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
- goto _err;
- }
+ pTable = tsdbNewTable();
+ if (pTable == NULL) goto _err;
if (isSuper) {
pTable->type = TSDB_SUPER_TABLE;
@@ -731,8 +741,6 @@ static STable *tsdbNewTable(STableCfg *pCfg, bool isSuper) {
}
}
}
-
- pTable->lastKey = TSKEY_INITIAL_VAL;
}
T_REF_INC(pTable);
@@ -1139,11 +1147,9 @@ static int tsdbEncodeTable(void **buf, STable *pTable) {
}
static void *tsdbDecodeTable(void *buf, STable **pRTable) {
- STable *pTable = (STable *)calloc(1, sizeof(STable));
- if (pTable == NULL) {
- terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
- return NULL;
- }
+ STable *pTable = tsdbNewTable();
+ if (pTable == NULL) return NULL;
+
uint8_t type = 0;
buf = taosDecodeFixedU8(buf, &type);
diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c
index bb0f00ef53..357093bd9e 100644
--- a/src/tsdb/src/tsdbRWHelper.c
+++ b/src/tsdb/src/tsdbRWHelper.c
@@ -1348,7 +1348,7 @@ static int tsdbLoadBlockDataImpl(SRWHelper *pHelper, SCompBlock *pCompBlock, SDa
int dcol = 0; // loop iter for SDataCols object
while (dcol < pDataCols->numOfCols) {
SDataCol *pDataCol = &(pDataCols->cols[dcol]);
- if (ccol >= pCompData->numOfCols) {
+ if (dcol != 0 && ccol >= pCompData->numOfCols) {
// Set current column as NULL and forward
dataColSetNEleNull(pDataCol, pCompBlock->numOfRows, pDataCols->maxPoints);
dcol++;
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index d829a85754..d3f4747a96 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -248,6 +248,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
STsdbMeta* pMeta = tsdbGetMeta(tsdb);
assert(pMeta != NULL && sizeOfGroup >= 1 && pCond != NULL && pCond->numOfCols > 0);
+ // todo apply the lastkey of table check to avoid to load header file
for (int32_t i = 0; i < sizeOfGroup; ++i) {
SArray* group = *(SArray**) taosArrayGet(groupList->pGroupList, i);
@@ -388,9 +389,9 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
SDataRow row = *(SDataRow *)SL_GET_NODE_DATA(node);
TSKEY key = dataRowKey(row); // first timestamp in buffer
tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
- "-%" PRId64 ", lastKey:%" PRId64 ", %p",
+ "-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", %p",
pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pMem->keyFirst, pMem->keyLast,
- pCheckInfo->lastKey, pHandle->qinfo);
+ pCheckInfo->lastKey, pMem->numOfRows, pHandle->qinfo);
if (ASCENDING_TRAVERSE(order)) {
assert(pCheckInfo->lastKey <= key);
@@ -410,9 +411,9 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
SDataRow row = *(SDataRow *)SL_GET_NODE_DATA(node);
TSKEY key = dataRowKey(row); // first timestamp in buffer
tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
- "-%" PRId64 ", lastKey:%" PRId64 ", %p",
+ "-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", %p",
pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pIMem->keyFirst, pIMem->keyLast,
- pCheckInfo->lastKey, pHandle->qinfo);
+ pCheckInfo->lastKey, pIMem->numOfRows, pHandle->qinfo);
if (ASCENDING_TRAVERSE(order)) {
assert(pCheckInfo->lastKey <= key);
@@ -696,22 +697,41 @@ static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* p
pCheckInfo->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock);
if (pCheckInfo->pDataCols == NULL) {
- tsdbError("%p failed to malloc buf, %p", pQueryHandle, pQueryHandle->qinfo);
+ tsdbError("%p failed to malloc buf for pDataCols, %p", pQueryHandle, pQueryHandle->qinfo);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
- return terrno;
+ goto _error;
}
}
STSchema* pSchema = tsdbGetTableSchema(pCheckInfo->pTableObj);
- tdInitDataCols(pCheckInfo->pDataCols, pSchema);
- tdInitDataCols(pQueryHandle->rhelper.pDataCols[0], pSchema);
- tdInitDataCols(pQueryHandle->rhelper.pDataCols[1], pSchema);
+ int32_t code = tdInitDataCols(pCheckInfo->pDataCols, pSchema);
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%p failed to malloc buf for pDataCols, %p", pQueryHandle, pQueryHandle->qinfo);
+ terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
+ goto _error;
+ }
+
+ code = tdInitDataCols(pQueryHandle->rhelper.pDataCols[0], pSchema);
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%p failed to malloc buf for rhelper.pDataCols[0], %p", pQueryHandle, pQueryHandle->qinfo);
+ terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
+ goto _error;
+ }
+
+ code = tdInitDataCols(pQueryHandle->rhelper.pDataCols[1], pSchema);
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%p failed to malloc buf for rhelper.pDataCols[1], %p", pQueryHandle, pQueryHandle->qinfo);
+ terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
+ goto _error;
+ }
int16_t* colIds = pQueryHandle->defaultLoadColumn->pData;
int32_t ret = tsdbLoadBlockDataCols(&(pQueryHandle->rhelper), pBlock, pCheckInfo->pCompInfo, colIds, (int)(QH_GET_NUM_OF_COLS(pQueryHandle)));
if (ret != TSDB_CODE_SUCCESS) {
- return terrno;
+ int32_t c = terrno;
+ assert(c != TSDB_CODE_SUCCESS);
+ goto _error;
}
SDataBlockLoadInfo* pBlockLoadInfo = &pQueryHandle->dataBlockLoadInfo;
@@ -728,12 +748,19 @@ static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* p
int64_t elapsedTime = (taosGetTimestampUs() - st);
pQueryHandle->cost.blockLoadTime += elapsedTime;
- tsdbDebug("%p load file block into buffer, index:%d, brange:%"PRId64"-%"PRId64" , rows:%d, elapsed time:%"PRId64 " us, %p",
+ tsdbDebug("%p load file block into buffer, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, elapsed time:%"PRId64 " us, %p",
pQueryHandle, slotIndex, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfRows, elapsedTime, pQueryHandle->qinfo);
-
return TSDB_CODE_SUCCESS;
+
+_error:
+ pBlock->numOfRows = 0;
+
+ tsdbError("%p error occurs in loading file block, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, %p",
+ pQueryHandle, slotIndex, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfRows, pQueryHandle->qinfo);
+ return terrno;
}
+static int32_t getEndPosInDataBlock(STsdbQueryHandle* pQueryHandle, SDataBlockInfo* pBlockInfo);
static int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, int32_t start, int32_t end);
static void moveDataToFront(STsdbQueryHandle* pQueryHandle, int32_t numOfRows, int32_t numOfCols);
static void doCheckGeneratedBlockRange(STsdbQueryHandle* pQueryHandle);
@@ -790,9 +817,10 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBloc
* Here the buffer is not enough, so only part of file block can be loaded into memory buffer
*/
assert(pQueryHandle->outputCapacity >= binfo.rows);
+ int32_t endPos = getEndPosInDataBlock(pQueryHandle, &binfo);
- if ((cur->pos == 0 && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
- (cur->pos == (binfo.rows - 1) && (!ASCENDING_TRAVERSE(pQueryHandle->order)))) {
+ if ((cur->pos == 0 && endPos == binfo.rows -1 && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
+ (cur->pos == (binfo.rows - 1) && endPos == 0 && (!ASCENDING_TRAVERSE(pQueryHandle->order)))) {
pQueryHandle->realNumOfRows = binfo.rows;
cur->rows = binfo.rows;
@@ -808,7 +836,6 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBloc
cur->pos = -1;
}
} else { // partially copy to dest buffer
- int32_t endPos = ASCENDING_TRAVERSE(pQueryHandle->order)? (binfo.rows - 1): 0;
copyAllRemainRowsFromFileBlock(pQueryHandle, pCheckInfo, &binfo, endPos);
cur->mixBlock = true;
}
@@ -1203,6 +1230,29 @@ static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STabl
cur->win.ekey, cur->rows, pQueryHandle->qinfo);
}
+int32_t getEndPosInDataBlock(STsdbQueryHandle* pQueryHandle, SDataBlockInfo* pBlockInfo) {
+ // NOTE: reverse the order to find the end position in data block
+ int32_t endPos = -1;
+ int32_t order = ASCENDING_TRAVERSE(pQueryHandle->order)? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
+
+ SQueryFilePos* cur = &pQueryHandle->cur;
+ SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0];
+
+ if (ASCENDING_TRAVERSE(pQueryHandle->order) && pQueryHandle->window.ekey >= pBlockInfo->window.ekey) {
+ endPos = pBlockInfo->rows - 1;
+ cur->mixBlock = (cur->pos != 0);
+ } else if (!ASCENDING_TRAVERSE(pQueryHandle->order) && pQueryHandle->window.ekey <= pBlockInfo->window.skey) {
+ endPos = 0;
+ cur->mixBlock = (cur->pos != pBlockInfo->rows - 1);
+ } else {
+ assert(pCols->numOfRows > 0);
+ endPos = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pQueryHandle->window.ekey, order);
+ cur->mixBlock = true;
+ }
+
+ return endPos;
+}
+
// only return the qualified data to client in terms of query time window, data rows in the same block but do not
// be included in the query time window will be discarded
static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock) {
@@ -1216,6 +1266,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
cur->pos >= 0 && cur->pos < pBlock->numOfRows);
TSKEY* tsArray = pCols->cols[0].pData;
+ assert(pCols->numOfRows == pBlock->numOfRows && tsArray[0] == pBlock->keyFirst && tsArray[pBlock->numOfRows-1] == pBlock->keyLast);
// for search the endPos, so the order needs to reverse
int32_t order = (pQueryHandle->order == TSDB_ORDER_ASC)? TSDB_ORDER_DESC:TSDB_ORDER_ASC;
@@ -1224,19 +1275,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pQueryHandle));
STable* pTable = pCheckInfo->pTableObj;
- int32_t endPos = cur->pos;
-
- if (ASCENDING_TRAVERSE(pQueryHandle->order) && pQueryHandle->window.ekey > blockInfo.window.ekey) {
- endPos = blockInfo.rows - 1;
- cur->mixBlock = (cur->pos != 0);
- } else if (!ASCENDING_TRAVERSE(pQueryHandle->order) && pQueryHandle->window.ekey < blockInfo.window.skey) {
- endPos = 0;
- cur->mixBlock = (cur->pos != blockInfo.rows - 1);
- } else {
- assert(pCols->numOfRows > 0);
- endPos = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pQueryHandle->window.ekey, order);
- cur->mixBlock = true;
- }
+ int32_t endPos = getEndPosInDataBlock(pQueryHandle, &blockInfo);
tsdbDebug("%p uid:%" PRIu64",tid:%d start merge data block, file block range:%"PRIu64"-%"PRIu64" rows:%d, start:%d,"
"end:%d, %p",
@@ -1338,8 +1377,8 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
}
cur->blockCompleted =
- (((pos >= endPos || cur->lastKey > pQueryHandle->window.ekey) && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
- ((pos <= endPos || cur->lastKey < pQueryHandle->window.ekey) && !ASCENDING_TRAVERSE(pQueryHandle->order)));
+ (((pos > endPos || cur->lastKey > pQueryHandle->window.ekey) && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
+ ((pos < endPos || cur->lastKey < pQueryHandle->window.ekey) && !ASCENDING_TRAVERSE(pQueryHandle->order)));
if (!ASCENDING_TRAVERSE(pQueryHandle->order)) {
SWAP(cur->win.skey, cur->win.ekey, TSKEY);
@@ -2071,13 +2110,19 @@ STimeWindow changeTableGroupByLastrow(STableGroupInfo *groupList) {
if (keyInfo.pTable != NULL) {
totalNumOfTable++;
taosArrayPush(pGroup, &keyInfo);
+ } else {
+ taosArrayDestroy(pGroup);
+
+ taosArrayRemove(groupList->pGroupList, j);
+ numOfGroups -= 1;
+ j -= 1;
}
}
// window does not being updated, so set the original
if (window.skey == INT64_MAX && window.ekey == INT64_MIN) {
window = TSWINDOW_INITIALIZER;
- assert(totalNumOfTable == 0);
+ assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == 0);
}
groupList->numOfTables = totalNumOfTable;
@@ -2398,6 +2443,14 @@ static bool indexedNodeFilterFp(const void* pNode, void* param) {
val = tdGetKVRowValOfCol(pTable->tagVal, pInfo->sch.colId);
}
+ if (pInfo->optr == TSDB_RELATION_ISNULL || pInfo->optr == TSDB_RELATION_NOTNULL) {
+ if (pInfo->optr == TSDB_RELATION_ISNULL) {
+ return (val == NULL) || isNull(val, pInfo->sch.type);
+ } else if (pInfo->optr == TSDB_RELATION_NOTNULL) {
+ return (val != NULL) && (!isNull(val, pInfo->sch.type));
+ }
+ }
+
int32_t ret = 0;
if (val == NULL) { //the val is possible to be null, so check it out carefully
ret = -1; // val is missing in table tags value pairs
@@ -2682,4 +2735,5 @@ void tsdbDestroyTableGroup(STableGroupInfo *pGroupList) {
}
taosArrayDestroy(pGroupList->pGroupList);
+ pGroupList->numOfTables = 0;
}
diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt
index e63a085cc8..89c8e3dc39 100644
--- a/src/util/CMakeLists.txt
+++ b/src/util/CMakeLists.txt
@@ -3,7 +3,7 @@ PROJECT(TDengine)
AUX_SOURCE_DIRECTORY(src SRC)
ADD_LIBRARY(tutil ${SRC})
-TARGET_LINK_LIBRARIES(tutil pthread osdetail lz4)
+TARGET_LINK_LIBRARIES(tutil pthread osdetail lz4 z)
IF (TD_LINUX)
TARGET_LINK_LIBRARIES(tutil m rt)
diff --git a/src/util/inc/tcache.h b/src/util/inc/tcache.h
index af5f30c7c3..efd51f90ce 100644
--- a/src/util/inc/tcache.h
+++ b/src/util/inc/tcache.h
@@ -24,6 +24,14 @@ extern "C" {
#include "tlockfree.h"
#include "hash.h"
+#if defined(_TD_ARM_32)
+ #define TSDB_CACHE_PTR_KEY TSDB_DATA_TYPE_INT
+ #define TSDB_CACHE_PTR_TYPE int32_t
+#else
+ #define TSDB_CACHE_PTR_KEY TSDB_DATA_TYPE_BIGINT
+ #define TSDB_CACHE_PTR_TYPE int64_t
+#endif
+
typedef void (*__cache_free_fn_t)(void*);
typedef struct SCacheStatis {
diff --git a/src/util/inc/tscompression.h b/src/util/inc/tscompression.h
index 37d1e7b590..cca6d6e250 100644
--- a/src/util/inc/tscompression.h
+++ b/src/util/inc/tscompression.h
@@ -26,7 +26,7 @@ extern "C" {
#define COMP_OVERFLOW_BYTES 2
#define BITS_PER_BYTE 8
// Masks
-#define INT64MASK(_x) ((1ul << _x) - 1)
+#define INT64MASK(_x) ((((uint64_t)1) << _x) - 1)
#define INT32MASK(_x) (((uint32_t)1 << _x) - 1)
#define INT8MASK(_x) (((uint8_t)1 << _x) - 1)
// Compression algorithm
diff --git a/src/util/inc/tskiplist.h b/src/util/inc/tskiplist.h
index 4ba620dce0..a14a856561 100644
--- a/src/util/inc/tskiplist.h
+++ b/src/util/inc/tskiplist.h
@@ -136,6 +136,7 @@ typedef struct SSkipListIterator {
SSkipListNode *cur;
int32_t step; // the number of nodes that have been checked already
int32_t order; // order of the iterator
+ SSkipListNode *next; // next points to the true qualified node in skip list
} SSkipListIterator;
/**
diff --git a/src/util/src/hash.c b/src/util/src/hash.c
index 7427a2e4f3..625d4af1ac 100644
--- a/src/util/src/hash.c
+++ b/src/util/src/hash.c
@@ -738,7 +738,7 @@ void taosHashTableResize(SHashObj *pHashObj) {
int64_t et = taosGetTimestampUs();
- uDebug("hash table resize completed, new capacity:%"PRId64", load factor:%f, elapsed time:%fms", pHashObj->capacity,
+ uDebug("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms", (int32_t)pHashObj->capacity,
((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0);
}
diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c
old mode 100755
new mode 100644
diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c
index ab489e2e46..2637699adb 100644
--- a/src/util/src/tcache.c
+++ b/src/util/src/tcache.c
@@ -71,7 +71,7 @@ static SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const
* @param pCacheObj Cache object
* @param pNode Cache slot object
*/
-static void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode);
+static void taosAddToTrashcan(SCacheObj *pCacheObj, SCacheDataNode *pNode);
/**
* remove nodes in trash with refCount == 0 in cache
@@ -80,7 +80,7 @@ static void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode);
* @param force force model, if true, remove data in trash without check refcount.
* may cause corruption. So, forece model only applys before cache is closed
*/
-static void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force);
+static void taosTrashcanEmpty(SCacheObj *pCacheObj, bool force);
/**
* release node
@@ -97,7 +97,7 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo
int32_t size = (int32_t)taosHashGetSize(pCacheObj->pHashTable);
assert(size > 0);
- uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, num:%d size:%" PRId64 "bytes",
+ uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, totalNum:%d size:%" PRId64 "bytes",
pCacheObj->name, pNode->key, pNode->data, pNode->size, size - 1, pCacheObj->totalSize);
if (pCacheObj->freeFp) {
@@ -165,7 +165,7 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext
return NULL;
}
- // set free cache node callback function for hash table
+ // set free cache node callback function
pCacheObj->freeFp = fn;
pCacheObj->refreshTime = refreshTimeInSeconds * 1000;
pCacheObj->extendLifespan = extendLifespan;
@@ -222,7 +222,7 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v
taosTFree(p);
} else {
- taosAddToTrash(pCacheObj, p);
+ taosAddToTrashcan(pCacheObj, p);
uDebug("cache:%s, key:%p, %p exist in cache, updated old:%p", pCacheObj->name, key, pNode1->data, p->data);
}
}
@@ -260,7 +260,12 @@ static void incRefFn(void* ptNode) {
}
void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen) {
- if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) == 0 || pCacheObj->deleting == 1) {
+ if (pCacheObj == NULL || pCacheObj->deleting == 1) {
+ return NULL;
+ }
+
+ if (taosHashGetSize(pCacheObj->pHashTable) == 0) {
+ atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1);
return NULL;
}
@@ -322,7 +327,12 @@ void *taosCacheTransfer(SCacheObj *pCacheObj, void **data) {
}
void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
- if (pCacheObj == NULL || (*data) == NULL || (taosHashGetSize(pCacheObj->pHashTable) + pCacheObj->numOfElemsInTrash == 0)) {
+ if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) + pCacheObj->numOfElemsInTrash == 0) {
+ return;
+ }
+
+ if ((*data) == NULL) {
+ uError("cache:%s, NULL data to release", pCacheObj->name);
return;
}
@@ -394,19 +404,19 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
"others already", pCacheObj->name, pNode->key, p->data, T_REF_VAL_GET(p), pNode->data);
assert(p->pTNodeHeader == NULL);
- taosAddToTrash(pCacheObj, p);
+ taosAddToTrashcan(pCacheObj, p);
} else {
uDebug("cache:%s, key:%p, %p successfully removed from hash table, refcnt:%d", pCacheObj->name, pNode->key,
pNode->data, ref);
if (ref > 0) {
assert(pNode->pTNodeHeader == NULL);
- taosAddToTrash(pCacheObj, pNode);
+ taosAddToTrashcan(pCacheObj, pNode);
} else { // ref == 0
atomic_sub_fetch_64(&pCacheObj->totalSize, pNode->size);
int32_t size = (int32_t)taosHashGetSize(pCacheObj->pHashTable);
- uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, num:%d size:%" PRId64 "bytes",
+ uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, totalNum:%d size:%" PRId64 "bytes",
pCacheObj->name, pNode->key, pNode->data, pNode->size, size, pCacheObj->totalSize);
if (pCacheObj->freeFp) {
@@ -427,6 +437,26 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
char* key = pNode->key;
char* p = pNode->data;
+// int32_t ref = T_REF_VAL_GET(pNode);
+//
+// if (ref == 1 && inTrashcan) {
+// // If it is the last ref, remove it from trashcan linked-list first, and then destroy it.Otherwise, it may be
+// // destroyed by refresh worker if decrease ref count before removing it from linked-list.
+// assert(pNode->pTNodeHeader->pData == pNode);
+//
+// __cache_wr_lock(pCacheObj);
+// doRemoveElemInTrashcan(pCacheObj, pNode->pTNodeHeader);
+// __cache_unlock(pCacheObj);
+//
+// ref = T_REF_DEC(pNode);
+// assert(ref == 0);
+//
+// doDestroyTrashcanElem(pCacheObj, pNode->pTNodeHeader);
+// } else {
+// ref = T_REF_DEC(pNode);
+// assert(ref >= 0);
+// }
+
int32_t ref = T_REF_DEC(pNode);
uDebug("cache:%s, key:%p, %p released, refcnt:%d, data in trashcan:%d", pCacheObj->name, key, p, ref, inTrashcan);
}
@@ -447,7 +477,7 @@ static bool travHashTableEmptyFn(void* param, void* data) {
if (T_REF_VAL_GET(pNode) == 0) {
taosCacheReleaseNode(pCacheObj, pNode);
} else { // do add to trashcan
- taosAddToTrash(pCacheObj, pNode);
+ taosAddToTrashcan(pCacheObj, pNode);
}
// this node should be remove from hash table
@@ -458,7 +488,7 @@ void taosCacheEmpty(SCacheObj *pCacheObj) {
SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = NULL, .time = taosGetTimestampMs()};
taosHashCondTraverse(pCacheObj->pHashTable, travHashTableEmptyFn, &sup);
- taosTrashCanEmpty(pCacheObj, false);
+ taosTrashcanEmpty(pCacheObj, false);
}
void taosCacheCleanup(SCacheObj *pCacheObj) {
@@ -498,7 +528,7 @@ SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *
return pNewNode;
}
-void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
+void taosAddToTrashcan(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
if (pNode->inTrashcan) { /* node is already in trash */
assert(pNode->pTNodeHeader != NULL && pNode->pTNodeHeader->pData == pNode);
return;
@@ -520,11 +550,11 @@ void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
pCacheObj->numOfElemsInTrash++;
__cache_unlock(pCacheObj);
- uDebug("cache:%s key:%p, %p move to trash, numOfElem in trash:%d", pCacheObj->name, pNode->key, pNode->data,
+ uDebug("cache:%s key:%p, %p move to trashcan, numOfElem in trashcan:%d", pCacheObj->name, pNode->key, pNode->data,
pCacheObj->numOfElemsInTrash);
}
-void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) {
+void taosTrashcanEmpty(SCacheObj *pCacheObj, bool force) {
__cache_wr_lock(pCacheObj);
if (pCacheObj->numOfElemsInTrash == 0) {
@@ -568,7 +598,7 @@ void doCleanupDataCache(SCacheObj *pCacheObj) {
// todo memory leak if there are object with refcount greater than 0 in hash table?
taosHashCleanup(pCacheObj->pHashTable);
- taosTrashCanEmpty(pCacheObj, true);
+ taosTrashcanEmpty(pCacheObj, true);
__cache_lock_destroy(pCacheObj);
@@ -643,7 +673,7 @@ void* taosCacheTimedRefresh(void *handle) {
doCacheRefresh(pCacheObj, now, NULL);
}
- taosTrashCanEmpty(pCacheObj, false);
+ taosTrashcanEmpty(pCacheObj, false);
}
return NULL;
diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c
index a8587de767..09b0933fd6 100644
--- a/src/util/src/tlog.c
+++ b/src/util/src/tlog.c
@@ -139,14 +139,22 @@ static void taosUnLockFile(int32_t fd) {
}
static void taosKeepOldLog(char *oldName) {
- if (tsLogKeepDays <= 0) return;
+ if (tsLogKeepDays == 0) return;
int64_t fileSec = taosGetTimestampSec();
char fileName[LOG_FILE_NAME_LEN + 20];
snprintf(fileName, LOG_FILE_NAME_LEN + 20, "%s.%" PRId64, tsLogObj.logName, fileSec);
taosRename(oldName, fileName);
- taosRemoveOldLogFiles(tsLogDir, tsLogKeepDays);
+ if (tsLogKeepDays < 0) {
+ char compressFileName[LOG_FILE_NAME_LEN + 20];
+ snprintf(compressFileName, LOG_FILE_NAME_LEN + 20, "%s.%" PRId64 ".gz", tsLogObj.logName, fileSec);
+ if (taosCompressFile(fileName, compressFileName) == 0) {
+ (void)remove(fileName);
+ }
+ }
+
+ taosRemoveOldLogFiles(tsLogDir, ABS(tsLogKeepDays));
}
static void *taosThreadToOpenNewFile(void *param) {
@@ -433,7 +441,7 @@ void taosPrintLongString(const char *flags, int32_t dflag, const char *format, .
va_list argpointer;
char buffer[MAX_LOGLINE_DUMP_BUFFER_SIZE];
- int32_t len;
+ int32_t len;
struct tm Tm, *ptm;
struct timeval timeSecs;
time_t curTime;
diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c
index 303c2440bf..bacdaef6c8 100644
--- a/src/util/src/tskiplist.c
+++ b/src/util/src/tskiplist.c
@@ -79,9 +79,12 @@ static SSkipListIterator* doCreateSkipListIterator(SSkipList *pSkipList, int32_t
// when order is TSDB_ORDER_ASC, return the last node with key less than val
// when order is TSDB_ORDER_DESC, return the first node with key large than val
-static SSkipListNode* getPriorNode(SSkipList* pSkipList, const char* val, int32_t order) {
+static SSkipListNode* getPriorNode(SSkipList* pSkipList, const char* val, int32_t order, SSkipListNode** pCur) {
__compar_fn_t comparFn = pSkipList->comparFn;
SSkipListNode *pNode = NULL;
+ if (pCur != NULL) {
+ *pCur = NULL;
+ }
if (order == TSDB_ORDER_ASC) {
pNode = pSkipList->pHead;
@@ -93,6 +96,9 @@ static SSkipListNode* getPriorNode(SSkipList* pSkipList, const char* val, int32_
pNode = p;
p = SL_GET_FORWARD_POINTER(p, i);
} else {
+ if (pCur != NULL) {
+ *pCur = p;
+ }
break;
}
}
@@ -107,6 +113,9 @@ static SSkipListNode* getPriorNode(SSkipList* pSkipList, const char* val, int32_
pNode = p;
p = SL_GET_BACKWARD_POINTER(p, i);
} else {
+ if (pCur != NULL) {
+ *pCur = p;
+ }
break;
}
}
@@ -295,7 +304,7 @@ SArray* tSkipListGet(SSkipList *pSkipList, SSkipListKey key) {
pthread_rwlock_wrlock(pSkipList->lock);
}
- SSkipListNode* pNode = getPriorNode(pSkipList, key, TSDB_ORDER_ASC);
+ SSkipListNode* pNode = getPriorNode(pSkipList, key, TSDB_ORDER_ASC, NULL);
while (1) {
SSkipListNode *p = SL_GET_FORWARD_POINTER(pNode, 0);
if (p == pSkipList->pTail) {
@@ -452,7 +461,7 @@ uint32_t tSkipListRemove(SSkipList *pSkipList, SSkipListKey key) {
pthread_rwlock_wrlock(pSkipList->lock);
}
- SSkipListNode* pNode = getPriorNode(pSkipList, key, TSDB_ORDER_ASC);
+ SSkipListNode* pNode = getPriorNode(pSkipList, key, TSDB_ORDER_ASC, NULL);
while (1) {
SSkipListNode *p = SL_GET_FORWARD_POINTER(pNode, 0);
if (p == pSkipList->pTail) {
@@ -545,7 +554,7 @@ SSkipListIterator *tSkipListCreateIterFromVal(SSkipList* pSkipList, const char*
pthread_rwlock_rdlock(pSkipList->lock);
}
- iter->cur = getPriorNode(pSkipList, val, order);
+ iter->cur = getPriorNode(pSkipList, val, order, &iter->next);
if (pSkipList->lock) {
pthread_rwlock_unlock(pSkipList->lock);
@@ -567,8 +576,22 @@ bool tSkipListIterNext(SSkipListIterator *iter) {
if (iter->order == TSDB_ORDER_ASC) { // ascending order iterate
iter->cur = SL_GET_FORWARD_POINTER(iter->cur, 0);
+
+ // a new node is inserted into between iter->cur and iter->next, ignore it
+ if (iter->cur != iter->next && (iter->next != NULL)) {
+ iter->cur = iter->next;
+ }
+
+ iter->next = SL_GET_FORWARD_POINTER(iter->cur, 0);
} else { // descending order iterate
iter->cur = SL_GET_BACKWARD_POINTER(iter->cur, 0);
+
+ // a new node is inserted into between iter->cur and iter->next, ignore it
+ if (iter->cur != iter->next && (iter->next != NULL)) {
+ iter->cur = iter->next;
+ }
+
+ iter->next = SL_GET_BACKWARD_POINTER(iter->cur, 0);
}
if (pSkipList->lock) {
@@ -715,9 +738,11 @@ SSkipListIterator* doCreateSkipListIterator(SSkipList *pSkipList, int32_t order)
iter->order = order;
if(order == TSDB_ORDER_ASC) {
iter->cur = pSkipList->pHead;
+ iter->next = SL_GET_FORWARD_POINTER(iter->cur, 0);
} else {
iter->cur = pSkipList->pTail;
+ iter->next = SL_GET_BACKWARD_POINTER(iter->cur, 0);
}
-
+
return iter;
}
\ No newline at end of file
diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c
index 61896a86df..4cf73e6dff 100644
--- a/src/util/src/tsocket.c
+++ b/src/util/src/tsocket.c
@@ -16,7 +16,7 @@
#include "os.h"
#include "tulog.h"
#include "tsocket.h"
-#include "tutil.h"
+#include "taoserror.h"
int taosGetFqdn(char *fqdn) {
char hostname[1024];
@@ -56,7 +56,16 @@ uint32_t taosGetIpFromFqdn(const char *fqdn) {
freeaddrinfo(result);
return ip;
} else {
- uError("failed get the ip address, fqdn:%s, code:%d, reason:%s", fqdn, ret, gai_strerror(ret));
+#ifdef EAI_SYSTEM
+ if (ret == EAI_SYSTEM) {
+ uError("failed to get the ip address, fqdn:%s, code:%d, reason:%s", fqdn, ret, strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ } else {
+ uError("failed to get the ip address, fqdn:%s, code:%d, reason:%s", fqdn, ret, gai_strerror(ret));
+ }
+#else
+ uError("failed to get the ip address, fqdn:%s, code:%d, reason:%s", fqdn, ret, gai_strerror(ret));
+#endif
return 0xFFFFFFFF;
}
}
diff --git a/src/util/tests/cacheTest.cpp b/src/util/tests/cacheTest.cpp
index 51221e0b35..0a4791f6a9 100644
--- a/src/util/tests/cacheTest.cpp
+++ b/src/util/tests/cacheTest.cpp
@@ -12,65 +12,65 @@ int32_t tsMaxMeterConnections = 200;
// test cache
TEST(testCase, client_cache_test) {
const int32_t REFRESH_TIME_IN_SEC = 2;
- SCacheObj* tscCacheHandle = taosCacheInit(TSDB_DATA_TYPE_BINARY, REFRESH_TIME_IN_SEC, 0, NULL, "test");
+ SCacheObj* tscMetaCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, REFRESH_TIME_IN_SEC, 0, NULL, "test");
const char* key1 = "test1";
char data1[] = "test11";
- char* cachedObj = (char*) taosCachePut(tscCacheHandle, key1, strlen(key1), data1, strlen(data1)+1, 1);
+ char* cachedObj = (char*) taosCachePut(tscMetaCache, key1, strlen(key1), data1, strlen(data1)+1, 1);
sleep(REFRESH_TIME_IN_SEC+1);
printf("obj is still valid: %s\n", cachedObj);
char data2[] = "test22";
- taosCacheRelease(tscCacheHandle, (void**) &cachedObj, false);
+ taosCacheRelease(tscMetaCache, (void**) &cachedObj, false);
/* the object is cleared by cache clean operation */
- cachedObj = (char*) taosCachePut(tscCacheHandle, key1, strlen(key1), data2, strlen(data2)+1, 20);
+ cachedObj = (char*) taosCachePut(tscMetaCache, key1, strlen(key1), data2, strlen(data2)+1, 20);
printf("after updated: %s\n", cachedObj);
printf("start to remove data from cache\n");
- taosCacheRelease(tscCacheHandle, (void**) &cachedObj, false);
+ taosCacheRelease(tscMetaCache, (void**) &cachedObj, false);
printf("end of removing data from cache\n");
const char* key3 = "test2";
const char* data3 = "kkkkkkk";
- char* cachedObj2 = (char*) taosCachePut(tscCacheHandle, key3, strlen(key3), data3, strlen(data3) + 1, 1);
+ char* cachedObj2 = (char*) taosCachePut(tscMetaCache, key3, strlen(key3), data3, strlen(data3) + 1, 1);
printf("%s\n", cachedObj2);
- taosCacheRelease(tscCacheHandle, (void**) &cachedObj2, false);
+ taosCacheRelease(tscMetaCache, (void**) &cachedObj2, false);
sleep(3);
- char* d = (char*) taosCacheAcquireByKey(tscCacheHandle, key3, strlen(key3));
+ char* d = (char*) taosCacheAcquireByKey(tscMetaCache, key3, strlen(key3));
// assert(d == NULL);
char key5[] = "test5";
char data5[] = "data5kkkkk";
- cachedObj2 = (char*) taosCachePut(tscCacheHandle, key5, strlen(key5), data5, strlen(data5) + 1, 20);
+ cachedObj2 = (char*) taosCachePut(tscMetaCache, key5, strlen(key5), data5, strlen(data5) + 1, 20);
const char* data6= "new Data after updated";
- taosCacheRelease(tscCacheHandle, (void**) &cachedObj2, false);
+ taosCacheRelease(tscMetaCache, (void**) &cachedObj2, false);
- cachedObj2 = (char*) taosCachePut(tscCacheHandle, key5, strlen(key5), data6, strlen(data6) + 1, 20);
+ cachedObj2 = (char*) taosCachePut(tscMetaCache, key5, strlen(key5), data6, strlen(data6) + 1, 20);
printf("%s\n", cachedObj2);
- taosCacheRelease(tscCacheHandle, (void**) &cachedObj2, true);
+ taosCacheRelease(tscMetaCache, (void**) &cachedObj2, true);
const char* data7 = "add call update procedure";
- cachedObj2 = (char*) taosCachePut(tscCacheHandle, key5, strlen(key5), data7, strlen(data7) + 1, 20);
+ cachedObj2 = (char*) taosCachePut(tscMetaCache, key5, strlen(key5), data7, strlen(data7) + 1, 20);
printf("%s\n=======================================\n\n", cachedObj2);
- char* cc = (char*) taosCacheAcquireByKey(tscCacheHandle, key5, strlen(key5));
+ char* cc = (char*) taosCacheAcquireByKey(tscMetaCache, key5, strlen(key5));
- taosCacheRelease(tscCacheHandle, (void**) &cachedObj2, true);
- taosCacheRelease(tscCacheHandle, (void**) &cc, false);
+ taosCacheRelease(tscMetaCache, (void**) &cachedObj2, true);
+ taosCacheRelease(tscMetaCache, (void**) &cc, false);
const char* data8 = "ttft";
const char* key6 = "key6";
- char* ft = (char*) taosCachePut(tscCacheHandle, key6, strlen(key6), data8, strlen(data8), 20);
- taosCacheRelease(tscCacheHandle, (void**) &ft, false);
+ char* ft = (char*) taosCachePut(tscMetaCache, key6, strlen(key6), data8, strlen(data8), 20);
+ taosCacheRelease(tscMetaCache, (void**) &ft, false);
/**
* 140ns
@@ -78,14 +78,14 @@ TEST(testCase, client_cache_test) {
uint64_t startTime = taosGetTimestampUs();
printf("Cache Performance Test\nstart time:%" PRIu64 "\n", startTime);
for(int32_t i=0; i<1000; ++i) {
- char* dd = (char*) taosCacheAcquireByKey(tscCacheHandle, key6, strlen(key6));
+ char* dd = (char*) taosCacheAcquireByKey(tscMetaCache, key6, strlen(key6));
if (dd != NULL) {
// printf("get the data\n");
} else {
printf("data has been released\n");
}
- taosCacheRelease(tscCacheHandle, (void**) &dd, false);
+ taosCacheRelease(tscMetaCache, (void**) &dd, false);
}
uint64_t endTime = taosGetTimestampUs();
@@ -93,7 +93,7 @@ TEST(testCase, client_cache_test) {
printf("End of Test, %" PRIu64 "\nTotal Elapsed Time:%" PRIu64 " us.avg:%f us\n", endTime, el, el/1000.0);
- taosCacheCleanup(tscCacheHandle);
+ taosCacheCleanup(tscMetaCache);
}
TEST(testCase, cache_resize_test) {
diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c
index a4e88fb946..d89c383d6a 100644
--- a/src/vnode/src/vnodeMain.c
+++ b/src/vnode/src/vnodeMain.c
@@ -186,6 +186,12 @@ int32_t vnodeAlter(void *param, SMDCreateVnodeMsg *pVnodeCfg) {
return code;
}
+ code = walAlter(pVnode->wal, &pVnode->walCfg);
+ if (code != TSDB_CODE_SUCCESS) {
+ pVnode->status = TAOS_VN_STATUS_READY;
+ return code;
+ }
+
code = syncReconfig(pVnode->sync, &pVnode->syncCfg);
if (code != TSDB_CODE_SUCCESS) {
pVnode->status = TAOS_VN_STATUS_READY;
@@ -390,6 +396,7 @@ void vnodeRelease(void *pVnodeRaw) {
if (0 == tsEnableVnodeBak) {
vInfo("vgId:%d, vnode backup not enabled", pVnode->vgId);
} else {
+ taosRemoveDir(newDir);
taosRename(rootDir, newDir);
}
diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c
index 017eeaf426..58e97075ac 100644
--- a/src/vnode/src/vnodeRead.c
+++ b/src/vnode/src/vnodeRead.c
@@ -58,7 +58,8 @@ int32_t vnodeProcessRead(void *param, SReadMsg *pReadMsg) {
return TSDB_CODE_APP_NOT_READY;
// TODO: Later, let slave to support query
- if (pVnode->syncCfg.replica > 1 && pVnode->role != TAOS_SYNC_ROLE_MASTER) {
+ // if (pVnode->syncCfg.replica > 1 && pVnode->role != TAOS_SYNC_ROLE_MASTER) {
+ if (pVnode->role != TAOS_SYNC_ROLE_SLAVE && pVnode->role != TAOS_SYNC_ROLE_MASTER) {
vDebug("vgId:%d, msgType:%s not processed, replica:%d role:%d", pVnode->vgId, taosMsg[msgType], pVnode->syncCfg.replica, pVnode->role);
return TSDB_CODE_APP_NOT_READY;
}
diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c
index 1a9b05ed34..0c310439bb 100644
--- a/src/vnode/src/vnodeWrite.c
+++ b/src/vnode/src/vnodeWrite.c
@@ -130,8 +130,15 @@ static int32_t vnodeProcessCreateTableMsg(SVnodeObj *pVnode, void *pCont, SRspRe
int code = TSDB_CODE_SUCCESS;
STableCfg *pCfg = tsdbCreateTableCfgFromMsg((SMDCreateTableMsg *)pCont);
- if (pCfg == NULL) return terrno;
- if (tsdbCreateTable(pVnode->tsdb, pCfg) < 0) code = terrno;
+ if (pCfg == NULL) {
+ ASSERT(terrno != 0);
+ return terrno;
+ }
+
+ if (tsdbCreateTable(pVnode->tsdb, pCfg) < 0) {
+ code = terrno;
+ ASSERT(code != 0);
+ }
tsdbClearTableCfg(pCfg);
return code;
diff --git a/src/wal/src/walMain.c b/src/wal/src/walMain.c
index bebad69f32..1cdf34930b 100644
--- a/src/wal/src/walMain.c
+++ b/src/wal/src/walMain.c
@@ -69,6 +69,13 @@ static void walModuleInitFunc() {
wDebug("WAL module is initialized");
}
+static inline bool walNeedFsyncTimer(SWal *pWal) {
+ if (pWal->fsyncPeriod > 0 && pWal->level == TAOS_WAL_FSYNC) {
+ return true;
+ }
+ return false;
+}
+
void *walOpen(const char *path, const SWalCfg *pCfg) {
SWal *pWal = calloc(sizeof(SWal), 1);
if (pWal == NULL) {
@@ -95,7 +102,7 @@ void *walOpen(const char *path, const SWalCfg *pCfg) {
tstrncpy(pWal->path, path, sizeof(pWal->path));
pthread_mutex_init(&pWal->mutex, NULL);
- if (pWal->fsyncPeriod > 0 && pWal->level == TAOS_WAL_FSYNC) {
+ if (walNeedFsyncTimer(pWal)) {
pWal->timer = taosTmrStart(walProcessFsyncTimer, pWal->fsyncPeriod, pWal, walTmrCtrl);
if (pWal->timer == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
@@ -127,6 +134,37 @@ void *walOpen(const char *path, const SWalCfg *pCfg) {
return pWal;
}
+int walAlter(twalh wal, const SWalCfg *pCfg) {
+ SWal *pWal = wal;
+ if (pWal == NULL) {
+ return TSDB_CODE_WAL_APP_ERROR;
+ }
+
+ if (pWal->level == pCfg->walLevel && pWal->fsyncPeriod == pCfg->fsyncPeriod) {
+ wDebug("wal:%s, old walLevel:%d fsync:%d, new walLevel:%d fsync:%d not change", pWal->name, pWal->level,
+ pWal->fsyncPeriod, pCfg->walLevel, pCfg->fsyncPeriod);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ wInfo("wal:%s, change old walLevel:%d fsync:%d, new walLevel:%d fsync:%d", pWal->name, pWal->level, pWal->fsyncPeriod,
+ pCfg->walLevel, pCfg->fsyncPeriod);
+
+ pthread_mutex_lock(&pWal->mutex);
+ pWal->level = pCfg->walLevel;
+ pWal->fsyncPeriod = pCfg->fsyncPeriod;
+ if (walNeedFsyncTimer(pWal)) {
+ wInfo("wal:%s, reset fsync timer, walLevel:%d fsyncPeriod:%d", pWal->name, pWal->level, pWal->fsyncPeriod);
+ taosTmrReset(walProcessFsyncTimer, pWal->fsyncPeriod, pWal, &pWal->timer,walTmrCtrl);
+ } else {
+ wInfo("wal:%s, stop fsync timer, walLevel:%d fsyncPeriod:%d", pWal->name, pWal->level, pWal->fsyncPeriod);
+ taosTmrStop(pWal->timer);
+ pWal->timer = NULL;
+ }
+ pthread_mutex_unlock(&pWal->mutex);
+
+ return TSDB_CODE_SUCCESS;
+}
+
void walClose(void *handle) {
if (handle == NULL) return;
@@ -347,9 +385,10 @@ static void walRelease(SWal *pWal) {
static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) {
char *name = pWal->name;
+ int size = 1024 * 1024; // default 1M buffer size
terrno = 0;
- char *buffer = malloc(1024000); // size for one record
+ char *buffer = malloc(size);
if (buffer == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
return terrno;
@@ -357,7 +396,7 @@ static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) {
SWalHead *pHead = (SWalHead *)buffer;
- int fd = open(name, O_RDONLY);
+ int fd = open(name, O_RDWR);
if (fd < 0) {
wError("wal:%s, failed to open for restore(%s)", name, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
@@ -367,29 +406,58 @@ static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) {
wDebug("wal:%s, start to restore", name);
+ size_t offset = 0;
while (1) {
int ret = taosTRead(fd, pHead, sizeof(SWalHead));
- if ( ret == 0) break;
+ if (ret == 0) break;
- if (ret != sizeof(SWalHead)) {
- wWarn("wal:%s, failed to read head, skip, ret:%d(%s)", name, ret, strerror(errno));
+ if (ret < 0) {
+ wError("wal:%s, failed to read wal head part since %s", name, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
break;
}
+ if (ret < sizeof(SWalHead)) {
+ wError("wal:%s, failed to read head, ret:%d, skip the rest of file", name, ret);
+ taosFtruncate(fd, offset);
+ fsync(fd);
+ break;
+ }
+
if (!taosCheckChecksumWhole((uint8_t *)pHead, sizeof(SWalHead))) {
wWarn("wal:%s, cksum is messed up, skip the rest of file", name);
- terrno = TAOS_SYSTEM_ERROR(errno);
+ terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
+ ASSERT(false);
break;
- }
+ }
+
+ if (pHead->len > size - sizeof(SWalHead)) {
+ size = sizeof(SWalHead) + pHead->len;
+ buffer = realloc(buffer, size);
+ if (buffer == NULL) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ break;
+ }
+
+ pHead = (SWalHead *)buffer;
+ }
ret = taosTRead(fd, pHead->cont, pHead->len);
- if ( ret != pHead->len) {
- wWarn("wal:%s, failed to read body, skip, len:%d ret:%d", name, pHead->len, ret);
+ if (ret < 0) {
+ wError("wal:%s failed to read wal body part since %s", name, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
break;
}
+ if (ret < pHead->len) {
+ wError("wal:%s, failed to read body, len:%d ret:%d, skip the rest of file", name, pHead->len, ret);
+ taosFtruncate(fd, offset);
+ fsync(fd);
+ break;
+ }
+
+ offset = offset + sizeof(SWalHead) + pHead->len;
+
if (pWal->keep) pWal->version = pHead->version;
(*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL);
}
@@ -484,6 +552,12 @@ static void walProcessFsyncTimer(void *param, void *tmrId) {
if (fsync(pWal->fd) < 0) {
wError("wal:%s, fsync failed(%s)", pWal->name, strerror(errno));
}
-
- pWal->timer = taosTmrStart(walProcessFsyncTimer, pWal->fsyncPeriod, pWal, walTmrCtrl);
+
+ if (walNeedFsyncTimer(pWal)) {
+ pWal->timer = taosTmrStart(walProcessFsyncTimer, pWal->fsyncPeriod, pWal, walTmrCtrl);
+ } else {
+ wInfo("wal:%s, stop fsync timer for walLevel:%d fsyncPeriod:%d", pWal->name, pWal->level, pWal->fsyncPeriod);
+ taosTmrStop(pWal->timer);
+ pWal->timer = NULL;
+ }
}
diff --git a/src/wal/test/waltest.c b/src/wal/test/waltest.c
index 073dbf72af..bbee1347b8 100644
--- a/src/wal/test/waltest.c
+++ b/src/wal/test/waltest.c
@@ -71,7 +71,7 @@ int main(int argc, char *argv[]) {
printf(" [-t total]: total wal files, default is:%d\n", total);
printf(" [-r rows]: rows of records per wal file, default is:%d\n", rows);
printf(" [-k keep]: keep the wal after closing, default is:%d\n", keep);
- printf(" [-v version]: initial version, default is:%ld\n", ver);
+ printf(" [-v version]: initial version, default is:%" PRId64 "\n", ver);
printf(" [-d debugFlag]: debug flag, default:%d\n", dDebugFlag);
printf(" [-h help]: print out this help\n\n");
exit(0);
@@ -97,7 +97,7 @@ int main(int argc, char *argv[]) {
exit(-1);
}
- printf("version starts from:%ld\n", ver);
+ printf("version starts from:%" PRId64 "\n", ver);
int contLen = sizeof(SWalHead) + size;
SWalHead *pHead = (SWalHead *) malloc(contLen);
diff --git a/tests/comparisonTest/tdengine/tdengineTest.c b/tests/comparisonTest/tdengine/tdengineTest.c
index 3d78a3d0a5..237403f525 100644
--- a/tests/comparisonTest/tdengine/tdengineTest.c
+++ b/tests/comparisonTest/tdengine/tdengineTest.c
@@ -278,7 +278,7 @@ void writeData() {
free(threads);
- printf("---- Spent %f seconds to insert %ld records, speed: %f Rows/Second\n", seconds, statis.totalRows, rs);
+ printf("---- Spent %f seconds to insert %" PRId64 " records, speed: %f Rows/Second\n", seconds, statis.totalRows, rs);
}
void readDataImp(void *param)
diff --git a/tests/examples/JDBC/JDBCDemo/.gitignore b/tests/examples/JDBC/JDBCDemo/.gitignore
new file mode 100644
index 0000000000..b79f223d17
--- /dev/null
+++ b/tests/examples/JDBC/JDBCDemo/.gitignore
@@ -0,0 +1,19 @@
+# custom
+/out/
+/logs/
+*.jar
+
+# Created by .ignore support plugin (hsz.mobi)
+.gitignore
+
+# Build Artifacts
+.gradle/*
+build/*
+target/*
+bin/*
+dependency-reduced-pom.xml
+
+# Eclipse Project Files
+.classpath
+.project
+.settings/*
diff --git a/tests/examples/JDBC/JDBCDemo/pom.xml b/tests/examples/JDBC/JDBCDemo/pom.xml
index f0234f2bd7..2113074674 100644
--- a/tests/examples/JDBC/JDBCDemo/pom.xml
+++ b/tests/examples/JDBC/JDBCDemo/pom.xml
@@ -9,21 +9,20 @@
1.0-SNAPSHOT
jar
-
-
-
- org.apache.maven.plugins
- maven-plugins
- 30
-
-
- org.apache.maven.plugins
- maven-assembly-plugin
- 3.0.0
-
-
-
+
+
+ org.apache.maven.plugins
+ maven-plugins
+ 30
+
+
+
+ org.apache.maven.plugins
+ maven-assembly-plugin
+ 3.0.0
+
+
org.apache.maven.plugins
maven-assembly-plugin
@@ -48,6 +47,7 @@
+
org.apache.maven.plugins
maven-compiler-plugin
@@ -65,5 +65,10 @@
taos-jdbcdriver
2.0.4
+
+ log4j
+ log4j
+ 1.2.17
+
diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCConnectorChecker.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCConnectorChecker.java
new file mode 100644
index 0000000000..74e586d7fd
--- /dev/null
+++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCConnectorChecker.java
@@ -0,0 +1,172 @@
+package com.taosdata.example;
+
+import com.taosdata.jdbc.TSDBDriver;
+
+import java.sql.*;
+import java.util.Properties;
+
+public class JDBCConnectorChecker {
+ private static String host;
+ private static String dbName = "test";
+ private static String tbName = "weather";
+ private Connection connection;
+
+ /**
+ * get connection
+ **/
+ private void init() {
+ try {
+ Class.forName("com.taosdata.jdbc.TSDBDriver");
+ Properties properties = new Properties();
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+ System.out.println("get connection starting...");
+ connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
+ if (connection != null)
+ System.out.println("[ OK ] Connection established.");
+ } catch (ClassNotFoundException | SQLException e) {
+ throw new RuntimeException("connection failed: " + host);
+ }
+ }
+
+ /**
+ * create database
+ */
+ private void createDatabase() {
+ String sql = "create database if not exists " + dbName;
+ exuete(sql);
+ }
+
+ /**
+ * use database
+ */
+ private void useDatabase() {
+ String sql = "use " + dbName;
+ exuete(sql);
+ }
+
+ /**
+ * select
+ */
+ private void checkSelect() {
+ final String sql = "select * from test.weather";
+ executeQuery(sql);
+ }
+
+ private void executeQuery(String sql) {
+ try (Statement statement = connection.createStatement()) {
+ long start = System.currentTimeMillis();
+ ResultSet resultSet = statement.executeQuery(sql);
+ long end = System.currentTimeMillis();
+ printSql(sql, true, (end - start));
+ printResult(resultSet);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ private void printResult(ResultSet resultSet) throws SQLException {
+ ResultSetMetaData metaData = resultSet.getMetaData();
+ while (resultSet.next()) {
+ for (int i = 1; i <= metaData.getColumnCount(); i++) {
+ String columnLabel = metaData.getColumnLabel(i);
+ String value = resultSet.getString(i);
+ System.out.printf("%s: %s\t", columnLabel, value);
+ }
+ System.out.println();
+ }
+ }
+
+ private String formatString(String str) {
+ StringBuilder sb = new StringBuilder();
+ int blankCnt = (26 - str.length()) / 2;
+ for (int j = 0; j < blankCnt; j++)
+ sb.append(" ");
+ sb.append(str);
+ for (int j = 0; j < blankCnt; j++)
+ sb.append(" ");
+ sb.append("|");
+ return sb.toString();
+ }
+
+
+ /**
+ * insert
+ */
+ private void checkInsert() {
+ final String sql = "insert into test.weather (ts, temperature, humidity) values(now, 20.5, 34)";
+ exuete(sql);
+ }
+
+ /**
+ * create table
+ */
+ private void createTable() {
+ final String sql = "create table if not exists " + dbName + "." + tbName + " (ts timestamp, temperature float, humidity int)";
+ exuete(sql);
+ }
+
+ private final void printSql(String sql, boolean succeed, long cost) {
+ System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql);
+ }
+
+ private final void exuete(String sql) {
+ try (Statement statement = connection.createStatement()) {
+ long start = System.currentTimeMillis();
+ boolean execute = statement.execute(sql);
+ long end = System.currentTimeMillis();
+ printSql(sql, execute, (end - start));
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ private void close() {
+ try {
+ if (connection != null) {
+ this.connection.close();
+ System.out.println("connection closed.");
+ }
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ private void checkDropTable() {
+ final String sql = "drop table if exists " + dbName + "." + tbName + "";
+ exuete(sql);
+ }
+
+ public static void main(String[] args) {
+ for (int i = 0; i < args.length; i++) {
+ if ("-host".equalsIgnoreCase(args[i]) && i < args.length - 1) {
+ host = args[++i];
+ }
+ if ("-db".equalsIgnoreCase(args[i]) && i < args.length - 1) {
+ dbName = args[++i];
+ }
+ if ("-t".equalsIgnoreCase(args[i]) && i < args.length - 1) {
+ tbName = args[++i];
+ }
+ }
+
+ if (host == null) {
+ System.out.println("Usage: java -jar JDBCConnectorChecker.jar -host ");
+ return;
+ }
+
+ JDBCConnectorChecker checker = new JDBCConnectorChecker();
+ checker.init();
+ checker.createDatabase();
+ checker.useDatabase();
+ checker.checkDropTable();
+ checker.createTable();
+ checker.checkInsert();
+ checker.checkSelect();
+ checker.checkDropTable();
+ checker.close();
+ }
+
+}
diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/JdbcTaosdemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/JdbcTaosdemo.java
new file mode 100644
index 0000000000..c30d85a084
--- /dev/null
+++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/JdbcTaosdemo.java
@@ -0,0 +1,279 @@
+package com.taosdata.example.jdbcTaosdemo;
+
+import com.taosdata.example.jdbcTaosdemo.domain.JdbcTaosdemoConfig;
+import com.taosdata.example.jdbcTaosdemo.task.CreateTableTask;
+import com.taosdata.example.jdbcTaosdemo.task.InsertTableDatetimeTask;
+import com.taosdata.example.jdbcTaosdemo.task.InsertTableTask;
+import com.taosdata.example.jdbcTaosdemo.utils.ConnectionFactory;
+import com.taosdata.example.jdbcTaosdemo.utils.SqlSpeller;
+import com.taosdata.example.jdbcTaosdemo.utils.TimeStampUtil;
+import org.apache.log4j.Logger;
+
+import java.sql.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+public class JdbcTaosdemo {
+
+ private static Logger logger = Logger.getLogger(JdbcTaosdemo.class);
+ private final JdbcTaosdemoConfig config;
+ private Connection connection;
+
+ public JdbcTaosdemo(JdbcTaosdemoConfig config) {
+ this.config = config;
+ }
+
+ public static void main(String[] args) {
+ JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args);
+
+ boolean isHelp = Arrays.asList(args).contains("--help");
+ if (isHelp) {
+ JdbcTaosdemoConfig.printHelp();
+ return;
+ }
+ if (config.getHost() == null) {
+ JdbcTaosdemoConfig.printHelp();
+ return;
+ }
+
+ JdbcTaosdemo taosdemo = new JdbcTaosdemo(config);
+ taosdemo.init();
+ taosdemo.dropDatabase();
+ taosdemo.createDatabase();
+ taosdemo.useDatabase();
+ taosdemo.createSuperTable();
+ taosdemo.createTableMultiThreads();
+
+ boolean infinite = Arrays.asList(args).contains("--infinite");
+ if (infinite) {
+ logger.info("!!! Infinite Insert Mode Started. !!!!");
+ taosdemo.insertInfinite();
+ } else {
+ taosdemo.insertMultiThreads();
+ // single table select
+ taosdemo.selectFromTableLimit();
+ taosdemo.selectCountFromTable();
+ taosdemo.selectAvgMinMaxFromTable();
+ // super table select
+ taosdemo.selectFromSuperTableLimit();
+ taosdemo.selectCountFromSuperTable();
+ taosdemo.selectAvgMinMaxFromSuperTable();
+ // drop super table
+ if (config.isDeleteTable())
+ taosdemo.dropSuperTable();
+ taosdemo.close();
+ }
+ }
+
+
+ /**
+ * establish the connection
+ */
+ private void init() {
+ try {
+ Class.forName("com.taosdata.jdbc.TSDBDriver");
+ connection = ConnectionFactory.build(config);
+ if (connection != null)
+ logger.info("[ OK ] Connection established.");
+ } catch (ClassNotFoundException | SQLException e) {
+ logger.error(e.getMessage());
+ throw new RuntimeException("connection failed: " + config.getHost());
+ }
+ }
+
+ /**
+ * create database
+ */
+ private void createDatabase() {
+ String sql = SqlSpeller.createDatabaseSQL(config.getDbName(), config.getKeep(), config.getDays());
+ execute(sql);
+ }
+
+ /**
+ * drop database
+ */
+ private void dropDatabase() {
+ String sql = SqlSpeller.dropDatabaseSQL(config.getDbName());
+ execute(sql);
+ }
+
+ /**
+ * use database
+ */
+ private void useDatabase() {
+ String sql = SqlSpeller.useDatabaseSQL(config.getDbName());
+ execute(sql);
+ }
+
+ /**
+ * create super table
+ */
+ private void createSuperTable() {
+ String sql = SqlSpeller.createSuperTableSQL(config.getStbName());
+ execute(sql);
+ }
+
+ /**
+ * create table use super table with multi threads
+ */
+ private void createTableMultiThreads() {
+ try {
+ final int tableSize = config.getNumberOfTable() / config.getNumberOfThreads();
+ List threads = new ArrayList<>();
+ for (int i = 0; i < config.getNumberOfThreads(); i++) {
+ Thread thread = new Thread(new CreateTableTask(config, i * tableSize, tableSize), "Thread-" + i);
+ threads.add(thread);
+ thread.start();
+ }
+ for (Thread thread : threads) {
+ thread.join();
+ }
+ logger.info("<<< Multi Threads create table finished.");
+ } catch (InterruptedException e) {
+ logger.error(e.getMessage());
+ e.printStackTrace();
+ }
+ }
+
+ /**
+ * insert data infinitely
+ */
+ private void insertInfinite() {
+ try {
+ final long startDatetime = TimeStampUtil.datetimeToLong("2005-01-01 00:00:00.000");
+ final long finishDatetime = TimeStampUtil.datetimeToLong("2030-01-01 00:00:00.000");
+
+ final int tableSize = config.getNumberOfTable() / config.getNumberOfThreads();
+ List threads = new ArrayList<>();
+ for (int i = 0; i < config.getNumberOfThreads(); i++) {
+ Thread thread = new Thread(new InsertTableDatetimeTask(config, i * tableSize, tableSize, startDatetime, finishDatetime), "Thread-" + i);
+ threads.add(thread);
+ thread.start();
+ }
+ for (Thread thread : threads) {
+ thread.join();
+ }
+ logger.info("<<< Multi Threads insert table finished.");
+ } catch (InterruptedException e) {
+ logger.error(e.getMessage());
+ e.printStackTrace();
+ }
+ }
+
+ private void insertMultiThreads() {
+ try {
+ final int tableSize = config.getNumberOfTable() / config.getNumberOfThreads();
+ final int numberOfRecordsPerTable = config.getNumberOfRecordsPerTable();
+ List threads = new ArrayList<>();
+ for (int i = 0; i < config.getNumberOfThreads(); i++) {
+ Thread thread = new Thread(new InsertTableTask(config, i * tableSize, tableSize, numberOfRecordsPerTable), "Thread-" + i);
+ threads.add(thread);
+ thread.start();
+ }
+ for (Thread thread : threads) {
+ thread.join();
+ }
+ logger.info("<<< Multi Threads insert table finished.");
+ } catch (InterruptedException e) {
+ logger.error(e.getMessage());
+ e.printStackTrace();
+ }
+ }
+
+ private void selectFromTableLimit() {
+ String sql = SqlSpeller.selectFromTableLimitSQL(config.getDbName(), config.getTbPrefix(), 1, 10, 0);
+ executeQuery(sql);
+ }
+
+ private void selectCountFromTable() {
+ String sql = SqlSpeller.selectCountFromTableSQL(config.getDbName(), config.getTbPrefix(), 1);
+ executeQuery(sql);
+ }
+
+ private void selectAvgMinMaxFromTable() {
+ String sql = SqlSpeller.selectAvgMinMaxFromTableSQL("current", config.getDbName(), config.getTbPrefix(), 1);
+ executeQuery(sql);
+ }
+
+ private void selectFromSuperTableLimit() {
+ String sql = SqlSpeller.selectFromSuperTableLimitSQL(config.getDbName(), config.getStbName(), 10, 0);
+ executeQuery(sql);
+ }
+
+ private void selectCountFromSuperTable() {
+ String sql = SqlSpeller.selectCountFromSuperTableSQL(config.getDbName(), config.getStbName());
+ executeQuery(sql);
+ }
+
+ private void selectAvgMinMaxFromSuperTable() {
+ String sql = SqlSpeller.selectAvgMinMaxFromSuperTableSQL("current", config.getDbName(), config.getStbName());
+ executeQuery(sql);
+ }
+
+ private void close() {
+ try {
+ if (connection != null) {
+ this.connection.close();
+ logger.info("connection closed.");
+ }
+ } catch (SQLException e) {
+ logger.error(e.getMessage());
+ e.printStackTrace();
+ }
+ }
+
+ /**
+ * drop super table
+ */
+ private void dropSuperTable() {
+ String sql = SqlSpeller.dropSuperTableSQL(config.getDbName(), config.getStbName());
+ execute(sql);
+ }
+
+ /**
+ * execute sql, use this method when sql is create, alter, drop..
+ */
+ private void execute(String sql) {
+ try (Statement statement = connection.createStatement()) {
+ long start = System.currentTimeMillis();
+ boolean execute = statement.execute(sql);
+ long end = System.currentTimeMillis();
+ printSql(sql, execute, (end - start));
+ } catch (SQLException e) {
+ logger.error(e.getMessage());
+ e.printStackTrace();
+ }
+ }
+
+ private static void printSql(String sql, boolean succeed, long cost) {
+ System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql);
+ }
+
+ private void executeQuery(String sql) {
+ try (Statement statement = connection.createStatement()) {
+ long start = System.currentTimeMillis();
+ ResultSet resultSet = statement.executeQuery(sql);
+ long end = System.currentTimeMillis();
+ printSql(sql, true, (end - start));
+ printResult(resultSet);
+ } catch (SQLException e) {
+ logger.error(e.getMessage());
+ e.printStackTrace();
+ }
+ }
+
+ private static void printResult(ResultSet resultSet) throws SQLException {
+ ResultSetMetaData metaData = resultSet.getMetaData();
+ while (resultSet.next()) {
+ StringBuilder sb = new StringBuilder();
+ for (int i = 1; i <= metaData.getColumnCount(); i++) {
+ String columnLabel = metaData.getColumnLabel(i);
+ String value = resultSet.getString(i);
+ sb.append(columnLabel + ": " + value + "\t");
+ }
+ System.out.println(sb.toString());
+ }
+ }
+
+}
diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/domain/JdbcTaosdemoConfig.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/domain/JdbcTaosdemoConfig.java
new file mode 100644
index 0000000000..3cca9a3d7a
--- /dev/null
+++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/domain/JdbcTaosdemoConfig.java
@@ -0,0 +1,153 @@
+package com.taosdata.example.jdbcTaosdemo.domain;
+
+public final class JdbcTaosdemoConfig {
+
+ //The host to connect to TDengine. Must be specified.
+ private String host;
+ //The TCP/IP port number to use for the connection. Default is 6030.
+ private int port = 6030;
+ //The TDengine user name to use when connecting to the server. Default is 'root'
+ private String user = "root";
+ //The password to use when connecting to the server. Default is 'taosdata'
+ private String password = "taosdata";
+
+ //Destination database. Default is 'test'
+ private String dbName = "test";
+ //database 'keep' parameter (passed to "create database ... keep N")
+ private int keep = 365 * 20;
+ //database 'days' parameter (passed to "create database ... days N")
+ private int days = 30;
+
+ //Super table Name. Default is 'meters'
+ private String stbName = "meters";
+ //Table name prefix. Default is 'd'
+ private String tbPrefix = "d";
+ //The number of tables. Default is 10.
+ private int numberOfTable = 10;
+ //The number of records per table. Default is 2
+ private int numberOfRecordsPerTable = 2;
+ //The number of records per request. Default is 100
+ private int numberOfRecordsPerRequest = 100;
+
+ //The number of threads. Default is 1.
+ private int numberOfThreads = 1;
+ //Delete data. Default is false
+ private boolean deleteTable = false;
+
+ public static void printHelp() {
+ System.out.println("Usage: java -jar JDBCConnectorChecker.jar [OPTION...]");
+ System.out.println("-h host The host to connect to TDengine. you must input one");
+ System.out.println("-p port The TCP/IP port number to use for the connection. Default is 6030");
+ System.out.println("-u user The TDengine user name to use when connecting to the server. Default is 'root'");
+ System.out.println("-P password The password to use when connecting to the server.Default is 'taosdata'");
+ System.out.println("-d database Destination database. Default is 'test'");
+ System.out.println("-m tablePrefix Table prefix name. Default is 'd'");
+ System.out.println("-t num_of_tables The number of tables. Default is 10");
+ System.out.println("-n num_of_records_per_table The number of records per table. Default is 2");
+ System.out.println("-r num_of_records_per_req The number of records per request. Default is 100");
+ System.out.println("-T num_of_threads The number of threads. Default is 1");
+ System.out.println("-D delete table Delete data methods. Default is false");
+ System.out.println("--help Give this help list");
+// System.out.println("--infinite infinite insert mode");
+ }
+
+ /**
+ * parse args from command line
+ *
+ * @param args command line args
+ * @return JdbcTaosdemoConfig
+ */
+ public JdbcTaosdemoConfig(String[] args) {
+ for (int i = 0; i < args.length; i++) {
+ if ("-h".equals(args[i]) && i < args.length - 1) {
+ host = args[++i];
+ }
+ if ("-p".equals(args[i]) && i < args.length - 1) {
+ port = Integer.parseInt(args[++i]);
+ }
+ if ("-u".equals(args[i]) && i < args.length - 1) {
+ user = args[++i];
+ }
+ if ("-P".equals(args[i]) && i < args.length - 1) {
+ password = args[++i];
+ }
+ if ("-d".equals(args[i]) && i < args.length - 1) {
+ dbName = args[++i];
+ }
+ if ("-m".equals(args[i]) && i < args.length - 1) {
+ tbPrefix = args[++i];
+ }
+ if ("-t".equals(args[i]) && i < args.length - 1) {
+ numberOfTable = Integer.parseInt(args[++i]);
+ }
+ if ("-n".equals(args[i]) && i < args.length - 1) {
+ numberOfRecordsPerTable = Integer.parseInt(args[++i]);
+ }
+ if ("-r".equals(args[i]) && i < args.length - 1) {
+ numberOfRecordsPerRequest = Integer.parseInt(args[++i]);
+ }
+ if ("-T".equals(args[i]) && i < args.length - 1) {
+ numberOfThreads = Integer.parseInt(args[++i]);
+ }
+ if ("-D".equals(args[i]) && i < args.length - 1) {
+ deleteTable = Boolean.parseBoolean(args[++i]);
+ }
+ }
+ }
+
+ public String getHost() {
+ return host;
+ }
+
+ public int getPort() {
+ return port;
+ }
+
+ public String getUser() {
+ return user;
+ }
+
+ public String getPassword() {
+ return password;
+ }
+
+ public String getDbName() {
+ return dbName;
+ }
+
+ public int getKeep() {
+ return keep;
+ }
+
+ public int getDays() {
+ return days;
+ }
+
+ public String getStbName() {
+ return stbName;
+ }
+
+ public String getTbPrefix() {
+ return tbPrefix;
+ }
+
+ public int getNumberOfTable() {
+ return numberOfTable;
+ }
+
+ public int getNumberOfRecordsPerTable() {
+ return numberOfRecordsPerTable;
+ }
+
+ public int getNumberOfThreads() {
+ return numberOfThreads;
+ }
+
+ public boolean isDeleteTable() {
+ return deleteTable;
+ }
+
+ public int getNumberOfRecordsPerRequest() {
+ return numberOfRecordsPerRequest;
+ }
+}
diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/CreateTableTask.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/CreateTableTask.java
new file mode 100644
index 0000000000..1da2c8647e
--- /dev/null
+++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/CreateTableTask.java
@@ -0,0 +1,42 @@
+package com.taosdata.example.jdbcTaosdemo.task;
+
+import com.taosdata.example.jdbcTaosdemo.domain.JdbcTaosdemoConfig;
+import com.taosdata.example.jdbcTaosdemo.utils.ConnectionFactory;
+import com.taosdata.example.jdbcTaosdemo.utils.SqlSpeller;
+import org.apache.log4j.Logger;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+public class CreateTableTask implements Runnable {
+
+ private static Logger logger = Logger.getLogger(CreateTableTask.class);
+ private final JdbcTaosdemoConfig config;
+ private final int startIndex;
+ private final int tableNumber;
+
+ public CreateTableTask(JdbcTaosdemoConfig config, int startIndex, int tableNumber) {
+ this.config = config;
+ this.startIndex = startIndex;
+ this.tableNumber = tableNumber;
+ }
+
+ @Override
+ public void run() {
+ try {
+ Connection connection = ConnectionFactory.build(config);
+ for (int i = startIndex; i < startIndex + tableNumber; i++) {
+ Statement statement = connection.createStatement();
+ String sql = SqlSpeller.createTableSQL(i + 1, config.getDbName(), config.getStbName());
+ statement.execute(sql);
+ statement.close();
+ logger.info(">>> " + sql);
+ }
+ connection.close();
+ } catch (SQLException e) {
+ logger.error(e.getMessage());
+ e.printStackTrace();
+ }
+ }
+}
diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/InsertTableDatetimeTask.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/InsertTableDatetimeTask.java
new file mode 100644
index 0000000000..4f60c25646
--- /dev/null
+++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/InsertTableDatetimeTask.java
@@ -0,0 +1,49 @@
+package com.taosdata.example.jdbcTaosdemo.task;
+
+import com.taosdata.example.jdbcTaosdemo.domain.JdbcTaosdemoConfig;
+import com.taosdata.example.jdbcTaosdemo.utils.ConnectionFactory;
+import com.taosdata.example.jdbcTaosdemo.utils.SqlSpeller;
+import org.apache.log4j.Logger;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+public class InsertTableDatetimeTask implements Runnable {
+ private static Logger logger = Logger.getLogger(InsertTableDatetimeTask.class);
+
+ private final JdbcTaosdemoConfig config;
+ private final int startTableIndex;
+ private final int tableNumber;
+ private final long startDatetime;
+ private final long finishedDatetime;
+
+ public InsertTableDatetimeTask(JdbcTaosdemoConfig config, int startTableIndex, int tableNumber, long startDatetime, long finishedDatetime) {
+ this.config = config;
+ this.startTableIndex = startTableIndex;
+ this.tableNumber = tableNumber;
+ this.startDatetime = startDatetime;
+ this.finishedDatetime = finishedDatetime;
+ }
+
+ @Override
+ public void run() {
+ try {
+ Connection connection = ConnectionFactory.build(config);
+ int valuesCount = config.getNumberOfRecordsPerRequest();
+ for (long ts = startDatetime; ts < finishedDatetime; ts += valuesCount) {
+ for (int i = startTableIndex; i < startTableIndex + tableNumber; i++) {
+ String sql = SqlSpeller.insertBatchSizeRowsSQL(config.getDbName(), config.getTbPrefix(), i + 1, ts, valuesCount);
+ Statement statement = connection.createStatement();
+ statement.execute(sql);
+ statement.close();
+ logger.info(Thread.currentThread().getName() + ">>> " + sql);
+ }
+ }
+ connection.close();
+ } catch (SQLException e) {
+ logger.error(e.getMessage());
+ e.printStackTrace();
+ }
+ }
+}
diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/InsertTableTask.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/InsertTableTask.java
new file mode 100644
index 0000000000..d6d6ebbff1
--- /dev/null
+++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/InsertTableTask.java
@@ -0,0 +1,52 @@
+package com.taosdata.example.jdbcTaosdemo.task;
+
+import com.taosdata.example.jdbcTaosdemo.domain.JdbcTaosdemoConfig;
+import com.taosdata.example.jdbcTaosdemo.utils.ConnectionFactory;
+import com.taosdata.example.jdbcTaosdemo.utils.SqlSpeller;
+import com.taosdata.example.jdbcTaosdemo.utils.TimeStampUtil;
+import org.apache.log4j.Logger;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class InsertTableTask implements Runnable {
+ private static final Logger logger = Logger.getLogger(InsertTableTask.class);
+ private static AtomicLong beginTimestamp = new AtomicLong(TimeStampUtil.datetimeToLong("2005-01-01 00:00:00.000"));
+
+ private final JdbcTaosdemoConfig config;
+ private final int startIndex;
+ private final int tableNumber;
+ private final int recordsNumber;
+
+ public InsertTableTask(JdbcTaosdemoConfig config, int startIndex, int tableNumber, int recordsNumber) {
+ this.config = config;
+ this.startIndex = startIndex;
+ this.tableNumber = tableNumber;
+ this.recordsNumber = recordsNumber;
+ }
+
+ @Override
+ public void run() {
+ try {
+ Connection connection = ConnectionFactory.build(config);
+ // iterate insert
+ for (int j = 0; j < recordsNumber; j++) {
+ long ts = beginTimestamp.getAndIncrement();
+ // insert one row into each table
+ for (int i = startIndex; i < startIndex + tableNumber; i++) {
+ String sql = SqlSpeller.insertOneRowSQL(config.getDbName(), config.getTbPrefix(), i + 1, ts);
+ Statement statement = connection.createStatement();
+ statement.execute(sql);
+ statement.close();
+ logger.info(Thread.currentThread().getName() + ">>> " + sql);
+ }
+ }
+ connection.close();
+ } catch (SQLException e) {
+ logger.error(e.getMessage());
+ e.printStackTrace();
+ }
+ }
+}
diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/ConnectionFactory.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/ConnectionFactory.java
new file mode 100644
index 0000000000..52691f4de7
--- /dev/null
+++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/ConnectionFactory.java
@@ -0,0 +1,32 @@
+package com.taosdata.example.jdbcTaosdemo.utils;
+
+import com.taosdata.example.jdbcTaosdemo.domain.JdbcTaosdemoConfig;
+import com.taosdata.jdbc.TSDBDriver;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Properties;
+
+public class ConnectionFactory {
+
+ public static Connection build(JdbcTaosdemoConfig config) throws SQLException {
+ return build(config.getHost(), config.getPort(), config.getDbName(), config.getUser(), config.getPassword());
+ }
+
+ public static Connection build(String host, int port, String dbName) throws SQLException {
+ return build(host, port, dbName, "root", "taosdata");
+ }
+
+ private static Connection build(String host, int port, String dbName, String user, String password) throws SQLException {
+ Properties properties = new Properties();
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, user);
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, password);
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+ return DriverManager.getConnection("jdbc:TAOS://" + host + ":" + port + "/" + dbName + "", properties);
+ }
+
+
+}
diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/SqlSpeller.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/SqlSpeller.java
new file mode 100644
index 0000000000..7af97f3b1b
--- /dev/null
+++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/SqlSpeller.java
@@ -0,0 +1,82 @@
+package com.taosdata.example.jdbcTaosdemo.utils;
+
+import java.util.Random;
+
+public class SqlSpeller {
+ private static final Random random = new Random(System.currentTimeMillis());
+ private static final String[] locations = {
+ "Beijing", "Shanghai", "Guangzhou", "Shenzhen",
+ "HangZhou", "Tianjin", "Wuhan", "Changsha", "Nanjing", "Xian"
+ };
+
+ public static String createDatabaseSQL(String dbName, int keep, int days) {
+ return "create database if not exists " + dbName + " keep " + keep + " days " + days;
+ }
+
+ public static String dropDatabaseSQL(String dbName) {
+ return "drop database if exists " + dbName;
+ }
+
+ public static String useDatabaseSQL(String dbName) {
+ return "use " + dbName;
+ }
+
+ public static String createSuperTableSQL(String superTableName) {
+ return "create table if not exists " + superTableName + "(ts timestamp, current float, voltage int, phase float) tags(location binary(64), groupId int)";
+ }
+
+ public static String dropSuperTableSQL(String dbName, String superTableName) {
+ return "drop table if exists " + dbName + "." + superTableName;
+ }
+
+ public static String createTableSQL(int tableIndex, String dbName, String superTableName) {
+ String location = locations[random.nextInt(locations.length)];
+ return "create table d" + tableIndex + " using " + dbName + "." + superTableName + " tags('" + location + "'," + tableIndex + ")";
+ }
+
+ public static String insertOneRowSQL(String dbName, String tbPrefix, int tableIndex, long ts) {
+ float current = 10 + random.nextFloat();
+ int voltage = 200 + random.nextInt(20);
+ float phase = random.nextFloat();
+ String sql = "insert into " + dbName + "." + tbPrefix + "" + tableIndex + " " + "values(" + ts + ", " + current + ", " + voltage + ", " + phase + ")";
+ return sql;
+ }
+
+ public static String insertBatchSizeRowsSQL(String dbName, String tbPrefix, int tbIndex, long ts, int valuesCount) {
+ float current = 10 + random.nextFloat();
+ int voltage = 200 + random.nextInt(20);
+ float phase = random.nextFloat();
+ StringBuilder sb = new StringBuilder();
+ sb.append("insert into " + dbName + "." + tbPrefix + "" + tbIndex + " " + "values");
+ for (int i = 0; i < valuesCount; i++) {
+ sb.append("(" + (ts + i) + ", " + current + ", " + voltage + ", " + phase + ") ");
+ }
+ return sb.toString();
+ }
+
+ public static String selectFromTableLimitSQL(String dbName, String tbPrefix, int tbIndex, int limit, int offset) {
+ return "select * from " + dbName + "." + tbPrefix + "" + tbIndex + " limit " + limit + " offset " + offset;
+ }
+
+ public static String selectCountFromTableSQL(String dbName, String tbPrefix, int tbIndex) {
+ return "select count(*) from " + dbName + "." + tbPrefix + "" + tbIndex;
+ }
+
+ public static String selectAvgMinMaxFromTableSQL(String field, String dbName, String tbPrefix, int tbIndex) {
+ return "select avg(" + field + "),min(" + field + "),max(" + field + ") from " + dbName + "." + tbPrefix + "" + tbIndex;
+ }
+
+ public static String selectFromSuperTableLimitSQL(String dbName, String stbName, int limit, int offset) {
+ return "select * from " + dbName + "." + stbName + " limit " + limit + " offset " + offset;
+ }
+
+ public static String selectCountFromSuperTableSQL(String dbName, String stableName) {
+ return "select count(*) from " + dbName + "." + stableName;
+ }
+
+ public static String selectAvgMinMaxFromSuperTableSQL(String field, String dbName, String stbName) {
+ return "select avg(" + field + "),min(" + field + "),max(" + field + ") from " + dbName + "." + stbName + "";
+ }
+
+
+}
\ No newline at end of file
diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/TimeStampUtil.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/TimeStampUtil.java
new file mode 100644
index 0000000000..d00471f581
--- /dev/null
+++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/TimeStampUtil.java
@@ -0,0 +1,35 @@
+package com.taosdata.example.jdbcTaosdemo.utils;
+
+import java.sql.Date;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+
+public class TimeStampUtil {
+ private static final String datetimeFormat = "yyyy-MM-dd HH:mm:ss.SSS";
+
+ public static long datetimeToLong(String dateTime) {
+ SimpleDateFormat sdf = new SimpleDateFormat(datetimeFormat);
+ try {
+ return sdf.parse(dateTime).getTime();
+ } catch (ParseException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public static String longToDatetime(long time) {
+ SimpleDateFormat sdf = new SimpleDateFormat(datetimeFormat);
+ return sdf.format(new Date(time));
+ }
+
+ public static void main(String[] args) {
+ final String startTime = "2005-01-01 00:00:00.000";
+
+ long start = TimeStampUtil.datetimeToLong(startTime);
+ System.out.println(start);
+
+ String datetime = TimeStampUtil.longToDatetime(1519833600000L);
+ System.out.println(datetime);
+ }
+
+
+}
diff --git a/tests/examples/JDBC/JDBCDemo/src/main/resources/log4j.properties b/tests/examples/JDBC/JDBCDemo/src/main/resources/log4j.properties
new file mode 100644
index 0000000000..b445e5f52e
--- /dev/null
+++ b/tests/examples/JDBC/JDBCDemo/src/main/resources/log4j.properties
@@ -0,0 +1,21 @@
+### 设置###
+log4j.rootLogger=debug,stdout,DebugLog,ErrorLog
+### 输出信息到控制台 ###
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.Target=System.out
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n
+### 输出DEBUG 级别以上的日志到 logs/debug.log ###
+log4j.appender.DebugLog=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DebugLog.File=logs/debug.log
+log4j.appender.DebugLog.Append=true
+log4j.appender.DebugLog.Threshold=DEBUG
+log4j.appender.DebugLog.layout=org.apache.log4j.PatternLayout
+log4j.appender.DebugLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n
+### 输出ERROR 级别以上的日志到 logs/error.log ###
+log4j.appender.ErrorLog=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.ErrorLog.File=logs/error.log
+log4j.appender.ErrorLog.Append=true
+log4j.appender.ErrorLog.Threshold=ERROR
+log4j.appender.ErrorLog.layout=org.apache.log4j.PatternLayout
+log4j.appender.ErrorLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n
\ No newline at end of file
diff --git a/tests/examples/JDBC/calciteDemo/pom.xml b/tests/examples/JDBC/calciteDemo/pom.xml
new file mode 100644
index 0000000000..90eea8e2c4
--- /dev/null
+++ b/tests/examples/JDBC/calciteDemo/pom.xml
@@ -0,0 +1,53 @@
+
+
+ 4.0.0
+
+ com.taosdata.example.calcite
+ calciteDemo
+ 1.0-SNAPSHOT
+
+
+
+
+ org.slf4j
+ slf4j-simple
+ 1.7.25
+ compile
+
+
+
+ org.apache.calcite
+ calcite-core
+ 1.23.0
+
+
+ org.apache.commons
+ commons-dbcp2
+ 2.7.0
+
+
+ org.apache.calcite.avatica
+ avatica-core
+ 1.17.0
+
+
+
+
+ mysql
+ mysql-connector-java
+ 5.1.47
+
+
+
+
+ com.taosdata.jdbc
+ taos-jdbcdriver
+ 2.0.7
+
+
+
+
+
+
\ No newline at end of file
diff --git a/tests/examples/JDBC/calciteDemo/src/main/java/com/taosdata/example/calcite/CalciteDemo.java b/tests/examples/JDBC/calciteDemo/src/main/java/com/taosdata/example/calcite/CalciteDemo.java
new file mode 100644
index 0000000000..7e97956b78
--- /dev/null
+++ b/tests/examples/JDBC/calciteDemo/src/main/java/com/taosdata/example/calcite/CalciteDemo.java
@@ -0,0 +1,67 @@
+package com.taosdata.example.calcite;
+
+import org.apache.calcite.adapter.jdbc.JdbcSchema;
+import org.apache.calcite.jdbc.CalciteConnection;
+import org.apache.calcite.schema.Schema;
+import org.apache.calcite.schema.SchemaPlus;
+import org.apache.calcite.sql.parser.SqlParseException;
+import org.apache.commons.dbcp2.BasicDataSource;
+
+import java.sql.*;
+import java.util.Properties;
+
+public class CalciteDemo {
+
+ private static String url_taos = "jdbc:TAOS://192.168.236.135:6030/test";
+ private static String url_mysql = "jdbc:mysql://master:3306/test?useSSL=false&useUnicode=true&characterEncoding=UTF-8";
+
+ public static void main(String[] args) throws SqlParseException, ClassNotFoundException, SQLException {
+ Class.forName("org.apache.calcite.jdbc.Driver");
+ Properties info = new Properties();
+ info.setProperty("caseSensitive", "false");
+
+ Connection connection = DriverManager.getConnection("jdbc:calcite:", info);
+ CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class);
+
+ SchemaPlus rootSchema = calciteConnection.getRootSchema();
+
+ //'hdb' 是在 TDengine 中创建的数据库名(仅 tdengineTest 使用);当前实际启用的是 mysqlTest
+ Schema schema = mysqlTest(rootSchema);
+// Schema schema = tdengineTest(rootSchema);
+
+ //创建新的schema自动映射到原来的hdb数据库
+ rootSchema.add("test", schema);
+
+ Statement stmt = calciteConnection.createStatement();
+ //查询schema test中的表,表名是tdengine中的表
+ ResultSet rs = stmt.executeQuery("select * from test.t");
+ ResultSetMetaData metaData = rs.getMetaData();
+ while (rs.next()) {
+ for (int i = 1; i <= metaData.getColumnCount(); i++) {
+ System.out.println(metaData.getColumnLabel(i) + " : " + rs.getString(i));
+ }
+ }
+ }
+
+
+ private static Schema tdengineTest(SchemaPlus rootSchema) throws ClassNotFoundException {
+ Class.forName("com.taosdata.jdbc.TSDBDriver");
+ BasicDataSource dataSource = new BasicDataSource();
+ dataSource.setUrl(url_taos);
+ dataSource.setUsername("root");
+ dataSource.setPassword("taosdata");
+
+ return JdbcSchema.create(rootSchema, "test", dataSource, "hdb", null);
+ }
+
+ private static Schema mysqlTest(SchemaPlus rootSchema) throws ClassNotFoundException {
+ Class.forName("com.mysql.jdbc.Driver");
+ BasicDataSource dataSource = new BasicDataSource();
+ dataSource.setUrl(url_mysql);
+ dataSource.setUsername("root");
+ dataSource.setPassword("123456");
+
+ //Schema schema = JdbcSchema.create(rootSchema, "test", dataSource, "hdb", null);
+ return JdbcSchema.create(rootSchema, "test", dataSource, "test", null);
+ }
+}
diff --git a/tests/examples/JDBC/calciteDemo/src/main/resources/log4j.properties b/tests/examples/JDBC/calciteDemo/src/main/resources/log4j.properties
new file mode 100644
index 0000000000..1a77ec520c
--- /dev/null
+++ b/tests/examples/JDBC/calciteDemo/src/main/resources/log4j.properties
@@ -0,0 +1,6 @@
+log4j.rootLogger=info,stdout
+
+#console
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern= [%d{yyyy-MM-dd HH:mm:ss a}]:%p %l%m%n
\ No newline at end of file
diff --git a/tests/examples/c/demo.c b/tests/examples/c/demo.c
index f0e970c332..d64c0de1ce 100644
--- a/tests/examples/c/demo.c
+++ b/tests/examples/c/demo.c
@@ -19,6 +19,7 @@
#include
#include
#include
+#include
#include // TAOS header file
int main(int argc, char *argv[]) {
@@ -67,7 +68,7 @@ int main(int argc, char *argv[]) {
// insert 10 records
int i = 0;
for (i = 0; i < 10; ++i) {
- sprintf(qstr, "insert into m1 values (%ld, %d, %d, %d, %d, %f, %lf, '%s')", 1546300800000 + i * 1000, i, i, i, i*10000000, i*1.0, i*2.0, "hello");
+ sprintf(qstr, "insert into m1 values (%" PRId64 ", %d, %d, %d, %d, %f, %lf, '%s')", 1546300800000 + i * 1000, i, i, i, i*10000000, i*1.0, i*2.0, "hello");
printf("qstr: %s\n", qstr);
if (taos_query(taos, qstr)) {
printf("insert row: %i, reason:%s\n", i, taos_errstr(taos));
diff --git a/tests/examples/go/src/taosapp/taosapp.go b/tests/examples/go/src/taosapp/taosapp.go
deleted file mode 100644
index 30126ea571..0000000000
--- a/tests/examples/go/src/taosapp/taosapp.go
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-package main
-
-import (
- "database/sql"
- "time"
- "log"
- "fmt"
- _ "github.com/taosdata/driver-go/taosSql"
-)
-
-func main() {
- taosDriverName := "taosSql"
- demodb := "demodb"
- demot := "demot"
-
- fmt.Printf("\n======== start demo test ========\n")
- // open connect to taos server
- db, err := sql.Open(taosDriverName, "root:taosdata@/tcp(127.0.0.1:0)/")
- if err != nil {
- log.Fatalf("Open database error: %s\n", err)
- }
- defer db.Close()
-
- drop_database(db, demodb)
- create_database(db, demodb)
- use_database(db, demodb)
- create_table(db, demot)
- insert_data(db, demot)
- select_data(db, demot)
-
- fmt.Printf("\n======== start stmt mode test ========\n")
-
- demodbStmt := "demodbStmt"
- demotStmt := "demotStmt"
- drop_database_stmt(db, demodbStmt)
- create_database_stmt(db, demodbStmt)
- use_database_stmt(db, demodbStmt)
- create_table_stmt(db, demotStmt)
- insert_data_stmt(db, demotStmt)
- select_data_stmt(db, demotStmt)
-
- fmt.Printf("\n======== end demo test ========\n")
-}
-
-func drop_database(db *sql.DB, demodb string) {
- st := time.Now().Nanosecond()
- res, err := db.Exec("drop database if exists " + demodb)
- checkErr(err, "drop database if exists " + demodb)
-
- affectd, err := res.RowsAffected()
- checkErr(err, "drop db, res.RowsAffected")
-
- et := time.Now().Nanosecond()
-
- fmt.Printf("drop database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9)
-}
-
-func create_database(db *sql.DB, demodb string) {
- st := time.Now().Nanosecond()
- // create database
- res, err := db.Exec("create database " + demodb)
- checkErr(err, "create db, db.Exec")
-
- affectd, err := res.RowsAffected()
- checkErr(err, "create db, res.RowsAffected")
-
- et := time.Now().Nanosecond()
-
- fmt.Printf("create database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9)
-
- return
-}
-
-func use_database(db *sql.DB, demodb string) {
- st := time.Now().Nanosecond()
- // use database
- res, err := db.Exec("use " + demodb) // notes: must no quote to db name
- checkErr(err, "use db db.Exec")
-
- affectd, err := res.RowsAffected()
- checkErr(err, "use db, res.RowsAffected")
-
- et := time.Now().Nanosecond()
-
- fmt.Printf("use database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9)
-}
-
-func create_table(db *sql.DB, demot string) {
- st := time.Now().Nanosecond()
- // create table
- res, err := db.Exec("create table " + demot + " (ts timestamp, id int, name binary(8), len tinyint, flag bool, notes binary(8), fv float, dv double)")
- checkErr(err, "create table db.Exec")
-
- affectd, err := res.RowsAffected()
- checkErr(err, "create table res.RowsAffected")
-
- et := time.Now().Nanosecond()
- fmt.Printf("create table result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9)
-}
-
-func insert_data(db *sql.DB, demot string) {
- st := time.Now().Nanosecond()
- // insert data
- res, err := db.Exec("insert into " + demot +
- " values (now, 100, 'beijing', 10, true, 'one', 123.456, 123.456)" +
- " (now+1s, 101, 'shanghai', 11, true, 'two', 789.123, 789.123)" +
- " (now+2s, 102, 'shenzhen', 12, false, 'three', 456.789, 456.789)")
-
- checkErr(err, "insert data, db.Exec")
-
- affectd, err := res.RowsAffected()
- checkErr(err, "insert data res.RowsAffected")
-
- et := time.Now().Nanosecond()
- fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9)
-}
-
-func select_data(db *sql.DB, demot string) {
- st := time.Now().Nanosecond()
-
- rows, err := db.Query("select * from ? " , demot) // go text mode
- checkErr(err, "select db.Query")
-
- fmt.Printf("%10s%s%8s %5s %9s%s %s %8s%s %7s%s %8s%s %4s%s %5s%s\n", " ","ts", " ", "id"," ", "name"," ","len", " ","flag"," ", "notes", " ", "fv", " ", " ", "dv")
- var affectd int
- for rows.Next() {
- var ts string
- var name string
- var id int
- var len int8
- var flag bool
- var notes string
- var fv float32
- var dv float64
-
- err = rows.Scan(&ts, &id, &name, &len, &flag, ¬es, &fv, &dv)
- checkErr(err, "select rows.Scan")
-
- fmt.Printf("%s\t", ts)
- fmt.Printf("%d\t",id)
- fmt.Printf("%10s\t",name)
- fmt.Printf("%d\t",len)
- fmt.Printf("%t\t",flag)
- fmt.Printf("%s\t",notes)
- fmt.Printf("%06.3f\t",fv)
- fmt.Printf("%09.6f\n",dv)
-
- affectd++
- }
-
- et := time.Now().Nanosecond()
- fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9)
- fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9)
-}
-
-func drop_database_stmt(db *sql.DB,demodb string) {
- st := time.Now().Nanosecond()
- // drop test db
- res, err := db.Exec("drop database if exists " + demodb)
- checkErr(err, "drop database " + demodb)
-
- affectd, err := res.RowsAffected()
- checkErr(err, "drop db, res.RowsAffected")
-
- et := time.Now().Nanosecond()
- fmt.Printf("drop database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9)
-}
-
-func create_database_stmt(db *sql.DB,demodb string) {
- st := time.Now().Nanosecond()
- // create database
- //var stmt interface{}
- stmt, err := db.Prepare("create database ?")
- checkErr(err, "create db, db.Prepare")
-
- //var res driver.Result
- res, err := stmt.Exec(demodb)
- checkErr(err, "create db, stmt.Exec")
-
- //fmt.Printf("Query OK, %d row(s) affected()", res.RowsAffected())
- affectd, err := res.RowsAffected()
- checkErr(err, "create db, res.RowsAffected")
-
- et := time.Now().Nanosecond()
- fmt.Printf("create database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9)
-}
-
-func use_database_stmt (db *sql.DB,demodb string) {
- st := time.Now().Nanosecond()
- // create database
- //var stmt interface{}
- stmt, err := db.Prepare("use " + demodb)
- checkErr(err, "use db, db.Prepare")
-
- res, err := stmt.Exec()
- checkErr(err, "use db, stmt.Exec")
-
- affectd, err := res.RowsAffected()
- checkErr(err, "use db, res.RowsAffected")
-
- et := time.Now().Nanosecond()
- fmt.Printf("use database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9)
-}
-
-func create_table_stmt (db *sql.DB,demot string) {
- st := time.Now().Nanosecond()
- // create table
- // (ts timestamp, id int, name binary(8), len tinyint, flag bool, notes binary(8), fv float, dv double)
- stmt, err := db.Prepare("create table ? (? timestamp, ? int, ? binary(10), ? tinyint, ? bool, ? binary(8), ? float, ? double)")
- checkErr(err, "create table db.Prepare")
-
- res, err := stmt.Exec(demot, "ts", "id", "name", "len", "flag", "notes", "fv", "dv")
- checkErr(err, "create table stmt.Exec")
-
- affectd, err := res.RowsAffected()
- checkErr(err, "create table res.RowsAffected")
-
- et := time.Now().Nanosecond()
- fmt.Printf("create table result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9)
-}
-
-func insert_data_stmt(db *sql.DB,demot string) {
- st := time.Now().Nanosecond()
- // insert data into table
- stmt, err := db.Prepare("insert into ? values(?, ?, ?, ?, ?, ?, ?, ?) (?, ?, ?, ?, ?, ?, ?, ?) (?, ?, ?, ?, ?, ?, ?, ?)")
- checkErr(err, "insert db.Prepare")
-
- res, err := stmt.Exec(demot, "now" , 1000, "'haidian'" , 6, true, "'AI world'", 6987.654, 321.987,
- "now+1s", 1001, "'changyang'" , 7, false, "'DeepMode'", 12356.456, 128634.456,
- "now+2s", 1002, "'chuangping'" , 8, true, "'database'", 3879.456, 65433478.456,)
- checkErr(err, "insert data, stmt.Exec")
-
- affectd, err := res.RowsAffected()
- checkErr(err, "res.RowsAffected")
-
- et := time.Now().Nanosecond()
- fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9)
-}
-
-func select_data_stmt(db *sql.DB, demot string) {
- st := time.Now().Nanosecond()
-
- stmt, err := db.Prepare("select ?, ?, ?, ?, ?, ?, ?, ? from ?" ) // go binary mode
- checkErr(err, "db.Prepare")
-
- rows, err := stmt.Query("ts", "id","name","len", "flag","notes", "fv", "dv", demot)
- checkErr(err, "stmt.Query")
-
- fmt.Printf("%10s%s%8s %5s %8s%s %s %10s%s %7s%s %8s%s %11s%s %14s%s\n", " ","ts", " ", "id"," ", "name"," ","len", " ","flag"," ", "notes", " ", "fv", " ", " ", "dv")
- var affectd int
- for rows.Next() {
- var ts string
- var name string
- var id int
- var len int8
- var flag bool
- var notes string
- var fv float32
- var dv float64
-
- err = rows.Scan(&ts, &id, &name, &len, &flag, ¬es, &fv, &dv)
- //fmt.Println("start scan fields from row.rs, &fv:", &fv)
- //err = rows.Scan(&fv)
- checkErr(err, "rows.Scan")
-
- fmt.Printf("%s\t", ts)
- fmt.Printf("%d\t",id)
- fmt.Printf("%10s\t",name)
- fmt.Printf("%d\t",len)
- fmt.Printf("%t\t",flag)
- fmt.Printf("%s\t",notes)
- fmt.Printf("%06.3f\t",fv)
- fmt.Printf("%09.6f\n",dv)
-
- affectd++
-
- }
-
- et := time.Now().Nanosecond()
- fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9)
-}
-
-func checkErr(err error, prompt string) {
- if err != nil {
- fmt.Printf("%s\n", prompt)
- panic(err)
- }
-}
diff --git a/tests/examples/go/taosdemo.go b/tests/examples/go/taosdemo.go
new file mode 100644
index 0000000000..b42e1e6d70
--- /dev/null
+++ b/tests/examples/go/taosdemo.go
@@ -0,0 +1,409 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+package main
+
+import (
+ "database/sql"
+ "fmt"
+ _ "github.com/taosdata/driver-go/taosSql"
+ "os"
+ "sync"
+ "runtime"
+ "strconv"
+ "time"
+ "flag"
+ "math/rand"
+ //"golang.org/x/sys/unix"
+)
+
+const (
+ maxLocationSize = 32
+ maxSqlBufSize = 65480
+)
+
+var locations = [maxLocationSize]string {
+ "Beijing", "Shanghai", "Guangzhou", "Shenzhen",
+ "HangZhou", "Tianjin", "Wuhan", "Changsha",
+ "Nanjing", "Xian"}
+
+type config struct {
+ hostName string
+ serverPort int
+ user string
+ password string
+ dbName string
+ supTblName string
+ tablePrefix string
+ numOftables int
+ numOfRecordsPerTable int
+ numOfRecordsPerReq int
+ numOfThreads int
+ startTimestamp string
+ startTs int64
+
+ keep int
+ days int
+}
+
+var configPara config
+var taosDriverName = "taosSql"
+var url string
+
+func init() {
+ flag.StringVar(&configPara.hostName, "h", "127.0.0.1","The host to connect to TDengine server.")
+ flag.IntVar(&configPara.serverPort, "p", 6030, "The TCP/IP port number to use for the connection to TDengine server.")
+ flag.StringVar(&configPara.user, "u", "root", "The TDengine user name to use when connecting to the server.")
+ flag.StringVar(&configPara.password, "P", "taosdata", "The password to use when connecting to the server.")
+ flag.StringVar(&configPara.dbName, "d", "test", "Destination database.")
+ flag.StringVar(&configPara.tablePrefix, "m", "d", "Table prefix name.")
+ flag.IntVar(&configPara.numOftables, "t", 2, "The number of tables.")
+ flag.IntVar(&configPara.numOfRecordsPerTable, "n", 10, "The number of records per table.")
+ flag.IntVar(&configPara.numOfRecordsPerReq, "r", 3, "The number of records per request.")
+ flag.IntVar(&configPara.numOfThreads, "T", 1, "The number of threads.")
+ flag.StringVar(&configPara.startTimestamp, "s", "2020-10-01 08:00:00", "The start timestamp for one table.")
+ flag.Parse()
+
+ configPara.keep = 365 * 20
+ configPara.days = 30
+ configPara.supTblName = "meters"
+
+ startTs, err := time.ParseInLocation("2006-01-02 15:04:05", configPara.startTimestamp, time.Local)
+ if err==nil {
+ configPara.startTs = startTs.UnixNano() / 1e6
+ }
+}
+
+func printAllArgs() {
+ fmt.Printf("\n============= args parse result: =============\n")
+    fmt.Printf("hostName:             %v\n", configPara.hostName)
+ fmt.Printf("serverPort: %v\n", configPara.serverPort)
+    fmt.Printf("user:                 %v\n", configPara.user)
+ fmt.Printf("password: %v\n", configPara.password)
+ fmt.Printf("dbName: %v\n", configPara.dbName)
+ fmt.Printf("tablePrefix: %v\n", configPara.tablePrefix)
+ fmt.Printf("numOftables: %v\n", configPara.numOftables)
+ fmt.Printf("numOfRecordsPerTable: %v\n", configPara.numOfRecordsPerTable)
+ fmt.Printf("numOfRecordsPerReq: %v\n", configPara.numOfRecordsPerReq)
+ fmt.Printf("numOfThreads: %v\n", configPara.numOfThreads)
+ fmt.Printf("startTimestamp: %v[%v]\n", configPara.startTimestamp, configPara.startTs)
+ fmt.Printf("================================================\n")
+}
+
+func main() {
+ printAllArgs()
+ fmt.Printf("Please press enter key to continue....\n")
+ fmt.Scanln()
+
+ url = "root:taosdata@/tcp(" + configPara.hostName + ":" + strconv.Itoa(configPara.serverPort) + ")/"
+ //url = fmt.Sprintf("%s:%s@/tcp(%s:%d)/%s?interpolateParams=true", configPara.user, configPara.password, configPara.hostName, configPara.serverPort, configPara.dbName)
+ // open connect to taos server
+ //db, err := sql.Open(taosDriverName, url)
+ //if err != nil {
+ // fmt.Println("Open database error: %s\n", err)
+ // os.Exit(1)
+ //}
+ //defer db.Close()
+
+ createDatabase(configPara.dbName, configPara.supTblName)
+ fmt.Printf("======== create database success! ========\n\n")
+
+ //create_table(db, stblName)
+ multiThreadCreateTable(configPara.numOfThreads, configPara.numOftables, configPara.dbName, configPara.tablePrefix)
+ fmt.Printf("======== create super table and child tables success! ========\n\n")
+
+ //insert_data(db, demot)
+ multiThreadInsertData(configPara.numOfThreads, configPara.numOftables, configPara.dbName, configPara.tablePrefix)
+ fmt.Printf("======== insert data into child tables success! ========\n\n")
+
+ //select_data(db, demot)
+ selectTest(configPara.dbName, configPara.tablePrefix, configPara.supTblName)
+ fmt.Printf("======== select data success! ========\n\n")
+
+ fmt.Printf("======== end demo ========\n")
+}
+
+func createDatabase(dbName string, supTblName string) {
+ db, err := sql.Open(taosDriverName, url)
+ if err != nil {
+        fmt.Printf("Open database error: %s\n", err)
+ os.Exit(1)
+ }
+ defer db.Close()
+
+ // drop database if exists
+ sqlStr := "drop database if exists " + dbName
+ _, err = db.Exec(sqlStr)
+ checkErr(err, sqlStr)
+
+ time.Sleep(time.Second)
+
+ // create database
+ sqlStr = "create database " + dbName + " keep " + strconv.Itoa(configPara.keep) + " days " + strconv.Itoa(configPara.days)
+ _, err = db.Exec(sqlStr)
+ checkErr(err, sqlStr)
+
+ // use database
+ //sqlStr = "use " + dbName
+ //_, err = db.Exec(sqlStr)
+ //checkErr(err, sqlStr)
+
+ sqlStr = "create table if not exists " + dbName + "." + supTblName + " (ts timestamp, current float, voltage int, phase float) tags(location binary(64), groupId int);"
+ _, err = db.Exec(sqlStr)
+ checkErr(err, sqlStr)
+}
+
+func multiThreadCreateTable(threads int, ntables int, dbName string, tablePrefix string) {
+ st := time.Now().UnixNano()
+
+ if (threads < 1) {
+ threads = 1;
+ }
+
+ a := ntables / threads;
+ if (a < 1) {
+ threads = ntables;
+ a = 1;
+ }
+
+ b := ntables % threads;
+
+ last := 0;
+ endTblId := 0
+ wg := sync.WaitGroup{}
+ for i := 0; i < threads; i++ {
+ startTblId := last
+ if (i < b ) {
+ endTblId = last + a
+ } else {
+ endTblId = last + a - 1
+ }
+ last = endTblId + 1
+ wg.Add(1)
+ go createTable(dbName, tablePrefix, startTblId, endTblId, &wg)
+ }
+ wg.Wait()
+
+ et := time.Now().UnixNano()
+ fmt.Printf("create tables spent duration: %6.6fs\n", (float32(et-st))/1e9)
+}
+
+func createTable(dbName string, childTblPrefix string, startTblId int, endTblId int, wg *sync.WaitGroup) {
+ //fmt.Printf("subThread[%d]: create table from %d to %d \n", unix.Gettid(), startTblId, endTblId)
+ // windows.GetCurrentThreadId()
+
+ db, err := sql.Open(taosDriverName, url)
+ if err != nil {
+        fmt.Printf("Open database error: %s\n", err)
+ os.Exit(1)
+ }
+ defer db.Close()
+
+ for i := startTblId; i <= endTblId; i++ {
+ sqlStr := "create table if not exists " + dbName + "." + childTblPrefix + strconv.Itoa(i) + " using " + dbName + ".meters tags('" + locations[i%maxLocationSize] + "', " + strconv.Itoa(i) + ");"
+ //fmt.Printf("sqlStr: %v\n", sqlStr)
+ _, err = db.Exec(sqlStr)
+ checkErr(err, sqlStr)
+ }
+ wg.Done()
+ runtime.Goexit()
+}
+
+func generateRowData(ts int64) string {
+ voltage := rand.Int() % 1000
+ current := 200 + rand.Float32()
+ phase := rand.Float32()
+ values := "( " + strconv.FormatInt(ts, 10) + ", " + strconv.FormatFloat(float64(current), 'f', 6, 64) + ", " + strconv.Itoa(voltage) + ", " + strconv.FormatFloat(float64(phase), 'f', 6, 64) + " ) "
+ return values
+}
+func insertData(dbName string, childTblPrefix string, startTblId int, endTblId int, wg *sync.WaitGroup) {
+ //fmt.Printf("subThread[%d]: insert data to table from %d to %d \n", unix.Gettid(), startTblId, endTblId)
+ // windows.GetCurrentThreadId()
+
+ db, err := sql.Open(taosDriverName, url)
+ if err != nil {
+        fmt.Printf("Open database error: %s\n", err)
+ os.Exit(1)
+ }
+ defer db.Close()
+
+ tmpTs := configPara.startTs;
+ //rand.New(rand.NewSource(time.Now().UnixNano()))
+ for tID := startTblId; tID <= endTblId; tID++{
+ totalNum := 0
+ for {
+ sqlStr := "insert into " + dbName + "." + childTblPrefix + strconv.Itoa(tID) + " values "
+ currRowNum := 0
+ for {
+ tmpTs += 1000
+ valuesOfRow := generateRowData(tmpTs)
+ currRowNum += 1
+ totalNum += 1
+
+ sqlStr = fmt.Sprintf("%s %s", sqlStr, valuesOfRow)
+
+ if (currRowNum >= configPara.numOfRecordsPerReq || totalNum >= configPara.numOfRecordsPerTable) {
+ break
+ }
+ }
+
+ res, err := db.Exec(sqlStr)
+ checkErr(err, sqlStr)
+
+ count, err := res.RowsAffected()
+ checkErr(err, "rows affected")
+
+ if (count != int64(currRowNum)) {
+ fmt.Printf("insert data, expect affected:%d, actual:%d\n", currRowNum, count)
+ os.Exit(1)
+ }
+
+ if (totalNum >= configPara.numOfRecordsPerTable) {
+ break
+ }
+ }
+ }
+
+ wg.Done()
+ runtime.Goexit()
+}
+func multiThreadInsertData(threads int, ntables int, dbName string, tablePrefix string) {
+ st := time.Now().UnixNano()
+
+ if (threads < 1) {
+ threads = 1;
+ }
+
+ a := ntables / threads;
+ if (a < 1) {
+ threads = ntables;
+ a = 1;
+ }
+
+ b := ntables % threads;
+
+ last := 0;
+ endTblId := 0
+ wg := sync.WaitGroup{}
+ for i := 0; i < threads; i++ {
+ startTblId := last
+ if (i < b ) {
+ endTblId = last + a
+ } else {
+ endTblId = last + a - 1
+ }
+ last = endTblId + 1
+ wg.Add(1)
+ go insertData(dbName, tablePrefix, startTblId , endTblId, &wg)
+ }
+ wg.Wait()
+
+ et := time.Now().UnixNano()
+ fmt.Printf("insert data spent duration: %6.6fs\n", (float32(et-st))/1e9)
+}
+func selectTest(dbName string, tbPrefix string, supTblName string){
+ db, err := sql.Open(taosDriverName, url)
+ if err != nil {
+        fmt.Printf("Open database error: %s\n", err)
+ os.Exit(1)
+ }
+ defer db.Close()
+
+ // select sql 1
+ limit := 3
+ offset := 0
+ sqlStr := "select * from " + dbName + "." + supTblName + " limit " + strconv.Itoa(limit) + " offset " + strconv.Itoa(offset)
+ rows, err := db.Query(sqlStr)
+ checkErr(err, sqlStr)
+
+ defer rows.Close()
+ fmt.Printf("query sql: %s\n", sqlStr)
+ for rows.Next() {
+ var (
+ ts string
+ current float32
+ voltage int
+ phase float32
+ location string
+ groupid int
+ )
+ err := rows.Scan(&ts, ¤t, &voltage, &phase, &location, &groupid)
+ if err != nil {
+ checkErr(err, "rows scan fail")
+ }
+
+ fmt.Printf("ts:%s\t current:%f\t voltage:%d\t phase:%f\t location:%s\t groupid:%d\n", ts, current, voltage, phase, location, groupid)
+ }
+ // check iteration error
+ if rows.Err() != nil {
+ checkErr(err, "rows next iteration error")
+ }
+
+ // select sql 2
+ sqlStr = "select avg(voltage), min(voltage), max(voltage) from " + dbName + "." + tbPrefix + strconv.Itoa( rand.Int() % configPara.numOftables)
+ rows, err = db.Query(sqlStr)
+ checkErr(err, sqlStr)
+
+ defer rows.Close()
+ fmt.Printf("\nquery sql: %s\n", sqlStr)
+ for rows.Next() {
+ var (
+ voltageAvg float32
+ voltageMin int
+ voltageMax int
+ )
+ err := rows.Scan(&voltageAvg, &voltageMin, &voltageMax)
+ if err != nil {
+ checkErr(err, "rows scan fail")
+ }
+
+ fmt.Printf("avg(voltage):%f\t min(voltage):%d\t max(voltage):%d\n", voltageAvg, voltageMin, voltageMax)
+ }
+ // check iteration error
+ if rows.Err() != nil {
+ checkErr(err, "rows next iteration error")
+ }
+
+ // select sql 3
+ sqlStr = "select last(*) from " + dbName + "." + supTblName
+ rows, err = db.Query(sqlStr)
+ checkErr(err, sqlStr)
+
+ defer rows.Close()
+ fmt.Printf("\nquery sql: %s\n", sqlStr)
+ for rows.Next() {
+ var (
+ lastTs string
+ lastCurrent float32
+ lastVoltage int
+ lastPhase float32
+ )
+ err := rows.Scan(&lastTs, &lastCurrent, &lastVoltage, &lastPhase)
+ if err != nil {
+ checkErr(err, "rows scan fail")
+ }
+
+ fmt.Printf("last(ts):%s\t last(current):%f\t last(voltage):%d\t last(phase):%f\n", lastTs, lastCurrent, lastVoltage, lastPhase)
+ }
+ // check iteration error
+ if rows.Err() != nil {
+ checkErr(err, "rows next iteration error")
+ }
+}
+func checkErr(err error, prompt string) {
+ if err != nil {
+ fmt.Printf("%s\n", prompt)
+ panic(err)
+ }
+}
diff --git a/tests/examples/python/read_example.py b/tests/examples/python/read_example.py
index dd1475ec14..73052ab2df 100644
--- a/tests/examples/python/read_example.py
+++ b/tests/examples/python/read_example.py
@@ -22,8 +22,12 @@ if __name__ == '__main__':
# @password : Password
# @database : Database to use when connecting to TDengine server
# @config : Configuration directory
- conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
-
+ if len(sys.argv)>1:
+ hostname=sys.argv[1]
+ conn = taos.connect(host=hostname, user="root", password="taosdata", config="/etc/taos")
+ else:
+ conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
+
# Generate a cursor object to run SQL commands
c1 = conn.cursor()
# Create a database named db
diff --git a/tests/gotest/batchtest.sh b/tests/gotest/batchtest.sh
new file mode 100644
index 0000000000..a027dd0d7c
--- /dev/null
+++ b/tests/gotest/batchtest.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+bash ./case001/case001.sh
+#bash ./case002/case002.sh
+#bash ./case003/case003.sh
diff --git a/tests/gotest/case001/case001.go b/tests/gotest/case001/case001.go
new file mode 100644
index 0000000000..1d5ede6d21
--- /dev/null
+++ b/tests/gotest/case001/case001.go
@@ -0,0 +1,308 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+package main
+
+import (
+ "database/sql"
+ "fmt"
+ _ "github.com/taosdata/driver-go/taosSql"
+ "log"
+ "time"
+)
+
+func main() {
+ taosDriverName := "taosSql"
+ demodb := "demodb"
+ demot := "demot"
+
+ fmt.Printf("\n======== start demo test ========\n")
+ // open connect to taos server
+ db, err := sql.Open(taosDriverName, "root:taosdata@/tcp(192.168.1.217:7100)/")
+ if err != nil {
+ log.Fatalf("Open database error: %s\n", err)
+ }
+ defer db.Close()
+
+ drop_database(db, demodb)
+ create_database(db, demodb)
+ use_database(db, demodb)
+ create_table(db, demot)
+ insert_data(db, demot)
+ select_data(db, demot)
+
+ fmt.Printf("\n======== start stmt mode test ========\n")
+
+ demodbStmt := "demodbStmt"
+ demotStmt := "demotStmt"
+ drop_database_stmt(db, demodbStmt)
+ create_database_stmt(db, demodbStmt)
+ use_database_stmt(db, demodbStmt)
+ create_table_stmt(db, demotStmt)
+ insert_data_stmt(db, demotStmt)
+ select_data_stmt(db, demotStmt)
+
+ fmt.Printf("\n======== end demo test ========\n")
+}
+
+func drop_database(db *sql.DB, demodb string) {
+ st := time.Now().Nanosecond()
+ res, err := db.Exec("drop database if exists " + demodb)
+ checkErr(err, "drop database if exists "+demodb)
+
+ affectd, err := res.RowsAffected()
+ checkErr(err, "drop db, res.RowsAffected")
+
+ et := time.Now().Nanosecond()
+
+ fmt.Printf("drop database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
+
+    // sleep 50 milliseconds
+ time.Sleep(time.Duration(50)* time.Millisecond)
+}
+
+func create_database(db *sql.DB, demodb string) {
+ st := time.Now().Nanosecond()
+ // create database
+ res, err := db.Exec("create database " + demodb)
+ checkErr(err, "create db, db.Exec")
+
+ affectd, err := res.RowsAffected()
+ checkErr(err, "create db, res.RowsAffected")
+
+ et := time.Now().Nanosecond()
+
+ fmt.Printf("create database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
+
+ return
+}
+
+func use_database(db *sql.DB, demodb string) {
+ st := time.Now().Nanosecond()
+ // use database
+ res, err := db.Exec("use " + demodb) // notes: must no quote to db name
+ checkErr(err, "use db db.Exec")
+
+ affectd, err := res.RowsAffected()
+ checkErr(err, "use db, res.RowsAffected")
+
+ et := time.Now().Nanosecond()
+
+ fmt.Printf("use database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
+}
+
+func create_table(db *sql.DB, demot string) {
+ st := time.Now().Nanosecond()
+ // create table
+ res, err := db.Exec("create table " + demot + " (ts timestamp, id int, name binary(8), len tinyint, flag bool, notes binary(8), fv float, dv double)")
+ checkErr(err, "create table db.Exec")
+
+ affectd, err := res.RowsAffected()
+ checkErr(err, "create table res.RowsAffected")
+
+ et := time.Now().Nanosecond()
+ fmt.Printf("create table result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
+}
+
+func insert_data(db *sql.DB, demot string) {
+ st := time.Now().Nanosecond()
+ // insert data
+ res, err := db.Exec("insert into " + demot +
+ " values (now, 100, 'beijing', 10, true, 'one', 123.456, 123.456)" +
+ " (now+1s, 101, 'shanghai', 11, true, 'two', 789.123, 789.123)" +
+ " (now+2s, 102, 'shenzhen', 12, false, 'three', 456.789, 456.789)")
+
+ checkErr(err, "insert data, db.Exec")
+
+ affectd, err := res.RowsAffected()
+ checkErr(err, "insert data res.RowsAffected")
+
+ et := time.Now().Nanosecond()
+ fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
+}
+
+func select_data(db *sql.DB, demot string) {
+ st := time.Now().Nanosecond()
+
+ rows, err := db.Query("select * from ? ", demot) // go text mode
+ checkErr(err, "select db.Query")
+
+ fmt.Printf("%10s%s%8s %5s %9s%s %s %8s%s %7s%s %8s%s %4s%s %5s%s\n", " ", "ts", " ", "id", " ", "name", " ", "len", " ", "flag", " ", "notes", " ", "fv", " ", " ", "dv")
+ var affectd int
+
+    //decoder := mahonia.NewDecoder("gbk") // decode characters from the original ANSI (GBK-encoded) text file using gbk
+
+ for rows.Next() {
+ var ts string
+ var name string
+ var id int
+ var len int8
+ var flag bool
+ var notes string
+ var fv float32
+ var dv float64
+
+ err = rows.Scan(&ts, &id, &name, &len, &flag, ¬es, &fv, &dv)
+ checkErr(err, "select rows.Scan")
+
+ fmt.Printf("%s|\t", ts)
+ fmt.Printf("%d|\t", id)
+ fmt.Printf("%10s|\t", name)
+ fmt.Printf("%d|\t", len)
+ fmt.Printf("%t|\t", flag)
+ fmt.Printf("%s|\t", notes)
+ fmt.Printf("%06.3f|\t", fv)
+ fmt.Printf("%09.6f|\n\n", dv)
+
+ affectd++
+ }
+
+ et := time.Now().Nanosecond()
+ fmt.Printf("select data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
+ //fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9)
+}
+
+func drop_database_stmt(db *sql.DB, demodb string) {
+ st := time.Now().Nanosecond()
+ // drop test db
+ res, err := db.Exec("drop database if exists " + demodb)
+ checkErr(err, "drop database "+demodb)
+
+ affectd, err := res.RowsAffected()
+ checkErr(err, "drop db, res.RowsAffected")
+
+ et := time.Now().Nanosecond()
+ fmt.Printf("drop database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
+}
+
+func create_database_stmt(db *sql.DB, demodb string) {
+ st := time.Now().Nanosecond()
+ // create database
+ //var stmt interface{}
+ stmt, err := db.Prepare("create database ?")
+ checkErr(err, "create db, db.Prepare")
+
+ //var res driver.Result
+ res, err := stmt.Exec(demodb)
+ checkErr(err, "create db, stmt.Exec")
+
+ //fmt.Printf("Query OK, %d row(s) affected()", res.RowsAffected())
+ affectd, err := res.RowsAffected()
+ checkErr(err, "create db, res.RowsAffected")
+
+ et := time.Now().Nanosecond()
+ fmt.Printf("create database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
+}
+
+func use_database_stmt(db *sql.DB, demodb string) {
+ st := time.Now().Nanosecond()
+ // create database
+ //var stmt interface{}
+ stmt, err := db.Prepare("use " + demodb)
+ checkErr(err, "use db, db.Prepare")
+
+ res, err := stmt.Exec()
+ checkErr(err, "use db, stmt.Exec")
+
+ affectd, err := res.RowsAffected()
+ checkErr(err, "use db, res.RowsAffected")
+
+ et := time.Now().Nanosecond()
+ fmt.Printf("use database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
+}
+
+func create_table_stmt(db *sql.DB, demot string) {
+ st := time.Now().Nanosecond()
+ // create table
+ // (ts timestamp, id int, name binary(8), len tinyint, flag bool, notes binary(8), fv float, dv double)
+ stmt, err := db.Prepare("create table ? (? timestamp, ? int, ? binary(10), ? tinyint, ? bool, ? binary(8), ? float, ? double)")
+ checkErr(err, "create table db.Prepare")
+
+ res, err := stmt.Exec(demot, "ts", "id", "name", "len", "flag", "notes", "fv", "dv")
+ checkErr(err, "create table stmt.Exec")
+
+ affectd, err := res.RowsAffected()
+ checkErr(err, "create table res.RowsAffected")
+
+ et := time.Now().Nanosecond()
+ fmt.Printf("create table result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
+}
+
+func insert_data_stmt(db *sql.DB, demot string) {
+ st := time.Now().Nanosecond()
+ // insert data into table
+ stmt, err := db.Prepare("insert into ? values(?, ?, ?, ?, ?, ?, ?, ?) (?, ?, ?, ?, ?, ?, ?, ?) (?, ?, ?, ?, ?, ?, ?, ?)")
+ checkErr(err, "insert db.Prepare")
+
+ res, err := stmt.Exec(demot, "now", 1000, "'haidian'", 6, true, "'AI world'", 6987.654, 321.987,
+ "now+1s", 1001, "'changyang'", 7, false, "'DeepMode'", 12356.456, 128634.456,
+ "now+2s", 1002, "'chuangping'", 8, true, "'database'", 3879.456, 65433478.456)
+ checkErr(err, "insert data, stmt.Exec")
+
+ affectd, err := res.RowsAffected()
+ checkErr(err, "res.RowsAffected")
+
+ et := time.Now().Nanosecond()
+ fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
+}
+
+func select_data_stmt(db *sql.DB, demot string) {
+ st := time.Now().Nanosecond()
+
+ stmt, err := db.Prepare("select ?, ?, ?, ?, ?, ?, ?, ? from ?") // go binary mode
+ checkErr(err, "db.Prepare")
+
+ rows, err := stmt.Query("ts", "id", "name", "len", "flag", "notes", "fv", "dv", demot)
+ checkErr(err, "stmt.Query")
+
+ fmt.Printf("%10s%s%8s %5s %8s%s %s %10s%s %7s%s %8s%s %11s%s %14s%s\n", " ", "ts", " ", "id", " ", "name", " ", "len", " ", "flag", " ", "notes", " ", "fv", " ", " ", "dv")
+ var affectd int
+ for rows.Next() {
+ var ts string
+ var name string
+ var id int
+ var len int8
+ var flag bool
+ var notes string
+ var fv float32
+ var dv float64
+
+ err = rows.Scan(&ts, &id, &name, &len, &flag, ¬es, &fv, &dv)
+ //fmt.Println("start scan fields from row.rs, &fv:", &fv)
+ //err = rows.Scan(&fv)
+ checkErr(err, "rows.Scan")
+
+ fmt.Printf("%s|\t", ts)
+ fmt.Printf("%d|\t", id)
+ fmt.Printf("%10s|\t", name)
+ fmt.Printf("%d|\t", len)
+ fmt.Printf("%t|\t", flag)
+ fmt.Printf("%s|\t", notes)
+ fmt.Printf("%06.3f|\t", fv)
+ fmt.Printf("%09.6f|\n", dv)
+
+ affectd++
+
+ }
+
+ et := time.Now().Nanosecond()
+ fmt.Printf("select data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
+}
+
+func checkErr(err error, prompt string) {
+ if err != nil {
+ fmt.Printf("%s\n", prompt)
+ panic(err)
+ }
+}
diff --git a/tests/gotest/case001/case001.sh b/tests/gotest/case001/case001.sh
new file mode 100644
index 0000000000..5a9034c4d1
--- /dev/null
+++ b/tests/gotest/case001/case001.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+##################################################
+#
+# Do go test
+#
+##################################################
+
+set +e
+#set -x
+
+script_dir="$(dirname $(readlink -f $0))"
+#echo "pwd: $script_dir, para0: $0"
+
+execName=$0
+execName=`echo ${execName##*/}`
+goName=`echo ${execName%.*}`
+
+###### step 1: start one taosd
+scriptDir=$script_dir/../../script/sh
+bash $scriptDir/stop_dnodes.sh
+bash $scriptDir/deploy.sh -n dnode1 -i 1
+bash $scriptDir/cfg.sh -n dnode1 -c walLevel -v 0
+bash $scriptDir/exec.sh -n dnode1 -s start
+
+###### step 2: set config item
+TAOS_CFG=/etc/taos/taos.cfg
+HOSTNAME=`hostname -f`
+
+if [ ! -f ${TAOS_CFG} ]; then
+ touch -f $TAOS_CFG
+fi
+
+echo " " > $TAOS_CFG
+echo "firstEp ${HOSTNAME}:7100" >> $TAOS_CFG
+echo "secondEp ${HOSTNAME}:7200" >> $TAOS_CFG
+echo "serverPort 7100" >> $TAOS_CFG
+#echo "dataDir $DATA_DIR" >> $TAOS_CFG
+#echo "logDir $LOG_DIR" >> $TAOS_CFG
+#echo "scriptDir ${CODE_DIR}/../script" >> $TAOS_CFG
+echo "numOfLogLines 100000000" >> $TAOS_CFG
+echo "dDebugFlag 135" >> $TAOS_CFG
+echo "mDebugFlag 135" >> $TAOS_CFG
+echo "sdbDebugFlag 135" >> $TAOS_CFG
+echo "rpcDebugFlag 135" >> $TAOS_CFG
+echo "tmrDebugFlag 131" >> $TAOS_CFG
+echo "cDebugFlag 135" >> $TAOS_CFG
+echo "httpDebugFlag 135" >> $TAOS_CFG
+echo "monitorDebugFlag 135" >> $TAOS_CFG
+echo "udebugFlag 135" >> $TAOS_CFG
+echo "tablemetakeeptimer 5" >> $TAOS_CFG
+echo "wal 0" >> $TAOS_CFG
+echo "asyncLog 0" >> $TAOS_CFG
+echo "locale en_US.UTF-8" >> $TAOS_CFG
+echo "enableCoreFile 1" >> $TAOS_CFG
+echo " " >> $TAOS_CFG
+
+ulimit -n 600000
+ulimit -c unlimited
+#
+##sudo sysctl -w kernel.core_pattern=$TOP_DIR/core.%p.%e
+#
+
+###### step 3: start build
+cd $script_dir
+rm -f go.*
+go mod init $goName
+go build
+sleep 1s
+sudo ./$goName
diff --git a/tests/gotest/test.sh b/tests/gotest/test.sh
new file mode 100644
index 0000000000..fe80d44295
--- /dev/null
+++ b/tests/gotest/test.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+##################################################
+#
+# Do go test
+#
+##################################################
+
+set +e
+#set -x
+
+FILE_NAME=
+RELEASE=0
+while getopts "f:" arg
+do
+ case $arg in
+ f)
+ FILE_NAME=$OPTARG
+ echo "input file: $FILE_NAME"
+ ;;
+ ?)
+            echo "unknown argument"
+ ;;
+ esac
+done
+
+# start one taosd
+bash ../script/sh/stop_dnodes.sh
+bash ../script/sh/deploy.sh -n dnode1 -i 1
+bash ../script/sh/cfg.sh -n dnode1 -c walLevel -v 0
+bash ../script/sh/exec.sh -n dnode1 -s start
+
+# start build test go file
+caseDir=`echo ${FILE_NAME%/*}`
+echo "caseDir: $caseDir"
+cd $caseDir
+rm go.*
+go mod init $caseDir
+go build
+sleep 1s
+./$caseDir
+
diff --git a/tests/perftest-scripts/coverage_test.sh b/tests/perftest-scripts/coverage_test.sh
index a0c8fe4b3f..5085ec89d0 100755
--- a/tests/perftest-scripts/coverage_test.sh
+++ b/tests/perftest-scripts/coverage_test.sh
@@ -53,7 +53,7 @@ function buildTDengine {
function runGeneralCaseOneByOne {
while read -r line; do
if [[ $line =~ ^./test.sh* ]]; then
- case=`echo $line | grep -w "general\|unique\/mnode\/mgmt33.sim\|unique\/stable\/dnode3.sim\|unique\/cluster\/balance3.sim\|unique\/arbitrator\/offline_replica2_alterTable_online.sim"|awk '{print $NF}'`
+ case=`echo $line | grep sim$ |awk '{print $NF}'`
if [ -n "$case" ]; then
./test.sh -f $case > /dev/null 2>&1 && \
diff --git a/tests/pytest/client/alterDatabase.py b/tests/pytest/client/alterDatabase.py
new file mode 100644
index 0000000000..fa397d16c5
--- /dev/null
+++ b/tests/pytest/client/alterDatabase.py
@@ -0,0 +1,55 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.query('select database()')
+ tdSql.checkData(0, 0, "db")
+
+ tdSql.execute("alter database db comp 2")
+ tdSql.query("show databases")
+ tdSql.checkData(0, 14, 2)
+
+ tdSql.execute("alter database db keep 365")
+ tdSql.query("show databases")
+ tdSql.checkData(0, 7, "3650,3650,365")
+
+ tdSql.execute("alter database db quorum 2")
+ tdSql.query("show databases")
+ tdSql.checkData(0, 5, 2)
+
+ tdSql.execute("alter database db blocks 100")
+ tdSql.query("show databases")
+ tdSql.checkData(0, 9, 100)
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/pytest/client/twoClients.py b/tests/pytest/client/twoClients.py
new file mode 100644
index 0000000000..1a1b36c554
--- /dev/null
+++ b/tests/pytest/client/twoClients.py
@@ -0,0 +1,96 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import os
+import sys
+sys.path.insert(0, os.getcwd())
+from util.log import *
+from util.sql import *
+from util.dnodes import *
+import taos
+
+
+class TwoClients:
+ def initConnection(self):
+ self.host = "127.0.0.1"
+ self.user = "root"
+ self.password = "taosdata"
+ self.config = "/home/xp/git/TDengine/sim/dnode1/cfg"
+
+ def run(self):
+ tdDnodes.init("")
+ tdDnodes.setTestCluster(False)
+ tdDnodes.setValgrind(False)
+
+ tdDnodes.stopAll()
+ tdDnodes.deploy(1)
+ tdDnodes.start(1)
+
+ # first client create a stable and insert data
+ conn1 = taos.connect(self.host, self.user, self.password, self.config)
+ cursor1 = conn1.cursor()
+ cursor1.execute("drop database if exists db")
+ cursor1.execute("create database db")
+ cursor1.execute("use db")
+ cursor1.execute("create table tb (ts timestamp, id int) tags(loc nchar(30))")
+ cursor1.execute("insert into t0 using tb tags('beijing') values(now, 1)")
+
+        # second client alters the table created by the first client
+ conn2 = taos.connect(self.host, self.user, self.password, self.config)
+ cursor2 = conn2.cursor()
+ cursor2.execute("use db")
+ cursor2.execute("alter table tb add column name nchar(30)")
+
+ # first client should not be able to use the origin metadata
+ tdSql.init(cursor1, True)
+ tdSql.error("insert into t0 values(now, 2)")
+
+        # first client should be able to insert data with updated metadata
+ tdSql.execute("insert into t0 values(now, 2, 'test')")
+ tdSql.query("select * from tb")
+ tdSql.checkRows(2)
+
+ # second client drop the table
+ cursor2.execute("drop table t0")
+ cursor2.execute("create table t0 using tb tags('beijing')")
+
+ tdSql.execute("insert into t0 values(now, 2, 'test')")
+ tdSql.query("select * from tb")
+ tdSql.checkRows(1)
+
+        # error expected when two clients drop the same column
+ cursor2.execute("alter table tb drop column name")
+ tdSql.error("alter table tb drop column name")
+
+ cursor2.execute("alter table tb add column speed int")
+ tdSql.error("alter table tb add column speed int")
+
+
+ tdSql.execute("alter table tb add column size int")
+ tdSql.query("describe tb")
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, "ts")
+ tdSql.checkData(1, 0, "id")
+ tdSql.checkData(2, 0, "speed")
+ tdSql.checkData(3, 0, "size")
+ tdSql.checkData(4, 0, "loc")
+
+
+ cursor1.close()
+ cursor2.close()
+ conn1.close()
+ conn2.close()
+
+clients = TwoClients()
+clients.initConnection()
+clients.run()
\ No newline at end of file
diff --git a/tests/pytest/concurrent_inquiry.py b/tests/pytest/concurrent_inquiry.py
new file mode 100644
index 0000000000..faefc8a1c2
--- /dev/null
+++ b/tests/pytest/concurrent_inquiry.py
@@ -0,0 +1,146 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+import threading
+import taos
+
+import json
+import time
+import random
+# query sql
+query_sql = [
+# first supertable
+"select count(*) from test.meters where c1 > 50;",
+"select count(*) from test.meters where c2 >= 50 and c2 < 100;",
+"select count(*) from test.meters where c3 != 5;",
+"select count(*) from test.meters where t3 > 2;",
+"select count(*) from test.meters where ts <> '2020-05-13 10:00:00.002';",
+"select count(*) from test.meters where t7 like 'fi%';",
+"select count(*) from test.meters where t7 like '_econd';",
+"select count(*) from test.meters interval(1n) order by ts desc;",
+"select first(*) from test.meters;",
+"select last(*) from test.meters;",
+"select last_row(*) from test.meters;",
+"select twa(c1) from test.t1 where ts > 1500000001000 and ts < 1500000101000" ,
+"select avg(c1) from test.meters;",
+"select bottom(c1, 2) from test.t1;",
+"select diff(c1) from test.t1;",
+"select leastsquares(c1, 1, 1) from test.t1 ;",
+"select max(c1) from test.meters;",
+"select min(c1) from test.meters;",
+"select c1 + c2 * c3 + c1 / c5 + c4 + c2 from test.t1;",
+"select percentile(c1, 50) from test.t1;",
+"select spread(c1) from test.t1 ;",
+"select stddev(c1) from test.t1;",
+"select sum(c1) from test.meters;",
+"select top(c1, 2) from test.meters;"
+"select twa(c6) from test.t1 where ts > 1500000001000 and ts < 1500000101000" ,
+"select avg(c6) from test.meters;",
+"select bottom(c6, 2) from test.t1;",
+"select diff(c6) from test.t1;",
+"select leastsquares(c6, 1, 1) from test.t1 ;",
+"select max(c6) from test.meters;",
+"select min(c6) from test.meters;",
+"select c6 + c2 * c3 + c6 / c5 + c4 + c2 from test.t1;",
+"select percentile(c6, 50) from test.t1;",
+"select spread(c6) from test.t1 ;",
+"select stddev(c6) from test.t1;",
+"select sum(c6) from test.meters;",
+"select top(c6, 2) from test.meters;",
+# second supertable
+"select count(*) from test.meters1 where c1 > 50;",
+"select count(*) from test.meters1 where c2 >= 50 and c2 < 100;",
+"select count(*) from test.meters1 where c3 != 5;",
+"select count(*) from test.meters1 where t3 > 2;",
+"select count(*) from test.meters1 where ts <> '2020-05-13 10:00:00.002';",
+"select count(*) from test.meters1 where t7 like 'fi%';",
+"select count(*) from test.meters1 where t7 like '_econd';",
+"select count(*) from test.meters1 interval(1n) order by ts desc;",
+"select first(*) from test.meters1;",
+"select last(*) from test.meters1;",
+"select last_row(*) from test.meters1;",
+"select twa(c1) from test.m1 where ts > 1500000001000 and ts < 1500000101000" ,
+"select avg(c1) from test.meters1;",
+"select bottom(c1, 2) from test.m1;",
+"select diff(c1) from test.m1;",
+"select leastsquares(c1, 1, 1) from test.m1 ;",
+"select max(c1) from test.meters1;",
+"select min(c1) from test.meters1;",
+"select c1 + c2 * c3 + c1 / c5 + c3 + c2 from test.m1;",
+"select percentile(c1, 50) from test.m1;",
+"select spread(c1) from test.m1 ;",
+"select stddev(c1) from test.m1;",
+"select sum(c1) from test.meters1;",
+"select top(c1, 2) from test.meters1;",
+"select twa(c6) from test.m1 where ts > 1500000001000 and ts < 1500000101000" ,
+"select avg(c6) from test.meters1;",
+"select bottom(c6, 2) from test.m1;",
+"select diff(c6) from test.m1;",
+"select leastsquares(c6, 1, 1) from test.m1 ;",
+"select max(c6) from test.meters1;",
+"select min(c6) from test.meters1;",
+"select c6 + c2 * c3 + c6 / c5 + c3 + c2 from test.m1;",
+"select percentile(c6, 50) from test.m1;",
+"select spread(c6) from test.m1 ;",
+"select stddev(c6) from test.m1;",
+"select sum(c6) from test.meters1;",
+"select top(c6, 2) from test.meters1;"
+]
+
+class ConcurrentInquiry:
+ def initConnection(self):
+ self.numOfThreads = 50
+ self.ts=1500000001000
+
+
+ def query_thread(self,threadID):
+ host = "10.211.55.14"
+ user = "root"
+ password = "taosdata"
+ conn = taos.connect(
+ host,
+ user,
+ password,
+ )
+ cl = conn.cursor()
+
+ print("Thread %d: starting" % threadID)
+
+ while True:
+ ran_query_sql=query_sql
+ random.shuffle(ran_query_sql)
+ for i in ran_query_sql:
+ print("Thread %d : %s"% (threadID,i))
+ try:
+ cl.execute(i)
+ cl.fetchall()
+ except Exception as e:
+ print(
+ "Failure thread%d, sql: %s,exception: %s" %
+ (threadID, str(i),str(e)))
+
+
+ print("Thread %d: finishing" % threadID)
+
+
+
+ def run(self):
+
+ threads = []
+ for i in range(50):
+ thread = threading.Thread(target=self.query_thread, args=(i,))
+ threads.append(thread)
+ thread.start()
+
+q = ConcurrentInquiry()
+q.initConnection()
+q.run()
diff --git a/tests/pytest/crash_gen.py b/tests/pytest/crash_gen.py
index c0a8fd1f00..fee355eef9 100755
--- a/tests/pytest/crash_gen.py
+++ b/tests/pytest/crash_gen.py
@@ -1516,6 +1516,8 @@ class Task():
if errno in [
0x05, # TSDB_CODE_RPC_NOT_READY
# 0x200, # invalid SQL, TODO: re-examine with TD-934
+ 0x217, # "db not selected", client side defined error code
+ 0x218, # "Table does not exist" client side defined error code
0x360, 0x362,
0x369, # tag already exists
0x36A, 0x36B, 0x36D,
@@ -1763,9 +1765,9 @@ class TaskCreateDb(StateTransitionTask):
return state.canCreateDb()
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
+ # self.execWtSql(wt, "create database db replica {}".format(Dice.throw(3)+1))
self.execWtSql(wt, "create database db")
-
class TaskDropDb(StateTransitionTask):
@classmethod
def getEndState(cls):
@@ -1832,7 +1834,7 @@ class TdSuperTable:
return dbc.query("SELECT * FROM db.{}".format(self._stName)) > 0
def ensureTable(self, dbc: DbConn, regTableName: str):
- sql = "select tbname from {} where tbname in ('{}')".format(self._stName, regTableName)
+ sql = "select tbname from db.{} where tbname in ('{}')".format(self._stName, regTableName)
if dbc.query(sql) >= 1 : # reg table exists already
return
sql = "CREATE TABLE {} USING {} tags ({})".format(
@@ -1916,9 +1918,9 @@ class TaskReadData(StateTransitionTask):
'max(speed)',
'first(speed)',
'last(speed)',
- # 'top(speed)', # TODO: not supported?
- # 'bottom(speed)', # TODO: not supported?
- # 'percentile(speed, 10)', # TODO: TD-1316
+ 'top(speed, 50)',
+ 'bottom(speed, 50)',
+ 'apercentile(speed, 10)', # TD-1316: use apercentile instead of percentile on super tables
'last_row(speed)',
# Transformation Functions
# 'diff(speed)', # TODO: no supported?!
@@ -1928,7 +1930,9 @@ class TaskReadData(StateTransitionTask):
None
])
try:
+ # Run the query against the regular table first
dbc.execute("select {} from db.{}".format(aggExpr, rTbName))
+ # Then run it against the super table
if aggExpr not in ['stddev(speed)']: #TODO: STDDEV not valid for super tables?!
dbc.execute("select {} from db.{}".format(aggExpr, sTable.getName()))
except taos.error.ProgrammingError as err:
@@ -2022,7 +2026,7 @@ class TaskRestartService(StateTransitionTask):
return state.canDropFixedSuperTable() # Basicallly when we have the super table
return False # don't run this otherwise
- CHANCE_TO_RESTART_SERVICE = 100
+ CHANCE_TO_RESTART_SERVICE = 200
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
if not gConfig.auto_start_service: # only execute when we are in -a mode
print("_a", end="", flush=True)
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index d600a003b0..b679942054 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -16,6 +16,8 @@ python3 ./test.py -f insert/nchar.py
python3 ./test.py -f insert/nchar-unicode.py
python3 ./test.py -f insert/multi.py
python3 ./test.py -f insert/randomNullCommit.py
+python3 insert/retentionpolicy.py
+python3 ./test.py -f insert/alterTableAndInsert.py
python3 ./test.py -f table/column_name.py
python3 ./test.py -f table/column_num.py
@@ -154,6 +156,7 @@ python3 ./test.py -f stream/new.py
python3 ./test.py -f stream/stream1.py
python3 ./test.py -f stream/stream2.py
python3 ./test.py -f stream/parser.py
+python3 ./test.py -f stream/history.py
#alter table
python3 ./test.py -f alter/alter_table_crash.py
@@ -161,6 +164,7 @@ python3 ./test.py -f alter/alter_table_crash.py
# client
python3 ./test.py -f client/client.py
python3 ./test.py -f client/version.py
+python3 ./test.py -f client/alterDatabase.py
# Misc
python3 testCompress.py
@@ -185,6 +189,15 @@ python3 ./test.py -f functions/function_stddev.py
python3 ./test.py -f functions/function_sum.py
python3 ./test.py -f functions/function_top.py
#python3 ./test.py -f functions/function_twa.py
+python3 queryCount.py
+python3 ./test.py -f query/queryGroupbyWithInterval.py
+python3 client/twoClients.py
+python3 test.py -f query/queryInterval.py
# tools
python3 test.py -f tools/taosdemo.py
+
+# subscribe
+python3 test.py -f subscribe/singlemeter.py
+#python3 test.py -f subscribe/stability.py
+python3 test.py -f subscribe/supertable.py
\ No newline at end of file
diff --git a/tests/pytest/import_merge/importToCommit.py b/tests/pytest/import_merge/importToCommit.py
index 9a17ae95fa..7bec5fcd5d 100644
--- a/tests/pytest/import_merge/importToCommit.py
+++ b/tests/pytest/import_merge/importToCommit.py
@@ -33,7 +33,7 @@ class TDTestCase:
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
- tdSql.execute('create database db cache 128 maxtables 10')
+ tdSql.execute('create database db cache 128')
tdSql.execute('use db')
tdLog.info("================= step1")
diff --git a/tests/pytest/insert/alterTableAndInsert.py b/tests/pytest/insert/alterTableAndInsert.py
new file mode 100644
index 0000000000..a0447704f3
--- /dev/null
+++ b/tests/pytest/insert/alterTableAndInsert.py
@@ -0,0 +1,40 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute("create table cars(ts timestamp, speed int) tags(id int)")
+ tdSql.execute("create table car0 using cars tags(0)")
+ tdSql.execute("insert into car0 values(now, 1)")
+ tdSql.execute("alter table cars add column c2 int")
+ tdSql.execute("insert into car0(ts, 'speed') values(now, 2)")
+ tdSql.checkAffectedRows(1)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/insert/restful.py b/tests/pytest/insert/restful.py
new file mode 100644
index 0000000000..bf9bde99f0
--- /dev/null
+++ b/tests/pytest/insert/restful.py
@@ -0,0 +1,60 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import requests, json
+import threading
+import string
+import random
+
+class RestfulInsert:
+ def init(self):
+ self.header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='}
+ self.url = "http://127.0.0.1:6041/rest/sql"
+ self.ts = 1104508800000
+ self.numOfThreads = 50
+
+ def get_random_string(self, length):
+ letters = string.ascii_lowercase
+ result_str = ''.join(random.choice(letters) for i in range(length))
+ return result_str
+
+ def insertData(self, threadID):
+ print("thread %d started" % threadID)
+ data = "create table test.tb%d(ts timestamp, name nchar(20))" % threadID
+ requests.post(self.url, data, headers = self.header)
+ name = self.get_random_string(10)
+ start = self.ts
+ while True:
+ start += 1
+ data = "insert into test.tb%d values(%d, '%s')" % (threadID, start, name)
+ requests.post(self.url, data, headers = self.header)
+
+ def run(self):
+ data = "drop database if exists test"
+ requests.post(self.url, data, headers = self.header)
+ data = "create database test keep 7300"
+ requests.post(self.url, data, headers = self.header)
+
+ threads = []
+ for i in range(self.numOfThreads):
+ thread = threading.Thread(target=self.insertData, args=(i,))
+ thread.start()
+ threads.append(thread)
+
+ for i in range(self.numOfThreads):
+ threads[i].join()
+
+ri = RestfulInsert()
+ri.init()
+ri.run()
\ No newline at end of file
diff --git a/tests/pytest/insert/retentionpolicy.py b/tests/pytest/insert/retentionpolicy.py
new file mode 100644
index 0000000000..c69060b5ae
--- /dev/null
+++ b/tests/pytest/insert/retentionpolicy.py
@@ -0,0 +1,122 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import datetime
+sys.path.insert(0, os.getcwd())
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestRetetion:
+ def init(self):
+ self.queryRows=0
+ tdLog.debug("start to execute %s" % __file__)
+ tdLog.info("prepare cluster")
+ tdDnodes.init("")
+ tdDnodes.setTestCluster(False)
+ tdDnodes.setValgrind(False)
+ tdDnodes.stopAll()
+ tdDnodes.deploy(1)
+ tdDnodes.start(1)
+ print(tdDnodes.getDnodesRootDir())
+ self.conn = taos.connect(config=tdDnodes.getSimCfgPath())
+ tdSql.init(self.conn.cursor())
+ tdSql.execute('reset query cache')
+ def checkRows(self, expectRows,sql):
+ if self.queryRows == expectRows:
+ tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, self.queryRows, expectRows))
+ else:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, sql, self.queryRows, expectRows)
+ os.system("sudo timedatectl set-ntp true")
+ time.sleep(40)
+ tdLog.exit("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
+
+ def run(self):
+
+ tdLog.info("=============== step1")
+ tdSql.execute('create database test keep 3 days 1;')
+ tdSql.execute('use test;')
+ tdSql.execute('create table test(ts timestamp,i int);')
+
+ cmd = 'insert into test values(now-2d,1)(now-1d,2)(now,3)(now+1d,4);'
+ tdLog.info(cmd)
+ tdSql.execute(cmd)
+ tdSql.query('select * from test')
+ tdSql.checkRows(4)
+
+ tdLog.info("=============== step2")
+ tdDnodes.stop(1)
+ os.system("sudo timedatectl set-ntp false")
+ os.system("sudo date -s $(date -d \"${DATE} 2 days\" \"+%Y%m%d\")")
+ tdDnodes.start(1)
+ cmd = 'insert into test values(now,5);'
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+
+ tdLog.info(cmd)
+ tdSql.execute(cmd)
+ self.queryRows=tdSql.query('select * from test')
+ if self.queryRows==4:
+ self.checkRows(4,cmd)
+ return 0
+ else:
+ self.checkRows(5,cmd)
+ tdLog.info("=============== step3")
+ tdDnodes.stop(1)
+ os.system("sudo date -s $(date -d \"${DATE} 2 days\" \"+%Y%m%d\")")
+ tdDnodes.start(1)
+ tdLog.info(cmd)
+ tdSql.execute(cmd)
+ self.queryRows=tdSql.query('select * from test')
+ if self.queryRows==4:
+ self.checkRows(4,cmd)
+ return 0
+ cmd = 'insert into test values(now-1d,6);'
+ tdLog.info(cmd)
+ tdSql.execute(cmd)
+ self.queryRows=tdSql.query('select * from test')
+ self.checkRows(6,cmd)
+ tdLog.info("=============== step4")
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+ cmd = 'insert into test values(now,7);'
+ tdLog.info(cmd)
+ tdSql.execute(cmd)
+ self.queryRows=tdSql.query('select * from test')
+ self.checkRows(7,cmd)
+
+ tdLog.info("=============== step5")
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+ cmd='select * from test where ts > now-1d'
+ self.queryRows=tdSql.query('select * from test where ts > now-1d')
+ self.checkRows(1,cmd)
+
+ def stop(self):
+ os.system("sudo timedatectl set-ntp true")
+ time.sleep(40)
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+clients = TDTestRetetion()
+clients.init()
+clients.run()
+clients.stop()
+
diff --git a/tests/pytest/query/intervalOffset.py b/tests/pytest/query/intervalOffset.py
new file mode 100644
index 0000000000..6d4dd720ee
--- /dev/null
+++ b/tests/pytest/query/intervalOffset.py
@@ -0,0 +1,225 @@
+###################################################################
+# Copyright (c) 2020 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ def general(self):
+ tdSql.execute("create table meters(ts timestamp, s int) tags(id int)")
+ tdSql.execute("create table t0 using meters tags(0)")
+ tdSql.execute("create table t1 using meters tags(1)")
+ tdSql.execute("create table t2 using meters tags(2)")
+ tdSql.execute("create table t3 using meters tags(3)")
+ tdSql.execute("create table t4 using meters tags(4)")
+
+ tdSql.execute("insert into t0 values('2019-01-01 00:00:00', 1)")
+ tdSql.execute("insert into t1 values('2019-01-01 00:00:01', 1)")
+ tdSql.execute("insert into t2 values('2019-01-01 00:01:00', 1)")
+ tdSql.execute("insert into t1 values('2019-01-01 00:01:01', 1)")
+ tdSql.execute("insert into t1 values('2019-01-01 00:01:02', 1)")
+ tdSql.execute("insert into t1 values('2019-01-01 00:01:03', 1)")
+ tdSql.execute("insert into t1 values('2019-01-01 00:01:30', 1)")
+ tdSql.execute("insert into t1 values('2019-01-01 00:01:50', 1)")
+ tdSql.execute("insert into t2 values('2019-01-01 00:02:00', 1)")
+ tdSql.execute("insert into t3 values('2019-01-01 00:02:02', 1)")
+ tdSql.execute("insert into t3 values('2019-01-01 00:02:59', 1)")
+ tdSql.execute("insert into t4 values('2019-01-01 00:02:59', 1)")
+ tdSql.execute("insert into t1 values('2019-01-01 00:03:10', 1)")
+ tdSql.execute("insert into t2 values('2019-01-01 00:08:00', 1)")
+ tdSql.execute("insert into t1 values('2019-01-01 00:08:00', 1)")
+
+ tdSql.query("select count(*) from meters interval(1m, 1s)")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 2)
+ tdSql.checkData(2, 1, 6)
+ tdSql.checkData(3, 1, 3)
+ tdSql.checkData(4, 1, 1)
+ tdSql.checkData(5, 1, 2)
+
+ tdSql.query("select count(*) from meters interval(1m, 2s)")
+ tdSql.checkData(0, 1, 2)
+ tdSql.checkData(1, 1, 2)
+ tdSql.checkData(2, 1, 5)
+ tdSql.checkData(3, 1, 3)
+ tdSql.checkData(4, 1, 1)
+ tdSql.checkData(5, 1, 2)
+
+ tdSql.query("select count(*) from meters interval(90s, 1500a)")
+ tdSql.checkData(0, 1, 2)
+ tdSql.checkData(1, 1, 5)
+ tdSql.checkData(2, 1, 5)
+ tdSql.checkData(3, 1, 1)
+ tdSql.checkData(4, 1, 2)
+
+ def singleTable(self):
+ tdSql.execute("create table car(ts timestamp, s int)")
+ tdSql.execute("insert into car values('2019-01-01 00:00:00', 1)")
+ tdSql.execute("insert into car values('2019-05-13 12:00:00', 1)")
+ tdSql.execute("insert into car values('2019-12-31 23:59:59', 1)")
+ tdSql.execute("insert into car values('2020-01-01 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-01-02 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-01-03 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-01-04 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-01-05 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-01-31 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-02-01 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-02-02 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-02-29 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-03-01 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-03-02 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-03-15 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-03-31 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-05-01 12:00:00', 1)")
+
+ tdSql.query("select count(*) from car interval(1n, 10d)")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 6)
+ tdSql.checkData(3, 1, 3)
+ tdSql.checkData(4, 1, 3)
+ tdSql.checkData(5, 1, 2)
+ tdSql.checkData(6, 1, 1)
+
+ tdSql.query("select count(*) from car interval(1n, 10d) order by ts desc")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 2)
+ tdSql.checkData(2, 1, 3)
+ tdSql.checkData(3, 1, 3)
+ tdSql.checkData(4, 1, 6)
+ tdSql.checkData(5, 1, 1)
+ tdSql.checkData(6, 1, 1)
+
+ tdSql.query("select count(*) from car interval(2n, 5d)")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 6)
+ tdSql.checkData(3, 1, 6)
+ tdSql.checkData(4, 1, 3)
+
+ tdSql.query("select count(*) from car interval(2n) order by ts desc")
+ tdSql.checkData(0, 1, 3)
+ tdSql.checkData(1, 1, 6)
+ tdSql.checkData(2, 1, 6)
+ tdSql.checkData(3, 1, 1)
+ tdSql.checkData(4, 1, 1)
+
+ tdSql.query("select count(*) from car interval(1y, 1n)")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 8)
+ tdSql.checkData(2, 1, 8)
+
+ tdSql.query("select count(*) from car interval(1y, 2n)")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 11)
+ tdSql.checkData(2, 1, 5)
+
+ tdSql.query("select count(*) from car where ts > '2019-05-14 00:00:00' interval(1y, 5d)")
+ tdSql.checkData(0, 1, 6)
+ tdSql.checkData(1, 1, 9)
+
+ def superTable(self):
+ tdSql.execute("create table cars(ts timestamp, s int) tags(id int)")
+ tdSql.execute("create table car0 using cars tags(0)")
+ tdSql.execute("create table car1 using cars tags(1)")
+ tdSql.execute("create table car2 using cars tags(2)")
+ tdSql.execute("create table car3 using cars tags(3)")
+ tdSql.execute("create table car4 using cars tags(4)")
+
+ tdSql.execute("insert into car0 values('2019-01-01 00:00:00', 1)")
+ tdSql.execute("insert into car1 values('2019-05-13 12:00:00', 1)")
+ tdSql.execute("insert into car2 values('2019-12-31 23:59:59', 1)")
+ tdSql.execute("insert into car1 values('2020-01-01 12:00:00', 1)")
+ tdSql.execute("insert into car1 values('2020-01-02 12:00:00', 1)")
+ tdSql.execute("insert into car1 values('2020-01-03 12:00:00', 1)")
+ tdSql.execute("insert into car1 values('2020-01-04 12:00:00', 1)")
+ tdSql.execute("insert into car1 values('2020-01-05 12:00:00', 1)")
+ tdSql.execute("insert into car1 values('2020-01-31 12:00:00', 1)")
+ tdSql.execute("insert into car1 values('2020-02-01 12:00:00', 1)")
+ tdSql.execute("insert into car2 values('2020-02-02 12:00:00', 1)")
+ tdSql.execute("insert into car2 values('2020-02-29 12:00:00', 1)")
+ tdSql.execute("insert into car3 values('2020-03-01 12:00:00', 1)")
+ tdSql.execute("insert into car3 values('2020-03-02 12:00:00', 1)")
+ tdSql.execute("insert into car3 values('2020-03-15 12:00:00', 1)")
+ tdSql.execute("insert into car4 values('2020-03-31 12:00:00', 1)")
+ tdSql.execute("insert into car3 values('2020-05-01 12:00:00', 1)")
+
+ tdSql.query("select count(*) from cars interval(1n, 10d)")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 6)
+ tdSql.checkData(3, 1, 3)
+ tdSql.checkData(4, 1, 3)
+ tdSql.checkData(5, 1, 2)
+ tdSql.checkData(6, 1, 1)
+
+ tdSql.query("select count(*) from cars interval(1n, 10d) order by ts desc")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 2)
+ tdSql.checkData(2, 1, 3)
+ tdSql.checkData(3, 1, 3)
+ tdSql.checkData(4, 1, 6)
+ tdSql.checkData(5, 1, 1)
+ tdSql.checkData(6, 1, 1)
+
+ tdSql.query("select count(*) from cars interval(2n, 5d)")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 6)
+ tdSql.checkData(3, 1, 6)
+ tdSql.checkData(4, 1, 3)
+
+ tdSql.query("select count(*) from cars interval(2n) order by ts desc")
+ tdSql.checkData(0, 1, 3)
+ tdSql.checkData(1, 1, 6)
+ tdSql.checkData(2, 1, 6)
+ tdSql.checkData(3, 1, 1)
+ tdSql.checkData(4, 1, 1)
+
+ tdSql.query("select count(*) from cars interval(1y, 1n)")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 8)
+ tdSql.checkData(2, 1, 8)
+
+ tdSql.query("select count(*) from cars interval(1y, 2n)")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 11)
+ tdSql.checkData(2, 1, 5)
+
+ tdSql.query("select count(*) from cars where ts > '2019-05-14 00:00:00' interval(1y, 5d)")
+ tdSql.checkData(0, 1, 6)
+ tdSql.checkData(1, 1, 9)
+
+ def run(self):
+ tdSql.prepare()
+ self.general()
+ self.singleTable()
+ self.superTable()
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
+
diff --git a/tests/pytest/query/natualInterval.py b/tests/pytest/query/natualInterval.py
index 1ed91e1c68..434cd392bb 100644
--- a/tests/pytest/query/natualInterval.py
+++ b/tests/pytest/query/natualInterval.py
@@ -89,10 +89,10 @@ class TDTestCase:
def superTable(self):
tdSql.execute("create table cars(ts timestamp, s int) tags(id int)")
tdSql.execute("create table car0 using cars tags(0)")
- tdSql.execute("create table car1 using cars tags(0)")
- tdSql.execute("create table car2 using cars tags(0)")
- tdSql.execute("create table car3 using cars tags(0)")
- tdSql.execute("create table car4 using cars tags(0)")
+ tdSql.execute("create table car1 using cars tags(1)")
+ tdSql.execute("create table car2 using cars tags(2)")
+ tdSql.execute("create table car3 using cars tags(3)")
+ tdSql.execute("create table car4 using cars tags(4)")
tdSql.execute("insert into car0 values('2019-01-01 00:00:00', 1)")
tdSql.execute("insert into car1 values('2019-05-13 12:00:00', 1)")
diff --git a/tests/pytest/query/queryGroupbyWithInterval.py b/tests/pytest/query/queryGroupbyWithInterval.py
new file mode 100644
index 0000000000..14f6999021
--- /dev/null
+++ b/tests/pytest/query/queryGroupbyWithInterval.py
@@ -0,0 +1,53 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute(
+ "create table stest(ts timestamp,size INT,filenum INT) tags (appname binary(500),tenant binary(500))")
+ tdSql.execute(
+ "insert into test1 using stest tags('test1','aaa') values ('2020-09-04 16:53:54.003',210,3)")
+ tdSql.execute(
+ "insert into test2 using stest tags('test1','aaa') values ('2020-09-04 16:53:56.003',210,3)")
+ tdSql.execute(
+ "insert into test11 using stest tags('test11','bbb') values ('2020-09-04 16:53:57.003',210,3)")
+ tdSql.execute(
+ "insert into test12 using stest tags('test11','bbb') values ('2020-09-04 16:53:58.003',210,3)")
+ tdSql.execute(
+ "insert into test21 using stest tags('test21','ccc') values ('2020-09-04 16:53:59.003',210,3)")
+ tdSql.execute(
+ "insert into test22 using stest tags('test21','ccc') values ('2020-09-04 16:54:54.003',210,3)")
+
+ tdSql.query("select sum(size) from stest interval(1d) group by appname")
+ tdSql.checkRows(3)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/queryInterval.py b/tests/pytest/query/queryInterval.py
new file mode 100644
index 0000000000..9922201604
--- /dev/null
+++ b/tests/pytest/query/queryInterval.py
@@ -0,0 +1,66 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.ts = 1593548685000
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute("create table st (ts timestamp, voltage int) tags (loc nchar(30))")
+ tdSql.execute("insert into t0 using st tags('beijing') values(%d, 220) (%d, 221) (%d, 225) (%d, 228) (%d, 222)"
+ % (self.ts, self.ts + 1000000000, self.ts + 2000000000, self.ts + 3000000000, self.ts + 6000000000))
+ tdSql.execute("insert into t1 using st tags('shanghai') values(%d, 220) (%d, 221) (%d, 225) (%d, 228) (%d, 222)"
+ % (self.ts, self.ts + 2000000000, self.ts + 4000000000, self.ts + 5000000000, self.ts + 7000000000))
+
+ tdSql.query("select avg(voltage) from st interval(1n)")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 1, 221.4)
+ tdSql.checkData(1, 1, 227.0)
+ tdSql.checkData(2, 1, 222.0)
+
+ tdSql.query("select avg(voltage) from st interval(1n, 15d)")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 1, 220.333333)
+ tdSql.checkData(1, 1, 224.666666)
+ tdSql.checkData(2, 1, 225.0)
+ tdSql.checkData(3, 1, 222.0)
+
+ tdSql.query("select avg(voltage) from st interval(1n, 15d) group by loc")
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 1, 220.5)
+ tdSql.checkData(1, 1, 226.5)
+ tdSql.checkData(2, 1, 222.0)
+ tdSql.checkData(3, 1, 220.0)
+ tdSql.checkData(4, 1, 221.0)
+ tdSql.checkData(5, 1, 226.5)
+ tdSql.checkData(6, 1, 222.0)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/queryNormal.py b/tests/pytest/query/queryNormal.py
index 1ab285bbad..13393117d6 100644
--- a/tests/pytest/query/queryNormal.py
+++ b/tests/pytest/query/queryNormal.py
@@ -35,13 +35,16 @@ class TDTestCase:
tdSql.execute(
"insert into tb2 using stb1 tags(2,'tb2', '表2') values ('2020-04-18 15:00:02.000', 3, 2.1), ('2020-04-18 15:00:03.000', 4, 2.2)")
- # inner join --- bug
+ tdSql.error("select * from tb 1")
+
tdSql.query("select * from tb1 a, tb2 b where a.ts = b.ts")
tdSql.checkRows(0)
# join 3 tables -- bug exists
tdSql.error("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id")
+ tdSql.error("select * from stb1 whern c1 > 'test' limit 100")
+
# query show stable
tdSql.query("show stables")
tdSql.checkRows(1)
diff --git a/tests/pytest/query/queryNullValueTest.py b/tests/pytest/query/queryNullValueTest.py
index f521f2e5e9..bc0b11827e 100644
--- a/tests/pytest/query/queryNullValueTest.py
+++ b/tests/pytest/query/queryNullValueTest.py
@@ -42,6 +42,9 @@ class TDTestCase:
tdSql.prepare()
for i in range(len(self.types)):
+ tdSql.execute("drop table if exists t0")
+ tdSql.execute("drop table if exists t1")
+
print("======== checking type %s ==========" % self.types[i])
tdSql.execute("create table t0 (ts timestamp, col %s)" % self.types[i])
tdSql.execute("insert into t0 values (%d, NULL)" % (self.ts))
diff --git a/tests/pytest/queryCount.py b/tests/pytest/queryCount.py
new file mode 100644
index 0000000000..7cc8f61f4d
--- /dev/null
+++ b/tests/pytest/queryCount.py
@@ -0,0 +1,91 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+import threading
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class QueryCountMultiThread:
+ def initConnection(self):
+ self.records = 10000000
+ self.numOfThreads = 50
+ self.ts = 1537146000000
+ self.host = "127.0.0.1"
+ self.user = "root"
+ self.password = "taosdata"
+ self.config = "/home/xp/git/TDengine/sim/dnode1/cfg"
+ self.conn = taos.connect(
+ self.host,
+ self.user,
+ self.password,
+ self.config)
+
+ def insertData(self, threadID):
+ cursor = self.conn.cursor()
+ print("Thread %d: starting" % threadID)
+ base = 200000 * threadID
+ for i in range(200):
+ query = "insert into tb values"
+ for j in range(1000):
+ query += "(%d, %d, 'test')" % (self.ts + base + i * 1000 + j, base + i * 1000 + j)
+ cursor.execute(query)
+ cursor.close()
+ print("Thread %d: finishing" % threadID)
+
+ def run(self):
+ tdDnodes.init("")
+ tdDnodes.setTestCluster(False)
+ tdDnodes.setValgrind(False)
+
+ tdDnodes.stopAll()
+ tdDnodes.deploy(1)
+ tdDnodes.start(1)
+
+ cursor = self.conn.cursor()
+ cursor.execute("drop database if exists db")
+ cursor.execute("create database db")
+ cursor.execute("use db")
+ cursor.execute("create table tb (ts timestamp, id int, name nchar(30))")
+ cursor.close()
+
+ threads = []
+ for i in range(50):
+ thread = threading.Thread(target=self.insertData, args=(i,))
+ threads.append(thread)
+ thread.start()
+
+ for i in range(50):
+ threads[i].join()
+
+ cursor = self.conn.cursor()
+ cursor.execute("use db")
+ sql = "select count(*) from tb"
+ cursor.execute(sql)
+ data = cursor.fetchall()
+
+ if(data[0][0] == 10000000):
+ tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%d" % (sql, 0, 0, data[0][0], 10000000))
+ else:
+ tdLog.exit("queryCount.py failed: sql:%s failed, row:%d col:%d data:%d != expect:%d" % (sql, 0, 0, data[0][0], 10000000))
+
+ cursor.close()
+ self.conn.close()
+
+q = QueryCountMultiThread()
+q.initConnection()
+q.run()
\ No newline at end of file
diff --git a/tests/pytest/stream/history.py b/tests/pytest/stream/history.py
new file mode 100644
index 0000000000..890580001c
--- /dev/null
+++ b/tests/pytest/stream/history.py
@@ -0,0 +1,63 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import time
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute("create table cars(ts timestamp, s int) tags(id int)")
+ tdSql.execute("create table car0 using cars tags(0)")
+ tdSql.execute("create table car1 using cars tags(1)")
+ tdSql.execute("create table car2 using cars tags(2)")
+ tdSql.execute("create table car3 using cars tags(3)")
+ tdSql.execute("create table car4 using cars tags(4)")
+
+ tdSql.execute("insert into car0 values('2019-01-01 00:00:00.103', 1)")
+ tdSql.execute("insert into car1 values('2019-01-01 00:00:00.234', 1)")
+ tdSql.execute("insert into car0 values('2019-01-01 00:00:01.012', 1)")
+ tdSql.execute("insert into car0 values('2019-01-01 00:00:02.003', 1)")
+ tdSql.execute("insert into car2 values('2019-01-01 00:00:02.328', 1)")
+ tdSql.execute("insert into car0 values('2019-01-01 00:00:03.139', 1)")
+ tdSql.execute("insert into car0 values('2019-01-01 00:00:04.348', 1)")
+ tdSql.execute("insert into car0 values('2019-01-01 00:00:05.783', 1)")
+ tdSql.execute("insert into car1 values('2019-01-01 00:00:01.893', 1)")
+ tdSql.execute("insert into car1 values('2019-01-01 00:00:02.712', 1)")
+ tdSql.execute("insert into car1 values('2019-01-01 00:00:03.982', 1)")
+ tdSql.execute("insert into car3 values('2019-01-01 00:00:01.389', 1)")
+ tdSql.execute("insert into car4 values('2019-01-01 00:00:01.829', 1)")
+
+ tdSql.execute("create table strm as select count(*) from cars interval(4s)")
+ tdSql.waitedQuery("select * from strm", 2, 100)
+ tdSql.checkData(0, 1, 11)
+ tdSql.checkData(1, 1, 2)
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tag_lite/datatype-without-alter.py b/tests/pytest/tag_lite/datatype-without-alter.py
index 42bc42bdbf..da52e149a5 100644
--- a/tests/pytest/tag_lite/datatype-without-alter.py
+++ b/tests/pytest/tag_lite/datatype-without-alter.py
@@ -38,7 +38,7 @@ class TDTestCase:
tdLog.info("drop database db if exits")
tdSql.execute('drop database if exists db')
tdLog.info("================= step1")
- tdSql.execute('create database db maxtables 4')
+ tdSql.execute('create database db')
tdLog.sleep(5)
tdSql.execute('use db')
diff --git a/tests/pytest/tag_lite/datatype.py b/tests/pytest/tag_lite/datatype.py
index bc99cf74b0..f7fa9fa3a2 100644
--- a/tests/pytest/tag_lite/datatype.py
+++ b/tests/pytest/tag_lite/datatype.py
@@ -38,7 +38,7 @@ class TDTestCase:
tdLog.info("drop database db if exits")
tdSql.execute('drop database if exists db')
tdLog.info("================= step1")
- tdSql.execute('create database db maxtables 4')
+ tdSql.execute('create database db')
tdLog.sleep(5)
tdSql.execute('use db')
diff --git a/tests/pytest/tools/taosdemo.py b/tests/pytest/tools/taosdemo.py
index 54d33c90f3..5bf8ebaf03 100644
--- a/tests/pytest/tools/taosdemo.py
+++ b/tests/pytest/tools/taosdemo.py
@@ -16,6 +16,7 @@ import os
from util.log import *
from util.cases import *
from util.sql import *
+from util.dnodes import *
class TDTestCase:
@@ -25,11 +26,30 @@ class TDTestCase:
self.numberOfTables = 10000
self.numberOfRecords = 100
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
def run(self):
tdSql.prepare()
-
- os.system("yes | taosdemo -t %d -n %d" % (self.numberOfTables, self.numberOfRecords))
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+ binPath = buildPath+ "/build/bin/"
+ os.system("yes | %staosdemo -t %d -n %d" % (binPath,self.numberOfTables, self.numberOfRecords))
tdSql.execute("use test")
tdSql.query("select count(*) from meters")
diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py
index 627d712474..9abec354c6 100644
--- a/tests/pytest/util/sql.py
+++ b/tests/pytest/util/sql.py
@@ -123,8 +123,12 @@ class TDSql:
def checkData(self, row, col, data):
self.checkRowCol(row, col)
- if str(self.queryResult[row][col]) != str(data):
- if isinstance(data, float) and abs(self.queryResult[row][col] - data) <= 0.000001:
+ if self.queryResult[row][col] != data:
+ if str(self.queryResult[row][col]) == str(data):
+ tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
+ (self.sql, row, col, self.queryResult[row][col], data))
+ return
+ elif isinstance(data, float) and abs(self.queryResult[row][col] - data) <= 0.000001:
tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
(self.sql, row, col, self.queryResult[row][col], data))
return
diff --git a/tests/script/fullGeneralSuite.sim b/tests/script/fullGeneralSuite.sim
index d137e53d27..4dd3957f39 100644
--- a/tests/script/fullGeneralSuite.sim
+++ b/tests/script/fullGeneralSuite.sim
@@ -130,7 +130,6 @@ run general/parser/join.sim
run general/parser/join_multivnode.sim
run general/parser/select_with_tags.sim
run general/parser/groupby.sim
-run general/parser/bug.sim
run general/parser/tags_dynamically_specifiy.sim
run general/parser/set_tag_vals.sim
#unsupport run general/parser/repeatAlter.sim
@@ -221,3 +220,5 @@ run general/stream/table_del.sim
run general/stream/metrics_del.sim
run general/stream/table_replica1_vnoden.sim
run general/stream/metrics_replica1_vnoden.sim
+run general/db/show_create_db.sim
+run general/db/show_create_table.sim
diff --git a/tests/script/general/db/alter_option.sim b/tests/script/general/db/alter_option.sim
index 49c75966ca..c8aa2480c5 100644
--- a/tests/script/general/db/alter_option.sim
+++ b/tests/script/general/db/alter_option.sim
@@ -218,7 +218,10 @@ if $data12_db != 1 then
return -1
endi
-sql_error alter database db wal 2
+sql alter database db wal 1
+sql alter database db wal 2
+sql alter database db wal 1
+sql alter database db wal 2
sql_error alter database db wal 0
sql_error alter database db wal 3
sql_error alter database db wal 4
@@ -226,11 +229,13 @@ sql_error alter database db wal -1
sql_error alter database db wal 1000
print ============== step fsync
-sql_error alter database db fsync 2
-sql_error alter database db fsync 3
-sql_error alter database db fsync 4
+sql alter database db fsync 0
+sql alter database db fsync 1
+sql alter database db fsync 3600
+sql alter database db fsync 18000
+sql alter database db fsync 180000
+sql_error alter database db fsync 180001
sql_error alter database db fsync -1
-sql_error alter database db fsync 1000
print ============== step comp
sql show databases
diff --git a/tests/script/general/db/delete_reusevnode.sim b/tests/script/general/db/delete_reusevnode.sim
index 5cfe7729ed..5781c384fe 100644
--- a/tests/script/general/db/delete_reusevnode.sim
+++ b/tests/script/general/db/delete_reusevnode.sim
@@ -1,5 +1,6 @@
system sh/stop_dnodes.sh
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/deploy.sh -n dnode1 -i 1
print ========= start dnodes
diff --git a/tests/script/general/db/delete_reusevnode2.sim b/tests/script/general/db/delete_reusevnode2.sim
index 9fa1969425..0db2440b3b 100644
--- a/tests/script/general/db/delete_reusevnode2.sim
+++ b/tests/script/general/db/delete_reusevnode2.sim
@@ -1,5 +1,6 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
print ========= start dnodes
system sh/exec.sh -n dnode1 -s start
diff --git a/tests/script/general/db/dropdnodes.sim b/tests/script/general/db/dropdnodes.sim
index 884a88490e..c7bbdf73a4 100644
--- a/tests/script/general/db/dropdnodes.sim
+++ b/tests/script/general/db/dropdnodes.sim
@@ -4,6 +4,8 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
print ========== prepare data
system sh/exec.sh -n dnode1 -s start
diff --git a/tests/script/general/db/show_create_db.sim b/tests/script/general/db/show_create_db.sim
new file mode 100644
index 0000000000..baa7b253e1
--- /dev/null
+++ b/tests/script/general/db/show_create_db.sim
@@ -0,0 +1,32 @@
+system sh/stop_dnodes.sh
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print =============== step2
+sql create database db
+sql show create database db
+
+if $rows != 1 then
+ return -1
+endi
+
+print =============== step3
+sql use db
+sql show create database db
+
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != db then
+ return -1
+endi
+
+sql drop database db
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/db/show_create_table.sim b/tests/script/general/db/show_create_table.sim
new file mode 100644
index 0000000000..8338638709
--- /dev/null
+++ b/tests/script/general/db/show_create_table.sim
@@ -0,0 +1,87 @@
+system sh/stop_dnodes.sh
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+print ===============create three type table
+sql create database db
+sql use db
+sql create table meters(ts timestamp, f binary(8)) tags(loc int, zone binary(8))
+sql create table t0 using meters tags(1,'ch')
+sql create table normalTbl(ts timestamp, zone binary(8))
+
+sql use db
+sql show create table meters
+if $rows != 1 then
+ return -1
+endi
+
+print ===============check sub table
+sql show create table t0
+if $rows != 1 then
+ return -1
+endi
+if $data00 == 't0' then
+ return -1
+endi
+
+print ===============check normal table
+
+sql show create table normalTbl
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 == 'normalTbl' then
+ return -1
+endi
+
+print ===============check super table
+sql show create table meters
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 == 'meters' then
+ return -1
+endi
+
+print ===============check sub table with prefix
+
+sql show create table db.t0
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 == 't0' then
+ return -1
+endi
+
+print ===============check normal table with prefix
+sql show create table db.normalTbl
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 == 'normalTbl' then
+ return -1
+endi
+
+
+print ===============check super table with prefix
+sql show create table db.meters
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 == 'meters' then
+ return -1
+endi
+
+sql drop database db
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/http/autocreate.sim b/tests/script/general/http/autocreate.sim
index 6a005b028a..98d64ab839 100644
--- a/tests/script/general/http/autocreate.sim
+++ b/tests/script/general/http/autocreate.sim
@@ -24,5 +24,10 @@ print curl 127.0.0.1:7111/rest/sql -----> $system_content
# return -1
#endi
+sql select * from db.win_cpu_windows_1_processor
+print rows: $rows
+if $rows != 1 then
+ return -1
+endi
#system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/general/http/chunked.sim b/tests/script/general/http/chunked.sim
new file mode 100644
index 0000000000..6592c761c6
--- /dev/null
+++ b/tests/script/general/http/chunked.sim
@@ -0,0 +1,37 @@
+system sh/stop_dnodes.sh
+sleep 3000
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c wallevel -v 0
+system sh/cfg.sh -n dnode1 -c http -v 1
+system sh/cfg.sh -n dnode1 -c maxSQLLength -v 340032
+system sh/exec.sh -n dnode1 -s start
+
+sleep 3000
+sql connect
+
+print ============================ dnode1 start
+
+print =============== step1 - prepare data
+sql create database d1
+sql use d1
+
+sql create table table_rest (ts timestamp, i int)
+print sql length is 270KB
+restful d1 table_rest 1591072800 10000
+restful d1 table_rest 1591172800 10000
+restful d1 table_rest 1591272800 10000
+restful d1 table_rest 1591372800 10000
+restful d1 table_rest 1591472800 10000
+restful d1 table_rest 1591572800 10000
+restful d1 table_rest 1591672800 10000
+restful d1 table_rest 1591772800 10000
+restful d1 table_rest 1591872800 10000
+restful d1 table_rest 1591972800 10000
+
+sql select * from table_rest;
+print rows: $rows
+if $rows != 100000 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/general/http/grafana.sim b/tests/script/general/http/grafana.sim
index cea804cfbb..c7866e5f4c 100644
--- a/tests/script/general/http/grafana.sim
+++ b/tests/script/general/http/grafana.sim
@@ -54,13 +54,13 @@ print =============== step2 - login
system_content curl 127.0.0.1:7111/grafana/
print 1-> $system_content
-if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:7111/grafana/xx
print 2-> $system_content
-if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
@@ -72,7 +72,7 @@ endi
system_content curl 127.0.0.1:7111/grafana/root/1/123/1/1/3
print 4-> $system_content
-if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
@@ -84,13 +84,13 @@ endi
system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:7111/grafana/root/1/login
print 6-> $system_content
-if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then
+if $system_content != @{"status":"error","code":4386,"desc":"invalid type of Authorization"}@ then
return -1
endi
system_content curl -H 'Authorization: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:7111/grafana/root/1/login
print 7-> $system_content
-if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then
+if $system_content != @{"status":"error","code":4387,"desc":"invalid format of Authorization"}@ then
return -1
endi
diff --git a/tests/script/general/http/gzip.sim b/tests/script/general/http/gzip.sim
new file mode 100644
index 0000000000..9c77567abb
--- /dev/null
+++ b/tests/script/general/http/gzip.sim
@@ -0,0 +1,39 @@
+system sh/stop_dnodes.sh
+sleep 3000
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c wallevel -v 0
+system sh/cfg.sh -n dnode1 -c http -v 1
+system sh/cfg.sh -n dnode1 -c maxSQLLength -v 340032
+system sh/exec.sh -n dnode1 -s start
+
+sleep 3000
+sql connect
+
+print ============================ dnode1 start
+
+print =============== step1 - prepare data
+sql create database d1
+sql use d1
+
+sql create table table_rest (ts timestamp, i int)
+print sql length is 270KB
+restful d1 table_rest 1591072800 10000 gzip
+restful d1 table_rest 1591172800 10000 gzip
+restful d1 table_rest 1591272800 10000 gzip
+restful d1 table_rest 1591372800 10000 gzip
+restful d1 table_rest 1591472800 10000 gzip
+restful d1 table_rest 1591572800 10000 gzip
+restful d1 table_rest 1591672800 10000 gzip
+restful d1 table_rest 1591772800 10000 gzip
+restful d1 table_rest 1591872800 10000 gzip
+restful d1 table_rest 1591972800 10000 gzip
+
+sql select * from table_rest;
+print rows: $rows
+if $rows != 100000 then
+ return -1
+endi
+
+system curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest' 127.0.0.1:7111/rest/sql --compressed
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/general/http/restful_full.sim b/tests/script/general/http/restful_full.sim
index b7f98e49e0..8d2f1a7c00 100644
--- a/tests/script/general/http/restful_full.sim
+++ b/tests/script/general/http/restful_full.sim
@@ -14,26 +14,26 @@ print =============== step1 - login
system_content curl 127.0.0.1:7111/rest/
print 1-> $system_content
-if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:7111/rest/xx
print 2-> $system_content
-if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:7111/rest/login
print 3-> $system_content
-if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
#4
system_content curl 127.0.0.1:7111/rest/login/root
print 4-> $system_content
-if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
@@ -58,13 +58,13 @@ endi
#8
system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:7111/rest/login/root/1
print 8-> $system_content
-if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then
+if $system_content != @{"status":"error","code":4386,"desc":"invalid type of Authorization"}@ then
return -1
endi
system_content curl -H 'Authorization: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:7111/rest/login/root/1
print 9-> $system_content
-if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then
+if $system_content != @{"status":"error","code":4387,"desc":"invalid format of Authorization"}@ then
return -1
endi
@@ -100,7 +100,7 @@ endi
#14
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '' 127.0.0.1:7111/rest/sql
print 14-> $system_content
-if $system_content != @{"status":"error","code":5012,"desc":"no sql input"}@ then
+if $system_content != @{"status":"error","code":4359,"desc":"no sql input"}@ then
return -1
endi
@@ -119,7 +119,7 @@ endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' used1' 127.0.0.1:7111/rest/sql
print 17-> $system_content
-if $system_content != @{"status":"error","code":534,"desc":"Syntax errr in SQL"}@ then
+if $system_content != @{"status":"error","code":534,"desc":"Syntax error in SQL"}@ then
return -1
endi
diff --git a/tests/script/general/http/telegraf.sim b/tests/script/general/http/telegraf.sim
index e54af99ad7..4018d9661a 100644
--- a/tests/script/general/http/telegraf.sim
+++ b/tests/script/general/http/telegraf.sim
@@ -16,224 +16,224 @@ print =============== step1 - parse
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/
print $system_content
-if $system_content != @{"status":"error","code":5022,"desc":"database name can not be null"}@ then
+if $system_content != @{"status":"error","code":4448,"desc":"database name can not be null"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/
print $system_content
-if $system_content != @{"status":"error","code":5022,"desc":"database name can not be null"}@ then
+if $system_content != @{"status":"error","code":4448,"desc":"database name can not be null"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/d123456789012345678901234567890123456
print $system_content
-if $system_content != @{"status":"error","code":5023,"desc":"database name too long"}@ then
+if $system_content != @{"status":"error","code":4449,"desc":"database name too long"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[]' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then
+if $system_content != @{"status":"error","code":4453,"desc":"metric name not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then
+if $system_content != @{"status":"error","code":4453,"desc":"metric name not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{}]' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then
+if $system_content != @{"status":"error","code":4453,"desc":"metric name not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"metrics": []}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5025,"desc":"metrics size is 0"}@ then
+if $system_content != @{"status":"error","code":4451,"desc":"metrics size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"metrics": [{}]}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then
+if $system_content != @{"status":"error","code":4453,"desc":"metric name not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"metrics": 12}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5025,"desc":"metrics size is 0"}@ then
+if $system_content != @{"status":"error","code":4451,"desc":"metrics size is 0"}@ then
return -1
endi
#system_content curl -u root:taosdata -d '{"metrics": [{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]}' 127.0.0.1:7111/telegraf/db/root/taosdata1
#print $system_content
-#if $system_content != @{"status":"error","code":5026,"desc":"metrics size can not more than 50"}@ then
+#if $system_content != @{"status":"error","code":4452,"desc":"metrics size can not more than 50"}@ then
# return -1
#endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then
+if $system_content != @{"status":"error","code":4453,"desc":"metric name not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":111,"tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5028,"desc":"metric name type should be string"}@ then
+if $system_content != @{"status":"error","code":4454,"desc":"metric name type should be string"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5029,"desc":"metric name length is 0"}@ then
+if $system_content != @{"status":"error","code":4455,"desc":"metric name length is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5030,"desc":"metric name length too long"}@ then
+if $system_content != @{"status":"error","code":4456,"desc":"metric name length too long"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"}}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5031,"desc":"timestamp not find"}@ then
+if $system_content != @{"status":"error","code":4457,"desc":"timestamp not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":""}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5032,"desc":"timestamp type should be integer"}@ then
+if $system_content != @{"status":"error","code":4458,"desc":"timestamp type should be integer"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":-1}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5033,"desc":"timestamp value smaller than 0"}@ then
+if $system_content != @{"status":"error","code":4459,"desc":"timestamp value smaller than 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5034,"desc":"tags not find"}@ then
+if $system_content != @{"status":"error","code":4460,"desc":"tags not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5035,"desc":"tags size is 0"}@ then
+if $system_content != @{"status":"error","code":4461,"desc":"tags size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":"","timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5035,"desc":"tags size is 0"}@ then
+if $system_content != @{"status":"error","code":4461,"desc":"tags size is 0"}@ then
return -1
endi
#system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor","host":"windows","instance":"1","objectname":"Processor","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata
#print $system_content
-#if $system_content != @{"status":"error","code":5036,"desc":"tags size too long"}@ then
+#if $system_content != @{"status":"error","code":4462,"desc":"tags size too long"}@ then
# return -1
#endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5035,"desc":"tags size is 0"}@ then
+if $system_content != @{"status":"error","code":4461,"desc":"tags size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"":"windows"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5038,"desc":"tag name is null"}@ then
+if $system_content != @{"status":"error","code":4464,"desc":"tag name is null"}@ then
return -1
endi
#system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host111111111111222222222222222222222":""},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1
#print $system_content
-#if $system_content != @{"status":"error","code":5039,"desc":"tag name length too long"}@ then
+#if $system_content != @{"status":"error","code":4465,"desc":"tag name length too long"}@ then
# return -1
#endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":true},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5040,"desc":"tag value type should be number or string"}@ then
+if $system_content != @{"status":"error","code":4466,"desc":"tag value type should be number or string"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":""},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5041,"desc":"tag value is null"}@ then
+if $system_content != @{"status":"error","code":4467,"desc":"tag value is null"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"5022":"111"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5042,"desc":"table is null"}@ then
+if $system_content != @{"status":"error","code":4468,"desc":"table is null"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5043,"desc":"table name length too long"}@ then
+if $system_content != @{"status":"error","code":4469,"desc":"table name length too long"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5045,"desc":"fields size is 0"}@ then
+if $system_content != @{"status":"error","code":4471,"desc":"fields size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5048,"desc":"field name is null"}@ then
+if $system_content != @{"status":"error","code":4474,"desc":"field name is null"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":"","Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5051,"desc":"field value is null"}@ then
+if $system_content != @{"status":"error","code":4477,"desc":"field value is null"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":true,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":5050,"desc":"field value type should be number or string"}@ then
+if $system_content != @{"status":"error","code":4476,"desc":"field value type should be number or string"}@ then
return -1
endi
diff --git a/tests/script/general/http/testSuite.sim b/tests/script/general/http/testSuite.sim
index d91e9f452d..f35362bf07 100644
--- a/tests/script/general/http/testSuite.sim
+++ b/tests/script/general/http/testSuite.sim
@@ -1,3 +1,5 @@
+run general/http/autocreate.sim
+run general/http/chunked.sim
run general/http/restful.sim
run general/http/restful_insert.sim
run general/http/restful_limit.sim
diff --git a/tests/script/general/parser/auto_create_tb_drop_tb.sim b/tests/script/general/parser/auto_create_tb_drop_tb.sim
index 68e9c5afb7..be334bca4a 100644
--- a/tests/script/general/parser/auto_create_tb_drop_tb.sim
+++ b/tests/script/general/parser/auto_create_tb_drop_tb.sim
@@ -1,7 +1,7 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 1
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c ctime -v 30
system sh/exec.sh -n dnode1 -s start
sleep 3000
diff --git a/tests/script/general/parser/dbtbnameValidate.sim b/tests/script/general/parser/dbtbnameValidate.sim
index cf72f3ae61..b46a222282 100644
--- a/tests/script/general/parser/dbtbnameValidate.sim
+++ b/tests/script/general/parser/dbtbnameValidate.sim
@@ -86,8 +86,6 @@ print ========== insert data by multi-format
sql create table abc.tk_mt (ts timestamp, a int, b binary(16), c bool, d float, e double, f nchar(16)) tags (t1 int, t2 binary(16))
sql create table abc.tk_subt001 using tk_mt tags(1, 'subt001')
-sql insert into abc.tk_subt001 values (now-1y, 1, 'binary_1', true, 1.001, 2.001, 'nchar_1')
-sql insert into abc.tk_subt001 values (now-1n, 2, 'binary_2', true, 1.002, 2.002, 'nchar_2')
sql insert into abc.tk_subt001 values (now-1w, 3, 'binary_3', true, 1.003, 2.003, 'nchar_3')
sql insert into abc.tk_subt001 (ts, a, c, e, f) values (now-1d, 4, false, 2.004, 'nchar_4')
sql insert into abc.tk_subt001 (ts, a, c, e, f) values (now-1h, 5, false, 2.005, 'nchar_5')
@@ -95,35 +93,29 @@ sql insert into abc.tk_subt001 (ts, b, d) values (now-1m, 'binary_6',
sql insert into abc.tk_subt001 (ts, b, d) values (now-1s, 'binary_7', 1.007)
sql insert into abc.tk_subt001 (ts, b, d) values (now-1a, 'binary_8', 1.008)
sql select * from tk_subt001
-if $rows != 8 then
- print ==== expect rows is 8, but actually is $rows
+if $rows != 6 then
+ print ==== expect rows is 6, but actually is $rows
return -1
endi
-sql insert into abc.tk_subt002 using tk_mt tags (22,'subt002x') values (now-2y, 2008, 'binary_2008', false, 2008.001, 2008.001, 'nchar_2008')
-sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now-1y, 2007, 'binary_2007', false, 2007.001, 2007.001, 'nchar_2007')
-sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now-1n, 2006, 'binary_2006', true, 2006.001, 2006.001, 'nchar_2006')
-sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1s, 2001, 'binary_2001', true, 2001.001, 2001.001, 'nchar_2001')
+sql insert into abc.tk_subt002 using tk_mt tags (22, 'subt002x') values (now+1s, 2001, 'binary_2001', true, 2001.001, 2001.001, 'nchar_2001')
sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1m, 2002, 'binary_2002', false, 2002.001, 2002.001, 'nchar_2002')
sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1h, 2003, 'binary_2003', false, 2003.001, 2003.001, 'nchar_2003')
sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1d, 2004, 'binary_2004', true, 2004.001, 2004.001, 'nchar_2004')
sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1w, 2005, 'binary_2005', false, 2005.001, 2005.001, 'nchar_2005')
sql select * from tk_subt002
-if $rows != 8 then
- print ==== expect rows is 8, but actually is $rows
+if $rows != 5 then
+ print ==== expect rows is 5, but actually is $rows
return -1
endi
-sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-5y, 3001, false, 3001.001, 'nchar_3001')
-sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-4y, 3002, false, 3002.001, 'nchar_3002')
-sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-3y, 3003, true , 3003.001, 'nchar_3003')
-sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-2y, 3004, false, 3004.001, 'nchar_3004')
-sql insert into abc.tk_subt003 values (now-37d, 3005, 'binary_3005', false, 3005.001, 3005.001, 'nchar_3005')
+sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-38d, 3004, false, 3004.001, 'nchar_3004')
+sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-37d, 3005, false, 3005.001, 'nchar_3005')
sql insert into abc.tk_subt003 values (now-36d, 3006, 'binary_3006', true, 3006.001, 3006.001, 'nchar_3006')
sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (33, 'subt003x') values (now-35d, 3007, false, 3007.001, 'nchar_3007')
sql select * from tk_subt003
-if $rows != 7 then
- print ==== expect rows is 7, but actually is $rows
+if $rows != 4 then
+ print ==== expect rows is 4, but actually is $rows
return -1
endi
diff --git a/tests/script/general/parser/fill.sim b/tests/script/general/parser/fill.sim
index 488f807fbc..f89c27d71f 100644
--- a/tests/script/general/parser/fill.sim
+++ b/tests/script/general/parser/fill.sim
@@ -850,6 +850,8 @@ if $rows != 12 then
return -1
endi
+print =====================>td-1442
+sql_error select count(*) from m_fl_tb0 interval(1s) fill(prev);
print =============== clear
sql drop database $db
diff --git a/tests/script/general/parser/first_last_query.sim b/tests/script/general/parser/first_last_query.sim
index d11bdccb12..d43cd52878 100644
--- a/tests/script/general/parser/first_last_query.sim
+++ b/tests/script/general/parser/first_last_query.sim
@@ -14,12 +14,14 @@ $i = 0
$db = $dbPrefix . $i
$stb = $stbPrefix . $i
+print use $db
sql use $db
##### select first/last from table
## TBASE-331
print ====== select first/last from table
$tb = $tbPrefix . 0
+print select first(*) from $tb
sql select first(*) from $tb
if $rows != 1 then
return -1
@@ -58,6 +60,7 @@ if $data09 != NCHAR then
return -1
endi
+print select last(*) from $tb
sql select last(*) from $tb
if $rows != 1 then
return -1
@@ -65,22 +68,23 @@ endi
if $data00 != @18-09-18 01:40:00.000@ then
return -1
endi
-#if $data01 != NULL then
+
if $data01 != 999 then
return -1
-endi
-#if $data02 != NULL then
+endi
+
if $data02 != 999 then
return -1
-endi
-#if $data03 != NULL then
+endi
+
if $data03 != 999.00000 then
return -1
endi
-#if $data04 != NULL then
+
if $data04 != 999.000000000 then
return -1
endi
+
#if $data05 != NULL then
if $data05 != 999 then
return -1
@@ -127,7 +131,7 @@ if $data01 != 0 then
return -1
endi
-#add check for out of range first/last query
+print =============> add check for out of range first/last query
sql select first(ts),last(ts) from first_tb4 where ts>'2018-9-18 1:40:01';
if $row != 0 then
return -1
@@ -136,4 +140,130 @@ endi
sql select first(ts),last(ts) from first_tb4 where ts<'2018-9-17 8:50:0';
if $row != 0 then
return -1
+endi
+
+#first/last mix up query
+#select first(size),last(size) from stest interval(1d) group by tbname;
+print =====================>td-1477
+
+sql create table stest(ts timestamp,size INT,filenum INT) tags (appname binary(500),tenant binary(500));
+sql insert into test1 using stest tags('test1','aaa') values ('2020-09-04 16:53:54.003',210,3);
+sql insert into test2 using stest tags('test1','aaa') values ('2020-09-04 16:53:56.003',210,3);
+sql insert into test11 using stest tags('test11','bbb') values ('2020-09-04 16:53:57.003',210,3);
+sql insert into test12 using stest tags('test11','bbb') values ('2020-09-04 16:53:58.003',210,3);
+sql insert into test21 using stest tags('test21','ccc') values ('2020-09-04 16:53:59.003',210,3);
+sql insert into test22 using stest tags('test21','ccc') values ('2020-09-04 16:54:54.003',210,3);
+sql select sum(size) from stest group by appname;
+if $rows != 3 then
+ return -1
+endi
+
+if $data00 != 420 then
+ return -1
+endi
+if $data10 != 420 then
+ return -1
+endi
+if $data20 != 420 then
+ return -1
+endi
+
+if $data01 != @test1@ then
+return -1
+endi
+if $data11 != @test11@ then
+return -1
+endi
+if $data21 != @test21@ then
+return -1
+endi
+
+sql select sum(size) from stest interval(1d) group by appname;
+if $rows != 3 then
+ return -1
+endi
+
+#2020-09-04 00:00:00.000 | 420 | test1 |
+#2020-09-04 00:00:00.000 | 420 | test11 |
+#2020-09-04 00:00:00.000 | 420 | test21 |
+if $data00 != @20-09-04 00:00:00.000@ then
+ return -1
+endi
+
+if $data10 != @20-09-04 00:00:00.000@ then
+ return -1
+endi
+
+if $data20 != @20-09-04 00:00:00.000@ then
+ return -1
+endi
+
+if $data01 != 420 then
+ print expect 420 , actual $data01
+ return -1
+endi
+
+if $data11 != 420 then
+ return -1
+endi
+
+if $data21 != 420 then
+ return -1
+endi
+
+if $data02 != @test1@ then
+return -1
+endi
+if $data12 != @test11@ then
+return -1
+endi
+if $data22 != @test21@ then
+return -1
+endi
+
+print ===================>td-1477, one table has only one block occurs this bug.
+sql select first(size),count(*),LAST(SIZE) from stest where tbname in ('test1', 'test2') interval(1d) group by tbname;
+if $rows != 2 then
+ return -1
+endi
+
+if $data00 != @20-09-04 00:00:00.000@ then
+ return -1
+endi
+
+if $data01 != 210 then
+ return -1
+endi
+
+if $data02 != 1 then
+ return -1
+endi
+
+if $data03 != 210 then
+ return -1
+endi
+
+if $data04 != @test1@ then
+ return -1
+endi
+
+if $data10 != @20-09-04 00:00:00.000@ then
+ return -1
+endi
+
+if $data11 != 210 then
+ return -1
+endi
+
+if $data12 != 1 then
+ return -1
+endi
+
+if $data13 != 210 then
+ return -1
+endi
+
+if $data14 != @test2@ then
+ print expect test2 , actual: $data14
+ return -1
endi
\ No newline at end of file
diff --git a/tests/script/general/parser/groupby.sim b/tests/script/general/parser/groupby.sim
index 255e00ca41..bd0d3c1a12 100644
--- a/tests/script/general/parser/groupby.sim
+++ b/tests/script/general/parser/groupby.sim
@@ -423,6 +423,8 @@ if $data97 != @group_tb0@ then
return -1
endi
+print ---------------------------------> group by binary|nchar data add cases
+
#=========================== group by multi tags ======================
sql create table st (ts timestamp, c int) tags (t1 int, t2 int, t3 int, t4 int);
diff --git a/tests/script/general/parser/join.sim b/tests/script/general/parser/join.sim
index ef3245ccaf..254571bda1 100644
--- a/tests/script/general/parser/join.sim
+++ b/tests/script/general/parser/join.sim
@@ -205,10 +205,12 @@ if $rows != 9 then
endi
if $data00 != @70-01-01 08:01:40.100@ then
+ print $data00
return -1
endi
if $data10 != @70-01-01 08:01:40.200@ then
+ print $data10
return -1
endi
diff --git a/tests/script/general/parser/lastrow_query.sim b/tests/script/general/parser/lastrow_query.sim
index 5fc47ed15d..f81a48d5b2 100644
--- a/tests/script/general/parser/lastrow_query.sim
+++ b/tests/script/general/parser/lastrow_query.sim
@@ -154,6 +154,7 @@ if $rows != 46 then
endi
print ========>td-1317, empty table last_row query crashed
+sql drop table if exists m1;
sql create table m1(ts timestamp, k int) tags (a int);
sql create table t1 using m1 tags(1);
sql create table t2 using m1 tags(2);
@@ -172,3 +173,55 @@ sql select last_row(*) from m1 where tbname in ('t1')
if $rows != 0 then
return -1
endi
+
+sql insert into t1 values('2019-1-1 1:1:1', 1);
+print ===================> last_row query against normal table along with ts/tbname
+sql select last_row(*),ts,'k' from t1;
+if $rows != 1 then
+ return -1
+endi
+
+print ===================> last_row + user-defined column + normal tables
+sql select last_row(ts), 'abc', 1234.9384, ts from t1
+if $rows != 1 then
+ return -1
+endi
+
+if $data01 != @abc@ then
+ print expect abc, actual $data01
+ return -1
+endi
+
+if $data02 != 1234.938400000 then
+ return -1
+endi
+
+if $data03 != @19-01-01 01:01:01.000@ then
+ print expect 19-01-01 01:01:01.000, actual:$data03
+ return -1
+endi
+
+print ===================> last_row + stable + ts/tag column + condition + udf
+sql select last_row(*), ts, 'abc', 123.981, tbname from m1
+if $rows != 1 then
+ return -1
+endi
+
+if $data02 != @19-01-01 01:01:01.000@ then
+ return -1
+endi
+
+if $data03 != @abc@ then
+ return -1
+endi
+
+if $data04 != 123.981000000 then
+ print expect 123.981000000, actual: $data04
+ return -1
+endi
+
+sql create table tu(ts timestamp, k int)
+sql select last_row(*) from tu
+if $row != 0 then
+ return -1
+endi
\ No newline at end of file
diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim
index 6790564cc7..3dd80b8e38 100644
--- a/tests/script/general/parser/testSuite.sim
+++ b/tests/script/general/parser/testSuite.sim
@@ -99,6 +99,8 @@ run general/parser/union.sim
sleep 2000
run general/parser/constCol.sim
sleep 2000
+run general/parser/where.sim
+sleep 2000
run general/parser/timestamp.sim
sleep 2000
run general/parser/sliding.sim
diff --git a/tests/script/general/parser/timestamp_query.sim b/tests/script/general/parser/timestamp_query.sim
index 6994b2d295..783c03602b 100644
--- a/tests/script/general/parser/timestamp_query.sim
+++ b/tests/script/general/parser/timestamp_query.sim
@@ -21,6 +21,10 @@ $tsu = $rowNum * $delta
$tsu = $tsu - $delta
$tsu = $tsu + $ts0
+print ==================>issue #3481, normal column not allowed,
+sql_error select ts,c1,min(c2) from ts_stb0
+
+
##### select from supertable
$tb = $tbPrefix . 0
sql select first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, -1)
diff --git a/tests/script/general/parser/topbot.sim b/tests/script/general/parser/topbot.sim
index 2faee55460..c2b41888d7 100644
--- a/tests/script/general/parser/topbot.sim
+++ b/tests/script/general/parser/topbot.sim
@@ -5,7 +5,7 @@ system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 200
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sleep 1000
sql connect
$dbPrefix = tb_db
@@ -25,7 +25,7 @@ $stb = $stbPrefix . $i
sql drop database $db -x step1
step1:
-sql create database $db cache 16
+sql create database $db cache 16 maxrows 4096 keep 36500
print ====== create tables
sql use $db
sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 int)
@@ -132,15 +132,15 @@ sleep 5000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
-sleep 3000
+sleep 1000
sql select count(*) from t1.test where ts>10000 and ts<90000 interval(5000a)
if $rows != 3 then
return -1
endi
-print =========>td-1308
-sql create database db;
+print ==============>td-1308
+sql create database db keep 36500;
sql use db;
sql create table stb (ts timestamp, c1 int, c2 binary(10)) tags(t1 binary(10));
@@ -158,4 +158,108 @@ if $rows != 1 then
return -1
endi
+print =======================>td-1446
+sql create table t(ts timestamp, k int)
+$ts = 6000
+while $ts < 7000
+ sql insert into t values ( $ts , $ts )
+ $ts = $ts + 1
+endw
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode1 -s start
+sql connect
+sleep 1000
+sql use db;
+
+$ts = 1000
+while $ts < 5096
+ sql insert into t values ( $ts , $ts )
+ $ts = $ts + 1
+endw
+
+sql select * from t where ts < 6500
+if $rows != 4596 then
+ print expect 4596, actual: $rows
+ return -1
+endi
+
+sql select * from t where ts < 7000
+if $rows != 5096 then
+ return -1
+endi
+
+sql select * from t where ts <= 6000
+if $rows != 4097 then
+ return -1
+endi
+
+sql select * from t where ts <= 6001
+if $rows != 4098 then
+ return -1
+endi
+
+print ======================>td-1454
+sql select count(*)/10, count(*)+99 from t
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != 509.600000000 then
+ return -1
+endi
+
+if $data01 != 5195.000000000 then
+ return -1
+endi
+
+print =======================>td-1596
+sql create table t2(ts timestamp, k int)
+sql insert into t2 values('2020-1-2 1:1:1', 1);
+sql insert into t2 values('2020-2-2 1:1:1', 1);
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode1 -s start
+sql connect
+sleep 1000
+
+sql use db
+sql select count(*), first(ts), last(ts) from t2 interval(1d);
+if $rows != 2 then
+ return -1
+endi
+
+if $data00 != @20-01-02 00:00:00.000@ then
+ print expect 20-01-02 00:00:00.000, actual: $data00
+ return -1
+endi
+
+if $data10 != @20-02-02 00:00:00.000@ then
+ return -1
+endi
+
+if $data01 != 1 then
+ return -1
+endi
+
+if $data11 != 1 then
+ return -1
+endi
+
+if $data02 != @20-01-02 01:01:01.000@ then
+ return -1
+endi
+
+if $data12 != @20-02-02 01:01:01.000@ then
+ return -1
+endi
+
+if $data03 != @20-01-02 01:01:01.000@ then
+ return -1
+endi
+
+if $data13 != @20-02-02 01:01:01.000@ then
+ return -1
+endi
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/general/parser/where.sim b/tests/script/general/parser/where.sim
index dd3b11c2dc..e609dda652 100644
--- a/tests/script/general/parser/where.sim
+++ b/tests/script/general/parser/where.sim
@@ -2,6 +2,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4
system sh/exec.sh -n dnode1 -s start
sleep 3000
@@ -93,7 +94,11 @@ endi
## select specified columns
+print select c1 from $mt
sql select c1 from $mt
+
+print rows $rows
+print totalNum $totalNum
if $rows != $totalNum then
return -1
endi
@@ -233,6 +238,13 @@ if $data11 != @19-01-01 09:10:00.000@ then
endi
sql create table tb_where_NULL (ts timestamp, c1 float, c2 binary(10))
+
+print ===================>td-1604
+sql_error insert into tb_where_NULL values(?, ?, ?)
+sql_error insert into tb_where_NULL values(now, 1, ?)
+sql_error insert into tb_where_NULL values(?, 1, '')
+sql_error insert into tb_where_NULL values(now, ?, '12')
+
sql insert into tb_where_NULL values ('2019-01-01 09:00:00.000', 1, 'val1')
sql insert into tb_where_NULL values ('2019-01-01 09:00:01.000', NULL, NULL)
sql insert into tb_where_NULL values ('2019-01-01 09:00:02.000', 2, 'val2')
@@ -307,14 +319,27 @@ sleep 2000
system sh/exec.sh -n dnode1 -s start
-sql select * from wh_mt0 where c3 = 'abc' and tbname in ('test_null_filter');
+sql_error select * from wh_mt0 where c3 = 'abc' and tbname in ('test_null_filter');
+
+sql select * from wh_mt0 where c3 = '1' and tbname in ('test_null_filter');
if $row != 0 then
return -1
endi
-sql select * from wh_mt0 where c3 = 'abc' and tbname in ('test_null_filter');
-if $row != 0 then
+sql select * from wh_mt0 where c3 = '1';
+if $row == 0 then
return -1
endi
+sql select * from wh_mt0 where c3 is null and tbname in ('test_null_filter');
+if $rows != 10000 then
+ return -1
+endi
+
+sql select * from wh_mt0 where c3 is not null and tbname in ('test_null_filter');
+if $rows != 0 then
+ return -1
+endi
+
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/general/stable/disk.sim b/tests/script/general/stable/disk.sim
index f49d023928..a67ef6d790 100644
--- a/tests/script/general/stable/disk.sim
+++ b/tests/script/general/stable/disk.sim
@@ -4,6 +4,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/exec.sh -n dnode1 -s start
sleep 3000
diff --git a/tests/script/general/stable/vnode3.sim b/tests/script/general/stable/vnode3.sim
index 560c5e38fc..3409aef9f8 100644
--- a/tests/script/general/stable/vnode3.sim
+++ b/tests/script/general/stable/vnode3.sim
@@ -3,6 +3,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/exec.sh -n dnode1 -s start
sleep 3000
diff --git a/tests/script/general/table/vgroup.sim b/tests/script/general/table/vgroup.sim
index 75fda328f3..f4496e2f19 100644
--- a/tests/script/general/table/vgroup.sim
+++ b/tests/script/general/table/vgroup.sim
@@ -2,6 +2,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 4
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/exec.sh -n dnode1 -s start
sleep 3000
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index 063d11bd9d..977ef452ab 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -77,6 +77,9 @@ cd ../../../debug; make
./test.sh -f general/field/smallint.sim
./test.sh -f general/field/tinyint.sim
+./test.sh -f general/http/autocreate.sim
+./test.sh -f general/http/chunked.sim
+./test.sh -f general/http/gzip.sim
./test.sh -f general/http/restful.sim
./test.sh -f general/http/restful_insert.sim
./test.sh -f general/http/restful_limit.sim
@@ -275,6 +278,7 @@ cd ../../../debug; make
./test.sh -f unique/dnode/balancex.sim
./test.sh -f unique/dnode/offline1.sim
./test.sh -f unique/dnode/offline2.sim
+./test.sh -f unique/dnode/reason.sim
./test.sh -f unique/dnode/remove1.sim
./test.sh -f unique/dnode/remove2.sim
./test.sh -f unique/dnode/vnode_clean.sim
diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh
index 0d444a5a6e..8fccb1442f 100755
--- a/tests/script/sh/deploy.sh
+++ b/tests/script/sh/deploy.sh
@@ -128,6 +128,7 @@ echo "tmrDebugFlag 131" >> $TAOS_CFG
echo "udebugFlag 135" >> $TAOS_CFG
echo "sdebugFlag 135" >> $TAOS_CFG
echo "wdebugFlag 135" >> $TAOS_CFG
+echo "cqdebugFlag 135" >> $TAOS_CFG
echo "monitor 0" >> $TAOS_CFG
echo "monitorInterval 1" >> $TAOS_CFG
echo "http 0" >> $TAOS_CFG
diff --git a/tests/script/tmp/prepare.sim b/tests/script/tmp/prepare.sim
index 8b8f206233..343c422e9f 100644
--- a/tests/script/tmp/prepare.sim
+++ b/tests/script/tmp/prepare.sim
@@ -34,11 +34,11 @@ system sh/cfg.sh -n dnode4 -c http -v 1
return
# for crash_gen
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 10
system sh/cfg.sh -n dnode1 -c rpcMaxTime -v 101
system sh/cfg.sh -n dnode1 -c cache -v 2
system sh/cfg.sh -n dnode1 -c keep -v 36500
-system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode1 -c walLevel -v 1
# for windows
diff --git a/tests/script/unique/cluster/balance1.sim b/tests/script/unique/cluster/balance1.sim
index 728ead25fe..ec0c58d416 100644
--- a/tests/script/unique/cluster/balance1.sim
+++ b/tests/script/unique/cluster/balance1.sim
@@ -35,6 +35,16 @@ system sh/cfg.sh -n dnode6 -c wallevel -v 1
system sh/cfg.sh -n dnode7 -c wallevel -v 1
system sh/cfg.sh -n dnode8 -c wallevel -v 1
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode5 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode6 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode7 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode8 -c maxTablesPerVnode -v 4
+
+
print ============== step1
print ========= start dnode1
system sh/exec.sh -n dnode1 -s start
diff --git a/tests/script/unique/cluster/balance2.sim b/tests/script/unique/cluster/balance2.sim
index 6d2c4bdad7..b8477965c6 100644
--- a/tests/script/unique/cluster/balance2.sim
+++ b/tests/script/unique/cluster/balance2.sim
@@ -35,6 +35,16 @@ system sh/cfg.sh -n dnode6 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode7 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode8 -c mnodeEqualVnodeNum -v 0
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode5 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode6 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode7 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode8 -c maxTablesPerVnode -v 4
+
+
print ============== step1
print ========= start dnode1
system sh/exec.sh -n dnode1 -s start
diff --git a/tests/script/unique/cluster/vgroup100.sim b/tests/script/unique/cluster/vgroup100.sim
index cddb38cefd..bde6dd2462 100644
--- a/tests/script/unique/cluster/vgroup100.sim
+++ b/tests/script/unique/cluster/vgroup100.sim
@@ -42,9 +42,11 @@ $count = 2
while $count < 102
$db = d . $count
$tb = $db . .t
+ $tb2 = $db . .t2
sql create database $db replica 3 cache 1 blocks 3
sql create table $tb (ts timestamp, i int)
sql insert into $tb values(now, 1)
+ sql create table $tb2 as select count(*) from $tb interval(10s)
$count = $count + 1
print insert into $tb values(now, 1) ==> finished
endw
@@ -74,7 +76,7 @@ print ============================== step6
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
-sleep 3000
+sleep 10000
print ============================== step7
diff --git a/tests/script/unique/dnode/alternativeRole.sim b/tests/script/unique/dnode/alternativeRole.sim
index b5d861c44f..af627490ba 100644
--- a/tests/script/unique/dnode/alternativeRole.sim
+++ b/tests/script/unique/dnode/alternativeRole.sim
@@ -16,6 +16,11 @@ system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
+
+
print ========== step1
system sh/exec.sh -n dnode1 -s start
sleep 3000
diff --git a/tests/script/unique/dnode/balance1.sim b/tests/script/unique/dnode/balance1.sim
index b246197742..10d185f353 100644
--- a/tests/script/unique/dnode/balance1.sim
+++ b/tests/script/unique/dnode/balance1.sim
@@ -25,6 +25,11 @@ system sh/cfg.sh -n dnode2 -c wallevel -v 2
system sh/cfg.sh -n dnode3 -c wallevel -v 2
system sh/cfg.sh -n dnode4 -c wallevel -v 2
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
+
print ========== step1
system sh/exec.sh -n dnode1 -s start
sql connect
diff --git a/tests/script/unique/dnode/balance2.sim b/tests/script/unique/dnode/balance2.sim
index 1a80e890a0..23897df690 100644
--- a/tests/script/unique/dnode/balance2.sim
+++ b/tests/script/unique/dnode/balance2.sim
@@ -12,6 +12,12 @@ system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode5 -c mnodeEqualVnodeNum -v 4
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode5 -c maxTablesPerVnode -v 4
+
system sh/cfg.sh -n dnode1 -c wallevel -v 1
system sh/cfg.sh -n dnode2 -c wallevel -v 1
system sh/cfg.sh -n dnode3 -c wallevel -v 1
diff --git a/tests/script/unique/dnode/balance3.sim b/tests/script/unique/dnode/balance3.sim
index 1f81fde968..9f7f3abb8b 100644
--- a/tests/script/unique/dnode/balance3.sim
+++ b/tests/script/unique/dnode/balance3.sim
@@ -21,6 +21,13 @@ system sh/cfg.sh -n dnode4 -c wallevel -v 1
system sh/cfg.sh -n dnode5 -c wallevel -v 1
system sh/cfg.sh -n dnode6 -c wallevel -v 1
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode5 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode6 -c maxTablesPerVnode -v 4
+
print ========== step1
system sh/exec.sh -n dnode1 -s start
sql connect
diff --git a/tests/script/unique/dnode/balancex.sim b/tests/script/unique/dnode/balancex.sim
index 6f3b7dfb74..da8197e22d 100644
--- a/tests/script/unique/dnode/balancex.sim
+++ b/tests/script/unique/dnode/balancex.sim
@@ -15,6 +15,11 @@ system sh/cfg.sh -n dnode2 -c wallevel -v 1
system sh/cfg.sh -n dnode3 -c wallevel -v 1
system sh/cfg.sh -n dnode4 -c wallevel -v 1
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
+
print ========== step1
system sh/exec.sh -n dnode1 -s start
sql connect
diff --git a/tests/script/unique/dnode/offline2.sim b/tests/script/unique/dnode/offline2.sim
index 2d1467ad59..e8a5460de4 100644
--- a/tests/script/unique/dnode/offline2.sim
+++ b/tests/script/unique/dnode/offline2.sim
@@ -16,6 +16,13 @@ system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4
+
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
+
+
system sh/cfg.sh -n dnode1 -c wallevel -v 1
system sh/cfg.sh -n dnode2 -c wallevel -v 1
system sh/cfg.sh -n dnode3 -c wallevel -v 1
diff --git a/tests/script/unique/dnode/reason.sim b/tests/script/unique/dnode/reason.sim
new file mode 100644
index 0000000000..ad61a81b97
--- /dev/null
+++ b/tests/script/unique/dnode/reason.sim
@@ -0,0 +1,132 @@
+system sh/stop_dnodes.sh
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+
+print ========== step1
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+sql create dnode $hostname2
+
+sql show dnodes
+print dnode1 off: $data7_1
+print dnode2 off: $data7_2
+if $data7_2 != @status not received@ then
+ return -1
+endi
+
+print ========== step2
+system sh/exec.sh -n dnode2 -s start
+sleep 3000
+sql show dnodes
+print dnode1 off: $data7_1
+print dnode2 off: $data7_2
+
+print ========== step3
+system sh/exec.sh -n dnode2 -s stop
+sleep 3000
+sql show dnodes
+print dnode1 off: $data7_1
+print dnode2 off: $data7_2
+if $data7_2 != @status msg timeout@ then
+ return -1
+endi
+
+print ========== step4
+sql drop dnode $hostname2
+sleep 5000
+sql show dnodes
+if $rows != 1 then
+ return -1
+endi
+
+print ========== step5
+system sh/exec.sh -n dnode2 -s start
+sql create dnode $hostname2
+sleep 3000
+sql show dnodes
+print dnode1 off: $data7_1
+print dnode2 off: $data7_3
+if $data7_3 != @dnodeId not match@ then
+ return -1
+endi
+
+print ========== step6
+system sh/deploy.sh -n dnode4 -i 4
+system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 3
+system sh/exec.sh -n dnode4 -s start
+sql create dnode $hostname4
+
+sleep 3000
+sql show dnodes
+print dnode1 off: $data7_1
+print dnode4 off: $data7_4
+if $data7_4 != @mnEqualVn not match@ then
+ return -1
+endi
+
+print ========== step7
+system sh/deploy.sh -n dnode5 -i 5
+system sh/cfg.sh -n dnode5 -c statusInterval -v 3
+system sh/exec.sh -n dnode5 -s start
+sql create dnode $hostname5
+
+sleep 3000
+sql show dnodes
+print dnode1 off: $data7_1
+print dnode5 off: $data7_5
+if $data7_5 != @interval not match@ then
+ return -1
+endi
+
+print ========== step8
+system sh/deploy.sh -n dnode6 -i 6
+system sh/cfg.sh -n dnode6 -c balance -v 0
+system sh/exec.sh -n dnode6 -s start
+sql create dnode $hostname6
+
+sleep 3000
+sql show dnodes
+print dnode1 off: $data7_1
+print dnode6 off: $data7_6
+if $data7_6 != @balance not match@ then
+ return -1
+endi
+
+print ========== step9
+system sh/deploy.sh -n dnode7 -i 7
+system sh/cfg.sh -n dnode7 -c maxTablesPerVnode -v 3000
+system sh/exec.sh -n dnode7 -s start
+sql create dnode $hostname7
+
+sleep 3000
+sql show dnodes
+print dnode1 off: $data7_1
+print dnode7 off: $data7_7
+if $data7_7 != @maxTabPerVn not match@ then
+ return -1
+endi
+
+print ========== step10
+system sh/deploy.sh -n dnode8 -i 8
+system sh/cfg.sh -n dnode8 -c maxVgroupsPerDb -v 3
+system sh/exec.sh -n dnode8 -s start
+sql create dnode $hostname8
+
+sleep 3000
+sql show dnodes
+print dnode1 off: $data7_1
+print dnode8 off: $data7_8
+if $data7_8 != @maxVgPerDb not match@ then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+system sh/exec.sh -n dnode3 -s stop -x SIGINT
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+system sh/exec.sh -n dnode5 -s stop -x SIGINT
+system sh/exec.sh -n dnode6 -s stop -x SIGINT
+system sh/exec.sh -n dnode7 -s stop -x SIGINT
+system sh/exec.sh -n dnode8 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/dnode/remove1.sim b/tests/script/unique/dnode/remove1.sim
index 7786b9f9d1..6bde68b8b5 100644
--- a/tests/script/unique/dnode/remove1.sim
+++ b/tests/script/unique/dnode/remove1.sim
@@ -15,6 +15,11 @@ system sh/cfg.sh -n dnode2 -c wallevel -v 1
system sh/cfg.sh -n dnode3 -c wallevel -v 1
system sh/cfg.sh -n dnode4 -c wallevel -v 1
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
+
print ========== step1
system sh/exec.sh -n dnode1 -s start
sleep 3000
diff --git a/tests/script/unique/dnode/remove2.sim b/tests/script/unique/dnode/remove2.sim
index cd0331235a..903e262be7 100644
--- a/tests/script/unique/dnode/remove2.sim
+++ b/tests/script/unique/dnode/remove2.sim
@@ -15,6 +15,11 @@ system sh/cfg.sh -n dnode2 -c wallevel -v 2
system sh/cfg.sh -n dnode3 -c wallevel -v 2
system sh/cfg.sh -n dnode4 -c wallevel -v 2
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
+
print ========== step1
system sh/exec.sh -n dnode1 -s start
sleep 3000
diff --git a/tests/script/unique/dnode/vnode_clean.sim b/tests/script/unique/dnode/vnode_clean.sim
index 6df4bf78e8..f90f3d3fd0 100644
--- a/tests/script/unique/dnode/vnode_clean.sim
+++ b/tests/script/unique/dnode/vnode_clean.sim
@@ -15,6 +15,11 @@ system sh/cfg.sh -n dnode2 -c wallevel -v 1
system sh/cfg.sh -n dnode3 -c wallevel -v 1
system sh/cfg.sh -n dnode4 -c wallevel -v 1
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
+
print ========== step1
system sh/exec.sh -n dnode1 -s start
sql connect
diff --git a/tests/script/unique/http/admin.sim b/tests/script/unique/http/admin.sim
index dc17520d02..8833397487 100644
--- a/tests/script/unique/http/admin.sim
+++ b/tests/script/unique/http/admin.sim
@@ -33,25 +33,25 @@ print =============== step1 - login
system_content curl 127.0.0.1:7111/admin/
print 1-> $system_content
-if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:7111/admin/xx
print 2-> $system_content
-if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:7111/admin/login
print 3-> $system_content
-if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:7111/admin/login/root
print 4-> $system_content
-if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
@@ -69,13 +69,13 @@ endi
system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.' -d 'show databases' 127.0.0.1:7111/admin/login/root/1
print 7-> $system_content
-if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then
+if $system_content != @{"status":"error","code":4386,"desc":"invalid type of Authorization"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' 127.0.0.1:7111/admin/login/root/1
print 8-> $system_content
-if $system_content != @{"status":"error","code":5053,"desc":"parse http auth token error"}@ then
+if $system_content != @{"status":"error","code":4389,"desc":"invalid taosd Authorization"}@ then
return -1
endi
@@ -105,7 +105,7 @@ endi
system_content curl 127.0.0.1:7111/admin/logout
print 11 -----> $system_content
-if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
@@ -168,7 +168,7 @@ print =============== step7 - use dbs
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'use d1;' 127.0.0.1:7111/admin/all
print 23-> $system_content
-if $system_content != @{"status":"error","code":5017,"desc":"no need to execute use db cmd"}@ then
+if $system_content != @{"status":"error","code":4360,"desc":"no need to execute use db cmd"}@ then
return -1
endi
diff --git a/tests/script/unique/http/opentsdb.sim b/tests/script/unique/http/opentsdb.sim
index 4901c5b3fd..5269817165 100644
--- a/tests/script/unique/http/opentsdb.sim
+++ b/tests/script/unique/http/opentsdb.sim
@@ -13,62 +13,62 @@ print ============================ dnode1 start
print =============== step1 - parse
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/
print $system_content
-if $system_content != @{"status":"error","code":5057,"desc":"database name can not be null"}@ then
+if $system_content != @{"status":"error","code":4496,"desc":"database name can not be null"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db123456789012345678901234567890db
print $system_content
-if $system_content != @{"status":"error","code":5058,"desc":"database name too long"}@ then
+if $system_content != @{"status":"error","code":4497,"desc":"database name too long"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/
print $system_content
-if $system_content != @{"status":"error","code":5057,"desc":"database name can not be null"}@ then
+if $system_content != @{"status":"error","code":4496,"desc":"database name can not be null"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put2
print $system_content
-if $system_content != @{"status":"error","code":5009,"desc":"http url parse error"}@ then
+if $system_content != @{"status":"error","code":4354,"desc":"invalid url format"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":5060,"desc":"metrics size is 0"}@ then
+if $system_content != @{"status":"error","code":4499,"desc":"metrics size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[' 127.0.0.1:7111/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":5059,"desc":"invalid opentsdb json fromat"}@ then
+if $system_content != @{"status":"error","code":4498,"desc":"invalid opentsdb json fromat"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{}' 127.0.0.1:7111/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":5060,"desc":"metrics size is 0"}@ then
+if $system_content != @{"status":"error","code":4499,"desc":"metrics size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":5062,"desc":"metric name not find"}@ then
+if $system_content != @{"status":"error","code":4501,"desc":"metric name not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": 1,"timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":5063,"desc":"metric name type should be string"}@ then
+if $system_content != @{"status":"error","code":4502,"desc":"metric name type should be string"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":5064,"desc":"metric name length is 0"}@ then
+if $system_content != @{"status":"error","code":4503,"desc":"metric name length is 0"}@ then
return -1
endi
@@ -80,25 +80,25 @@ endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":5066,"desc":"timestamp not find"}@ then
+if $system_content != @{"status":"error","code":4505,"desc":"timestamp not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": "2","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":5067,"desc":"timestamp type should be integer"}@ then
+if $system_content != @{"status":"error","code":4506,"desc":"timestamp type should be integer"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": -1,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":5068,"desc":"timestamp value smaller than 0"}@ then
+if $system_content != @{"status":"error","code":4507,"desc":"timestamp value smaller than 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":5078,"desc":"value not find"}@ then
+if $system_content != @{"status":"error","code":4517,"desc":"value not find"}@ then
return -1
endi
@@ -106,19 +106,19 @@ endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":5069,"desc":"tags not find"}@ then
+if $system_content != @{"status":"error","code":4508,"desc":"tags not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":5070,"desc":"tags size is 0"}@ then
+if $system_content != @{"status":"error","code":4509,"desc":"tags size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": 0}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":5070,"desc":"tags size is 0"}@ then
+if $system_content != @{"status":"error","code":4509,"desc":"tags size is 0"}@ then
return -1
endi
@@ -130,25 +130,25 @@ endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"": "web01"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":5073,"desc":"tag name is null"}@ then
+if $system_content != @{"status":"error","code":4512,"desc":"tag name is null"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host01123456789001123456789001123456789001123456789001123456789001123456789": "01"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":5074,"desc":"tag name length too long"}@ then
+if $system_content != @{"status":"error","code":4513,"desc":"tag name length too long"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web011234567890011234567890011234567890011234567890011234567890011234567890011234567890011234567890"}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":5077,"desc":"tag value can not more than 64"}@ then
+if $system_content != @{"status":"error","code":4516,"desc":"tag value can not more than 64"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": ""}}]' 127.0.0.1:7111/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":5076,"desc":"tag value is null"}@ then
+if $system_content != @{"status":"error","code":4515,"desc":"tag value is null"}@ then
return -1
endi
diff --git a/tests/script/unique/mnode/mgmt23.sim b/tests/script/unique/mnode/mgmt23.sim
index 7e60ab908b..d1820ef8c6 100644
--- a/tests/script/unique/mnode/mgmt23.sim
+++ b/tests/script/unique/mnode/mgmt23.sim
@@ -65,7 +65,7 @@ endi
print ============== step4
sql drop dnode $hostname2
-sleep 8000
+sleep 10000
sql show mnodes
$dnode1Role = $data2_1
diff --git a/tests/script/unique/stream/table_move.sim b/tests/script/unique/stream/table_move.sim
index d2437e4920..fe19e6f402 100644
--- a/tests/script/unique/stream/table_move.sim
+++ b/tests/script/unique/stream/table_move.sim
@@ -30,6 +30,11 @@ system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
+
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
diff --git a/tests/test-all.sh b/tests/test-all.sh
index 275c6b1677..84b663809d 100755
--- a/tests/test-all.sh
+++ b/tests/test-all.sh
@@ -9,8 +9,9 @@ NC='\033[0m'
function runSimCaseOneByOne {
while read -r line; do
- if [[ $line =~ ^run.* ]]; then
- case=`echo $line | awk '{print $NF}'`
+ if [[ $line =~ ^./test.sh* ]]; then
+ case=`echo $line | grep sim$ |awk '{print $NF}'`
+
start_time=`date +%s`
./test.sh -f $case > /dev/null 2>&1 && \
echo -e "${GREEN}$case success${NC}" | tee -a out.log || \
@@ -54,7 +55,7 @@ if [ "$2" != "python" ]; then
runSimCaseOneByOne regressionSuite.sim
elif [ "$1" == "full" ]; then
echo "### run TSIM full test ###"
- runSimCaseOneByOne fullGeneralSuite.sim
+ runSimCaseOneByOne jenkins/basic.txt
elif [ "$1" == "smoke" ] || [ -z "$1" ]; then
echo "### run TSIM smoke test ###"
runSimCaseOneByOne basicSuite.sim
diff --git a/tests/test/c/CMakeLists.txt b/tests/test/c/CMakeLists.txt
index e1fedaee3c..26aa20e647 100644
--- a/tests/test/c/CMakeLists.txt
+++ b/tests/test/c/CMakeLists.txt
@@ -5,6 +5,12 @@ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/os/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/util/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/common/inc)
+INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
+INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/mnode/inc)
+INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/tsdb/inc)
+INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/plugins/http/inc)
+INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc)
+INCLUDE_DIRECTORIES(${TD_ENTERPRISE_DIR}/src/inc)
IF (TD_LINUX)
#add_executable(insertPerTable insertPerTable.c)
@@ -28,6 +34,16 @@ IF (TD_LINUX)
#add_executable(createNormalTable createNormalTable.c)
#target_link_libraries(createNormalTable taos_static tutil common pthread)
- add_executable(queryPerformance queryPerformance.c)
- target_link_libraries(queryPerformance taos_static tutil common pthread)
+ #add_executable(queryPerformance queryPerformance.c)
+ #target_link_libraries(queryPerformance taos_static tutil common pthread)
+
+ #add_executable(httpTest httpTest.c)
+ #target_link_libraries(httpTest taos_static tutil common pthread mnode monitor http tsdb twal vnode cJson lz4)
+
+ #add_executable(cacheTest cacheTest.c)
+ #target_link_libraries(cacheTest taos_static tutil common pthread mnode monitor http tsdb twal vnode cJson lz4)
+
+ #add_executable(invalidTableId invalidTableId.c)
+ #target_link_libraries(invalidTableId taos_static tutil common pthread)
ENDIF()
+
diff --git a/tests/test/c/cacheTest.c b/tests/test/c/cacheTest.c
new file mode 100644
index 0000000000..54aca0038e
--- /dev/null
+++ b/tests/test/c/cacheTest.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#define _DEFAULT_SOURCE
+#include
+#include "os.h"
+#include "taos.h"
+#include "tcache.h"
+#include "tulog.h"
+#include "tutil.h"
+
+#define MAX_REFRESH_TIME_SEC 2
+#define MAX_RANDOM_POINTS 20000
+#define GREEN "\033[1;32m"
+#define NC "\033[0m"
+
+int32_t tsKeepTimeInSec = 3;
+int32_t tsNumOfRows = 1000000;
+int32_t tsSizeOfRow = 64 * 1024;
+void * tsCacheObj = NULL;
+int32_t destroyTimes = 0;
+
+typedef int64_t CacheTestKey;
+typedef struct CacheTestRow {
+ int32_t index;
+ void ** ppRow;
+ void * data;
+} CacheTestRow;
+
+CacheTestRow *initRow(int32_t index) {
+ CacheTestRow *row = calloc(sizeof(CacheTestRow), 1);
+ row->index = index;
+ row->data = malloc(tsSizeOfRow * sizeof(int8_t));
+ return row;
+}
+
+void detroyRow(void *data) {
+ CacheTestRow *row = *(CacheTestRow **)data;
+ free(row->data);
+ free(row);
+ destroyTimes++;
+ if (destroyTimes % 50000 == 0) {
+ pPrint("%s ===> destroyTimes:%d %s", GREEN, destroyTimes, NC);
+ }
+}
+
+void initCache() {
+ tsCacheObj = taosCacheInit(TSDB_DATA_TYPE_BIGINT, MAX_REFRESH_TIME_SEC, true, detroyRow, "cachetest");
+}
+
+void putRowInCache() {
+ for (int index = 0; index < tsNumOfRows; ++index) {
+ CacheTestRow *row = initRow(index);
+ uint64_t key = (uint64_t)row;
+ void **ppRow = taosCachePut(tsCacheObj, &key, sizeof(int64_t), &row, sizeof(int64_t), tsKeepTimeInSec * 1000);
+ row->ppRow = ppRow;
+ taosCacheRelease(tsCacheObj, (void **)&ppRow, false);
+ }
+}
+
+void cleanupCache() {
+ taosCacheCleanup(tsCacheObj);
+}
+
+void initGetMemory() {
+ osInit();
+ taos_init();
+}
+
+float getProcMemory() {
+ float procMemoryUsedMB = 0;
+ taosGetProcMemory(&procMemoryUsedMB);
+ return procMemoryUsedMB;
+}
+
+void doTest() {
+ initCache();
+ pPrint("%s initialize procMemory %f MB %s", GREEN, getProcMemory(), NC);
+
+ putRowInCache();
+ pPrint("%s insert %d rows, procMemory %f MB %s", GREEN, tsNumOfRows, getProcMemory(), NC);
+
+ int32_t sleepMs = (MAX_REFRESH_TIME_SEC * 3) * 1000 + tsKeepTimeInSec * 1000;
+ taosMsleep(sleepMs);
+ pPrint("%s after sleep %d ms, procMemory %f MB %s", GREEN, sleepMs, getProcMemory(), NC);
+
+ cleanupCache();
+ taosMsleep(sleepMs);
+ pPrint("%s after cleanup cache, procMemory %f MB %s", GREEN, getProcMemory(), NC);
+
+ malloc_trim(0);
+ taosMsleep(sleepMs);
+ pPrint("%s after malloc_trim, procMemory %f MB %s", GREEN, getProcMemory(), NC);
+}
+
+void printHelp() {
+ char indent[10] = " ";
+ printf("Used to test the performance of cache\n");
+
+ printf("%s%s\n", indent, "-k");
+ printf("%s%s%s%d\n", indent, indent, "KeepTimeInSec, default is ", tsKeepTimeInSec);
+ printf("%s%s\n", indent, "-n");
+ printf("%s%s%s%d\n", indent, indent, "NumOfRows, default is ", tsNumOfRows);
+ printf("%s%s\n", indent, "-s");
+ printf("%s%s%s%d\n", indent, indent, "SizeOfData, default is ", tsSizeOfRow);
+
+ exit(EXIT_SUCCESS);
+}
+
+void parseArgument(int argc, char *argv[]) {
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) {
+ printHelp();
+ exit(0);
+ } else if (strcmp(argv[i], "-k") == 0) {
+ tsKeepTimeInSec = atoi(argv[++i]);
+ } else if (strcmp(argv[i], "-n") == 0) {
+ tsNumOfRows = atoi(argv[++i]);
+ } else if (strcmp(argv[i], "-s") == 0) {
+ tsSizeOfRow = atoi(argv[++i]);
+ } else {
+ }
+ }
+
+ pPrint("%s KeepTimeInSec:%d %s", GREEN, tsKeepTimeInSec, NC);
+ pPrint("%s NumOfRows:%d %s", GREEN, tsNumOfRows, NC);
+ pPrint("%s SizeOfData:%d %s", GREEN, tsSizeOfRow, NC);
+}
+
+int main(int argc, char *argv[]) {
+ initGetMemory();
+ parseArgument(argc, argv);
+ doTest();
+}
diff --git a/tests/test/c/createNormalTable.c b/tests/test/c/createNormalTable.c
index 18a648b9e1..60253e2add 100644
--- a/tests/test/c/createNormalTable.c
+++ b/tests/test/c/createNormalTable.c
@@ -50,7 +50,9 @@ void createDbAndSTable();
int main(int argc, char *argv[]) {
shellParseArgument(argc, argv);
taos_init();
- createDbAndSTable();
+ if (replica != 0) {
+ createDbAndSTable();
+ }
pPrint("%d threads are spawned to create table", numOfThreads);
@@ -134,14 +136,31 @@ void *threadFunc(void *param) {
int64_t startMs = taosGetTimestampMs();
- for (int32_t t = pInfo->tableBeginIndex; t < pInfo->tableEndIndex; ++t) {
- sprintf(qstr, "create table %s%d (ts timestamp, i int)", stableName, t);
- TAOS_RES *pSql = taos_query(con, qstr);
- code = taos_errno(pSql);
- if (code != 0) {
- pError("failed to create table %s%d, reason:%s", stableName, t, tstrerror(code));
+ if (replica != 0) {
+ for (int32_t t = pInfo->tableBeginIndex; t < pInfo->tableEndIndex; ++t) {
+ sprintf(qstr, "create table %s%d (ts timestamp, i int)", stableName, t);
+ TAOS_RES *pSql = taos_query(con, qstr);
+ code = taos_errno(pSql);
+ if (code != 0) {
+ pError("failed to create table %s%d, reason:%s", stableName, t, tstrerror(code));
+ }
+ taos_free_result(pSql);
+ }
+ } else {
+ for (int32_t t = pInfo->tableBeginIndex; t < pInfo->tableEndIndex; ++t) {
+ sprintf(qstr, "insert into %s%d values(now, 1)", stableName, t);
+ TAOS_RES *pSql = taos_query(con, qstr);
+ code = taos_errno(pSql);
+ if (code != 0) {
+ if (code != TSDB_CODE_MND_INVALID_TABLE_NAME) {
+ pError("failed to create table %s%d, reason:%s", stableName, t, tstrerror(code));
+ }
+ if (code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
+ exit(0);
+ }
+ }
+ taos_free_result(pSql);
}
- taos_free_result(pSql);
}
float createTableSpeed = 0;
diff --git a/tests/test/c/httpTest.c b/tests/test/c/httpTest.c
new file mode 100644
index 0000000000..261546770e
--- /dev/null
+++ b/tests/test/c/httpTest.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "os.h"
+#include "os.h"
+#include "tglobal.h"
+#include "taoserror.h"
+#include "httpSystem.h"
+
+void signal_handler(int signum) {
+ httpStopSystem();
+ httpCleanUpSystem();
+ exit(EXIT_SUCCESS);
+}
+
+int main(int argc, char *argv[]) {
+ struct sigaction act;
+ act.sa_handler = signal_handler;
+ sigaction(SIGTERM, &act, NULL);
+ sigaction(SIGHUP, &act, NULL);
+ sigaction(SIGINT, &act, NULL);
+ sigaction(SIGABRT, &act, NULL);
+
+ // Initialize the system
+ if (httpInitSystem() < 0) {
+ exit(EXIT_FAILURE);
+ }
+
+ if (httpStartSystem() < 0) {
+ exit(EXIT_FAILURE);
+ }
+
+ while (1) {
+ sleep(1000);
+ }
+}
diff --git a/tests/test/c/importPerTable.c b/tests/test/c/importPerTable.c
index b4e8b68703..68f7898050 100644
--- a/tests/test/c/importPerTable.c
+++ b/tests/test/c/importPerTable.c
@@ -20,6 +20,7 @@
#include "ttimer.h"
#include "tutil.h"
#include "tglobal.h"
+#include "osTime.h"
#define MAX_RANDOM_POINTS 20000
#define GREEN "\033[1;32m"
@@ -43,14 +44,16 @@ void createDbAndTable();
void insertData();
int32_t randomData[MAX_RANDOM_POINTS];
-int64_t rowsPerTable = 10000;
+int64_t rowsPerTable = 1000000;
int64_t pointsPerTable = 1;
-int64_t numOfThreads = 1;
-int64_t numOfTablesPerThread = 1;
+int64_t numOfThreads = 10;
+int64_t numOfTablesPerThread = 100;
char dbName[32] = "db";
char stableName[64] = "st";
-int32_t cache = 16384;
-int32_t tables = 1000;
+int64_t totalUs = 0;
+int64_t reqNum = 0;
+int64_t maxUs = 0;
+int64_t minUs = 100000000;
int main(int argc, char *argv[]) {
shellParseArgument(argc, argv);
@@ -58,6 +61,38 @@ int main(int argc, char *argv[]) {
taos_init();
createDbAndTable();
insertData();
+ int64_t avgUs = totalUs / reqNum;
+ pError("%s totalUs:%ld, avgUs:%ld maxUs:%ld minUs:%ld reqNum:%ld %s\n", GREEN, totalUs, avgUs, maxUs, minUs, reqNum, NC);
+}
+
+int32_t query(void *con, char *qstr) {
+ int64_t begin = taosGetTimestampUs();
+
+ TAOS_RES *pSql = taos_query(con, qstr);
+ int32_t code = taos_errno(pSql);
+ if (code != 0) {
+ pError("failed to exec sql:%s, code:%d reason:%s", qstr, taos_errno(con), taos_errstr(con));
+ exit(0);
+ }
+ taos_free_result(pSql);
+
+ int64_t us = taosGetTimestampUs() - begin;
+ maxUs = MAX(us, maxUs);
+ minUs = MIN(us, minUs);
+ atomic_add_fetch_64(&totalUs, us);
+ atomic_add_fetch_64(&reqNum, 1);
+ if (reqNum > 100000) {
+ int64_t avgUs = totalUs / reqNum;
+ if (us > avgUs * 100) {
+ pError("sql:%s", qstr);
+ pError("%s totalUs:%ld, avgUs:%ld maxUs:%ld minUs:%ld reqNum:%ld %s\n", GREEN, totalUs, avgUs, maxUs, minUs,
+ reqNum, NC);
+ taosMsleep(1000);
+ exit(0);
+ }
+ }
+
+ return code;
}
void createDbAndTable() {
@@ -79,14 +114,14 @@ void createDbAndTable() {
exit(1);
}
- sprintf(qstr, "create database if not exists %s cache %d tables %d", dbName, cache, tables);
- if (taos_query(con, qstr)) {
+ sprintf(qstr, "create database if not exists %s", dbName);
+ if (query(con, qstr)) {
pError("failed to create database:%s, code:%d reason:%s", dbName, taos_errno(con), taos_errstr(con));
exit(0);
}
sprintf(qstr, "use %s", dbName);
- if (taos_query(con, qstr)) {
+ if (query(con, qstr)) {
pError("failed to use db, code:%d reason:%s", taos_errno(con), taos_errstr(con));
exit(0);
}
@@ -102,14 +137,14 @@ void createDbAndTable() {
}
sprintf(qstr + len, ") tags(t int)");
- if (taos_query(con, qstr)) {
+ if (query(con, qstr)) {
pError("failed to create stable, code:%d reason:%s", taos_errno(con), taos_errstr(con));
exit(0);
}
for (int64_t t = 0; t < totalTables; ++t) {
sprintf(qstr, "create table if not exists %s%ld using %s tags(%ld)", stableName, t, stableName, t);
- if (taos_query(con, qstr)) {
+ if (query(con, qstr)) {
pError("failed to create table %s%" PRId64 ", reason:%s", stableName, t, taos_errstr(con));
exit(0);
}
@@ -122,7 +157,7 @@ void createDbAndTable() {
}
sprintf(qstr + len, ")");
- if (taos_query(con, qstr)) {
+ if (query(con, qstr)) {
pError("failed to create table %s%ld, reason:%s", stableName, t, taos_errstr(con));
exit(0);
}
@@ -207,7 +242,7 @@ void *syncTest(void *param) {
}
sprintf(qstr, "use %s", pInfo->dbName);
- taos_query(con, qstr);
+ query(con, qstr);
gettimeofday(&systemTime, NULL);
st = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
@@ -229,7 +264,7 @@ void *syncTest(void *param) {
}
len += sprintf(sql + len, ")");
if (len > maxBytes) {
- if (taos_query(con, qstr)) {
+ if (query(con, qstr)) {
pError("thread:%d, failed to import table:%s%ld row:%ld, reason:%s", pInfo->threadIndex, pInfo->stableName,
table, row, taos_errstr(con));
}
@@ -246,7 +281,7 @@ void *syncTest(void *param) {
}
if (len != strlen(inserStr)) {
- taos_query(con, qstr);
+ query(con, qstr);
}
gettimeofday(&systemTime, NULL);
@@ -284,10 +319,6 @@ void printHelp() {
printf("%s%s%s%" PRId64 "\n", indent, indent, "Number of threads to be used, default is ", numOfThreads);
printf("%s%s\n", indent, "-n");
printf("%s%s%s%" PRId64 "\n", indent, indent, "Number of tables per thread, default is ", numOfTablesPerThread);
- printf("%s%s\n", indent, "-tables");
- printf("%s%s%s%d\n", indent, indent, "Database parameters tables, default is ", tables);
- printf("%s%s\n", indent, "-cache");
- printf("%s%s%s%d\n", indent, indent, "Database parameters cache, default is ", cache);
exit(EXIT_SUCCESS);
}
@@ -311,10 +342,6 @@ void shellParseArgument(int argc, char *argv[]) {
numOfThreads = atoi(argv[++i]);
} else if (strcmp(argv[i], "-n") == 0) {
numOfTablesPerThread = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-tables") == 0) {
- tables = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-cache") == 0) {
- cache = atoi(argv[++i]);
} else {
}
}
@@ -323,8 +350,6 @@ void shellParseArgument(int argc, char *argv[]) {
pPrint("%spointsPerTable:%" PRId64 "%s", GREEN, pointsPerTable, NC);
pPrint("%snumOfThreads:%" PRId64 "%s", GREEN, numOfThreads, NC);
pPrint("%snumOfTablesPerThread:%" PRId64 "%s", GREEN, numOfTablesPerThread, NC);
- pPrint("%scache:%d%s", GREEN, cache, NC);
- pPrint("%stables:%d%s", GREEN, tables, NC);
pPrint("%sdbName:%s%s", GREEN, dbName, NC);
pPrint("%stableName:%s%s", GREEN, stableName, NC);
pPrint("%sstart to run%s", GREEN, NC);
diff --git a/tests/test/c/invalidTableId.c b/tests/test/c/invalidTableId.c
new file mode 100644
index 0000000000..581387a4ee
--- /dev/null
+++ b/tests/test/c/invalidTableId.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#define _DEFAULT_SOURCE
+#include "os.h"
+#include "taoserror.h"
+#include "taos.h"
+#include "tulog.h"
+#include "tutil.h"
+#include "tglobal.h"
+#include "hash.h"
+
+#define MAX_RANDOM_POINTS 20000
+#define GREEN "\033[1;32m"
+#define NC "\033[0m"
+
+#define MAX_DB_NUM 100
+void * con;
+char dbNames[MAX_DB_NUM][48];
+int32_t dbNum = 0;
+void parseArgument(int argc, char *argv[]);
+void connDb();
+void getDbNames();
+void printDbNames();
+void queryTables(char *dbName);
+void checkTables(char *dbName);
+
+int main(int argc, char *argv[]) {
+ parseArgument(argc, argv);
+ taos_init();
+ connDb();
+ getDbNames();
+ printDbNames();
+ for (int dbIndex = 0; dbIndex < dbNum; ++dbIndex) {
+ queryTables((char*)(dbNames[dbIndex]));
+ checkTables((char*)(dbNames[dbIndex]));
+ }
+
+ pPrint("all %d database is checked", dbNum);
+}
+
+void connDb() {
+ con = taos_connect(NULL, "root", "taosdata", NULL, 0);
+ if (con == NULL) {
+ pError("failed to connect to DB, reason:%s", taos_errstr(con));
+ exit(0);
+ }
+}
+
+void getDbNames() {
+ if (dbNum != 0) return;
+
+ char * qstr = "show databases";
+ TAOS_RES *result = taos_query(con, qstr);
+ int32_t code = taos_errno(result);
+ if (result == NULL || code != 0) {
+ pError("failed to exec sql:%s, code:0x%x reason:%s", qstr, code & 0XFFFF, tstrerror(code));
+ exit(0);
+ }
+
+ TAOS_ROW row;
+ int num_fields = taos_num_fields(result);
+ if (num_fields <= 0) return;
+ while ((row = taos_fetch_row(result))) {
+ char * dbName = (char*)dbNames[dbNum];
+ int32_t *length = taos_fetch_lengths(result);
+ int len = length[0];
+ memcpy(dbName, (char *)row[0], len);
+ dbName[len] = 0;
+ dbNum++;
+ }
+
+ taos_free_result(result);
+}
+
+void printDbNames() {
+ for (int dbIndex = 0; dbIndex < dbNum; ++dbIndex) {
+ pPrint("db:%d %s", dbIndex, dbNames[dbIndex]);
+ }
+}
+
+void queryTables(char *dbName) {
+ char qstr[1024];
+ char fileName[1024];
+ char ts[35] = {0};
+ int32_t precision = 1000;
+
+ sprintf(qstr, "show %s.tables", dbName);
+ sprintf(fileName, "%s_tables.txt", dbName);
+
+ TAOS_RES *result = taos_query(con, qstr);
+ int32_t code = taos_errno(result);
+ if (result == NULL || code != 0) {
+ pError("failed to exec sql:%s, code:0x%x reason:%s", qstr, code & 0XFFFF, tstrerror(code));
+ exit(0);
+ }
+
+ FILE *fp = fopen(fileName, "w");
+ if (!fp) return;
+
+ TAOS_ROW row;
+ int32_t rows = 0;
+ while ((row = taos_fetch_row(result))) {
+ char tableName[256] = {0};
+ int32_t *length = taos_fetch_lengths(result);
+ int len = length[0];
+ memcpy(tableName, (char *)row[0], len);
+ tableName[len] = 0;
+
+ int64_t t = *((int64_t *)row[1]);
+ time_t tt = t / 1000;
+ struct tm *ptm = localtime(&tt);
+ int32_t tl = (int32_t)strftime(ts, 35, "%Y-%m-%d %H:%M:%S", ptm);
+ snprintf(ts + tl, 5, ".%03ld", t % precision);
+
+ // fprintf(fp, "%s %s\n", tableName, ts);
+ fprintf(fp, "%s.%s\n", dbName, tableName);
+ rows++;
+ }
+
+ taos_free_result(result);
+ fclose(fp);
+ pPrint("db:%s has %d tables, write to %s", dbName, rows, fileName);
+}
+
+void checkTables(char *dbName) {
+ char qstr[1024];
+ char fileName1[1024];
+ char fileName2[1024];
+
+ sprintf(qstr, "show %s.tables", dbName);
+ sprintf(fileName1, "%s_tables.txt", dbName);
+ sprintf(fileName2, "%s_count.txt", dbName);
+
+ FILE *fp1 = fopen(fileName1, "r");
+ if (!fp1) return;
+
+ FILE *fp2 = fopen(fileName2, "w");
+ if (!fp2) return;
+
+ int32_t successRows = 0;
+ int32_t failedRows = 0;
+ char tbName[256];
+ while (!feof(fp1)) {
+ int size = fscanf(fp1, "%s", tbName);
+ if (size <= 0) {
+ break;
+ }
+
+ sprintf(qstr, "select count(*) from %s", tbName);
+ TAOS_RES *result = taos_query(con, qstr);
+ int32_t code = taos_errno(result);
+ if (result == NULL || code != 0) {
+ pError("failed to exec sql:%s, code:0x%x reason:%s", qstr, code & 0XFFFF, tstrerror(code));
+ fprintf(fp2, "%s failed to exec sql:%s, code:0x%x reason:%s", tbName, qstr, code & 0XFFFF, tstrerror(code));
+ taos_free_result(result);
+ failedRows++;
+ continue;
+ }
+
+ TAOS_ROW row;
+ int64_t count = 0;
+ while ((row = taos_fetch_row(result))) {
+ count = *((int64_t *)row[0]);
+ }
+ fprintf(fp2, "%s %" PRId64 "\n", tbName, count);
+
+ successRows++;
+ if (successRows % 1000 == 0) {
+ pPrint("query %d tables", successRows);
+ }
+ taos_free_result(result);
+ }
+
+ fclose(fp1);
+ fclose(fp2);
+ pPrint("db:%s query tables, success:%d failed:%d write to %s", dbName, successRows, failedRows, fileName2);
+}
+
+void printHelp() {
+ char indent[10] = " ";
+ printf("Used to checkTables\n");
+
+ printf("%s%s\n", indent, "-c");
+ printf("%s%s%s%s\n", indent, indent, "Configuration directory, default is ", configDir);
+ printf("%s%s\n", indent, "-d");
+ printf("%s%s%s%s\n", indent, indent, "The name of the database to be checked, default is ", "all");
+
+ exit(EXIT_SUCCESS);
+}
+
+void parseArgument(int argc, char *argv[]) {
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) {
+ printHelp();
+ exit(0);
+ } else if (strcmp(argv[i], "-d") == 0) {
+ strcpy(dbNames[0], argv[++i]);
+ dbNum++;
+ } else if (strcmp(argv[i], "-c") == 0) {
+ strcpy(configDir, argv[++i]);
+ } else {
+ }
+ }
+
+ pPrint("%s configDir:%s %s", GREEN, configDir, NC);
+ pPrint("%s start to checkTables %s", GREEN, NC);
+}
diff --git a/tests/tsim/inc/sim.h b/tests/tsim/inc/sim.h
index 6f3bc7099d..58e58a442c 100644
--- a/tests/tsim/inc/sim.h
+++ b/tests/tsim/inc/sim.h
@@ -84,6 +84,7 @@ enum {
SIM_CMD_SQL,
SIM_CMD_SQL_ERROR,
SIM_CMD_SQL_SLOW,
+ SIM_CMD_RESTFUL,
SIM_CMD_TEST,
SIM_CMD_RETURN,
SIM_CMD_END
@@ -172,6 +173,7 @@ bool simExecuteReturnCmd(SScript *script, char *option);
bool simExecuteSqlCmd(SScript *script, char *option);
bool simExecuteSqlErrorCmd(SScript *script, char *rest);
bool simExecuteSqlSlowCmd(SScript *script, char *option);
+bool simExecuteRestfulCmd(SScript *script, char *rest);
void simVisuallizeOption(SScript *script, char *src, char *dst);
#endif
\ No newline at end of file
diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c
index adc2fd0b9d..7f786dfaa9 100644
--- a/tests/tsim/src/simExe.c
+++ b/tests/tsim/src/simExe.c
@@ -739,36 +739,22 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {
((((int)(*((char *)row[i]))) == 1) ? "1" : "0"));
break;
case TSDB_DATA_TYPE_TINYINT:
- sprintf(value, "%d", (int)(*((char *)row[i])));
+ sprintf(value, "%d", *((int8_t *)row[i]));
break;
case TSDB_DATA_TYPE_SMALLINT:
- sprintf(value, "%d", (int)(*((short *)row[i])));
+ sprintf(value, "%d", *((int16_t *)row[i]));
break;
case TSDB_DATA_TYPE_INT:
- sprintf(value, "%d", *((int *)row[i]));
+ sprintf(value, "%d", *((int32_t *)row[i]));
break;
case TSDB_DATA_TYPE_BIGINT:
sprintf(value, "%" PRId64, *((int64_t *)row[i]));
break;
- case TSDB_DATA_TYPE_FLOAT:{
-#ifdef _TD_ARM_32_
- float fv = 0;
- *(int32_t*)(&fv) = *(int32_t*)row[i];
- sprintf(value, "%.5f", fv);
-#else
- sprintf(value, "%.5f", *((float *)row[i]));
-#endif
- }
+ case TSDB_DATA_TYPE_FLOAT:
+ sprintf(value, "%.5f", GET_FLOAT_VAL(row[i]));
break;
- case TSDB_DATA_TYPE_DOUBLE: {
-#ifdef _TD_ARM_32_
- double dv = 0;
- *(int64_t*)(&dv) = *(int64_t*)row[i];
- sprintf(value, "%.9lf", dv);
-#else
- sprintf(value, "%.9lf", *((double *)row[i]));
-#endif
- }
+ case TSDB_DATA_TYPE_DOUBLE:
+ sprintf(value, "%.9lf", GET_DOUBLE_VAL(row[i]));
break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
@@ -915,6 +901,47 @@ bool simExecuteSqlSlowCmd(SScript *script, char *rest) {
return simExecuteSqlImpCmd(script, rest, isSlow);
}
+bool simExecuteRestfulCmd(SScript *script, char *rest) {
+ FILE *fp = NULL;
+ char filename[256];
+ sprintf(filename, "%s/tmp.sql", tsScriptDir);
+ fp = fopen(filename, "w");
+ if (fp == NULL) {
+ fprintf(stderr, "ERROR: failed to open file: %s\n", filename);
+ return false;
+ }
+
+ char db[64] = {0};
+ char tb[64] = {0};
+ char gzip[32] = {0};
+ int32_t ts;
+ int32_t times;
+ sscanf(rest, "%s %s %d %d %s", db, tb, &ts, ×, gzip);
+
+ fprintf(fp, "insert into %s.%s values ", db, tb);
+ for (int i = 0; i < times; ++i) {
+ fprintf(fp, "(%d000, %d)", ts + i, ts);
+ }
+ fprintf(fp, " \n");
+ fflush(fp);
+ fclose(fp);
+
+ char cmd[1024] = {0};
+ if (strcmp(gzip, "gzip") == 0) {
+ sprintf(cmd,
+ "curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' --header "
+ "--compressed --data-ascii @%s 127.0.0.1:7111/rest/sql",
+ filename);
+ } else {
+ sprintf(cmd,
+ "curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' --header "
+ "'Transfer-Encoding: chunked' --data-ascii @%s 127.0.0.1:7111/rest/sql",
+ filename);
+ }
+
+ return simExecuteSystemCmd(script, cmd);
+}
+
bool simExecuteSqlErrorCmd(SScript *script, char *rest) {
char buf[3000];
SCmdLine *line = &script->lines[script->linePos];
diff --git a/tests/tsim/src/simParse.c b/tests/tsim/src/simParse.c
index 8dcf83806f..2e6121304f 100644
--- a/tests/tsim/src/simParse.c
+++ b/tests/tsim/src/simParse.c
@@ -721,6 +721,12 @@ bool simParseSqlSlowCmd(char *rest, SCommand *pCmd, int lineNum) {
return true;
}
+bool simParseRestfulCmd(char *rest, SCommand *pCmd, int lineNum) {
+ simParseSqlCmd(rest, pCmd, lineNum);
+ cmdLine[numOfLines - 1].cmdno = SIM_CMD_RESTFUL;
+ return true;
+}
+
bool simParseSystemCmd(char *rest, SCommand *pCmd, int lineNum) {
int expLen;
@@ -1020,6 +1026,14 @@ void simInitsimCmdList() {
simCmdList[cmdno].executeCmd = simExecuteSqlSlowCmd;
simAddCmdIntoHash(&(simCmdList[cmdno]));
+ cmdno = SIM_CMD_RESTFUL;
+ simCmdList[cmdno].cmdno = cmdno;
+ strcpy(simCmdList[cmdno].name, "restful");
+ simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name);
+ simCmdList[cmdno].parseCmd = simParseRestfulCmd;
+ simCmdList[cmdno].executeCmd = simExecuteRestfulCmd;
+ simAddCmdIntoHash(&(simCmdList[cmdno]));
+
/* test is only an internal command */
cmdno = SIM_CMD_TEST;
simCmdList[cmdno].cmdno = cmdno;