diff --git a/README.md b/README.md index 41629d6df3..2d84389f78 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ [![Build Status](https://travis-ci.org/taosdata/TDengine.svg?branch=master)](https://travis-ci.org/taosdata/TDengine) [![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master) [![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop) +[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201) [![TDengine](TDenginelogo.png)](https://www.taosdata.com) diff --git a/cmake/define.inc b/cmake/define.inc index b1e8f097be..c72995159f 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -33,11 +33,7 @@ IF (TD_LINUX_64) ADD_DEFINITIONS(-D_M_X64) ADD_DEFINITIONS(-D_TD_LINUX_64) SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") - - FIND_PATH(ICONV_INCLUDE_EXIST iconv.h /usr/include/ /usr/local/include/) - IF (ICONV_INCLUDE_EXIST) - ADD_DEFINITIONS(-DUSE_LIBICONV) - ENDIF () + ADD_DEFINITIONS(-DUSE_LIBICONV) ENDIF () IF (TD_LINUX_32) @@ -47,8 +43,10 @@ IF (TD_LINUX_32) ENDIF () IF (TD_ARM_64) + ADD_DEFINITIONS(-D_M_X64) ADD_DEFINITIONS(-D_TD_ARM_64_) ADD_DEFINITIONS(-D_TD_ARM_) + ADD_DEFINITIONS(-DUSE_LIBICONV) SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () @@ -132,6 +130,7 @@ ENDIF () IF (TD_WINDOWS_32) ADD_DEFINITIONS(-D_TD_WINDOWS_32) + ADD_DEFINITIONS(-DUSE_LIBICONV) ENDIF () INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc) diff --git a/cmake/install.inc b/cmake/install.inc index 98a60ace7b..997101c8d9 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -1,22 +1,14 @@ -IF (TD_LINUX_64) +IF (TD_LINUX) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")") INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})") INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR})") -ELSEIF (TD_LINUX_32) - IF (NOT TD_ARM) - EXIT () - ENDIF () - SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") - INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")") - INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})") - INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR})") -ELSEIF (TD_WINDOWS_64) +ELSEIF (TD_WINDOWS) SET(CMAKE_INSTALL_PREFIX C:/TDengine) IF (NOT TD_GODLL) - INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector) - INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector) - INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector) + #INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector) + #INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector) + #INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .) 
INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include) @@ -33,7 +25,7 @@ ELSEIF (TD_WINDOWS_64) INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll DESTINATION driver) INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll.a DESTINATION driver) ENDIF () -ELSEIF (TD_DARWIN_64) +ELSEIF (TD_DARWIN) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")") INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})") diff --git a/cmake/platform.inc b/cmake/platform.inc index 4cb6262471..11ab8f301d 100755 --- a/cmake/platform.inc +++ b/cmake/platform.inc @@ -43,7 +43,6 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux") ELSEIF (${CMAKE_SIZEOF_VOID_P} MATCHES 4) SET(TD_LINUX_32 TRUE) MESSAGE(STATUS "The current platform is Linux 32-bit") - ELSE () MESSAGE(FATAL_ERROR "The current platform is Linux neither 32-bit nor 64-bit, not supported yet") EXIT () @@ -81,14 +80,17 @@ ENDIF () # cmake -DCPUTYPE=aarch32 .. or cmake -DCPUTYPE=aarch64 IF (${CPUTYPE} MATCHES "aarch32") SET(TD_LINUX TRUE) + SET(TD_LINUX_32 FALSE) SET(TD_ARM_32 TRUE) MESSAGE(STATUS "input cpuType: aarch32") ELSEIF (${CPUTYPE} MATCHES "aarch64") SET(TD_LINUX TRUE) + SET(TD_LINUX_64 FALSE) SET(TD_ARM_64 TRUE) MESSAGE(STATUS "input cpuType: aarch64") ELSEIF (${CPUTYPE} MATCHES "mips64") SET(TD_LINUX TRUE) + SET(TD_LINUX_64 FALSE) SET(TD_MIPS_64 TRUE) MESSAGE(STATUS "input cpuType: mips64") ELSEIF (${CPUTYPE} MATCHES "x64") diff --git a/documentation/webdocs/markdowndocs/Connector.md b/documentation/webdocs/markdowndocs/Connector.md index 563d306128..fcd6976cb0 100644 --- a/documentation/webdocs/markdowndocs/Connector.md +++ b/documentation/webdocs/markdowndocs/Connector.md @@ -286,7 +286,7 @@ Connection conn = DriverManager.getConnection(jdbcUrl); > `6030` is the default port and `log` is the default database for system monitor. A normal JDBC URL looks as follows: -`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` +`jdbc:TAOS://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` values in `{}` are necessary while values in `[]` are optional。Each option in the above URL denotes: diff --git a/documentation/webdocs/markdowndocs/connector-ch.md b/documentation/webdocs/markdowndocs/connector-ch.md index ec97816a27..79e7e918b3 100644 --- a/documentation/webdocs/markdowndocs/connector-ch.md +++ b/documentation/webdocs/markdowndocs/connector-ch.md @@ -281,7 +281,7 @@ Connection conn = DriverManager.getConnection(jdbcUrl); > 端口 6030 为默认连接端口,JDBC URL 中的 log 为系统本身的监控数据库。 TDengine 的 JDBC URL 规范格式为: -`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` +`jdbc:TAOS://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` 其中,`{}` 中的内容必须,`[]` 中为可选。配置参数说明如下: diff --git a/documentation20/webdocs/markdowndocs/Connector.md b/documentation20/webdocs/markdowndocs/Connector.md index 6d981478df..e5ba6d5185 100644 --- a/documentation20/webdocs/markdowndocs/Connector.md +++ b/documentation20/webdocs/markdowndocs/Connector.md @@ -281,103 +281,100 @@ For the time being, TDengine supports subscription on one or multiple tables. 
It ## Java Connector -TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository][1] 搜索并下载。 +To Java delevopers, TDengine provides `taos-jdbcdriver` according to the JDBC(3.0) API. Users can find and download it through [Sonatype Repository][1]. -由于 TDengine 是使用 c 语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。 +Since the native language of TDengine is C, the necessary TDengine library should be checked before using the taos-jdbcdriver: -* libtaos.so - 在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。 +* libtaos.so (Linux) + After TDengine is installed successfully, the library `libtaos.so` will be automatically copied to the `/usr/lib/`, which is the system's default search path. -* taos.dll - 在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。 +* taos.dll (Windows) + After TDengine client is installed, the library `taos.dll` will be automatically copied to the `C:/Windows/System32`, which is the system's default search path. -> 注意:在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14],Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。 +> Note: Please make sure that [TDengine Windows client][14] has been installed if developing on Windows. Now although TDengine client would be defaultly installed together with TDengine server, it can also be installed [alone][15]. -TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致 taos-jdbcdriver 并未完全实现 JDBC 标准规范。在使用时需要注意以下几点: +Since TDengine is time-series database, there are still some differences compared with traditional databases in using TDengine JDBC driver: +* TDengine doesn't allow to delete/modify a single record, and thus JDBC driver also has no such method. +* No support for transaction +* No support for union between tables +* No support for nested query,`There is at most one open ResultSet for each Connection. Thus, TSDB JDBC Driver will close current ResultSet if it is not closed and a new query begins`. 
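
The last restriction above is the one most likely to surprise users, so here is a minimal sketch of what it means in practice (assuming a local TDengine server and the `db.tb` table created later in this document):

```java
// Minimal sketch: assumes a local TDengine server and the db.tb table created later in this document.
Class.forName("com.taosdata.jdbc.TSDBDriver");
Connection conn = DriverManager.getConnection("jdbc:TAOS://127.0.0.1:6030/db?user=root&password=taosdata");

Statement stmtA = conn.createStatement();
Statement stmtB = conn.createStatement();

ResultSet rsA = stmtA.executeQuery("select * from tb");
// Starting a second query on the same connection: per the restriction above, the driver
// closes rsA automatically, because at most one ResultSet may stay open per Connection.
ResultSet rsB = stmtB.executeQuery("select count(*) from tb");

while (rsB.next()) {
    System.out.println("rows: " + rsB.getLong(1));
}
rsB.close();
stmtB.close();
stmtA.close();
conn.close();
```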
-* TDengine 不提供针对单条数据记录的删除和修改的操作,驱动中也没有支持相关方法。 -* 由于不支持删除和修改,所以也不支持事务操作。 -* 目前不支持表间的 union 操作。 -* 目前不支持嵌套查询(nested query),对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver 则会自动关闭上一个 ResultSet。 +## Version list of TAOS-JDBCDriver and required TDengine and JDK - -## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本 - -| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | +| taos-jdbcdriver | TDengine | JDK | | --- | --- | --- | -| 1.0.3 | 1.6.1.x 及以上 | 1.8.x | -| 1.0.2 | 1.6.1.x 及以上 | 1.8.x | -| 1.0.1 | 1.6.1.x 及以上 | 1.8.x | -| 2.0.0 | 2.0.0.x 及以上 | 1.8.x | +| 2.0.2 | 2.0.0.x or higher | 1.8.x | +| 1.0.3 | 1.6.1.x or higher | 1.8.x | +| 1.0.2 | 1.6.1.x or higher | 1.8.x | +| 1.0.1 | 1.6.1.x or higher | 1.8.x | -## TDengine DataType 和 Java DataType +## DataType in TDengine and Java -TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: +The datatypes in TDengine include timestamp, number, string and boolean, which are converted as follows in Java: -| TDengine DataType | Java DataType | -| --- | --- | -| TIMESTAMP | java.sql.Timestamp | -| INT | java.lang.Integer | -| BIGINT | java.lang.Long | -| FLOAT | java.lang.Float | -| DOUBLE | java.lang.Double | +| TDengine | Java | +| --- | --- | +| TIMESTAMP | java.sql.Timestamp | +| INT | java.lang.Integer | +| BIGINT | java.lang.Long | +| FLOAT | java.lang.Float | +| DOUBLE | java.lang.Double | | SMALLINT, TINYINT |java.lang.Short | -| BOOL | java.lang.Boolean | -| BINARY, NCHAR | java.lang.String | +| BOOL | java.lang.Boolean | +| BINARY, NCHAR | java.lang.String | -## 如何获取 TAOS-JDBCDriver +## How to get TAOS-JDBC Driver -### maven 仓库 +### maven repository -目前 taos-jdbcdriver 已经发布到 [Sonatype Repository][1] 仓库,且各大仓库都已同步。 +taos-jdbcdriver has been published to [Sonatype Repository][1]: * [sonatype][8] * [mvnrepository][9] * [maven.aliyun][10] -maven 项目中使用如下 pom.xml 配置即可: +Using the following pom.xml for maven projects ```xml com.taosdata.jdbc taos-jdbcdriver - 2.0.0 + 2.0.2 ``` -### 源码编译打包 +### JAR file from the source code -下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package` 即可生成相应 jar 包。 +After downloading the [TDengine][3] source code, execute `mvn clean package` in the directory `src/connector/jdbc` and then the corresponding jar file is generated. +## Usage -## 使用说明 +### get the connection -### 获取连接 - -如下所示配置即可获取 TDengine Connection: ```java Class.forName("com.taosdata.jdbc.TSDBDriver"); String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata"; Connection conn = DriverManager.getConnection(jdbcUrl); ``` -> 端口 6030 为默认连接端口,JDBC URL 中的 log 为系统本身的监控数据库。 +> `6030` is the default port and `log` is the default database for system monitor. 
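
Besides packing every option into the URL, a `java.util.Properties` object can be passed to `DriverManager.getConnection`; this is option 2 in the priority list below. The following is only a sketch: the two property-key constants are taken from the subscription example later in this document (`TSDBDriver.PROPERTY_KEY_CHARSET`, `TSDBDriver.PROPERTY_KEY_TIME_ZONE`), and keys for the remaining options are assumed to exist but are not shown here.

```java
// Sketch: user/password stay in the URL; charset and timezone are passed as properties.
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
```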
-TDengine 的 JDBC URL 规范格式为: -`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` +A normal JDBC URL looks as follows: +`jdbc:TAOS://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` -其中,`{}` 中的内容必须,`[]` 中为可选。配置参数说明如下: +values in `{}` are necessary while values in `[]` are optional。Each option in the above URL denotes: -* user:登录 TDengine 用户名,默认值 root。 -* password:用户登录密码,默认值 taosdata。 -* charset:客户端使用的字符集,默认值为系统字符集。 -* cfgdir:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。 -* locale:客户端语言环境,默认值系统当前 locale。 -* timezone:客户端使用的时区,默认值为系统当前时区。 +* user:user name for login, defaultly root。 +* password:password for login,defaultly taosdata。 +* charset:charset for client,defaultly system charset +* cfgdir:log directory for client, defaultly _/etc/taos/_ on Linux and _C:/TDengine/cfg_ on Windows。 +* locale:language for client,defaultly system locale。 +* timezone:timezone for client,defaultly system timezone。 -以上参数可以在 3 处配置,`优先级由高到低`分别如下: -1. JDBC URL 参数 - 如上所述,可以在 JDBC URL 的参数中指定。 +The options above can be configures (`ordered by priority`): +1. JDBC URL + + As explained above. 2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps) ```java public Connection getConn() throws Exception{ @@ -395,9 +392,9 @@ public Connection getConn() throws Exception{ } ``` -3. 客户端配置文件 taos.cfg +3. Configuration file (taos.cfg) - linux 系统默认配置文件为 /var/lib/taos/taos.cfg,windows 系统默认配置文件路径为 C:\TDengine\cfg\taos.cfg。 + Default configuration file is _/var/lib/taos/taos.cfg_ On Linux and _C:\TDengine\cfg\taos.cfg_ on Windows ```properties # client default username # defaultUser root @@ -411,9 +408,9 @@ public Connection getConn() throws Exception{ # system locale # locale en_US.UTF-8 ``` -> 更多详细配置请参考[客户端配置][13] +> More options can refer to [client configuration][13] -### 创建数据库和表 +### Create databases and tables ```java Statement stmt = conn.createStatement(); @@ -427,9 +424,9 @@ stmt.executeUpdate("use db"); // create table stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)"); ``` -> 注意:如果不使用 `use db` 指定数据库,则后续对表的操作都需要增加数据库名称作为前缀,如 db.tb。 +> Note: if no step like `use db`, the name of database must be added as prefix like _db.tb_ when operating on tables -### 插入数据 +### Insert data ```java // insert data @@ -437,10 +434,10 @@ int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now System.out.println("insert " + affectedRows + " rows."); ``` -> now 为系统内部函数,默认为服务器当前时间。 -> `now + 1s` 代表服务器当前时间往后加 1 秒,数字后面代表时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。 +> _now_ is the server time. +> _now+1s_ is 1 second later than current server time. The time unit includes: _a_(millisecond), _s_(second), _m_(minute), _h_(hour), _d_(day), _w_(week), _n_(month), _y_(year). -### 查询数据 +### Query database ```java // query data @@ -458,22 +455,22 @@ while(resultSet.next()){ System.out.printf("%s, %d, %s\n", ts, temperature, humidity); } ``` -> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。 +> query is consistent with relational database. The subscript start with 1 when retrieving return results. It is recommended to use the column name to retrieve results. 
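
As the note recommends, columns can be read by name rather than by 1-based index. A short sketch against the `tb` table created above, assuming the insert shown earlier has been executed:

```java
ResultSet resultSet = stmt.executeQuery("select * from tb");
while (resultSet.next()) {
    // read columns by name instead of by 1-based index
    Timestamp ts    = resultSet.getTimestamp("ts");
    int temperature = resultSet.getInt("temperature");
    float humidity  = resultSet.getFloat("humidity");
    System.out.printf("%s, %d, %f%n", ts, temperature, humidity);
}
resultSet.close();
```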
- -### 关闭资源 +### Close all ```java resultSet.close(); stmt.close(); conn.close(); ``` -> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。 -## 与连接池使用 +> `please make sure the connection is closed to avoid the error like connection leakage` + +## Using connection pool **HikariCP** -* 引入相应 HikariCP maven 依赖: +* dependence in pom.xml: ```xml com.zaxxer @@ -482,7 +479,7 @@ conn.close(); ``` -* 使用示例如下: +* Examples: ```java public static void main(String[] args) throws SQLException { HikariConfig config = new HikariConfig(); @@ -508,8 +505,69 @@ conn.close(); connection.close(); // put back to conneciton pool } ``` -> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。 -> 更多 HikariCP 使用问题请查看[官方说明][5] +> The close() method will not close the connection from HikariDataSource.getConnection(). Instead, the connection is put back to the connection pool. +> More instructions can refer to [User Guide][5] + +**Druid** + +* dependency in pom.xml: + +```xml + + com.alibaba + druid + 1.1.20 + +``` + +* Examples: +```java +public static void main(String[] args) throws Exception { + Properties properties = new Properties(); + properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver"); + properties.put("url","jdbc:TAOS://127.0.0.1:6030/log"); + properties.put("username","root"); + properties.put("password","taosdata"); + + properties.put("maxActive","10"); //maximum number of connection in the pool + properties.put("initialSize","3");//initial number of connection + properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool + properties.put("minIdle","3");//minimum number of connection in the pool + + properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection + + properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle + properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle + + properties.put("validationQuery","describe log.dn"); //validation query + properties.put("testWhileIdle","true"); // test connection while idle + properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true + properties.put("testOnReturn","false"); // don't need while testWhileIdle is true + + //create druid datasource + DataSource ds = DruidDataSourceFactory.createDataSource(properties); + Connection connection = ds.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + + //query or insert + // ... + + connection.close(); // put back to conneciton pool +} +``` +> More instructions can refer to [User Guide][6] + +**Notice** +* TDengine `v1.6.4.1` provides a function `select server_status()` to check heartbeat. It is highly recommended to use this function for `Validation Query`. 
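
For example, the heartbeat query can be wired into both pools shown above: via `setConnectionTestQuery` for HikariCP and via the `validationQuery` property for Druid. This is a sketch reusing the URLs and credentials from the examples above:

```java
// HikariCP: use server_status() as the connection test query
HikariConfig config = new HikariConfig();
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata");
config.setConnectionTestQuery("select server_status()");
HikariDataSource hikariDs = new HikariDataSource(config);

// Druid: use server_status() as the validation query
Properties properties = new Properties();
properties.put("driverClassName", "com.taosdata.jdbc.TSDBDriver");
properties.put("url", "jdbc:TAOS://127.0.0.1:6030/log");
properties.put("username", "root");
properties.put("password", "taosdata");
properties.put("validationQuery", "select server_status()");
DataSource druidDs = DruidDataSourceFactory.createDataSource(properties);
```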
+ +As follows,`1` will be returned if `select server_status()` is successfully executed。 +```shell +taos> select server_status(); +server_status()| +================ +1 | +Query OK, 1 row(s) in set (0.000141s) +``` ## Python Connector @@ -821,3 +879,18 @@ An example of using the NodeJS connector to create a table with weather data and An example of using the NodeJS connector to achieve the same things but without all the object wrappers that wrap around the data returned to achieve higher functionality can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js) +[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver +[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver +[3]: https://github.com/taosdata/TDengine +[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/ +[5]: https://github.com/brettwooldridge/HikariCP +[6]: https://github.com/alibaba/druid +[7]: https://github.com/taosdata/TDengine/issues +[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver +[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver +[10]: https://maven.aliyun.com/mvn/search +[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate +[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo +[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE +[14]: https://www.taosdata.com/cn/documentation20/connector/#Windows +[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B \ No newline at end of file diff --git a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md index 3cf811ab66..293aac8d23 100644 --- a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md +++ b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md @@ -31,7 +31,7 @@ taos> DESCRIBE meters; - 时间格式为```YYYY-MM-DD HH:mm:ss.MS```, 默认时间分辨率为毫秒。比如:```2017-08-12 18:25:58.128``` - 内部函数now是服务器的当前时间 -- 插入记录时,如果时间戳为0,插入数据时使用服务器当前时间 +- 插入记录时,如果时间戳为now,插入数据时使用服务器当前时间 - Epoch Time: 时间戳也可以是一个长整数,表示从1970-01-01 08:00:00.000开始的毫秒数 - 时间可以加减,比如 now-2h,表明查询时刻向前推2个小时(最近2小时)。数字后面的时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。比如select * from t1 where ts > now-2w and ts <= now-1w, 表示查询两周前整整一周的数据 - TDengine暂不支持时间窗口按照自然年和自然月切分。Where条件中的时间窗口单位的换算关系如下:interval(1y) 等效于 interval(365d), interval(1n) 等效于 interval(30d), interval(1w) 等效于 interval(7d) @@ -157,7 +157,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ```mysql DROP TABLE [IF EXISTS] stb_name; ``` - 删除STable会自动删除通过STable创建的字表。 + 删除STable会自动删除通过STable创建的子表。 - **显示当前数据库下的所有超级表信息** @@ -206,7 +206,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ``` 修改超级表的标签名,从超级表修改某个标签名后,该超级表下的所有子表也会自动更新该标签名。 -- **修改字表标签值** +- **修改子表标签值** ```mysql ALTER TABLE tb_name SET TAG tag_name=new_tag_value; @@ -994,4 +994,4 @@ SELECT AVG(current),MAX(current),LEASTSQUARES(current, start_val, step_val), PER - 列名最大长度为65,最多允许1024列,最少需要2列,第一列必须是时间戳 - 标签最多允许128个,可以0个,标签总长度不超过16k个字符 - SQL语句最大长度65480个字符,但可通过系统配置参数maxSQLLength修改,最长可配置为8M -- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制 \ No newline at end of file +- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制 diff --git a/documentation20/webdocs/markdowndocs/administrator-ch.md b/documentation20/webdocs/markdowndocs/administrator-ch.md index 470c718af1..813d06a660 100644 --- 
a/documentation20/webdocs/markdowndocs/administrator-ch.md +++ b/documentation20/webdocs/markdowndocs/administrator-ch.md @@ -39,7 +39,7 @@ Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable 用户可以通过参数keep,设置数据在磁盘中的最大保存时长。为进一步减少存储成本,TDengine还提供多级存储,最冷的数据可以存放在最廉价的存储介质上,应用的访问不用做任何调整,只是读取速度降低了。 -为提高速度,可以配置多快硬盘,这样可以并发写入或读取数据。需要提醒的是,TDengine采取多副本的方式提供数据的高可靠,因此不再需要采用昂贵的磁盘阵列。 +为提高速度,可以配置多块硬盘,这样可以并发写入或读取数据。需要提醒的是,TDengine采取多副本的方式提供数据的高可靠,因此不再需要采用昂贵的磁盘阵列。 ### 物理机或虚拟机台数 diff --git a/documentation20/webdocs/markdowndocs/advanced features-ch.md b/documentation20/webdocs/markdowndocs/advanced features-ch.md index 690f2a6268..b1d050c8cc 100644 --- a/documentation20/webdocs/markdowndocs/advanced features-ch.md +++ b/documentation20/webdocs/markdowndocs/advanced features-ch.md @@ -295,6 +295,117 @@ $ taos 这时,因为电流超过了10A,您应该可以看到示例程序将它输出到了屏幕上。 您可以继续插入一些数据观察示例程序的输出。 +### Java 使用数据订阅功能 + +订阅功能也提供了 Java 开发接口,相关说明请见 [Java Connector](https://www.taosdata.com/cn/documentation20/connector/)。需要注意的是,目前 Java 接口没有提供异步订阅模式,但用户程序可以通过创建 `TimerTask` 等方式达到同样的效果。 + +下面以一个示例程序介绍其具体使用方法。它所完成的功能与前面介绍的 C 语言示例基本相同,也是订阅数据库中所有电流超过 10A 的记录。 + +#### 准备数据 + +```sql +# 创建 power 库 +taos> create database power; +# 切换库 +taos> use power; +# 创建超级表 +taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int); +# 创建表 +taos> create table d1001 using meters tags ("Beijing.Chaoyang", 2); +taos> create table d1002 using meters tags ("Beijing.Haidian", 2); +# 插入测试数据 +taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1); +taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1); +# 从超级表 meters 查询电流大于 10A 的记录 +taos> select * from meters where current > 10; + ts | current | voltage | phase | location | groupid | +=========================================================================================================== + 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | Beijing.Haidian | 2 | + 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | Beijing.Haidian | 2 | + 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | Beijing.Chaoyang | 2 | + 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | Beijing.Chaoyang | 2 | + 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | Beijing.Chaoyang | 2 | +Query OK, 5 row(s) in set (0.004896s) +``` + +#### 示例程序 + +```java +public class SubscribeDemo { + private static final String topic = "topic-meter-current-bg-10"; + private static final String sql = "select * from meters where current > 10"; + + public static void main(String[] args) { + Connection connection = null; + TSDBSubscribe subscribe = null; + + try { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/power?user=root&password=taosdata"; + connection = DriverManager.getConnection(jdbcUrl, properties); + subscribe = ((TSDBConnection) connection).subscribe(topic, sql, true); // 创建订阅 + int count = 0; + while (count < 10) { + TimeUnit.SECONDS.sleep(1); // 等待1秒,避免频繁调用 consume,给服务端造成压力 + TSDBResultSet resultSet = subscribe.consume(); // 消费数据 + if (resultSet == null) { + continue; + } + ResultSetMetaData metaData = resultSet.getMetaData(); + while (resultSet.next()) { + int columnCount = 
metaData.getColumnCount(); + for (int i = 1; i <= columnCount; i++) { + System.out.print(metaData.getColumnLabel(i) + ": " + resultSet.getString(i) + "\t"); + } + System.out.println(); + count++; + } + } + } catch (Exception e) { + e.printStackTrace(); + } finally { + try { + if (null != subscribe) + subscribe.close(true); // 关闭订阅 + if (connection != null) + connection.close(); + } catch (SQLException throwables) { + throwables.printStackTrace(); + } + } + } +} +``` + +运行示例程序,首先,它会消费符合查询条件的所有历史数据: + +```shell +# java -jar subscribe.jar + +ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2 +ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: Beijing.Chaoyang groupid : 2 +ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2 +ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2 +ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2 +``` + +接着,使用 taos 客户端向表中新增一条数据: + +```sql +# taos +taos> use power; +taos> insert into d1001 values("2020-08-15 12:40:00.000", 12.4, 220, 1); +``` + +因为这条数据的电流大于10A,示例程序会将其消费: + +```shell +ts: 1597466400000 current: 12.4 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid: 2 +``` + ## 缓存(Cache) diff --git a/documentation20/webdocs/markdowndocs/cluster-ch.md b/documentation20/webdocs/markdowndocs/cluster-ch.md index 2df6d2cb0e..afe0272387 100644 --- a/documentation20/webdocs/markdowndocs/cluster-ch.md +++ b/documentation20/webdocs/markdowndocs/cluster-ch.md @@ -107,7 +107,7 @@ CREATE DATABASE demo replica 3; ``` 一个DB里的数据会被切片分到多个vnode group,vnode group里的vnode数目就是DB的副本数,同一个vnode group里各vnode的数据是完全一致的。为保证高可用性,vnode group里的vnode一定要分布在不同的dnode里(实际部署时,需要在不同的物理机上),只要一个vgroup里超过半数的vnode处于工作状态,这个vgroup就能正常的对外服务。 -一个dnode里可能有多个DB的数据,因此一个dnode离线时,可能会影响到多个DB。如果一个vnode group里的一半或一半以上的vnode不工作,那么该vnode group就无法对外服务,无法插入或读取数据,这样会影响到它所属的DB的一部分表的d读写操作。 +一个dnode里可能有多个DB的数据,因此一个dnode离线时,可能会影响到多个DB。如果一个vnode group里的一半或一半以上的vnode不工作,那么该vnode group就无法对外服务,无法插入或读取数据,这样会影响到它所属的DB的一部分表的读写操作。 因为vnode的引入,无法简单的给出结论:“集群中过半dnode工作,集群就应该工作”。但是对于简单的情形,很好下结论。比如副本数为3,只有三个dnode,那如果仅有一个节点不工作,整个集群还是可以正常工作的,但如果有两个节点不工作,那整个集群就无法正常工作了。 diff --git a/documentation20/webdocs/markdowndocs/connector-ch.md b/documentation20/webdocs/markdowndocs/connector-ch.md index 2cd12c3779..6b22004c43 100644 --- a/documentation20/webdocs/markdowndocs/connector-ch.md +++ b/documentation20/webdocs/markdowndocs/connector-ch.md @@ -45,11 +45,11 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine 创建数据库连接,初始化连接上下文。其中需要用户提供的参数包含: - - ip:TDengine管理主节点的IP地址 - - user:用户名 - - pass:密码 - - db:数据库名字,如果用户没有提供,也可以正常连接,用户可以通过该连接创建新的数据库,如果用户提供了数据库名字,则说明该数据库用户已经创建好,缺省使用该数据库 - - port:端口号 + - ip:TDengine管理主节点的IP地址 + - user:用户名 + - pass:密码 + - db:数据库名字,如果用户没有提供,也可以正常连接,用户可以通过该连接创建新的数据库,如果用户提供了数据库名字,则说明该数据库用户已经创建好,缺省使用该数据库 + - port:端口号 返回值为空表示失败。应用程序需要保存返回的参数,以便后续API调用。 @@ -157,25 +157,25 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine 异步执行SQL语句。 - * taos:调用taos_connect返回的数据库连接 - * sql:需要执行的SQL语句 - * fp:用户定义的回调函数,其第三个参数`code`用于指示操作是否成功,`0`表示成功,负数表示失败(调用`taos_errstr`获取失败原因)。应用在定义回调函数的时候,主要处理第二个参数`TAOS_RES *`,该参数是查询返回的结果集 - * param:应用提供一个用于回调的参数 + * taos:调用taos_connect返回的数据库连接 + * sql:需要执行的SQL语句 + * fp:用户定义的回调函数,其第三个参数`code`用于指示操作是否成功,`0`表示成功,负数表示失败(调用`taos_errstr`获取失败原因)。应用在定义回调函数的时候,主要处理第二个参数`TAOS_RES *`,该参数是查询返回的结果集 + * param:应用提供一个用于回调的参数 - `void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);` 
批量获取异步查询的结果集,只能与`taos_query_a`配合使用。其中: - * res:`taos_query_a`回调时返回的结果集 - * fp:回调函数。其参数`param`是用户可定义的传递给回调函数的参数结构体;`numOfRows`是获取到的数据的行数(不是整个查询结果集的函数)。 在回调函数中,应用可以通过调用`taos_fetch_row`前向迭代获取批量记录中每一行记录。读完一块内的所有记录后,应用需要在回调函数中继续调用`taos_fetch_rows_a`获取下一批记录进行处理,直到返回的记录数(numOfRows)为零(结果返回完成)或记录数为负值(查询出错)。 + * res:`taos_query_a`回调时返回的结果集 + * fp:回调函数。其参数`param`是用户可定义的传递给回调函数的参数结构体;`numOfRows`是获取到的数据的行数(不是整个查询结果集的函数)。 在回调函数中,应用可以通过调用`taos_fetch_row`前向迭代获取批量记录中每一行记录。读完一块内的所有记录后,应用需要在回调函数中继续调用`taos_fetch_rows_a`获取下一批记录进行处理,直到返回的记录数(numOfRows)为零(结果返回完成)或记录数为负值(查询出错)。 - `void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);` 异步获取一条记录。其中: - * res:`taos_query_a`回调时返回的结果集 - * fp:回调函数。其参数`param`是应用提供的一个用于回调的参数。回调时,第三个参数`row`指向一行记录。不同于`taos_fetch_rows_a`,应用无需调用`taos_fetch_row`来获取一行数据,更加简单,但数据提取性能不及批量获取的API。 + * res:`taos_query_a`回调时返回的结果集 + * fp:回调函数。其参数`param`是应用提供的一个用于回调的参数。回调时,第三个参数`row`指向一行记录。不同于`taos_fetch_rows_a`,应用无需调用`taos_fetch_row`来获取一行数据,更加简单,但数据提取性能不及批量获取的API。 TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线程同时打开多张表,并可以同时对每张打开的表进行查询或者插入操作。需要指出的是,**客户端应用必须确保对同一张表的操作完全串行化**,即对同一个表的插入或查询操作未完成时(未返回时),不能够执行第二个插入或查询操作。 @@ -232,12 +232,12 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时 - `TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime, void *param, void (*callback)(void *))` 该API用来创建数据流,其中: - * taos:已经建立好的数据库连接 - * sql:SQL查询语句(仅能使用查询语句) - * fp:用户定义的回调函数指针,每次流式计算完成后,TDengine将查询的结果(TAOS_ROW)、查询状态(TAOS_RES)、用户定义参数(PARAM)传递给回调函数,在回调函数内,用户可以使用taos_num_fields获取结果集列数,taos_fetch_fields获取结果集每列数据的类型。 - * stime:是流式计算开始的时间,如果是0,表示从现在开始,如果不为零,表示从指定的时间开始计算(UTC时间从1970/1/1算起的毫秒数) - * param:是应用提供的用于回调的一个参数,回调时,提供给应用 - * callback: 第二个回调函数,会在连续查询自动停止时被调用。 + * taos:已经建立好的数据库连接 + * sql:SQL查询语句(仅能使用查询语句) + * fp:用户定义的回调函数指针,每次流式计算完成后,TDengine将查询的结果(TAOS_ROW)、查询状态(TAOS_RES)、用户定义参数(PARAM)传递给回调函数,在回调函数内,用户可以使用taos_num_fields获取结果集列数,taos_fetch_fields获取结果集每列数据的类型。 + * stime:是流式计算开始的时间,如果是0,表示从现在开始,如果不为零,表示从指定的时间开始计算(UTC时间从1970/1/1算起的毫秒数) + * param:是应用提供的用于回调的一个参数,回调时,提供给应用 + * callback: 第二个回调函数,会在连续查询自动停止时被调用。 返回值为NULL,表示创建成功,返回值不为空,表示成功。 @@ -254,21 +254,21 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时 * `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)` 该函数负责启动订阅服务,成功时返回订阅对象,失败时返回 `NULL`,其参数为: - * taos:已经建立好的数据库连接 - * restart:如果订阅已经存在,是重新开始,还是继续之前的订阅 - * topic:订阅的主题(即名称),此参数是订阅的唯一标识 - * sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据 - * fp:收到查询结果时的回调函数(稍后介绍函数原型),只在异步调用时使用,同步调用时此参数应该传 `NULL` - * param:调用回调函数时的附加参数,系统API将其原样传递到回调函数,不进行任何处理 - * interval:轮询周期,单位为毫秒。异步调用时,将根据此参数周期性的调用回调函数,为避免对系统性能造成影响,不建议将此参数设置的过小;同步调用时,如两次调用`taos_consume`的间隔小于此周期,API将会阻塞,直到时间间隔超过此周期。 + * taos:已经建立好的数据库连接 + * restart:如果订阅已经存在,是重新开始,还是继续之前的订阅 + * topic:订阅的主题(即名称),此参数是订阅的唯一标识 + * sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据 + * fp:收到查询结果时的回调函数(稍后介绍函数原型),只在异步调用时使用,同步调用时此参数应该传 `NULL` + * param:调用回调函数时的附加参数,系统API将其原样传递到回调函数,不进行任何处理 + * interval:轮询周期,单位为毫秒。异步调用时,将根据此参数周期性的调用回调函数,为避免对系统性能造成影响,不建议将此参数设置的过小;同步调用时,如两次调用`taos_consume`的间隔小于此周期,API将会阻塞,直到时间间隔超过此周期。 * `typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)` 异步模式下,回调函数的原型,其参数为: - * tsub:订阅对象 - * res:查询结果集,注意结果集中可能没有记录 - * param:调用 `taos_subscribe`时客户程序提供的附加参数 - * code:错误码 + * tsub:订阅对象 + * res:查询结果集,注意结果集中可能没有记录 + * param:调用 `taos_subscribe`时客户程序提供的附加参数 + * code:错误码 * `TAOS_RES *taos_consume(TAOS_SUB *tsub)` @@ -306,7 +306,7 @@ TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一 | 
taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | | --- | --- | --- | -| 2.0.0 | 2.0.0.x 及以上 | 1.8.x | +| 2.0.2 | 2.0.0.x 及以上 | 1.8.x | | 1.0.3 | 1.6.1.x 及以上 | 1.8.x | | 1.0.2 | 1.6.1.x 及以上 | 1.8.x | | 1.0.1 | 1.6.1.x 及以上 | 1.8.x | @@ -341,7 +341,7 @@ maven 项目中使用如下 pom.xml 配置即可: com.taosdata.jdbc taos-jdbcdriver - 2.0.1 + 2.0.2 ``` @@ -363,7 +363,7 @@ Connection conn = DriverManager.getConnection(jdbcUrl); > 端口 6030 为默认连接端口,JDBC URL 中的 log 为系统本身的监控数据库。 TDengine 的 JDBC URL 规范格式为: -`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` +`jdbc:TAOS://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` 其中,`{}` 中的内容必须,`[]` 中为可选。配置参数说明如下: @@ -460,6 +460,49 @@ while(resultSet.next()){ > 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。 +### 订阅 + +#### 创建 + +```java +TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false); +``` + +`subscribe` 方法的三个参数含义如下: + +* topic:订阅的主题(即名称),此参数是订阅的唯一标识 +* sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据 +* restart:如果订阅已经存在,是重新开始,还是继续之前的订阅 + +如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic' 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。 + +#### 消费数据 + +```java +int total = 0; +while(true) { + TSDBResultSet rs = sub.consume(); + int count = 0; + while(rs.next()) { + count++; + } + total += count; + System.out.printf("%d rows consumed, total %d\n", count, total); + Thread.sleep(1000); +} +``` + +`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的`Thread.sleep(1000)`),否则会给服务端造成不必要的压力。 + +#### 关闭订阅 + +```java +sub.close(true); +``` + +`close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。 + + ### 关闭资源 ```java @@ -603,7 +646,7 @@ Query OK, 1 row(s) in set (0.000141s) #### Linux -用户可以在源代码的src/connector/python文件夹下找到python2和python3的安装包。用户可以通过pip命令安装: +用户可以在源代码的src/connector/python(或者tar.gz的/connector/python)文件夹下找到python2和python3的connector安装包。用户可以通过pip命令安装: ​ `pip install src/connector/python/linux/python2/` @@ -931,12 +974,12 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间 ## Go Connector -TDengine提供了GO驱动程序`taosSql`. `taosSql`实现了GO语言的内置接口`database/sql/driver`。用户只需按如下方式引入包就可以在应用程序中访问TDengin, 详见`https://github.com/taosdata/driver-go/blob/develop/taosSql/driver_test.go` +TDengine提供了GO驱动程序`taosSql`. 
`taosSql`实现了GO语言的内置接口`database/sql/driver`。用户只需按如下方式引入包就可以在应用程序中访问TDengine, 详见`https://github.com/taosdata/driver-go/blob/develop/taosSql/driver_test.go` ```Go import ( "database/sql" - _ "github.com/taosdata/driver-go/taoSql" + _ "github.com/taosdata/driver-go/taosSql" ) ``` ### 常用API @@ -982,7 +1025,7 @@ npm install td2.0-connector - Xcode - - 然后通过Xcode安装 + - 然后通过Xcode安装 ``` Command Line Tools @@ -1108,4 +1151,4 @@ promise2.then(function(result) { [12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo [13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE [14]: https://www.taosdata.com/cn/documentation20/connector/#Windows -[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B \ No newline at end of file +[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B diff --git a/documentation20/webdocs/markdowndocs/faq-ch.md b/documentation20/webdocs/markdowndocs/faq-ch.md index bca9cff8e6..a4111e78fc 100644 --- a/documentation20/webdocs/markdowndocs/faq-ch.md +++ b/documentation20/webdocs/markdowndocs/faq-ch.md @@ -19,7 +19,7 @@ #### 4. 如何让TDengine crash时生成core文件? 请看为此问题撰写的技术博客 -#### 5. 遇到错误"failed to connect to server", 我怎么办? +#### 5. 遇到错误"Unable to establish connection", 我怎么办? 客户端遇到链接故障,请按照下面的步骤进行检查: diff --git a/documentation20/webdocs/markdowndocs/faq.md b/documentation20/webdocs/markdowndocs/faq.md index ec0bc2957a..ce7d2ebf5e 100644 --- a/documentation20/webdocs/markdowndocs/faq.md +++ b/documentation20/webdocs/markdowndocs/faq.md @@ -10,7 +10,7 @@ Version 2.X is a complete refactoring of the previous version, and configuration 4. Enjoy the latest stable version of TDengine 5. If the data needs to be migrated or the data file is corrupted, please contact the official technical support team for assistance -#### 2. When encoutered with the error "failed to connect to server", what can I do? +#### 2. When encoutered with the error "Unable to establish connection", what can I do? The client may encounter connection errors. Please follow the steps below for troubleshooting: diff --git a/packaging/release.sh b/packaging/release.sh index 7a585431a2..2302b45875 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -10,6 +10,7 @@ set -e # -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] # -V [stable | beta] # -l [full | lite] +# -n [2.0.0.3] # set parameters by default value verMode=edge # [cluster, edge] @@ -17,8 +18,9 @@ verType=stable # [stable, beta] cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...] osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] pagMode=full # [full | lite] +verNumber="" -while getopts "hv:V:c:o:l:" arg +while getopts "hv:V:c:o:l:n:" arg do case $arg in v) @@ -37,12 +39,21 @@ do #echo "pagMode=$OPTARG" pagMode=$(echo $OPTARG) ;; + n) + #echo "verNumber=$OPTARG" + verNumber=$(echo $OPTARG) + ;; o) #echo "osType=$OPTARG" osType=$(echo $OPTARG) ;; h) - echo "Usage: `basename $0` -v [cluster | edge] -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] -V [stable | beta] -l [full | lite]" + echo "Usage: `basename $0` -v [cluster | edge] " + echo " -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] " + echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] " + echo " -V [stable | beta] " + echo " -l [full | lite] " + echo " -n [version number] " exit 0 ;; ?) 
#unknow option @@ -52,7 +63,7 @@ do esac done -echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode}" +echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} verNumber=${verNumber}" curr_dir=$(pwd) @@ -80,7 +91,6 @@ function is_valid_version() { if [[ $1 =~ $rx ]]; then return 0 fi - return 1 } @@ -89,26 +99,25 @@ function vercomp () { echo 0 exit 0 fi + local IFS=. local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do ver1[i]=0 done for ((i=0; i<${#ver1[@]}; i++)); do - if [[ -z ${ver2[i]} ]] - then + if [[ -z ${ver2[i]} ]]; then # fill empty fields in ver2 with zeros ver2[i]=0 fi - if ((10#${ver1[i]} > 10#${ver2[i]})) - then + if ((10#${ver1[i]} > 10#${ver2[i]})); then echo 1 exit 0 fi - if ((10#${ver1[i]} < 10#${ver2[i]})) - then + if ((10#${ver1[i]} < 10#${ver2[i]})); then echo 2 exit 0 fi @@ -120,10 +129,11 @@ function vercomp () { version=$(cat ${versioninfo} | grep " version" | cut -d '"' -f2) compatible_version=$(cat ${versioninfo} | grep " compatible_version" | cut -d '"' -f2) -while true; do - read -p "Do you want to release a new version? [y/N]: " is_version_change +if [ -z ${verNumber} ]; then + while true; do + read -p "Do you want to release a new version? [y/N]: " is_version_change - if [[ ( "${is_version_change}" == "y") || ( "${is_version_change}" == "Y") ]]; then + if [[ ( "${is_version_change}" == "y") || ( "${is_version_change}" == "Y") ]]; then read -p "Please enter the new version: " tversion while true; do if (! is_valid_version $tversion) || [ "$(vercomp $tversion $version)" = '2' ]; then @@ -152,13 +162,24 @@ while true; do done break - elif [[ ( "${is_version_change}" == "n") || ( "${is_version_change}" == "N") ]]; then + elif [[ ( "${is_version_change}" == "n") || ( "${is_version_change}" == "N") ]]; then echo "Use old version: ${version} compatible version: ${compatible_version}." break - else + else continue - fi -done + fi + done +else + echo "old version: $version, new version: $verNumber" + #if ( ! is_valid_version $verNumber ) || [[ "$(vercomp $version $verNumber)" == '2' ]]; then + # echo "please enter correct version" + # exit 0 + #else + version=${verNumber} + #fi +fi + +echo "=======================new version number: ${version}======================================" # output the version info to the buildinfo file. 
build_time=$(date +"%F %R") diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 0700ed4682..f27595a356 100644 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -168,6 +168,7 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || : + ${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* @@ -176,6 +177,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : + [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : if [ "$verMode" == "cluster" ]; then @@ -196,8 +198,10 @@ function install_lib() { ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : - ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + if [ -d ${lib64_link_dir} ]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + fi #if [ "$verMode" == "cluster" ]; then # # Compatible with version 1.5 @@ -205,6 +209,8 @@ function install_lib() { # ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar # ${csudo} chmod 777 ${v15_java_app_dir} || : #fi + + ${csudo} ldconfig } function install_header() { diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index 605944e9b3..6a1b7be191 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -31,6 +31,7 @@ cfg_install_dir="/etc/taos" if [ "$osType" != "Darwin" ]; then bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" + lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" else bin_link_dir="/usr/local/bin" @@ -45,7 +46,7 @@ install_main_dir="/usr/local/taos" bin_dir="/usr/local/taos/bin" # v1.5 jar dir -v15_java_app_dir="/usr/local/lib/taos" +#v15_java_app_dir="/usr/local/lib/taos" # Color setting RED='\033[0;31m' @@ -87,15 +88,17 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/taosdump || : fi ${csudo} rm -f ${bin_link_dir}/rmtaos || : + ${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* - #Make link + #Make link [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : if [ "$osType" == "Darwin" ]; then [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : fi [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || : + [ -x ${install_main_dir}/bin/set_core.sh ] && 
${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : } function clean_lib() { @@ -106,17 +109,25 @@ function clean_lib() { function install_lib() { # Remove links ${csudo} rm -f ${lib_link_dir}/libtaos.* || : - ${csudo} rm -rf ${v15_java_app_dir} || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : + #${csudo} rm -rf ${v15_java_app_dir} || : ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* if [ "$osType" != "Darwin" ]; then ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + + if [ -d "${lib64_link_dir}" ]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + fi else ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib fi + + ${csudo} ldconfig } function install_header() { diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 2200c7f13d..1a5c4d75b5 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -34,6 +34,7 @@ cfg_install_dir="/etc/taos" if [ "$osType" != "Darwin" ]; then bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" + lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" else bin_link_dir="/usr/local/bin" @@ -141,6 +142,7 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/taosdump || : + ${csudo} rm -f ${bin_link_dir}/set_core || : fi ${csudo} rm -f ${bin_link_dir}/rmtaos || : @@ -149,6 +151,7 @@ function install_bin() { if [ "$osType" != "Darwin" ]; then ${csudo} cp -r ${script_dir}/remove.sh ${install_main_dir}/bin + ${csudo} cp -r ${script_dir}/set_core.sh ${install_main_dir}/bin else ${csudo} cp -r ${script_dir}/remove_client.sh ${install_main_dir}/bin fi @@ -161,6 +164,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : + [ -x ${install_main_dir}/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : fi if [ "$osType" != "Darwin" ]; then @@ -173,17 +177,25 @@ function install_bin() { function install_lib() { # Remove links ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : versioninfo=$(${script_dir}/get_version.sh ${source_dir}/src/util/src/version.c) if [ "$osType" != "Darwin" ]; then ${csudo} cp ${binary_dir}/build/lib/libtaos.so.${versioninfo} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* ${csudo} ln -sf ${install_main_dir}/driver/libtaos.so.${versioninfo} ${lib_link_dir}/libtaos.so.1 ${csudo} ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + + if [ -d "${lib64_link_dir}" ]; then + ${csudo} ln -sf ${install_main_dir}/driver/libtaos.so.${versioninfo} ${lib64_link_dir}/libtaos.so.1 + ${csudo} ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so + fi else ${csudo} cp ${binary_dir}/build/lib/libtaos.${versioninfo}.dylib 
${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* ${csudo} ln -sf ${install_main_dir}/driver/libtaos.${versioninfo}.dylib ${lib_link_dir}/libtaos.1.dylib ${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib fi + + ${csudo} ldconfig } function install_header() { diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index 6120f9fcc2..855d0b9c27 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -45,7 +45,7 @@ if [ "$osType" != "Darwin" ]; then strip ${build_dir}/bin/taos bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh" else - bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh" + bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh ${script_dir}/set_core.sh" fi lib_files="${build_dir}/lib/libtaos.so.${version}" else diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 1d65dd8069..16e0354dcf 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -36,7 +36,7 @@ if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/taos bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh" else - bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh" + bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh" fi lib_files="${build_dir}/lib/libtaos.so.${version}" diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index e9a742e632..0feb64c795 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -90,6 +90,7 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : + ${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} chmod 0555 ${bin_dir}/* @@ -97,7 +98,7 @@ function install_bin() { [ -x ${bin_dir}/taos ] && ${csudo} ln -s ${bin_dir}/taos ${bin_link_dir}/taos || : [ -x ${bin_dir}/taosd ] && ${csudo} ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || : [ -x ${bin_dir}/taosdemo ] && ${csudo} ln -s ${bin_dir}/taosdemo ${bin_link_dir}/taosdemo || : -# [ -x ${bin_dir}/remove.sh ] && ${csudo} ln -s ${bin_dir}/remove.sh ${bin_link_dir}/rmtaos || : + [ -x ${bin_dir}/set_core.sh ] && ${csudo} ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || : } function install_config() { diff --git a/packaging/tools/preun.sh b/packaging/tools/preun.sh index 0533410802..07b43c0e49 100755 --- a/packaging/tools/preun.sh +++ b/packaging/tools/preun.sh @@ -8,6 +8,7 @@ NC='\033[0m' bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" data_link_dir="/usr/local/taos/data" @@ -104,10 +105,12 @@ ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/taosdump || : +${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} rm -f ${cfg_link_dir}/* || : ${csudo} rm -f ${inc_link_dir}/taos.h || : -${csudo} rm -f ${inc_link_dir}/taoserror.h || : -${csudo} rm -f ${lib_link_dir}/libtaos.* || : +${csudo} rm -f ${inc_link_dir}/taoserror.h || : +${csudo} rm -f ${lib_link_dir}/libtaos.* || : +${csudo} rm -f ${lib64_link_dir}/libtaos.* || : ${csudo} rm -f ${log_link_dir} || : ${csudo} rm -f ${data_link_dir} || : diff --git 
a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 9fb8731449..63e09dc568 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -18,11 +18,12 @@ log_link_dir="/usr/local/taos/log" cfg_link_dir="/usr/local/taos/cfg" bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" install_nginxd_dir="/usr/local/nginxd" # v1.5 jar dir -v15_java_app_dir="/usr/local/lib/taos" +#v15_java_app_dir="/usr/local/lib/taos" service_config_dir="/etc/systemd/system" taos_service_name="taosd" @@ -78,7 +79,8 @@ function clean_bin() { function clean_lib() { # Remove link ${csudo} rm -f ${lib_link_dir}/libtaos.* || : - ${csudo} rm -rf ${v15_java_app_dir} || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : + #${csudo} rm -rf ${v15_java_app_dir} || : } function clean_header() { diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index 9210546a9f..4bc278fcf0 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -15,11 +15,12 @@ log_link_dir="/usr/local/taos/log" cfg_link_dir="/usr/local/taos/cfg" bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" # v1.5 jar dir -v15_java_app_dir="/usr/local/lib/taos" +#v15_java_app_dir="/usr/local/lib/taos" csudo="" if command -v sudo > /dev/null; then @@ -43,7 +44,8 @@ function clean_bin() { function clean_lib() { # Remove link ${csudo} rm -f ${lib_link_dir}/libtaos.* || : - ${csudo} rm -rf ${v15_java_app_dir} || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : + #${csudo} rm -rf ${v15_java_app_dir} || : } function clean_header() { diff --git a/packaging/tools/set_core.sh b/packaging/tools/set_core.sh new file mode 100755 index 0000000000..083b0516db --- /dev/null +++ b/packaging/tools/set_core.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# +# This file is used to set config for core when taosd crash + +set -e +# set -x + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +#ulimit -c unlimited +${csudo} sed -i '/ulimit -c unlimited/d' /etc/profile ||: +${csudo} sed -i '$a\ulimit -c unlimited' /etc/profile ||: +source /etc/profile + +${csudo} mkdir -p /coredump ||: +${csudo} sysctl -w kernel.core_pattern='/coredump/core-%e-%p' ||: +${csudo} echo '/coredump/core-%e-%p' | ${csudo} tee /proc/sys/kernel/core_pattern ||: diff --git a/src/balance/CMakeLists.txt b/src/balance/CMakeLists.txt index d5d9ba1d9c..6c26e50b87 100644 --- a/src/balance/CMakeLists.txt +++ b/src/balance/CMakeLists.txt @@ -3,11 +3,11 @@ PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/mnode/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/dnode/inc) -INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/sdb/inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/tsdb/inc) INCLUDE_DIRECTORIES(${TD_ENTERPRISE_DIR}/src/inc) INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(src SRC) IF (TD_LINUX) ADD_LIBRARY(balance ${SRC}) -ENDIF () \ No newline at end of file +ENDIF () diff --git a/src/balance/src/balance.c b/src/balance/src/balance.c index 53ed860aa1..53638f1025 100644 --- a/src/balance/src/balance.c +++ b/src/balance/src/balance.c @@ -214,8 +214,8 @@ static bool balanceCheckVgroupReady(SVgObj *pVgroup, SVnodeGid *pRmVnode) { * desc: remove one vnode from vgroup * all vnodes in vgroup should in ready state, except the balancing one **/ -static void balanceRemoveVnode(SVgObj *pVgroup) { - if (pVgroup->numOfVnodes <= 1) return; +static int32_t balanceRemoveVnode(SVgObj *pVgroup) { + if (pVgroup->numOfVnodes <= 1) return -1; 
SVnodeGid *pRmVnode = NULL; SVnodeGid *pSelVnode = NULL; @@ -258,9 +258,11 @@ static void balanceRemoveVnode(SVgObj *pVgroup) { if (!balanceCheckVgroupReady(pVgroup, pSelVnode)) { mDebug("vgId:%d, is not ready", pVgroup->vgId); + return -1; } else { mDebug("vgId:%d, is ready, discard dnode:%d", pVgroup->vgId, pSelVnode->dnodeId); balanceDiscardVnode(pVgroup, pSelVnode); + return TSDB_CODE_SUCCESS; } } @@ -407,22 +409,22 @@ static int32_t balanceMonitorVgroups() { int32_t dbReplica = pVgroup->pDb->cfg.replications; int32_t vgReplica = pVgroup->numOfVnodes; + int32_t code = -1; if (vgReplica > dbReplica) { mInfo("vgId:%d, replica:%d numOfVnodes:%d, try remove one vnode", pVgroup->vgId, dbReplica, vgReplica); hasUpdatingVgroup = true; - balanceRemoveVnode(pVgroup); + code = balanceRemoveVnode(pVgroup); } else if (vgReplica < dbReplica) { mInfo("vgId:%d, replica:%d numOfVnodes:%d, try add one vnode", pVgroup->vgId, dbReplica, vgReplica); hasUpdatingVgroup = true; - int32_t code = balanceAddVnode(pVgroup, NULL, NULL); - if (code == TSDB_CODE_SUCCESS) { - mnodeDecVgroupRef(pVgroup); - break; - } + code = balanceAddVnode(pVgroup, NULL, NULL); } mnodeDecVgroupRef(pVgroup); + if (code == TSDB_CODE_SUCCESS) { + break; + } } sdbFreeIter(pIter); diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index b15286fe80..2ca6ba6691 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -210,7 +210,7 @@ void tscTagCondRelease(STagCond* pCond); void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo); void tscSetFreeHeatBeat(STscObj* pObj); -bool tscShouldFreeHeatBeat(SSqlObj* pHb); +bool tscShouldFreeHeartBeat(SSqlObj* pHb); bool tscShouldBeFreed(SSqlObj* pSql); STableMetaInfo* tscGetTableMetaInfoFromCmd(SSqlCmd *pCmd, int32_t subClauseIndex, int32_t tableIndex); @@ -277,6 +277,9 @@ void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRo void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp); int tscSetMgmtEpSetFromCfg(const char *first, const char *second); +bool tscSetSqlOwner(SSqlObj* pSql); +void tscClearSqlOwner(SSqlObj* pSql); + void* malloc_throw(size_t size); void* calloc_throw(size_t nmemb, size_t size); char* strdup_throw(const char* str); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index b38e6377a9..6d02bc7fbd 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -80,8 +80,9 @@ typedef struct STableMetaInfo { * 2. 
keep the vgroup index for multi-vnode insertion */ int32_t vgroupIndex; - char name[TSDB_TABLE_ID_LEN]; // (super) table name - SArray* tagColList; // SArray, involved tag columns + char name[TSDB_TABLE_FNAME_LEN]; // (super) table name + char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql + SArray* tagColList; // SArray, involved tag columns } STableMetaInfo; /* the structure for sql function in select clause */ @@ -106,7 +107,7 @@ typedef struct SColumnIndex { typedef struct SFieldSupInfo { bool visible; SExprInfo *pArithExprInfo; - SSqlExpr * pSqlExpr; + SSqlExpr *pSqlExpr; } SFieldSupInfo; typedef struct SFieldInfo { @@ -128,7 +129,7 @@ typedef struct SCond { } SCond; typedef struct SJoinNode { - char tableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; uint64_t uid; int16_t tagColId; } SJoinNode; @@ -162,7 +163,7 @@ typedef struct SParamInfo { } SParamInfo; typedef struct STableDataBlocks { - char tableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; int8_t tsSource; // where does the UNIX timestamp come from, server or client bool ordered; // if current rows are ordered or not int64_t vgId; // virtual group id @@ -255,6 +256,7 @@ typedef struct SResRec { typedef struct { int64_t numOfRows; // num of results in current retrieved + int64_t numOfRowsGroup; // num of results of current group int64_t numOfTotal; // num of total results int64_t numOfClauseTotal; // num of total result in current subclause char * pRsp; @@ -301,6 +303,7 @@ typedef struct STscObj { typedef struct SSqlObj { void *signature; + pthread_t owner; // owner of sql object, by which it is executed STscObj *pTscObj; void *pRpcCtx; void (*fp)(); @@ -419,7 +422,6 @@ char *tscGetErrorMsgPayload(SSqlCmd *pCmd); int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql); int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo); -//void tscGetResultColumnChr(SSqlRes *pRes, SFieldInfo* pFieldInfo, int32_t column); static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) { SFieldSupInfo* pInfo = (SFieldSupInfo*) TARRAY_GET_ELEM(pFieldInfo->pSupportInfo, columnIndex); diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index 5036983424..34204f96bf 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -583,7 +583,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEn return 0l; } - return (long)res; + return (jlong)res; } JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp(JNIEnv *env, jobject jobj, jlong sub, diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 759c6e5e1e..b05aef76eb 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -220,14 +220,13 @@ void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), voi if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) { tscFetchDatablockFromSubquery(pSql); } else if (pRes->completed) { - if(pCmd->command == TSDB_SQL_FETCH) { + if(pCmd->command == TSDB_SQL_FETCH || (pCmd->command >= TSDB_SQL_SERV_STATUS && pCmd->command <= TSDB_SQL_CURRENT_USER)) { if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes. 
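/*
 * Editor's note (illustrative only): the branch above services the client's asynchronous
 * fetch path. A typical application-side loop that drives it, modelled on the documented
 * async C API usage, is sketched below; fetch_cb/query_cb are placeholder names, not
 * functions added by this patch.
 */
#include <taos.h>

static void fetch_cb(void *param, TAOS_RES *res, int numOfRows) {
  if (numOfRows > 0) {
    for (int i = 0; i < numOfRows; ++i) {
      TAOS_ROW row = taos_fetch_row(res);   /* consume one row of the current block */
      (void)row;
    }
    taos_fetch_rows_a(res, fetch_cb, param); /* request the next block */
  } else {
    taos_free_result(res);                   /* 0 rows: every vnode and union subclause exhausted */
  }
}

static void query_cb(void *param, TAOS_RES *res, int code) {
  if (code == 0) {
    taos_fetch_rows_a(res, fetch_cb, param);
  } else {
    taos_free_result(res);
  }
}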
tscTryQueryNextVnode(pSql, tscAsyncQueryRowsForNextVnode); - return; } else { /* - * all available virtual node has been checked already, now we need to check - * for the next subclause queries + * all available virtual nodes in current clause has been checked already, now try the + * next one in the following union subclause */ if (pCmd->clauseIndex < pCmd->numOfClause - 1) { tscTryQueryNextClause(pSql, tscAsyncQueryRowsForNextVnode); @@ -235,11 +234,12 @@ void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), voi } /* - * 1. has reach the limitation - * 2. no remain virtual nodes to be retrieved anymore + * 1. has reach the limitation + * 2. no remain virtual nodes to be retrieved anymore */ (*pSql->fetchFp)(param, pSql, 0); } + return; } else if (pCmd->command == TSDB_SQL_RETRIEVE || pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE) { // in case of show command, return no data diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index 9c0951cdd6..5c708dffee 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -1942,11 +1942,12 @@ static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, static void do_bottom_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, int64_t ts, uint16_t type, SExtTagsInfo *pTagInfo, char *pTags, int16_t stage) { - tValuePair **pList = pInfo->res; - tVariant val = {0}; tVariantCreateFromBinary(&val, pData, tDataTypeDesc[type].nSize, type); - + + tValuePair **pList = pInfo->res; + assert(pList != NULL); + if (pInfo->num < maxLen) { if (pInfo->num == 0) { valuePairAssign(pList[pInfo->num], type, (const char*) &val.i64Key, ts, pTags, pTagInfo, stage); diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index a56a01ba02..caaaa5bc18 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -293,7 +293,7 @@ static void tscProcessCurrentDB(SSqlObj *pSql) { char db[TSDB_DB_NAME_LEN] = {0}; extractDBName(pSql->pTscObj->db, db); - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0); pExpr->resType = TSDB_DATA_TYPE_BINARY; @@ -314,7 +314,7 @@ static void tscProcessCurrentDB(SSqlObj *pSql) { static void tscProcessServerVer(SSqlObj *pSql) { const char* v = pSql->pTscObj->sversion; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0); pExpr->resType = TSDB_DATA_TYPE_BINARY; diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index b7d8a0b7b2..99c3bc4fb3 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -68,7 +68,7 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc SSqlExpr * pExpr = tscSqlExprGet(pQueryInfo, i); pCtx->aOutputBuf = - pReducer->pResultBuf->data + tscFieldInfoGetOffset(pQueryInfo, i) * pReducer->resColModel->capacity; + pReducer->pResultBuf->data + pExpr->offset * pReducer->resColModel->capacity; pCtx->order = pQueryInfo->order.order; pCtx->functionId = pExpr->functionId; @@ -142,6 +142,7 @@ static SFillColInfo* createFillColInfo(SQueryInfo* pQueryInfo) { pFillCol[i].col.bytes = pExpr->resBytes; pFillCol[i].col.type = (int8_t)pExpr->resType; + pFillCol[i].col.colId = pExpr->colInfo.colId; pFillCol[i].flag = 
pExpr->colInfo.flag; pFillCol[i].col.offset = offset; pFillCol[i].functionId = pExpr->functionId; @@ -321,6 +322,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd pReducer->finalRowSize = tscGetResRowLength(pQueryInfo->exprList); pReducer->resColModel = finalmodel; pReducer->resColModel->capacity = pReducer->nResultBufSize; + assert(pReducer->finalRowSize > 0); if (pReducer->finalRowSize > 0) { pReducer->resColModel->capacity /= pReducer->finalRowSize; @@ -328,7 +330,6 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd assert(pReducer->finalRowSize <= pReducer->rowSize); pReducer->pFinalRes = calloc(1, pReducer->rowSize * pReducer->resColModel->capacity); -// pReducer->pBufForInterpo = calloc(1, pReducer->nResultBufSize); if (pReducer->pTempBuffer == NULL || pReducer->discardData == NULL || pReducer->pResultBuf == NULL || /*pReducer->pBufForInterpo == NULL || */pReducer->pFinalRes == NULL || pReducer->prevRowOfInput == NULL) { @@ -379,20 +380,6 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd 4096, (int32_t)numOfCols, pQueryInfo->slidingTime, pQueryInfo->slidingTimeUnit, tinfo.precision, pQueryInfo->fillType, pFillCol); } - - int32_t startIndex = pQueryInfo->fieldsInfo.numOfOutput - pQueryInfo->groupbyExpr.numOfGroupCols; - - if (pQueryInfo->groupbyExpr.numOfGroupCols > 0 && pReducer->pFillInfo != NULL) { - pReducer->pFillInfo->pTags[0] = (char *)pReducer->pFillInfo->pTags + POINTER_BYTES * pQueryInfo->groupbyExpr.numOfGroupCols; - for (int32_t i = 1; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) { - SSchema *pSchema = getColumnModelSchema(pReducer->resColModel, startIndex + i - 1); - pReducer->pFillInfo->pTags[i] = pSchema->bytes + pReducer->pFillInfo->pTags[i - 1]; - } - } else { - if (pReducer->pFillInfo != NULL) { - assert(pReducer->pFillInfo->pTags == NULL); - } - } } static int32_t tscFlushTmpBufferImpl(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, @@ -835,7 +822,7 @@ void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource * } } -void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQueryInfo, SFillInfo *pFillInfo) { +void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQueryInfo, SFillInfo *pFillInfo) { // discard following dataset in the same group and reset the interpolation information STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -856,24 +843,6 @@ void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SQueryInfo tColModelAppend(pModel, pLocalReducer->discardData, pLocalReducer->prevRowOfInput, 0, 1, 1); } -// todo merge with following function -// static void reversedCopyResultToDstBuf(SQueryInfo* pQueryInfo, SSqlRes *pRes, tFilePage *pFinalDataPage) { -// -// for (int32_t i = 0; i < pQueryInfo->exprList.numOfExprs; ++i) { -// TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); -// -// int32_t offset = tscFieldInfoGetOffset(pQueryInfo, i); -// char * src = pFinalDataPage->data + (pRes->numOfRows - 1) * pField->bytes + pRes->numOfRows * offset; -// char * dst = pRes->data + pRes->numOfRows * offset; -// -// for (int32_t j = 0; j < pRes->numOfRows; ++j) { -// memcpy(dst, src, (size_t)pField->bytes); -// dst += pField->bytes; -// src -= pField->bytes; -// } -// } -//} - static void reversedCopyFromInterpolationToDstBuf(SQueryInfo *pQueryInfo, SSqlRes *pRes, tFilePage **pResPages, SLocalReducer *pLocalReducer) { assert(0); @@ 
-896,72 +865,66 @@ static void reversedCopyFromInterpolationToDstBuf(SQueryInfo *pQueryInfo, SSqlRe } } +static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer, SQueryInfo* pQueryInfo) { + assert(pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE); + + tFilePage * pBeforeFillData = pLocalReducer->pResultBuf; + + pRes->data = pLocalReducer->pFinalRes; + pRes->numOfRows = pBeforeFillData->num; + + if (pQueryInfo->limit.offset > 0) { + if (pQueryInfo->limit.offset < pRes->numOfRows) { + int32_t prevSize = (int32_t)pBeforeFillData->num; + tColModelErase(pLocalReducer->resColModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1); + + /* remove the hole in column model */ + tColModelCompact(pLocalReducer->resColModel, pBeforeFillData, prevSize); + + pRes->numOfRows -= pQueryInfo->limit.offset; + pQueryInfo->limit.offset = 0; + } else { + pQueryInfo->limit.offset -= pRes->numOfRows; + pRes->numOfRows = 0; + } + } + + pRes->numOfRowsGroup += pRes->numOfRows; + + // impose the limitation of output rows on the final result + if (pQueryInfo->limit.limit >= 0 && pRes->numOfRowsGroup > pQueryInfo->limit.limit) { + int32_t prevSize = (int32_t)pBeforeFillData->num; + int32_t overflow = (int32_t)(pRes->numOfRowsGroup - pQueryInfo->limit.limit); + assert(overflow < pRes->numOfRows); + + pRes->numOfRowsGroup = pQueryInfo->limit.limit; + pRes->numOfRows -= overflow; + pBeforeFillData->num -= overflow; + + tColModelCompact(pLocalReducer->resColModel, pBeforeFillData, prevSize); + + // set remain data to be discarded, and reset the interpolation information + savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pLocalReducer->pFillInfo); + } + + memcpy(pRes->data, pBeforeFillData->data, pRes->numOfRows * pLocalReducer->finalRowSize); + + pRes->numOfClauseTotal += pRes->numOfRows; + pBeforeFillData->num = 0; +} + /* * Note: pRes->pLocalReducer may be null, due to the fact that "tscDestroyLocalReducer" is called * by "interuptHandler" function in shell */ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneOutput) { - SSqlCmd * pCmd = &pSql->cmd; - SSqlRes * pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; - tFilePage * pFinalDataPage = pLocalReducer->pResultBuf; + tFilePage *pBeforeFillData = pLocalReducer->pResultBuf; SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SFillInfo *pFillInfo = pLocalReducer->pFillInfo; -// if (pRes->pLocalReducer != pLocalReducer) { -// /* -// * Release the SSqlObj is called, and it is int destroying function invoked by other thread. -// * However, the other thread will WAIT until current process fully completes. 
-// * Since the flag of release struct is set by doLocalReduce function -// */ -// assert(pRes->pLocalReducer == NULL); -// } - - // no interval query, no fill operation - if (pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) { - pRes->data = pLocalReducer->pFinalRes; - pRes->numOfRows = pFinalDataPage->num; - pRes->numOfClauseTotal += pRes->numOfRows; - - if (pQueryInfo->limit.offset > 0) { - if (pQueryInfo->limit.offset < pRes->numOfRows) { - int32_t prevSize = (int32_t)pFinalDataPage->num; - tColModelErase(pLocalReducer->resColModel, pFinalDataPage, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1); - - /* remove the hole in column model */ - tColModelCompact(pLocalReducer->resColModel, pFinalDataPage, prevSize); - - pRes->numOfRows -= pQueryInfo->limit.offset; - pRes->numOfClauseTotal -= pQueryInfo->limit.offset; - pQueryInfo->limit.offset = 0; - } else { - pQueryInfo->limit.offset -= pRes->numOfRows; - pRes->numOfRows = 0; - pRes->numOfClauseTotal = 0; - } - } - - if (pQueryInfo->limit.limit >= 0 && pRes->numOfClauseTotal > pQueryInfo->limit.limit) { - /* impose the limitation of output rows on the final result */ - int32_t prevSize = (int32_t)pFinalDataPage->num; - int32_t overflow = (int32_t)(pRes->numOfClauseTotal - pQueryInfo->limit.limit); - assert(overflow < pRes->numOfRows); - - pRes->numOfClauseTotal = pQueryInfo->limit.limit; - pRes->numOfRows -= overflow; - pFinalDataPage->num -= overflow; - - tColModelCompact(pLocalReducer->resColModel, pFinalDataPage, prevSize); - - /* set remain data to be discarded, and reset the interpolation information */ - savePrevRecordAndSetupInterpoInfo(pLocalReducer, pQueryInfo, pLocalReducer->pFillInfo); - } - - memcpy(pRes->data, pFinalDataPage->data, pRes->numOfRows * pLocalReducer->finalRowSize); - pFinalDataPage->num = 0; - return; - } - - SFillInfo *pFillInfo = pLocalReducer->pFillInfo; int64_t actualETime = MAX(pQueryInfo->window.skey, pQueryInfo->window.ekey); tFilePage **pResPages = malloc(POINTER_BYTES * pQueryInfo->fieldsInfo.numOfOutput); @@ -969,7 +932,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); pResPages[i] = calloc(1, sizeof(tFilePage) + pField->bytes * pLocalReducer->resColModel->capacity); } - + while (1) { int64_t newRows = taosGenerateDataBlock(pFillInfo, pResPages, pLocalReducer->resColModel->capacity); @@ -986,7 +949,6 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO pRes->data = pLocalReducer->pFinalRes; pRes->numOfRows = newRows; - pRes->numOfClauseTotal += newRows; pQueryInfo->limit.offset = 0; break; @@ -1000,7 +962,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO break; } - /* all output for current group are completed */ + // all output in current group are completed int32_t totalRemainRows = (int32_t)getFilledNumOfRes(pFillInfo, actualETime, pLocalReducer->resColModel->capacity); if (totalRemainRows <= 0) { break; @@ -1010,17 +972,16 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO } if (pRes->numOfRows > 0) { - if (pQueryInfo->limit.limit >= 0 && pRes->numOfClauseTotal > pQueryInfo->limit.limit) { - int32_t overflow = (int32_t)(pRes->numOfClauseTotal - pQueryInfo->limit.limit); - pRes->numOfRows -= overflow; + int32_t currentTotal = (int32_t)(pRes->numOfRowsGroup + pRes->numOfRows); + if (pQueryInfo->limit.limit >= 0 && currentTotal > pQueryInfo->limit.limit) { + int32_t 
overflow = (int32_t)(currentTotal - pQueryInfo->limit.limit); + + pRes->numOfRows -= overflow; assert(pRes->numOfRows >= 0); - pRes->numOfClauseTotal = pQueryInfo->limit.limit; - pFinalDataPage->num -= overflow; - /* set remain data to be discarded, and reset the interpolation information */ - savePrevRecordAndSetupInterpoInfo(pLocalReducer, pQueryInfo, pFillInfo); + savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pFillInfo); } if (pQueryInfo->order.order == TSDB_ORDER_ASC) { @@ -1032,9 +993,12 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO } else { // todo bug?? reversedCopyFromInterpolationToDstBuf(pQueryInfo, pRes, pResPages, pLocalReducer); } + + pRes->numOfRowsGroup += pRes->numOfRows; + pRes->numOfClauseTotal += pRes->numOfRows; } - pFinalDataPage->num = 0; + pBeforeFillData->num = 0; for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { taosTFree(pResPages[i]); } @@ -1227,7 +1191,10 @@ static bool saveGroupResultInfo(SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - pRes->numOfGroups += 1; + + if (pRes->numOfRowsGroup > 0) { + pRes->numOfGroups += 1; + } // the output group is limited by the slimit clause if (reachGroupResultLimit(pQueryInfo, pRes)) { @@ -1248,7 +1215,7 @@ static bool saveGroupResultInfo(SSqlObj *pSql) { * @param noMoreCurrentGroupRes * @return if current group is skipped, return false, and do NOT record it into pRes->numOfGroups */ -bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCurrentGroupRes) { +bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCurrentGroupRes) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; @@ -1266,7 +1233,12 @@ bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool no pRes->numOfRows = 0; pQueryInfo->slimit.offset -= 1; pLocalReducer->discard = !noMoreCurrentGroupRes; - + + if (pLocalReducer->discard) { + SColumnModel *pInternModel = pLocalReducer->pDesc->pColumnModel; + tColModelAppend(pInternModel, pLocalReducer->discardData, pLocalReducer->pTempBuffer->data, 0, 1, 1); + } + return false; } @@ -1277,13 +1249,21 @@ bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool no // tColModelDisplay(pLocalReducer->resColModel, pLocalReducer->pBufForInterpo, pResBuf->num, pResBuf->num); #endif - SFillInfo* pFillInfo = pLocalReducer->pFillInfo; - if (pFillInfo != NULL) { - taosFillSetStartInfo(pFillInfo, (int32_t)pResBuf->num, pQueryInfo->window.ekey); - taosFillCopyInputDataFromOneFilePage(pFillInfo, pResBuf); - } - doFillResult(pSql, pLocalReducer, noMoreCurrentGroupRes); + + // no interval query, no fill operation + if (pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) { + genFinalResWithoutFill(pRes, pLocalReducer, pQueryInfo); + } else { + SFillInfo* pFillInfo = pLocalReducer->pFillInfo; + if (pFillInfo != NULL) { + taosFillSetStartInfo(pFillInfo, (int32_t)pResBuf->num, pQueryInfo->window.ekey); + taosFillCopyInputDataFromOneFilePage(pFillInfo, pResBuf); + } + + doFillResult(pSql, pLocalReducer, noMoreCurrentGroupRes); + } + return true; } @@ -1299,7 +1279,7 @@ void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) { // static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { // In handling data in other groups, we need to reset the interpolation information for a new group data pRes->numOfRows = 0; - 
pRes->numOfClauseTotal = 0; + pRes->numOfRowsGroup = 0; SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); @@ -1363,12 +1343,12 @@ static bool doHandleLastRemainData(SSqlObj *pSql) { if ((isAllSourcesCompleted(pLocalReducer) && !pLocalReducer->hasPrevRow) || pLocalReducer->pLocalDataSrc[0] == NULL || prevGroupCompleted) { // if fillType == TSDB_FILL_NONE, return directly - if (pQueryInfo->fillType != TSDB_FILL_NONE) { + if (pQueryInfo->fillType != TSDB_FILL_NONE && + ((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) { int64_t etime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.ekey : pQueryInfo->window.skey; - assert(pFillInfo->numOfRows == 0); int32_t rows = (int32_t)getFilledNumOfRes(pFillInfo, etime, pLocalReducer->resColModel->capacity); - if (rows > 0) { // do interpo + if (rows > 0) { doFillResult(pSql, pLocalReducer, true); } } @@ -1533,7 +1513,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) { */ if ((!sameGroup && pResBuf->num > 0) || (pResBuf->num == pLocalReducer->resColModel->capacity)) { // does not belong to the same group - bool notSkipped = doGenerateFinalResults(pSql, pLocalReducer, !sameGroup); + bool notSkipped = genFinalResults(pSql, pLocalReducer, !sameGroup); // this row needs to discard, since it belongs to the group of previous if (pLocalReducer->discard && sameGroup) { @@ -1602,7 +1582,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) { } if (pLocalReducer->pResultBuf->num) { - doGenerateFinalResults(pSql, pLocalReducer, true); + genFinalResults(pSql, pLocalReducer, true); } assert(pLocalReducer->status == TSC_LOCALREDUCE_IN_PROGRESS && pRes->row == 0); diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 33c8799624..e97d6c32e0 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -989,7 +989,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) { } int validateTableName(char *tblName, int len, SSQLToken* psTblToken) { - tstrncpy(psTblToken->z, tblName, TSDB_TABLE_ID_LEN); + tstrncpy(psTblToken->z, tblName, TSDB_TABLE_FNAME_LEN); psTblToken->n = len; psTblToken->type = TK_ID; @@ -1038,7 +1038,7 @@ int tsParseInsertSql(SSqlObj *pSql) { } if (NULL == pCmd->pTableList) { - pCmd->pTableList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false); + pCmd->pTableList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); pCmd->pDataBlocks = taosArrayInit(4, POINTER_BYTES); if (NULL == pCmd->pTableList || NULL == pSql->cmd.pDataBlocks) { code = TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -1077,7 +1077,7 @@ int tsParseInsertSql(SSqlObj *pSql) { } pCmd->curSql = sToken.z; - char buf[TSDB_TABLE_ID_LEN]; + char buf[TSDB_TABLE_FNAME_LEN]; SSQLToken sTblToken; sTblToken.z = buf; // Check if the table name available or not diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 0df1c7ddc5..b996dd958a 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -613,11 +613,13 @@ int taos_stmt_execute(TAOS_STMT* stmt) { if (sql == NULL) { ret = TSDB_CODE_TSC_OUT_OF_MEMORY; } else { - taosTFree(pStmt->pSql->sqlstr); - pStmt->pSql->sqlstr = sql; - SSqlObj* pSql = taos_query((TAOS*)pStmt->taos, pStmt->pSql->sqlstr); - ret = taos_errno(pSql); - taos_free_result(pSql); + if (pStmt->pSql != NULL) { + taos_free_result(pStmt->pSql); + pStmt->pSql = NULL; + } + pStmt->pSql = taos_query((TAOS*)pStmt->taos, sql); + ret = 
taos_errno(pStmt->pSql); + free(sql); } } return ret; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index bc33aac174..e5bb516ee6 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -86,7 +86,7 @@ static int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQueryS static int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd, SQueryInfo* pQueryInfo); static int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo); static int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo); -static int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString); +static int32_t arithmeticExprToString(tSQLExpr* pExpr, char** exprString); static int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo); static int32_t validateArithmeticSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type); static int32_t validateEp(char* ep); @@ -1087,11 +1087,11 @@ int32_t setObjFullName(char* fullName, const char* account, SSQLToken* pDB, SSQL *xlen = totalLen; } - if (totalLen < TSDB_TABLE_ID_LEN) { + if (totalLen < TSDB_TABLE_FNAME_LEN) { fullName[totalLen] = 0; } - return (totalLen < TSDB_TABLE_ID_LEN) ? TSDB_CODE_SUCCESS : TSDB_CODE_TSC_INVALID_SQL; + return (totalLen < TSDB_TABLE_FNAME_LEN) ? TSDB_CODE_SUCCESS : TSDB_CODE_TSC_INVALID_SQL; } static void extractColumnNameFromString(tSQLExprItem* pItem) { @@ -1106,21 +1106,143 @@ static void extractColumnNameFromString(tSQLExprItem* pItem) { } } +static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t exprIndex, tSQLExprItem* pItem) { + const char* msg1 = "invalid column name, or illegal column type"; + const char* msg2 = "invalid arithmetic expression in select clause"; + const char* msg3 = "tag columns can not be used in arithmetic expression"; + const char* msg4 = "columns from different table mixed up in arithmetic expression"; + + // arithmetic function in select clause + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); + + SColumnList columnList = {0}; + int32_t arithmeticType = NON_ARITHMEIC_EXPR; + + if (validateArithmeticSQLExpr(pCmd, pItem->pNode, pQueryInfo, &columnList, &arithmeticType) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + int32_t tableIndex = columnList.ids[0].tableIndex; + + // todo potential data overflow + char* arithmeticExprStr = malloc(1024*1024); + char* p = arithmeticExprStr; + + if (arithmeticType == NORMAL_ARITHMETIC) { + pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; + + // all columns in arithmetic expression must belong to the same table + for (int32_t f = 1; f < columnList.num; ++f) { + if (columnList.ids[f].tableIndex != tableIndex) { + taosTFree(arithmeticExprStr); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + } + + if (arithmeticExprToString(pItem->pNode, &p) != TSDB_CODE_SUCCESS) { + taosTFree(arithmeticExprStr); + return TSDB_CODE_TSC_INVALID_SQL; + } + + // expr string is set as the parameter of function + SColumnIndex index = {.tableIndex = tableIndex}; + + SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, sizeof(double), + sizeof(double), false); + + char* name = (pItem->aliasName != NULL)? 
pItem->aliasName:arithmeticExprStr; + tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName)); + + tExprNode* pNode = NULL; + SArray* colList = taosArrayInit(10, sizeof(SColIndex)); + + int32_t ret = exprTreeFromSqlExpr(pCmd, &pNode, pItem->pNode, pQueryInfo->exprList, pQueryInfo, colList); + if (ret != TSDB_CODE_SUCCESS) { + tExprTreeDestroy(&pNode, NULL); + taosTFree(arithmeticExprStr); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + + size_t numOfNode = taosArrayGetSize(colList); + for(int32_t k = 0; k < numOfNode; ++k) { + SColIndex* pIndex = taosArrayGet(colList, k); + if (pIndex->flag == 1) { + taosTFree(arithmeticExprStr); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + } + + SBufferWriter bw = tbufInitWriter(NULL, false); + + TRY(0) { + exprTreeToBinary(&bw, pNode); + } CATCH(code) { + tbufCloseWriter(&bw); + UNUSED(code); + // TODO: other error handling + } END_TRY + + size_t len = tbufTell(&bw); + char* c = tbufGetData(&bw, true); + + // set the serialized binary string as the parameter of arithmetic expression + addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, (int32_t)len, index.tableIndex); + + insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName, pExpr); + + taosArrayDestroy(colList); + tExprTreeDestroy(&pNode, NULL); + } else { + if (arithmeticExprToString(pItem->pNode, &p) != TSDB_CODE_SUCCESS) { + taosTFree(arithmeticExprStr); + return TSDB_CODE_TSC_INVALID_SQL; + } + + columnList.num = 0; + columnList.ids[0] = (SColumnIndex) {0, 0}; + + char* name = (pItem->aliasName != NULL)? pItem->aliasName:arithmeticExprStr; + insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, name, NULL); + + int32_t slot = tscNumOfFields(pQueryInfo) - 1; + SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, slot); + + if (pInfo->pSqlExpr == NULL) { + SExprInfo* pArithExprInfo = calloc(1, sizeof(SExprInfo)); + + // arithmetic expression always return result in the format of double float + pArithExprInfo->bytes = sizeof(double); + pArithExprInfo->interBytes = sizeof(double); + pArithExprInfo->type = TSDB_DATA_TYPE_DOUBLE; + + int32_t ret = exprTreeFromSqlExpr(pCmd, &pArithExprInfo->pExpr, pItem->pNode, pQueryInfo->exprList, pQueryInfo, NULL); + if (ret != TSDB_CODE_SUCCESS) { + tExprTreeDestroy(&pArithExprInfo->pExpr, NULL); + taosTFree(arithmeticExprStr); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "invalid expression in select clause"); + } + + pInfo->pArithExprInfo = pArithExprInfo; + } + } + + taosTFree(arithmeticExprStr); + return TSDB_CODE_SUCCESS; +} + int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable) { assert(pSelection != NULL && pCmd != NULL); - const char* msg1 = "invalid column name, or illegal column type"; const char* msg2 = "functions can not be mixed up"; const char* msg3 = "not support query expression"; - const char* msg4 = "columns from different table mixed up in arithmetic expression"; const char* msg5 = "invalid function name"; SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex); - + if (pQueryInfo->colList == NULL) { pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); } - + for (int32_t i = 0; i < pSelection->nExpr; ++i) { int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); tSQLExprItem* pItem = &pSelection->a[i]; @@ -1147,96 +1269,11 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel } } else if 
(pItem->pNode->nSQLOptr >= TK_PLUS && pItem->pNode->nSQLOptr <= TK_REM) { - // arithmetic function in select clause - SColumnList columnList = {0}; - int32_t arithmeticType = NON_ARITHMEIC_EXPR; - - if (validateArithmeticSQLExpr(pCmd, pItem->pNode, pQueryInfo, &columnList, &arithmeticType) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + int32_t code = handleArithmeticExpr(pCmd, clauseIndex, i, pItem); + if (code != TSDB_CODE_SUCCESS) { + return code; } - - int32_t tableIndex = columnList.ids[0].tableIndex; - char arithmeticExprStr[1024] = {0}; - char* p = arithmeticExprStr; - - if (arithmeticType == NORMAL_ARITHMETIC) { - pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; - - // all columns in arithmetic expression must belong to the same table - for (int32_t f = 1; f < columnList.num; ++f) { - if (columnList.ids[f].tableIndex != tableIndex) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); - } - } - - if (buildArithmeticExprString(pItem->pNode, &p) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_SQL; - } - - // expr string is set as the parameter of function - SColumnIndex index = {.tableIndex = tableIndex}; - SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, - sizeof(double), sizeof(double), false); - - /* todo alias name should use the original sql string */ - char* name = (pItem->aliasName != NULL)? pItem->aliasName:arithmeticExprStr; - tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName)); - - tExprNode* pNode = NULL; - SArray* colList = taosArrayInit(10, sizeof(SColIndex)); - - int32_t ret = exprTreeFromSqlExpr(pCmd, &pNode, pItem->pNode, pQueryInfo->exprList, pQueryInfo, colList); - if (ret != TSDB_CODE_SUCCESS) { - tExprTreeDestroy(&pNode, NULL); - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "invalid arithmetic expression in select clause"); - } - - SBufferWriter bw = tbufInitWriter(NULL, false); - TRY(0) { - exprTreeToBinary(&bw, pNode); - } CATCH(code) { - tbufCloseWriter(&bw); - UNUSED(code); - // TODO: other error handling - } END_TRY - - size_t len = tbufTell(&bw); - char* c = tbufGetData(&bw, true); - - // set the serialized binary string as the parameter of arithmetic expression - addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, (int32_t)len, index.tableIndex); - - insertResultField(pQueryInfo, i, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName, pExpr); - - taosArrayDestroy(colList); - tExprTreeDestroy(&pNode, NULL); - } else { - columnList.num = 0; - columnList.ids[0] = (SColumnIndex) {0, 0}; - - insertResultField(pQueryInfo, i, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, "dummy_column", NULL); - - int32_t slot = tscNumOfFields(pQueryInfo) - 1; - SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, slot); - - if (pInfo->pSqlExpr == NULL) { - SExprInfo* pArithExprInfo = calloc(1, sizeof(SExprInfo)); - - // arithmetic expression always return result in the format of double float - pArithExprInfo->bytes = sizeof(double); - pArithExprInfo->interBytes = sizeof(double); - pArithExprInfo->type = TSDB_DATA_TYPE_DOUBLE; - - int32_t ret = exprTreeFromSqlExpr(pCmd, &pArithExprInfo->pExpr, pItem->pNode, pQueryInfo->exprList, pQueryInfo, NULL); - if (ret != TSDB_CODE_SUCCESS) { - tExprTreeDestroy(&pArithExprInfo->pExpr, NULL); - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "invalid expression in select clause"); - } - - pInfo->pArithExprInfo = pArithExprInfo; - } - } } else { /* * not support such expression @@ 
-1872,6 +1909,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col if (changeFunctionID(optr, &functionId) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } + tscInsertPrimaryTSSourceColumn(pQueryInfo, &index); + colIndex += 1; // the first column is ts pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, resultSize, false); addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double), 0); @@ -2106,13 +2145,10 @@ int32_t getTableIndexImpl(SSQLToken* pTableToken, SQueryInfo* pQueryInfo, SColum } pIndex->tableIndex = COLUMN_INDEX_INITIAL_VAL; - char tableName[TSDB_TABLE_ID_LEN] = {0}; - for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i); - extractTableName(pTableMetaInfo->name, tableName); - - if (strncasecmp(tableName, pTableToken->z, pTableToken->n) == 0 && strlen(tableName) == pTableToken->n) { + char* name = pTableMetaInfo->aliasName; + if (strncasecmp(name, pTableToken->z, pTableToken->n) == 0 && strlen(name) == pTableToken->n) { pIndex->tableIndex = i; break; } @@ -2354,7 +2390,7 @@ bool validateIpAddress(const char* ip, size_t size) { strncpy(tmp, ip, size); - in_addr_t epAddr = inet_addr(tmp); + in_addr_t epAddr = taosInetAddr(tmp); return epAddr != INADDR_NONE; } @@ -3081,14 +3117,14 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* } // todo error handle / such as and /or mixed with +/-/*/ -int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString) { +int32_t doArithmeticExprToString(tSQLExpr* pExpr, char** exprString) { tSQLExpr* pLeft = pExpr->pLeft; tSQLExpr* pRight = pExpr->pRight; *(*exprString)++ = '('; if (pLeft->nSQLOptr >= TK_PLUS && pLeft->nSQLOptr <= TK_REM) { - buildArithmeticExprString(pLeft, exprString); + doArithmeticExprToString(pLeft, exprString); } else { int32_t ret = tSQLExprNodeToString(pLeft, exprString); if (ret != TSDB_CODE_SUCCESS) { @@ -3099,7 +3135,7 @@ int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString) { optrToString(pExpr, exprString); if (pRight->nSQLOptr >= TK_PLUS && pRight->nSQLOptr <= TK_REM) { - buildArithmeticExprString(pRight, exprString); + doArithmeticExprToString(pRight, exprString); } else { int32_t ret = tSQLExprNodeToString(pRight, exprString); if (ret != TSDB_CODE_SUCCESS) { @@ -3112,6 +3148,19 @@ int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString) { return TSDB_CODE_SUCCESS; } +static int32_t arithmeticExprToString(tSQLExpr* pExpr, char** str) { + char* start = *str; + + int32_t code = doArithmeticExprToString(pExpr, str); + if (code == TSDB_CODE_SUCCESS) { // remove out the parenthesis + int32_t len = (int32_t)strlen(start); + memmove(start, start + 1, len - 2); + start[len - 2] = 0; + } + + return code; +} + static int32_t validateSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type) { if (pExpr->nSQLOptr == TK_ID) { if (*type == NON_ARITHMEIC_EXPR) { @@ -3615,7 +3664,7 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStringBuilder sb1; memset(&sb1, 0, sizeof(sb1)); taosStringBuilderAppendStringLen(&sb1, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN); - char db[TSDB_TABLE_ID_LEN] = {0}; + char db[TSDB_TABLE_FNAME_LEN] = {0}; // remove the duplicated input table names int32_t num = 0; @@ -3640,7 +3689,7 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, taosStringBuilderAppendStringLen(&sb1, 
TBNAME_LIST_SEP, 1); } - char idBuf[TSDB_TABLE_ID_LEN] = {0}; + char idBuf[TSDB_TABLE_FNAME_LEN] = {0}; int32_t xlen = (int32_t)strlen(segments[i]); SSQLToken t = {.z = segments[i], .n = xlen, .type = TK_STRING}; @@ -4345,7 +4394,8 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg16 = "only support one column"; const char* msg17 = "invalid column name"; const char* msg18 = "primary timestamp column cannot be dropped"; - + const char* msg19 = "invalid new tag name"; + SSqlCmd* pCmd = &pSql->cmd; SAlterTableSQL* pAlterSQL = pInfo->pAlterInfo; SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); @@ -4446,12 +4496,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { SSQLToken srcToken = {.z = pSrcItem->pVar.pz, .n = pSrcItem->pVar.nLen, .type = TK_STRING}; if (getColumnIndexByName(pCmd, &srcToken, pQueryInfo, &srcIndex) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_SQL; + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg17); } SSQLToken destToken = {.z = pDstItem->pVar.pz, .n = pDstItem->pVar.nLen, .type = TK_STRING}; if (getColumnIndexByName(pCmd, &destToken, pQueryInfo, &destIndex) == TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_SQL; + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg19); } char name[TSDB_COL_NAME_LEN] = {0}; @@ -5872,15 +5922,16 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { assert(pQuerySql != NULL && (pQuerySql->from == NULL || pQuerySql->from->nExpr > 0)); - const char* msg0 = "invalid table name"; - const char* msg1 = "table name too long"; - const char* msg2 = "point interpolation query needs timestamp"; - const char* msg5 = "fill only available for interval query"; - const char* msg6 = "start(end) time of query range required or time range too large"; - const char* msg7 = "illegal number of tables in from clause"; - const char* msg8 = "too many columns in selection clause"; - const char* msg9 = "TWA query requires both the start and end time"; - const char* msg10= "too many tables in from clause"; + const char* msg0 = "invalid table name"; + const char* msg1 = "table name too long"; + const char* msg2 = "point interpolation query needs timestamp"; + const char* msg5 = "fill only available for interval query"; + const char* msg6 = "start(end) time of query range required or time range too large"; + const char* msg7 = "illegal number of tables in from clause"; + const char* msg8 = "too many columns in selection clause"; + const char* msg9 = "TWA query requires both the start and end time"; + const char* msg10 = "too many tables in from clause"; + const char* msg11 = "invalid table alias name"; int32_t code = TSDB_CODE_SUCCESS; @@ -5912,18 +5963,18 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { return doLocalQueryProcess(pCmd, pQueryInfo, pQuerySql); } - if (pQuerySql->from->nExpr > TSDB_MAX_JOIN_TABLE_NUM) { + if (pQuerySql->from->nExpr > TSDB_MAX_JOIN_TABLE_NUM * 2) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); } pQueryInfo->command = TSDB_SQL_SELECT; - if (pQuerySql->from->nExpr > 2) { + if (pQuerySql->from->nExpr > 4) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg10); } // set all query tables, which are maybe more than one. 
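/*
 * Editor's note (illustrative only): with the relaxed FROM-clause parsing above, every
 * table entry now carries an alias token (identical to the table name when none is
 * written), stored in aliasName. A statement of the following shape is what that field
 * is meant to support; the database, table, and column names here are made up.
 */
#include <taos.h>

static int run_alias_join(TAOS *conn) {
  const char *sql =
      "select m1.ts, m1.current, m2.voltage "
      "from power.meters1 m1, power.meters2 m2 "
      "where m1.ts = m2.ts";
  TAOS_RES *res = taos_query(conn, sql);
  int code = taos_errno(res);   /* non-zero if the parser rejects the aliased FROM clause */
  taos_free_result(res);
  return code;
}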
- for (int32_t i = 0; i < pQuerySql->from->nExpr; ++i) { + for (int32_t i = 0; i < pQuerySql->from->nExpr; ) { tVariant* pTableItem = &pQuerySql->from->a[i].pVar; if (pTableItem->nType != TSDB_DATA_TYPE_BINARY) { @@ -5937,24 +5988,39 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0); } - if (pQueryInfo->numOfTables <= i) { // more than one table + if (pQueryInfo->numOfTables <= i/2) { // more than one table tscAddEmptyMetaInfo(pQueryInfo); } - STableMetaInfo* pMeterInfo1 = tscGetMetaInfo(pQueryInfo, i); + STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pQueryInfo, i/2); SSQLToken t = {.type = TSDB_DATA_TYPE_BINARY, .n = pTableItem->nLen, .z = pTableItem->pz}; - if (tscSetTableFullName(pMeterInfo1, &t, pSql) != TSDB_CODE_SUCCESS) { + if (tscSetTableFullName(pTableMetaInfo1, &t, pSql) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - code = tscGetTableMeta(pSql, pMeterInfo1); + tVariant* pTableItem1 = &pQuerySql->from->a[i + 1].pVar; + SSQLToken aliasName = {.z = pTableItem1->pz, .n = pTableItem1->nLen, .type = TK_STRING}; + if (tscValidateName(&aliasName) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg11); + } + + // has no table alias name + if (memcmp(pTableItem->pz, pTableItem1->pz, pTableItem1->nLen) == 0) { + extractTableName(pTableMetaInfo1->name, pTableMetaInfo1->aliasName); + } else { + tstrncpy(pTableMetaInfo1->aliasName, pTableItem1->pz, sizeof(pTableMetaInfo1->aliasName)); + } + + code = tscGetTableMeta(pSql, pTableMetaInfo1); if (code != TSDB_CODE_SUCCESS) { return code; } + + i += 2; } - assert(pQueryInfo->numOfTables == pQuerySql->from->nExpr); + assert(pQueryInfo->numOfTables == pQuerySql->from->nExpr / 2); bool isSTable = false; if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { @@ -6122,12 +6188,14 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pS if (ret != TSDB_CODE_SUCCESS) { return ret; } - + + STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta; + int32_t numOfColumns = tscGetNumOfColumns(pTableMeta); + *pExpr = calloc(1, sizeof(tExprNode)); (*pExpr)->nodeType = TSQL_NODE_COL; (*pExpr)->pSchema = calloc(1, sizeof(SSchema)); - - STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta; + SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex); *(*pExpr)->pSchema = *pSchema; @@ -6136,7 +6204,8 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pS tstrncpy(colIndex.name, pSchema->name, sizeof(colIndex.name)); colIndex.colId = pSchema->colId; colIndex.colIndex = index.columnIndex; - + colIndex.flag = (index.columnIndex >= numOfColumns)? 
1:0; + taosArrayPush(pCols, &colIndex); } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 3390710472..6b75b680b1 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -45,19 +45,27 @@ void tscSaveSubscriptionProgress(void* sub); static int32_t minMsgSize() { return tsRpcHeadSize + 100; } static void tscSetDnodeEpSet(SSqlObj* pSql, SCMVgroupInfo* pVgroupInfo) { + assert(pSql != NULL && pVgroupInfo != NULL && pVgroupInfo->numOfEps > 0); + SRpcEpSet* pEpSet = &pSql->epSet; - pEpSet->inUse = 0; - if (pVgroupInfo == NULL) { - pEpSet->numOfEps = 0; - return; - } + pEpSet->inUse = 0; + + // apply the FQDN string length check here + bool hasFqdn = false; pEpSet->numOfEps = pVgroupInfo->numOfEps; for(int32_t i = 0; i < pVgroupInfo->numOfEps; ++i) { strcpy(pEpSet->fqdn[i], pVgroupInfo->epAddr[i].fqdn); pEpSet->port[i] = pVgroupInfo->epAddr[i].port; + + if (!hasFqdn) { + hasFqdn = (strlen(pEpSet->fqdn[i]) > 0); + } } + + assert(hasFqdn); } + static void tscDumpMgmtEpSet(SRpcEpSet *epSet) { taosCorBeginRead(&tscMgmtEpSet.version); *epSet = tscMgmtEpSet.epSet; @@ -127,21 +135,6 @@ void tscPrintMgmtEp() { } } -/* - * For each management node, try twice at least in case of poor network situation. - * If the client start to connect to a non-management node from the client, and the first retry may fail due to - * the poor network quality. And then, the second retry get the response with redirection command. - * The retry will not be executed since only *two* retry is allowed in case of single management node in the cluster. - * Therefore, we need to multiply the retry times by factor of 2 to fix this problem. - */ -UNUSED_FUNC -static int32_t tscGetMgmtConnMaxRetryTimes() { - int32_t factor = 2; - SRpcEpSet dump; - tscDumpMgmtEpSet(&dump); - return dump.numOfEps * factor; -} - void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { STscObj *pObj = (STscObj *)param; if (pObj == NULL) return; @@ -178,45 +171,25 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { void tscProcessActivityTimer(void *handle, void *tmrId) { STscObj *pObj = (STscObj *)handle; - - if (pObj == NULL) return; - if (pObj->signature != pObj) return; - if (pObj->pTimer != tmrId) return; - - if (pObj->pHb == NULL) { - SSqlObj *pSql = (SSqlObj *)calloc(1, sizeof(SSqlObj)); - if (NULL == pSql) return; - - pSql->fp = tscProcessHeartBeatRsp; - - SQueryInfo *pQueryInfo = NULL; - tscGetQueryInfoDetailSafely(&pSql->cmd, 0, &pQueryInfo); - pQueryInfo->command = TSDB_SQL_HB; - - pSql->cmd.command = TSDB_SQL_HB; - if (TSDB_CODE_SUCCESS != tscAllocPayload(&(pSql->cmd), TSDB_DEFAULT_PAYLOAD_SIZE)) { - taosTFree(pSql); - return; - } - - pSql->cmd.command = TSDB_SQL_HB; - pSql->param = pObj; - pSql->pTscObj = pObj; - pSql->signature = pSql; - pObj->pHb = pSql; - tscAddSubqueryInfo(&pObj->pHb->cmd); - - tscDebug("%p pHb is allocated, pObj:%p", pObj->pHb, pObj); - } - - if (tscShouldFreeHeatBeat(pObj->pHb)) { - tscDebug("%p free HB object and release connection", pObj); - tscFreeSqlObj(pObj->pHb); - tscCloseTscObj(pObj); + if (pObj == NULL || pObj->signature != pObj) { return; } - tscProcessSql(pObj->pHb); + SSqlObj* pHB = pObj->pHb; + if (pObj->pTimer != tmrId || pHB == NULL) { + return; + } + + if (tscShouldFreeHeartBeat(pHB)) { + tscDebug("%p free HB object and release connection", pHB); + tscFreeSqlObj(pHB); + tscCloseTscObj(pObj); + } else { + int32_t code = tscProcessSql(pHB); + if (code != TSDB_CODE_SUCCESS) { + tscError("%p failed to sent HB to server, reason:%s", 
pHB, tstrerror(code)); + } + } } int tscSendMsgToServer(SSqlObj *pSql) { @@ -272,6 +245,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { return; } + pSql->pRpcCtx = NULL; // clear the rpcCtx + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); if (pQueryInfo != NULL && pQueryInfo->type == TSDB_QUERY_TYPE_FREE_RESOURCE) { tscDebug("%p sqlObj needs to be released or DB connection is closed, cmd:%d type:%d, pObj:%p signature:%p", @@ -424,21 +399,18 @@ int doProcessSql(SSqlObj *pSql) { } int tscProcessSql(SSqlObj *pSql) { - char * name = NULL; + char *name = NULL; SSqlCmd *pCmd = &pSql->cmd; - SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); STableMetaInfo *pTableMetaInfo = NULL; uint32_t type = 0; if (pQueryInfo != NULL) { pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - if (pTableMetaInfo != NULL) { - name = pTableMetaInfo->name; - } - + name = (pTableMetaInfo != NULL)? pTableMetaInfo->name:NULL; type = pQueryInfo->type; - + // while numOfTables equals to 0, it must be Heartbeat assert((pQueryInfo->numOfTables == 0 && pQueryInfo->command == TSDB_SQL_HB) || pQueryInfo->numOfTables > 0); } @@ -450,7 +422,6 @@ int tscProcessSql(SSqlObj *pSql) { return pSql->res.code; } } else if (pCmd->command < TSDB_SQL_LOCAL) { - //pSql->epSet = tscMgmtEpSet; } else { // local handler return (*tscProcessMsgRsp[pCmd->command])(pSql); @@ -597,11 +568,11 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char } else { pVgroupInfo = &pTableMeta->vgroupInfo; } - tscSetDnodeEpSet(pSql, pVgroupInfo); - if (pVgroupInfo != NULL) { - pQueryMsg->head.vgId = htonl(pVgroupInfo->vgId); - } + assert(pVgroupInfo != NULL); + + tscSetDnodeEpSet(pSql, pVgroupInfo); + pQueryMsg->head.vgId = htonl(pVgroupInfo->vgId); STableIdInfo *pTableIdInfo = (STableIdInfo *)pMsg; pTableIdInfo->tid = htonl(pTableMeta->id.tid); @@ -1460,7 +1431,7 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) { SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); if (pRes->code == TSDB_CODE_SUCCESS && pRes->numOfRows > 0) { - tscSetResultPointer(pQueryInfo, pRes); + tscCreateResPointerInfo(pRes, pQueryInfo); } pRes->row = 0; @@ -1562,7 +1533,7 @@ int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { // fill head info SMgmtHead *pMgmt = (SMgmtHead *)(pCmd->payload + tsRpcHeadSize); - memset(pMgmt->db, 0, TSDB_TABLE_ID_LEN); // server don't need the db + memset(pMgmt->db, 0, TSDB_TABLE_FNAME_LEN); // server don't need the db SCMMultiTableInfoMsg *pInfoMsg = (SCMMultiTableInfoMsg *)(pCmd->payload + tsRpcHeadSize + sizeof(SMgmtHead)); pInfoMsg->numOfTables = htonl((int32_t)pCmd->count); @@ -1603,7 +1574,7 @@ int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { //// tagLen += strlen(pQueryInfo->tagCond.tbnameCond.cond) * TSDB_NCHAR_SIZE; //// } //// -//// int32_t joinCondLen = (TSDB_TABLE_ID_LEN + sizeof(int16_t)) * 2; +//// int32_t joinCondLen = (TSDB_TABLE_FNAME_LEN + sizeof(int16_t)) * 2; //// int32_t elemSize = sizeof(SSuperTableMetaElemMsg) * pQueryInfo->numOfTables; //// //// int32_t colSize = pQueryInfo->groupbyExpr.numOfGroupCols*sizeof(SColIndex); @@ -1884,11 +1855,10 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) { for (int32_t k = 0; k < pVgroups->numOfEps; ++k) { pVgroups->epAddr[k].port = htons(pVgroups->epAddr[k].port); - } - - pMsg += size; } + + pMsg += size; } return pSql->res.code; @@ -1965,8 +1935,37 @@ int tscProcessShowRsp(SSqlObj *pSql) { 
return 0; } +static void createHBObj(STscObj* pObj) { + if (pObj->pHb != NULL) { + return; + } + + SSqlObj *pSql = (SSqlObj *)calloc(1, sizeof(SSqlObj)); + if (NULL == pSql) return; + + pSql->fp = tscProcessHeartBeatRsp; + + SQueryInfo *pQueryInfo = NULL; + tscGetQueryInfoDetailSafely(&pSql->cmd, 0, &pQueryInfo); + pQueryInfo->command = TSDB_SQL_HB; + + pSql->cmd.command = pQueryInfo->command; + if (TSDB_CODE_SUCCESS != tscAllocPayload(&(pSql->cmd), TSDB_DEFAULT_PAYLOAD_SIZE)) { + taosTFree(pSql); + return; + } + + pSql->param = pObj; + pSql->pTscObj = pObj; + pSql->signature = pSql; + pObj->pHb = pSql; + tscAddSubqueryInfo(&pObj->pHb->cmd); + + tscDebug("%p HB is allocated, pObj:%p", pObj->pHb, pObj); +} + int tscProcessConnectRsp(SSqlObj *pSql) { - char temp[TSDB_TABLE_ID_LEN * 2]; + char temp[TSDB_TABLE_FNAME_LEN * 2]; STscObj *pObj = pSql->pTscObj; SSqlRes *pRes = &pSql->res; @@ -1986,6 +1985,9 @@ int tscProcessConnectRsp(SSqlObj *pSql) { pObj->writeAuth = pConnect->writeAuth; pObj->superAuth = pConnect->superAuth; pObj->connId = htonl(pConnect->connId); + + createHBObj(pObj); + taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, pObj, tscTmr, &pObj->pTimer); return 0; @@ -2114,21 +2116,6 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) { return 0; } -int tscProcessRetrieveRspFromLocal(SSqlObj *pSql) { - SSqlRes * pRes = &pSql->res; - SSqlCmd * pCmd = &pSql->cmd; - SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); - - SRetrieveTableRsp *pRetrieve = (SRetrieveTableRsp *)pRes->pRsp; - - pRes->numOfRows = htonl(pRetrieve->numOfRows); - pRes->data = pRetrieve->data; - - tscSetResultPointer(pQueryInfo, pRes); - pRes->row = 0; - return 0; -} - void tscTableMetaCallBack(void *param, TAOS_RES *res, int code); static int32_t getTableMetaFromMgmt(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) { diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 3e5280401f..29c8aa0a56 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -141,7 +141,7 @@ static void syncConnCallback(void *param, TAOS_RES *tres, int code) { SSqlObj *pSql = (SSqlObj *) tres; assert(pSql != NULL); - sem_post(&pSql->rspSem); + tsem_post(&pSql->rspSem); } TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port) { @@ -156,7 +156,7 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha pSql->param = pSql; tscProcessSql(pSql); - sem_wait(&pSql->rspSem); + tsem_wait(&pSql->rspSem); if (pSql->res.code != TSDB_CODE_SUCCESS) { terrno = pSql->res.code; @@ -181,6 +181,7 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha return NULL; } + TAOS *taos_connect_c(const char *ip, uint8_t ipLen, const char *user, uint8_t userLen, const char *pass, uint8_t passLen, const char *db, uint8_t dbLen, uint16_t port) { char ipBuf[TSDB_EP_LEN] = {0}; @@ -215,53 +216,29 @@ void taos_close(TAOS *taos) { } if (pObj->pHb != NULL) { + if (pObj->pHb->pRpcCtx != NULL) { // wait for rsp from dnode + rpcCancelRequest(pObj->pHb->pRpcCtx); + } + tscSetFreeHeatBeat(pObj); - } else { - tscCloseTscObj(pObj); + tscFreeSqlObj(pObj->pHb); } + + tscCloseTscObj(pObj); } void waitForQueryRsp(void *param, TAOS_RES *tres, int code) { assert(tres != NULL); SSqlObj *pSql = (SSqlObj *) tres; - sem_post(&pSql->rspSem); + tsem_post(&pSql->rspSem); } static void waitForRetrieveRsp(void *param, TAOS_RES *tres, int numOfRows) { SSqlObj* pSql = (SSqlObj*) tres; - sem_post(&pSql->rspSem); + tsem_post(&pSql->rspSem); } -TAOS_RES* 
taos_query(TAOS *taos, const char *sqlstr) { - STscObj *pObj = (STscObj *)taos; - if (pObj == NULL || pObj->signature != pObj) { - terrno = TSDB_CODE_TSC_DISCONNECTED; - return NULL; - } - - int32_t sqlLen = (int32_t)strlen(sqlstr); - if (sqlLen > tsMaxSQLStringLen) { - tscError("sql string exceeds max length:%d", tsMaxSQLStringLen); - terrno = TSDB_CODE_TSC_INVALID_SQL; - return NULL; - } - - taosNotePrintTsc(sqlstr); - - SSqlObj* pSql = calloc(1, sizeof(SSqlObj)); - if (pSql == NULL) { - tscError("failed to malloc sqlObj"); - terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; - return NULL; - } - - doAsyncQuery(pObj, pSql, waitForQueryRsp, taos, sqlstr, sqlLen); - - // wait for the callback function to post the semaphore - tsem_wait(&pSql->rspSem); - return pSql; -} TAOS_RES* taos_query_c(TAOS *taos, const char *sqlstr, uint32_t sqlLen) { STscObj *pObj = (STscObj *)taos; if (pObj == NULL || pObj->signature != pObj) { @@ -274,7 +251,9 @@ TAOS_RES* taos_query_c(TAOS *taos, const char *sqlstr, uint32_t sqlLen) { terrno = TSDB_CODE_TSC_INVALID_SQL; return NULL; } - + + taosNotePrintTsc(sqlstr); + SSqlObj* pSql = calloc(1, sizeof(SSqlObj)); if (pSql == NULL) { tscError("failed to malloc sqlObj"); @@ -282,11 +261,17 @@ TAOS_RES* taos_query_c(TAOS *taos, const char *sqlstr, uint32_t sqlLen) { return NULL; } + tsem_init(&pSql->rspSem, 0, 0); doAsyncQuery(pObj, pSql, waitForQueryRsp, taos, sqlstr, sqlLen); tsem_wait(&pSql->rspSem); return pSql; } + +TAOS_RES* taos_query(TAOS *taos, const char *sqlstr) { + return taos_query_c(taos, sqlstr, (uint32_t)strlen(sqlstr)); +} + int taos_result_precision(TAOS_RES *res) { SSqlObj *pSql = (SSqlObj *)res; if (pSql == NULL || pSql->signature != pSql) return 0; @@ -422,7 +407,10 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { pCmd->command == TSDB_SQL_INSERT) { return NULL; } - + + // set the sql object owner + tscSetSqlOwner(pSql); + // current data set are exhausted, fetch more data from node if (pRes->row >= pRes->numOfRows && (pRes->completed != true || hasMoreVnodesToTry(pSql) || hasMoreClauseToTry(pSql)) && (pCmd->command == TSDB_SQL_RETRIEVE || @@ -438,10 +426,13 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { pCmd->command == TSDB_SQL_CLI_VERSION || pCmd->command == TSDB_SQL_CURRENT_USER )) { taos_fetch_rows_a(res, waitForRetrieveRsp, pSql->pTscObj); - sem_wait(&pSql->rspSem); + tsem_wait(&pSql->rspSem); } - return doSetResultRowData(pSql, true); + void* data = doSetResultRowData(pSql, true); + + tscClearSqlOwner(pSql); + return data; } int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) { @@ -509,7 +500,7 @@ int taos_select_db(TAOS *taos, const char *db) { } // send free message to vnode to free qhandle and corresponding resources in vnode -static bool tscFreeQhandleInVnode(SSqlObj* pSql) { +static bool tscKillQueryInVnode(SSqlObj* pSql) { SSqlCmd* pCmd = &pSql->cmd; SSqlRes* pRes = &pSql->res; @@ -557,16 +548,14 @@ void taos_free_result(TAOS_RES *res) { } pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE; - if (!tscFreeQhandleInVnode(pSql)) { + if (!tscKillQueryInVnode(pSql)) { tscFreeSqlObj(pSql); tscDebug("%p sqlObj is freed by app", pSql); } } -// todo should not be used in async query int taos_errno(TAOS_RES *tres) { SSqlObj *pSql = (SSqlObj *) tres; - if (pSql == NULL || pSql->signature != pSql) { return terrno; } @@ -728,7 +717,7 @@ static void asyncCallback(void *param, TAOS_RES *tres, int code) { assert(param != NULL); SSqlObj *pSql = ((SSqlObj *)param); pSql->res.code = code; - sem_post(&pSql->rspSem); + tsem_post(&pSql->rspSem); } int taos_validate_sql(TAOS 
*taos, const char *sql) { @@ -779,7 +768,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) { pSql->param = pSql; int code = tsParseSql(pSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - sem_wait(&pSql->rspSem); + tsem_wait(&pSql->rspSem); code = pSql->res.code; } if (code != TSDB_CODE_SUCCESS) { @@ -812,7 +801,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t } char *nextStr; - char tblName[TSDB_TABLE_ID_LEN]; + char tblName[TSDB_TABLE_FNAME_LEN]; int payloadLen = 0; char *pMsg = pCmd->payload; while (1) { diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index d0591de011..9dd47888d2 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -135,6 +135,14 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) { etime = pStream->stime + (etime - pStream->stime) / pStream->interval * pStream->interval; } pQueryInfo->window.ekey = etime; + if (pQueryInfo->window.skey >= pQueryInfo->window.ekey) { + int64_t timer = pStream->slidingTime; + if (pStream->precision == TSDB_TIME_PRECISION_MICRO) { + timer /= 1000l; + } + tscSetRetryTimer(pStream, pSql, timer); + return; + } } // launch stream computing in a new thread diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c index e9f2c1dc1d..76bce19668 100644 --- a/src/client/src/tscSub.c +++ b/src/client/src/tscSub.c @@ -33,7 +33,7 @@ typedef struct SSubscriptionProgress { typedef struct SSub { void * signature; char topic[32]; - sem_t sem; + tsem_t sem; int64_t lastSyncTime; int64_t lastConsumeTime; TAOS * taos; @@ -85,7 +85,7 @@ static void asyncCallback(void *param, TAOS_RES *tres, int code) { assert(param != NULL); SSub *pSub = ((SSub *)param); pSub->pSql->res.code = code; - sem_post(&pSub->sem); + tsem_post(&pSub->sem); } @@ -154,7 +154,7 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char* code = tsParseSql(pSql, false); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - sem_wait(&pSub->sem); + tsem_wait(&pSub->sem); code = pSql->res.code; } if (code != TSDB_CODE_SUCCESS) { @@ -339,7 +339,7 @@ static int tscLoadSubscriptionProgress(SSub* pSub) { fclose(fp); taosArraySort(progress, tscCompareSubscriptionProgress); - tscDebug("subscription progress loaded, %zu tables: %s", taosArrayGetSize(progress), pSub->topic); + tscDebug("subscription progress loaded, %" PRIzu " tables: %s", taosArrayGetSize(progress), pSub->topic); return 1; } @@ -405,16 +405,20 @@ TAOS_SUB *taos_subscribe(TAOS *taos, int restart, const char* topic, const char return pSub; } -void taos_free_result_imp(SSqlObj* pSql, int keepCmd); - TAOS_RES *taos_consume(TAOS_SUB *tsub) { SSub *pSub = (SSub *)tsub; if (pSub == NULL) return NULL; tscSaveSubscriptionProgress(pSub); - SSqlObj* pSql = pSub->pSql; + SSqlObj *pSql = pSub->pSql; SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + if (taosArrayGetSize(pSub->progress) > 0) { // fix crash in single tabel subscription + pQueryInfo->window.skey = ((SSubscriptionProgress*)taosArrayGet(pSub->progress, 0))->key; + } if (pSub->pTimer == NULL) { int64_t duration = taosGetTimestampMs() - pSub->lastConsumeTime; @@ -436,8 +440,6 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) { tscDebug("table synchronization completed"); } - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); - uint32_t type = pQueryInfo->type; tscFreeSqlResult(pSql); 
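Note on the sem_*/tsem_* changes in tscSql.c and tscSub.c above: the raw POSIX calls are replaced by TDengine's tsem_* wrappers so the same code builds on platforms without native POSIX semaphores, and the blocking entry points (taos_connect, taos_query, tscCreateSubscription, taos_consume) all follow the same synchronous-over-asynchronous pattern — fire the request through the async path, then block on a semaphore that the completion callback posts. A minimal stand-alone sketch of that pattern using plain POSIX semaphores; request_t, worker and run_request_sync are placeholder names, not TDengine APIs:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

typedef struct {
    sem_t done;   /* posted by the completion callback */
    int   code;   /* result code filled in by the callback */
} request_t;

/* completion callback: record the result, then wake the waiting thread */
static void on_complete(void *param, int code) {
    request_t *req = (request_t *)param;
    req->code = code;
    sem_post(&req->done);
}

/* stand-in for the asynchronous execution path (the role doAsyncQuery/tscDoQuery play in the client) */
static void *worker(void *param) {
    usleep(1000);            /* pretend to do some I/O */
    on_complete(param, 0);   /* report success */
    return NULL;
}

/* synchronous facade built on top of the async path */
static int run_request_sync(void) {
    request_t req;
    sem_init(&req.done, 0, 0);        /* semaphore starts at 0: nothing completed yet */

    pthread_t tid;
    pthread_create(&tid, NULL, worker, &req);

    sem_wait(&req.done);              /* block until on_complete() posts */
    pthread_join(tid, NULL);
    sem_destroy(&req.done);
    return req.code;
}

int main(void) {
    printf("result code: %d\n", run_request_sync());
    return 0;
}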
pRes->numOfRows = 1; @@ -445,13 +447,13 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) { pSql->cmd.command = TSDB_SQL_SELECT; pQueryInfo->type = type; - tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0)->vgroupIndex = 0; + pTableMetaInfo->vgroupIndex = 0; pSql->fp = asyncCallback; pSql->fetchFp = asyncCallback; pSql->param = pSub; tscDoQuery(pSql); - sem_wait(&pSub->sem); + tsem_wait(&pSub->sem); if (pRes->code != TSDB_CODE_SUCCESS) { continue; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 8a596d8893..75644c355c 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -362,7 +362,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { } size_t numOfCols = taosArrayGetSize(pNewQueryInfo->colList); - tscDebug("%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%zu, colList:%zu, fieldsInfo:%d, name:%s", + tscDebug("%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s", pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, taosArrayGetSize(pNewQueryInfo->exprList), numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, pTableMetaInfo->name); } @@ -522,7 +522,7 @@ static void issueTSCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* tscDebug( "%p subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, ts_comp query to retrieve timestamps, " - "numOfExpr:%zu, colList:%zu, numOfOutputFields:%d, name:%s", + "numOfExpr:%" PRIzu ", colList:%" PRIzu ", numOfOutputFields:%d, name:%s", pParent, pSql, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pQueryInfo->type, tscSqlExprNumOfExprs(pQueryInfo), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, pTableMetaInfo->name); @@ -1026,9 +1026,11 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { } SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * pQueryInfo->fieldsInfo.numOfOutput); - for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { + int32_t numOfExprs = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); + pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * numOfExprs); + + for (int32_t i = 0; i < numOfExprs; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); int32_t tableIndexOfSub = -1; @@ -1045,8 +1047,8 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { SSqlCmd* pSubCmd = &pSql->pSubs[tableIndexOfSub]->cmd; SQueryInfo* pSubQueryInfo = tscGetQueryInfoDetail(pSubCmd, 0); - size_t numOfExprs = taosArrayGetSize(pSubQueryInfo->exprList); - for (int32_t k = 0; k < numOfExprs; ++k) { + size_t numOfSubExpr = taosArrayGetSize(pSubQueryInfo->exprList); + for (int32_t k = 0; k < numOfSubExpr; ++k) { SSqlExpr* pSubExpr = tscSqlExprGet(pSubQueryInfo, k); if (pExpr->functionId == pSubExpr->functionId && pExpr->colInfo.colId == pSubExpr->colInfo.colId) { pRes->pColumnIndex[i] = (SColumnIndex){.tableIndex = tableIndexOfSub, .columnIndex = k}; @@ -1054,6 +1056,10 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { } } } + + // restore the offset value for super table query in case of final result. 
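Note on the logging changes above and below: the %zu conversions are replaced with a PRIzu macro spliced into the format string, because some of the Windows toolchains the project targets do not accept the C99 %zu length modifier for size_t. PRIzu is defined in TDengine's own OS headers, not in <inttypes.h>; the definition below only illustrates the idea, and the real per-platform expansions may differ:

#include <stdio.h>
#include <stddef.h>

/* Illustrative definition only; TDengine defines PRIzu in its own os headers
 * and the exact per-platform expansion may differ from this sketch. */
#if defined(_MSC_VER)
  #define PRIzu "Iu"     /* MSVC spelling for size_t */
#else
  #define PRIzu "zu"     /* C99 spelling */
#endif

int main(void) {
    size_t n = 42;
    /* adjacent string literals concatenate, so the macro splices into the format */
    printf("got %" PRIzu " items\n", n);
    return 0;
}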
+ tscRestoreSQLFuncForSTableQuery(pQueryInfo); + tscFieldInfoUpdateOffset(pQueryInfo); } void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { @@ -1079,7 +1085,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { if (taos_errno(pSql) != TSDB_CODE_SUCCESS) { assert(taos_errno(pSql) == code); - tscError("%p abort query, code:%d, global code:%d", pSql, code, pParentSql->res.code); + tscError("%p abort query, code:%s, global code:%s", pSql, tstrerror(code), tstrerror(pParentSql->res.code)); pParentSql->res.code = code; quitAllSubquery(pParentSql, pSupporter); @@ -1225,7 +1231,7 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter tscDebug( "%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, transfer to tid_tag query to retrieve (tableId, tags), " - "exprInfo:%zu, colList:%zu, fieldsInfo:%d, tagIndex:%d, name:%s", + "exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, tagIndex:%d, name:%s", pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo), numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, index.columnIndex, pNewQueryInfo->pTableMetaInfo[0]->name); } else { @@ -1260,7 +1266,7 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter tscDebug( "%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%u, transfer to ts_comp query to retrieve timestamps, " - "exprInfo:%zu, colList:%zu, fieldsInfo:%d, name:%s", + "exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s", pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo), numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, pNewQueryInfo->pTableMetaInfo[0]->name); } @@ -1915,7 +1921,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) { pSql->pSubs = calloc(size, POINTER_BYTES); pSql->numOfSubs = (uint16_t)size; - tscDebug("%p submit data to %zu vnode(s)", pSql, size); + tscDebug("%p submit data to %" PRIzu " vnode(s)", pSql, size); SSubqueryState *pState = calloc(1, sizeof(SSubqueryState)); pState->numOfTotal = pSql->numOfSubs; @@ -1949,7 +1955,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) { tscDebug("%p sub:%p create subObj success. 
orderOfSub:%d", pSql, pNew, numOfSub); numOfSub++; } else { - tscDebug("%p prepare submit data block failed in async insertion, vnodeIdx:%d, total:%zu, code:%s", pSql, numOfSub, + tscDebug("%p prepare submit data block failed in async insertion, vnodeIdx:%d, total:%" PRIzu ", code:%s", pSql, numOfSub, size, tstrerror(pRes->code)); goto _error; } @@ -2052,12 +2058,10 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) { } while (1) { - if (pRes->row < pRes->numOfRows) { - assert(0); - } + assert (pRes->row >= pRes->numOfRows); doBuildResFromSubqueries(pSql); - sem_post(&pSql->rspSem); + tsem_post(&pSql->rspSem); return; @@ -2083,7 +2087,7 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) { // free(pState); // // pRes->completed = true; // set query completed -// sem_post(&pSql->rspSem); +// tsem_post(&pSql->rspSem); // return; // } diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index c742db42ab..211e673754 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -158,6 +158,7 @@ void taos_init() { pthread_once(&tscinit, taos_init_imp); } void taos_cleanup() { if (tscCacheHandle != NULL) { taosCacheCleanup(tscCacheHandle); + tscCacheHandle = NULL; } if (tscQhandle != NULL) { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 1b6d18be0c..64a871ff74 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -387,7 +387,7 @@ void tscFreeSqlObj(SSqlObj* pSql) { pCmd->allocSize = 0; taosTFree(pSql->sqlstr); - sem_destroy(&pSql->rspSem); + tsem_destroy(&pSql->rspSem); free(pSql); } @@ -644,7 +644,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) { STableDataBlocks* pOneTableBlock = taosArrayGetP(pTableDataBlockList, 0); int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta); - void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false); + void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); SArray* pVnodeDataBlockList = taosArrayInit(8, POINTER_BYTES); size_t total = taosArrayGetSize(pTableDataBlockList); @@ -858,12 +858,13 @@ void tscFieldInfoCopy(SFieldInfo* dst, const SFieldInfo* src) { } TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index) { + assert(index < pFieldInfo->numOfOutput); return TARRAY_GET_ELEM(pFieldInfo->pFields, index); } int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) { SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, index); - assert(pInfo != NULL); + assert(pInfo != NULL && pInfo->pSqlExpr != NULL); return pInfo->pSqlExpr->offset; } @@ -1393,7 +1394,7 @@ void tscSetFreeHeatBeat(STscObj* pObj) { pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE; } -bool tscShouldFreeHeatBeat(SSqlObj* pHb) { +bool tscShouldFreeHeartBeat(SSqlObj* pHb) { assert(pHb == pHb->signature); SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pHb->cmd, 0); @@ -1680,6 +1681,77 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm return pNew; } +// current sql function is not direct output result, so create a dummy output field +static void doSetNewFieldInfo(SQueryInfo* pNewQueryInfo, SSqlExpr* pExpr) { + TAOS_FIELD f = {.type = (uint8_t)pExpr->resType, .bytes = pExpr->resBytes}; + tstrncpy(f.name, pExpr->aliasName, sizeof(f.name)); + + SFieldSupInfo* pInfo1 = tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, &f); + + pInfo1->pSqlExpr = pExpr; + pInfo1->visible = false; +} + +static void 
doSetSqlExprAndResultFieldInfo(SQueryInfo* pQueryInfo, SQueryInfo* pNewQueryInfo, int64_t uid) { + int32_t numOfOutput = (int32_t)tscSqlExprNumOfExprs(pNewQueryInfo); + if (numOfOutput == 0) { + return; + } + + size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); + SFieldInfo* pFieldInfo = &pQueryInfo->fieldsInfo; + + // set the field info in pNewQueryInfo object + for (int32_t i = 0; i < numOfExprs; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + + if (pExpr->uid == uid) { + if (i < pFieldInfo->numOfOutput) { + SFieldSupInfo* pInfo = tscFieldInfoGetSupp(pFieldInfo, i); + + if (pInfo->pSqlExpr != NULL) { + TAOS_FIELD* p = tscFieldInfoGetField(pFieldInfo, i); + assert(strcmp(p->name, pExpr->aliasName) == 0); + + SFieldSupInfo* pInfo1 = tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, p); + *pInfo1 = *pInfo; + } else { + assert(pInfo->pArithExprInfo != NULL); + doSetNewFieldInfo(pNewQueryInfo, pExpr); + } + } else { // it is a arithmetic column, does not have actual field for sqlExpr, so build it + doSetNewFieldInfo(pNewQueryInfo, pExpr); + } + } + } + + // make sure the the sqlExpr for each fields is correct + numOfExprs = tscSqlExprNumOfExprs(pNewQueryInfo); + + // update the pSqlExpr pointer in SFieldSupInfo according the field name + // make sure the pSqlExpr point to the correct SqlExpr in pNewQueryInfo, not SqlExpr in pQueryInfo + for (int32_t f = 0; f < pNewQueryInfo->fieldsInfo.numOfOutput; ++f) { + TAOS_FIELD* field = tscFieldInfoGetField(&pNewQueryInfo->fieldsInfo, f); + + bool matched = false; + for (int32_t k1 = 0; k1 < numOfExprs; ++k1) { + SSqlExpr* pExpr1 = tscSqlExprGet(pNewQueryInfo, k1); + + if (strcmp(field->name, pExpr1->aliasName) == 0) { // establish link according to the result field name + SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pNewQueryInfo->fieldsInfo, f); + pInfo->pSqlExpr = pExpr1; + + matched = true; + break; + } + } + + assert(matched); + } + + tscFieldInfoUpdateOffset(pNewQueryInfo); +} + SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, int32_t cmd, SSqlObj* pPrevSql) { SSqlCmd* pCmd = &pSql->cmd; SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj)); @@ -1773,49 +1845,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void uint64_t uid = pTableMetaInfo->pTableMeta->id.uid; tscSqlExprCopy(pNewQueryInfo->exprList, pQueryInfo->exprList, uid, true); - int32_t numOfOutput = (int32_t)tscSqlExprNumOfExprs(pNewQueryInfo); - - if (numOfOutput > 0) { // todo refactor to extract method - size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); - SFieldInfo* pFieldInfo = &pQueryInfo->fieldsInfo; - - for (int32_t i = 0; i < numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - - if (pExpr->uid == uid) { - TAOS_FIELD* p = tscFieldInfoGetField(pFieldInfo, i); - SFieldSupInfo* pInfo = tscFieldInfoGetSupp(pFieldInfo, i); - - SFieldSupInfo* pInfo1 = tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, p); - *pInfo1 = *pInfo; - } - } - - // make sure the the sqlExpr for each fields is correct - // todo handle the agg arithmetic expression - numOfExprs = tscSqlExprNumOfExprs(pNewQueryInfo); - - for(int32_t f = 0; f < pNewQueryInfo->fieldsInfo.numOfOutput; ++f) { - TAOS_FIELD* field = tscFieldInfoGetField(&pNewQueryInfo->fieldsInfo, f); - bool matched = false; - - for(int32_t k1 = 0; k1 < numOfExprs; ++k1) { - SSqlExpr* pExpr1 = tscSqlExprGet(pNewQueryInfo, k1); - - if (strcmp(field->name, pExpr1->aliasName) == 0) { // establish link according to the result field name - SFieldSupInfo* pInfo 
= tscFieldInfoGetSupp(&pNewQueryInfo->fieldsInfo, f); - pInfo->pSqlExpr = pExpr1; - - matched = true; - break; - } - } - - assert(matched); - } - - tscFieldInfoUpdateOffset(pNewQueryInfo); - } + doSetSqlExprAndResultFieldInfo(pQueryInfo, pNewQueryInfo, uid); pNew->fp = fp; pNew->fetchFp = fp; @@ -1864,7 +1894,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void size_t size = taosArrayGetSize(pNewQueryInfo->colList); tscDebug( - "%p new subquery:%p, tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%zu, colList:%zu," + "%p new subquery:%p, tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu "," "fieldInfo:%d, name:%s, qrang:%" PRId64 " - %" PRId64 " order:%d, limit:%" PRId64, pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo), size, pNewQueryInfo->fieldsInfo.numOfOutput, pFinalInfo->name, pNewQueryInfo->window.skey, @@ -2013,6 +2043,10 @@ bool hasMoreVnodesToTry(SSqlObj* pSql) { } int32_t numOfVgroups = pTableMetaInfo->vgroupList->numOfVgroups; + if (pTableMetaInfo->pVgroupTables != NULL) { + numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); + } + return tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && (!tscHasReachLimitation(pQueryInfo, pRes)) && (pTableMetaInfo->vgroupIndex < numOfVgroups - 1); } @@ -2107,43 +2141,6 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) { } } -//void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) { -// SFieldSupInfo* pInfo = TARRAY_GET_ELEM(pFieldInfo->pSupportInfo, columnIndex); -// assert(pInfo->pSqlExpr != NULL); -// -// int32_t type = pInfo->pSqlExpr->resType; -// int32_t bytes = pInfo->pSqlExpr->resBytes; -// -// char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row; -// -// if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) { -// int32_t realLen = varDataLen(pData); -// assert(realLen <= bytes - VARSTR_HEADER_SIZE); -// -// if (isNull(pData, type)) { -// pRes->tsrow[columnIndex] = NULL; -// } else { -// pRes->tsrow[columnIndex] = ((tstr*)pData)->data; -// } -// -// if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) { // todo refactor -// *(pData + realLen + VARSTR_HEADER_SIZE) = 0; -// } -// -// pRes->length[columnIndex] = realLen; -// } else { -// assert(bytes == tDataTypeDesc[type].nSize); -// -// if (isNull(pData, type)) { -// pRes->tsrow[columnIndex] = NULL; -// } else { -// pRes->tsrow[columnIndex] = pData; -// } -// -// pRes->length[columnIndex] = bytes; -// } -//} - void* malloc_throw(size_t size) { void* p = malloc(size); if (p == NULL) { @@ -2200,3 +2197,21 @@ int tscSetMgmtEpSetFromCfg(const char *first, const char *second) { return 0; } + +bool tscSetSqlOwner(SSqlObj* pSql) { + SSqlRes* pRes = &pSql->res; + + // set the sql object owner + uint64_t threadId = taosGetPthreadId(); + if (atomic_val_compare_exchange_64(&pSql->owner, 0, threadId) != 0) { + pRes->code = TSDB_CODE_QRY_IN_EXEC; + return false; + } + + return true; +} + +void tscClearSqlOwner(SSqlObj* pSql) { + assert(taosCheckPthreadValid(pSql->owner)); + atomic_store_64(&pSql->owner, 0); +} \ No newline at end of file diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 0be0216174..e7f40442a0 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -13,6 +13,7 @@ * along with this program. If not, see . 
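Note on the tscSetSqlOwner/tscClearSqlOwner pair added at the end of tscUtil.c above: a fetch first tries to compare-and-swap the calling thread's id into pSql->owner, and if another thread already holds it the call fails with TSDB_CODE_QRY_IN_EXEC instead of racing on the result set. A stand-alone sketch of the same guard using C11 atomics; the client uses its own atomic_val_compare_exchange_64 wrapper rather than <stdatomic.h>, and the thread id is passed in explicitly here to keep the example portable:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative object; in the client the owner field lives inside SSqlObj. */
typedef struct {
    _Atomic uint64_t owner;   /* 0 means "not owned" */
} obj_t;

/* try to claim the object for thread `tid`; fail if someone else already owns it */
static bool claim(obj_t *o, uint64_t tid) {
    uint64_t expected = 0;
    return atomic_compare_exchange_strong(&o->owner, &expected, tid);
}

static void release(obj_t *o) {
    atomic_store(&o->owner, 0);
}

int main(void) {
    obj_t o = { .owner = 0 };
    if (claim(&o, 1)) {
        /* ... safe to walk the result set here ... */
        release(&o);
        puts("claimed and released");
    } else {
        puts("object busy: concurrent retrieval rejected");
    }
    return 0;
}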
*/ #include "tdataformat.h" +#include "tulog.h" #include "talgo.h" #include "tcoding.h" #include "wchar.h" @@ -311,10 +312,14 @@ void dataColSetOffset(SDataCol *pCol, int nEle) { SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) { SDataCols *pCols = (SDataCols *)calloc(1, sizeof(SDataCols)); - if (pCols == NULL) return NULL; + if (pCols == NULL) { + uDebug("malloc failure, size:%"PRId64" failed, reason:%s", sizeof(SDataCols), strerror(errno)); + return NULL; + } pCols->cols = (SDataCol *)calloc(maxCols, sizeof(SDataCol)); if (pCols->cols == NULL) { + uDebug("malloc failure, size:%"PRId64" failed, reason:%s", sizeof(SDataCol) * maxCols, strerror(errno)); tdFreeDataCols(pCols); return NULL; } @@ -326,6 +331,7 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) { pCols->buf = malloc(pCols->bufSize); if (pCols->buf == NULL) { + uDebug("malloc failure, size:%"PRId64" failed, reason:%s", sizeof(SDataCol) * maxCols, strerror(errno)); tdFreeDataCols(pCols); return NULL; } diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 0d7df38b83..391e5f5935 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -54,7 +54,7 @@ int8_t tsDaylight = 0; char tsTimezone[TSDB_TIMEZONE_LEN] = {0}; char tsLocale[TSDB_LOCALE_LEN] = {0}; char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string -int32_t tsEnableCoreFile = 0; +int32_t tsEnableCoreFile = 1; int32_t tsMaxBinaryDisplayWidth = 30; /* @@ -1315,6 +1315,7 @@ bool taosCheckGlobalCfg() { tsDnodeShellPort = tsServerPort + TSDB_PORT_DNODESHELL; // udp[6035-6039] tcp[6035] tsDnodeDnodePort = tsServerPort + TSDB_PORT_DNODEDNODE; // udp/tcp tsSyncPort = tsServerPort + TSDB_PORT_SYNC; + tsHttpPort = tsServerPort + TSDB_PORT_HTTP; return true; } diff --git a/src/common/src/ttimezone.c b/src/common/src/ttimezone.c index edb6aea7f9..62b1e0bb5c 100644 --- a/src/common/src/ttimezone.c +++ b/src/common/src/ttimezone.c @@ -50,7 +50,7 @@ void tsSetTimeZone() { #endif #endif - int32_t tz = (-timezone * MILLISECOND_PER_SECOND) / MILLISECOND_PER_HOUR; + int32_t tz = (int32_t)((-timezone * MILLISECOND_PER_SECOND) / MILLISECOND_PER_HOUR); tz += daylight; /* diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java index 062cb63cfd..2ce39b7ee4 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java @@ -84,12 +84,17 @@ public class TSDBConnection implements Connection { } } - public TSDBSubscribe createSubscribe() throws SQLException { - if (!this.connector.isClosed()) { - return new TSDBSubscribe(this.connector); - } else { + public TSDBSubscribe subscribe(String topic, String sql, boolean restart) throws SQLException { + if (this.connector.isClosed()) { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } + + long id = this.connector.subscribe(topic, sql, restart, 0); + if (id == 0) { + throw new SQLException(TSDBConstants.WrapErrMsg("failed to create subscription")); + } + + return new TSDBSubscribe(this.connector, id); } public PreparedStatement prepareStatement(String sql) throws SQLException { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index 13fa2eda81..bab3c79089 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ 
b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -254,29 +254,29 @@ public class TSDBJNIConnector { private native int closeConnectionImp(long connection); /** - * Subscribe to a table in TSDB + * Create a subscription */ - public long subscribe(String topic, String sql, boolean restart, int period) { + long subscribe(String topic, String sql, boolean restart, int period) { return subscribeImp(this.taos, restart, topic, sql, period); } - public native long subscribeImp(long connection, boolean restart, String topic, String sql, int period); + private native long subscribeImp(long connection, boolean restart, String topic, String sql, int period); /** - * Consume a subscribed table + * Consume a subscription */ - public long consume(long subscription) { - return this.consumeImp(subscription); + long consume(long subscription) { + return this.consumeImp(subscription); } private native long consumeImp(long subscription); /** - * Unsubscribe a table + * Unsubscribe, close a subscription * * @param subscription */ - public void unsubscribe(long subscription, boolean isKeep) { + void unsubscribe(long subscription, boolean isKeep) { unsubscribeImp(subscription, isKeep); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java index 1cf024f30e..777eef53d1 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java @@ -158,9 +158,9 @@ public class TSDBStatement implements Statement { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } else if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) { // no result set is retrieved + this.connecter.freeResultSet(pSql); res = false; } - this.connecter.freeResultSet(pSql); return res; } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java index e20c6a815c..deffd9aa2a 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java @@ -22,81 +22,28 @@ import java.util.concurrent.*; public class TSDBSubscribe { private TSDBJNIConnector connecter = null; - private static ScheduledExecutorService pool; - private static Map timerTaskMap = new ConcurrentHashMap<>(); - private static Map scheduledMap = new ConcurrentHashMap(); + private long id = 0; - private static class TimerInstance { - private static final ScheduledExecutorService instance = Executors.newScheduledThreadPool(1); - } - - public static ScheduledExecutorService getTimerInstance() { - return TimerInstance.instance; - } - - public TSDBSubscribe(TSDBJNIConnector connecter) throws SQLException { + TSDBSubscribe(TSDBJNIConnector connecter, long id) throws SQLException { if (null != connecter) { this.connecter = connecter; + this.id = id; } else { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } } /** - * sync subscribe + * consume * - * @param topic - * @param sql - * @param restart - * @param period - * @throws SQLException + * @throws OperationsException, SQLException */ - public long subscribe(String topic, String sql, boolean restart, int period) throws SQLException { + public TSDBResultSet consume() throws OperationsException, SQLException { if (this.connecter.isClosed()) { throw new 
SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } - if (period < 1000) { - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.INVALID_VARIABLES)); - } - return this.connecter.subscribe(topic, sql, restart, period); - } - /** - * async subscribe - * - * @param topic - * @param sql - * @param restart - * @param period - * @param callBack - * @throws SQLException - */ - public long subscribe(String topic, String sql, boolean restart, int period, TSDBSubscribeCallBack callBack) throws SQLException { - if (this.connecter.isClosed()) { - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); - } - final long subscription = this.connecter.subscribe(topic, sql, restart, period); - if (null != callBack) { - pool = getTimerInstance(); - - TSDBTimerTask timerTask = new TSDBTimerTask(subscription, callBack); - - timerTaskMap.put(subscription, timerTask); - - ScheduledFuture scheduledFuture = pool.scheduleAtFixedRate(timerTask, 1, 1000, TimeUnit.MILLISECONDS); - scheduledMap.put(subscription, scheduledFuture); - } - return subscription; - } - - public TSDBResultSet consume(long subscription) throws OperationsException, SQLException { - if (this.connecter.isClosed()) { - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); - } - if (0 == subscription) { - throw new OperationsException("Invalid use of consume"); - } - long resultSetPointer = this.connecter.consume(subscription); + long resultSetPointer = this.connecter.consume(this.id); if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); @@ -108,77 +55,16 @@ public class TSDBSubscribe { } /** - * cancel subscribe + * close subscription * - * @param subscription - * @param isKeep + * @param keepProgress * @throws SQLException */ - public void unsubscribe(long subscription, boolean isKeep) throws SQLException { + public void close(boolean keepProgress) throws SQLException { if (this.connecter.isClosed()) { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } - - if (null != timerTaskMap.get(subscription)) { - synchronized (timerTaskMap.get(subscription)) { - while (1 == timerTaskMap.get(subscription).getState()) { - try { - Thread.sleep(10); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - timerTaskMap.get(subscription).setState(2); - if (!timerTaskMap.isEmpty() && timerTaskMap.containsKey(subscription)) { - timerTaskMap.get(subscription).cancel(); - timerTaskMap.remove(subscription); - scheduledMap.get(subscription).cancel(false); - scheduledMap.remove(subscription); - } - this.connecter.unsubscribe(subscription, isKeep); - } - } else { - this.connecter.unsubscribe(subscription, isKeep); - } - } - - class TSDBTimerTask extends TimerTask { - private long subscription; - private TSDBSubscribeCallBack callBack; - // 0: not running 1: running 2: cancel - private int state = 0; - - public TSDBTimerTask(long subscription, TSDBSubscribeCallBack callBack) { - this.subscription = subscription; - this.callBack = callBack; - } - - public int getState() { - return this.state; - } - - public void setState(int state) { - this.state = state; - } - - @Override - public void run() { - synchronized (this) { - if (2 == state) { - return; - } - - state = 1; - - try { - callBack.invoke(consume(subscription)); - } catch (Exception e) { - this.cancel(); - throw new RuntimeException(e); - } - state = 0; - } - } + 
this.connecter.unsubscribe(this.id, keepProgress); } } diff --git a/src/connector/jdbc/src/test/java/TestAsyncTSDBSubscribe.java b/src/connector/jdbc/src/test/java/TestAsyncTSDBSubscribe.java deleted file mode 100644 index 03a4761b91..0000000000 --- a/src/connector/jdbc/src/test/java/TestAsyncTSDBSubscribe.java +++ /dev/null @@ -1,90 +0,0 @@ -import com.taosdata.jdbc.*; -import org.apache.commons.lang3.StringUtils; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.util.Properties; - -public class TestAsyncTSDBSubscribe { - public static void main(String[] args) throws SQLException { - String usage = "java -cp taos-jdbcdriver-2.0.0_dev-dist.jar com.taosdata.jdbc.TSDBSubscribe -db dbName -topic topicName " + - "-tname tableName -h host"; - if (args.length < 2) { - System.err.println(usage); - return; - } - - String dbName = ""; - String tName = ""; - String host = "localhost"; - String topic = ""; - for (int i = 0; i < args.length; i++) { - if ("-db".equalsIgnoreCase(args[i]) && i < args.length - 1) { - dbName = args[++i]; - } - if ("-tname".equalsIgnoreCase(args[i]) && i < args.length - 1) { - tName = args[++i]; - } - if ("-h".equalsIgnoreCase(args[i]) && i < args.length - 1) { - host = args[++i]; - } - if ("-topic".equalsIgnoreCase(args[i]) && i < args.length - 1) { - topic = args[++i]; - } - } - if (StringUtils.isEmpty(dbName) || StringUtils.isEmpty(tName) || StringUtils.isEmpty(topic)) { - System.err.println(usage); - return; - } - - Connection connection = null; - long subscribId = 0; - try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); - Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + dbName + "?user=root&password=taosdata", properties); - String rawSql = "select * from " + tName + ";"; - TSDBSubscribe subscribe = ((TSDBConnection) connection).createSubscribe(); - subscribId = subscribe.subscribe(topic, rawSql, false, 1000, new CallBack("first")); - long subscribId2 = subscribe.subscribe("test", rawSql, false, 1000, new CallBack("second")); - int a = 0; - Thread.sleep(2000); - subscribe.unsubscribe(subscribId, true); - System.err.println("cancel subscribe"); - } catch (Exception e) { - e.printStackTrace(); - if (null != connection && !connection.isClosed()) { - connection.close(); - } - } - } - - private static class CallBack implements TSDBSubscribeCallBack { - private String name = ""; - - public CallBack(String name) { - this.name = name; - } - - @Override - public void invoke(TSDBResultSet resultSet) { - try { - while (null !=resultSet && resultSet.next()) { - System.out.print("callback_" + name + ": "); - for (int i = 1; i <= resultSet.getMetaData().getColumnCount(); i++) { - System.out.printf(i + ": " + resultSet.getString(i) + "\t"); - } - System.out.println(); - } - } catch (Exception e) { - e.printStackTrace(); - } - } - } -} diff --git a/src/connector/jdbc/src/test/java/TestTSDBSubscribe.java b/src/connector/jdbc/src/test/java/TestTSDBSubscribe.java index 598ef4bbc0..df730efa69 100644 --- a/src/connector/jdbc/src/test/java/TestTSDBSubscribe.java +++ b/src/connector/jdbc/src/test/java/TestTSDBSubscribe.java @@ 
-2,82 +2,76 @@ import com.taosdata.jdbc.TSDBConnection; import com.taosdata.jdbc.TSDBDriver; import com.taosdata.jdbc.TSDBResultSet; import com.taosdata.jdbc.TSDBSubscribe; -import org.apache.commons.lang3.StringUtils; import java.sql.Connection; import java.sql.DriverManager; import java.util.Properties; public class TestTSDBSubscribe { + + public static TSDBConnection connectTDengine(String host, String database) throws Exception { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + String cs = String.format("jdbc:TAOS://%s:0/%s?user=root&password=taosdata", host, database); + return (TSDBConnection)DriverManager.getConnection(cs, properties); + } + + + public static void main(String[] args) throws Exception { - String usage = "java -cp taos-jdbcdriver-2.0.0_dev-dist.jar com.taosdata.jdbc.TSDBSubscribe -db dbName " + - "-topic topicName -tname tableName -h host"; + String usage = "java -Djava.ext.dirs=../ TestTSDBSubscribe [-host host] <-db database> <-topic topic> <-sql sql>"; if (args.length < 2) { System.err.println(usage); return; } - String dbName = ""; - String tName = ""; - String host = "localhost"; - String topic = ""; + String host = "localhost", database = "", topic = "", sql = ""; for (int i = 0; i < args.length; i++) { if ("-db".equalsIgnoreCase(args[i]) && i < args.length - 1) { - dbName = args[++i]; - } - if ("-tname".equalsIgnoreCase(args[i]) && i < args.length - 1) { - tName = args[++i]; - } - if ("-h".equalsIgnoreCase(args[i]) && i < args.length - 1) { - host = args[++i]; + database = args[++i]; } if ("-topic".equalsIgnoreCase(args[i]) && i < args.length - 1) { topic = args[++i]; } + if ("-host".equalsIgnoreCase(args[i]) && i < args.length - 1) { + host = args[++i]; + } + if ("-sql".equalsIgnoreCase(args[i]) && i < args.length - 1) { + sql = args[++i]; + } } - if (StringUtils.isEmpty(dbName) || StringUtils.isEmpty(tName) || StringUtils.isEmpty(topic)) { - System.err.println(usage); - return; + if (database.isEmpty() || topic.isEmpty() || sql.isEmpty()) { + System.err.println(usage); + return; } - Connection connection = null; - TSDBSubscribe subscribe = null; - long subscribId = 0; + TSDBConnection connection = null; + TSDBSubscribe sub = null; try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); - Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + dbName + "?user=root&password=taosdata" - , properties); - String rawSql = "select * from " + tName + ";"; - subscribe = ((TSDBConnection) connection).createSubscribe(); - subscribId = subscribe.subscribe(topic, rawSql, false, 1000); - int a = 0; - TSDBResultSet resSet = null; - while (true) { + connection = connectTDengine(host, database); + sub = ((TSDBConnection) connection).subscribe(topic, sql, false); + + int total = 0; + while(true) { + TSDBResultSet rs = sub.consume(); + int count = 0; + while(rs.next()) { + count++; + } + total += count; + System.out.printf("%d rows 
consumed, total %d\n", count, total); Thread.sleep(900); - resSet = subscribe.consume(subscribId); - - while (resSet.next()) { - for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) { - System.out.printf(i + ": " + resSet.getString(i) + "\t"); - } - System.out.println("\n======" + a + "=========="); - } - - a++; - if (a >= 10) { - break; - } } } catch (Exception e) { e.printStackTrace(); } finally { - if (null != subscribe && 0 != subscribId) { - subscribe.unsubscribe(subscribId, true); + if (null != sub) { + sub.close(true); } if (null != connection) { connection.close(); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/AsyncSubscribeTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/AsyncSubscribeTest.java deleted file mode 100644 index c14624e683..0000000000 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/AsyncSubscribeTest.java +++ /dev/null @@ -1,100 +0,0 @@ -package com.taosdata.jdbc; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.Properties; - -import static org.junit.Assert.assertTrue; - -public class AsyncSubscribeTest extends BaseTest { - Connection connection = null; - Statement statement = null; - String dbName = "test"; - String tName = "t0"; - String host = "localhost"; - String topic = "test"; - long subscribId = 0; - - @Before - public void createDatabase() throws SQLException { - try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); - } catch (ClassNotFoundException e) { - return; - } - Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata" - , properties); - - statement = connection.createStatement(); - statement.executeUpdate("create database if not exists " + dbName); - statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)"); - long ts = System.currentTimeMillis(); - for (int i = 0; i < 2; i++) { - ts += i; - String sql = "insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")"; - statement.executeUpdate(sql); - } - } - - @Test - public void subscribe() throws Exception { - TSDBSubscribe subscribe = null; - try { - String rawSql = "select * from " + dbName + "." 
+ tName + ";"; - System.out.println(rawSql); - subscribe = ((TSDBConnection) connection).createSubscribe(); - subscribId = subscribe.subscribe(topic, rawSql, false, 1000, new CallBack("first")); - - assertTrue(subscribId > 0); - } catch (Exception e) { - e.printStackTrace(); - } - - Thread.sleep(2000); - subscribe.unsubscribe(subscribId, true); - } - - private static class CallBack implements TSDBSubscribeCallBack { - private String name = ""; - - public CallBack(String name) { - this.name = name; - } - - @Override - public void invoke(TSDBResultSet resultSet) { - try { - while (null != resultSet && resultSet.next()) { - System.out.print("callback_" + name + ": "); - for (int i = 1; i <= resultSet.getMetaData().getColumnCount(); i++) { - System.out.printf(i + ": " + resultSet.getString(i) + "\t"); - } - System.out.println(); - } - resultSet.close(); - - } catch (Exception e) { - e.printStackTrace(); - } - } - } - - @After - public void close() throws Exception { - statement.executeQuery("drop database test"); - statement.close(); - connection.close(); - Thread.sleep(10); - } -} \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java index d7f56ac468..2dc27adab7 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java @@ -49,20 +49,16 @@ public class SubscribeTest extends BaseTest { @Test public void subscribe() throws Exception { TSDBSubscribe subscribe = null; - long subscribId = 0; try { String rawSql = "select * from " + dbName + "." + tName + ";"; System.out.println(rawSql); - subscribe = ((TSDBConnection) connection).createSubscribe(); - subscribId = subscribe.subscribe(topic, rawSql, false, 1000); - - assertTrue(subscribId > 0); + subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false); int a = 0; while (true) { Thread.sleep(900); - TSDBResultSet resSet = subscribe.consume(subscribId); + TSDBResultSet resSet = subscribe.consume(); while (resSet.next()) { for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) { @@ -79,8 +75,8 @@ public class SubscribeTest extends BaseTest { } catch (Exception e) { e.printStackTrace(); } finally { - if (null != subscribe && 0 != subscribId) { - subscribe.unsubscribe(subscribId, true); + if (null != subscribe) { + subscribe.close(true); } } } diff --git a/src/dnode/inc/dnodeCheck.h b/src/dnode/inc/dnodeCheck.h new file mode 100644 index 0000000000..a4880b3c11 --- /dev/null +++ b/src/dnode/inc/dnodeCheck.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_DNODE_CHECK_H +#define TDENGINE_DNODE_CHECK_H + +#ifdef __cplusplus +extern "C" { +#endif + +int32_t dnodeInitCheck(); +void dnodeCleanupCheck(); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/dnode/src/dnodeCheck.c b/src/dnode/src/dnodeCheck.c new file mode 100644 index 0000000000..dfdc3fa53f --- /dev/null +++ b/src/dnode/src/dnodeCheck.c @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "taosdef.h" +#include "tglobal.h" +#include "mnode.h" +#include "dnodeInt.h" +#include "dnodeCheck.h" + +typedef struct { + bool enable; + char * name; + int32_t (*initFp)(); + int32_t (*startFp)(); + void (*cleanUpFp)(); + void (*stopFp)(); +} SCheckItem; + +static SCheckItem tsCheckItem[TSDB_CHECK_ITEM_MAX] = {{0}}; +int64_t tsMinFreeMemSizeForStart = 0; + +static int bindTcpPort(int port) { + int serverSocket; + struct sockaddr_in server_addr; + + if ((serverSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) { + dError("socket() fail: %s", strerror(errno)); + return -1; + } + + bzero(&server_addr, sizeof(server_addr)); + server_addr.sin_family = AF_INET; + server_addr.sin_port = htons(port); + server_addr.sin_addr.s_addr = htonl(INADDR_ANY); + + if (bind(serverSocket, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) { + dError("port:%d tcp bind() fail: %s", port, strerror(errno)); + close(serverSocket); + return -1; + } + + if (listen(serverSocket, 5) < 0) { + dError("port:%d listen() fail: %s", port, strerror(errno)); + close(serverSocket); + return -1; + } + + close(serverSocket); + return 0; +} + +static int bindUdpPort(int port) { + int serverSocket; + struct sockaddr_in server_addr; + + if ((serverSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) { + dError("socket() fail: %s", strerror(errno)); + return -1; + } + + bzero(&server_addr, sizeof(server_addr)); + server_addr.sin_family = AF_INET; + server_addr.sin_port = htons(port); + server_addr.sin_addr.s_addr = htonl(INADDR_ANY); + + if (bind(serverSocket, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) { + dError("port:%d udp bind() fail: %s", port, strerror(errno)); + close(serverSocket); + return -1; + } + + close(serverSocket); + return 0; +} + +static int dnodeCheckNetwork() { + int ret; + int startPort = tsServerPort; + + for (int port = startPort; port < startPort + 12; port++) { + ret = bindTcpPort(port); + if (0 != ret) { + dError("failed to tcp bind port %d, quit", port); + return -1; + } + ret = bindUdpPort(port); + if (0 != ret) { + dError("failed to udp bind port %d, quit", port); + return -1; + } + } + + return 0; +} + +static int dnodeCheckMem() { + float memoryUsedMB; + float memoryAvailMB; + if (true != taosGetSysMemory(&memoryUsedMB)) { + dError("failed to get system mem infomation, errno:%u, reason:%s", errno, strerror(errno)); + return -1; + } + + memoryAvailMB = (float)tsTotalMemoryMB - memoryUsedMB; + + if (memoryAvailMB < tsMinFreeMemSizeForStart) { + 
dError("free mem %f too little, quit", memoryAvailMB); + return -1; + } + + return 0; +} + +static int dnodeCheckCpu() { + // TODO: + return 0; +} + +static int dnodeCheckDisk() { + if (tsAvailDataDirGB < tsMinimalDataDirGB) { + dError("free disk size: %f GB, too little, quit", tsAvailDataDirGB); + return -1; + } + + if (tsAvailLogDirGB < tsMinimalLogDirGB) { + dError("free disk size: %f GB, too little, quit", tsAvailLogDirGB); + return -1; + } + + if (tsAvailTmpDirectorySpace < tsReservedTmpDirectorySpace) { + dError("free disk size: %f GB, too little, quit", tsAvailTmpDirectorySpace); + return -1; + } + + return 0; +} + +static int dnodeCheckOs() { + // TODO: + + return 0; +} +static int dnodeCheckAccess() { + // TODO: + + return 0; +} + +static int dnodeCheckVersion() { + // TODO: + + return 0; +} + +static int dnodeCheckDatafile() { + // TODO: + + return 0; +} + +static void dnodeAllocCheckItem() { + tsCheckItem[TSDB_CHECK_ITEM_NETWORK].enable = false; + tsCheckItem[TSDB_CHECK_ITEM_NETWORK].name = "network"; + tsCheckItem[TSDB_CHECK_ITEM_NETWORK].initFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_NETWORK].cleanUpFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_NETWORK].startFp = dnodeCheckNetwork; + tsCheckItem[TSDB_CHECK_ITEM_NETWORK].stopFp = NULL; + + tsCheckItem[TSDB_CHECK_ITEM_MEM].enable = true; + tsCheckItem[TSDB_CHECK_ITEM_MEM].name = "mem"; + tsCheckItem[TSDB_CHECK_ITEM_MEM].initFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_MEM].cleanUpFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_MEM].startFp = dnodeCheckMem; + tsCheckItem[TSDB_CHECK_ITEM_MEM].stopFp = NULL; + + tsCheckItem[TSDB_CHECK_ITEM_CPU].enable = true; + tsCheckItem[TSDB_CHECK_ITEM_CPU].name = "cpu"; + tsCheckItem[TSDB_CHECK_ITEM_CPU].initFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_CPU].cleanUpFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_CPU].startFp = dnodeCheckCpu; + tsCheckItem[TSDB_CHECK_ITEM_CPU].stopFp = NULL; + + tsCheckItem[TSDB_CHECK_ITEM_DISK].enable = true; + tsCheckItem[TSDB_CHECK_ITEM_DISK].name = "disk"; + tsCheckItem[TSDB_CHECK_ITEM_DISK].initFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_DISK].cleanUpFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_DISK].startFp = dnodeCheckDisk; + tsCheckItem[TSDB_CHECK_ITEM_DISK].stopFp = NULL; + + tsCheckItem[TSDB_CHECK_ITEM_OS].enable = true; + tsCheckItem[TSDB_CHECK_ITEM_OS].name = "os"; + tsCheckItem[TSDB_CHECK_ITEM_OS].initFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_OS].cleanUpFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_OS].startFp = dnodeCheckOs; + tsCheckItem[TSDB_CHECK_ITEM_OS].stopFp = NULL; + + tsCheckItem[TSDB_CHECK_ITEM_ACCESS].enable = true; + tsCheckItem[TSDB_CHECK_ITEM_ACCESS].name = "access"; + tsCheckItem[TSDB_CHECK_ITEM_ACCESS].initFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_ACCESS].cleanUpFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_ACCESS].startFp = dnodeCheckAccess; + tsCheckItem[TSDB_CHECK_ITEM_ACCESS].stopFp = NULL; + + tsCheckItem[TSDB_CHECK_ITEM_VERSION].enable = true; + tsCheckItem[TSDB_CHECK_ITEM_VERSION].name = "version"; + tsCheckItem[TSDB_CHECK_ITEM_VERSION].initFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_VERSION].cleanUpFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_VERSION].startFp = dnodeCheckVersion; + tsCheckItem[TSDB_CHECK_ITEM_VERSION].stopFp = NULL; + + tsCheckItem[TSDB_CHECK_ITEM_DATAFILE].enable = true; + tsCheckItem[TSDB_CHECK_ITEM_DATAFILE].name = "datafile"; + tsCheckItem[TSDB_CHECK_ITEM_DATAFILE].initFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_DATAFILE].cleanUpFp = NULL; + tsCheckItem[TSDB_CHECK_ITEM_DATAFILE].startFp = dnodeCheckDatafile; + tsCheckItem[TSDB_CHECK_ITEM_DATAFILE].stopFp = NULL; 
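Note on the new dnodeCheck module: dnodeAllocCheckItem fills a table of SCheckItem slots (name, enable flag, start/cleanup function pointers) and dnodeInitCheck, shown just below, simply walks that table and aborts startup when any enabled check fails. A stripped-down, self-contained sketch of that registry pattern; the item names and check bodies here are placeholders, not the real dnode checks:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
    bool        enable;
    const char *name;
    int       (*startFp)(void);   /* returns 0 on success */
} check_item_t;

/* placeholder checks standing in for dnodeCheckMem, dnodeCheckDisk, ... */
static int check_mem(void)  { return 0; }
static int check_disk(void) { return 0; }

static check_item_t checks[] = {
    { true,  "mem",  check_mem  },
    { true,  "disk", check_disk },
    { false, "net",  NULL       },   /* disabled items are skipped */
};

static int run_checks(void) {
    for (size_t i = 0; i < sizeof(checks) / sizeof(checks[0]); ++i) {
        if (!checks[i].enable || checks[i].startFp == NULL) continue;
        if (checks[i].startFp() != 0) {
            fprintf(stderr, "pre-flight check failed: %s\n", checks[i].name);
            return -1;
        }
        printf("check passed: %s\n", checks[i].name);
    }
    return 0;
}

int main(void) { return run_checks(); }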
+} + +void dnodeCleanupCheck() { + for (ECheckItemType index = 0; index < TSDB_CHECK_ITEM_MAX; ++index) { + if (tsCheckItem[index].enable && tsCheckItem[index].stopFp) { + (*tsCheckItem[index].stopFp)(); + } + if (tsCheckItem[index].cleanUpFp) { + (*tsCheckItem[index].cleanUpFp)(); + } + } +} + +int32_t dnodeInitCheck() { + dnodeAllocCheckItem(); + + for (ECheckItemType index = 0; index < TSDB_CHECK_ITEM_MAX; ++index) { + if (tsCheckItem[index].initFp) { + if ((*tsCheckItem[index].initFp)() != 0) { + dError("failed to init check item:%s", tsCheckItem[index].name); + return -1; + } + } + } + + for (ECheckItemType index = 0; index < TSDB_CHECK_ITEM_MAX; ++index) { + if (tsCheckItem[index].enable && tsCheckItem[index].startFp) { + if ((*tsCheckItem[index].startFp)() != 0) { + dError("failed to check item:%s", tsCheckItem[index].name); + exit(-1); + } + } + } + + return 0; +} + + + diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 96f8f43265..dded10bd1c 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -24,6 +24,7 @@ #include "dnodeMgmt.h" #include "dnodePeer.h" #include "dnodeModule.h" +#include "dnodeCheck.h" #include "dnodeVRead.h" #include "dnodeVWrite.h" #include "dnodeMRead.h" @@ -48,6 +49,7 @@ typedef struct { } SDnodeComponent; static const SDnodeComponent tsDnodeComponents[] = { + {"check", dnodeInitCheck, dnodeCleanupCheck}, // NOTES: dnodeInitCheck must be first component !!! {"storage", dnodeInitStorage, dnodeCleanupStorage}, {"vread", dnodeInitVnodeRead, dnodeCleanupVnodeRead}, {"vwrite", dnodeInitVnodeWrite, dnodeCleanupVnodeWrite}, diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c index 2519684878..543e1c9639 100644 --- a/src/dnode/src/dnodeSystem.c +++ b/src/dnode/src/dnodeSystem.c @@ -22,7 +22,7 @@ #include "dnodeMain.h" static void signal_handler(int32_t signum, siginfo_t *sigInfo, void *context); -static sem_t exitSem; +static tsem_t exitSem; int32_t main(int32_t argc, char *argv[]) { // Set global configuration file @@ -88,7 +88,7 @@ int32_t main(int32_t argc, char *argv[]) { #endif } - if (sem_init(&exitSem, 0, 0) != 0) { + if (tsem_init(&exitSem, 0, 0) != 0) { printf("failed to create exit semphore\n"); exit(EXIT_FAILURE); } @@ -117,7 +117,7 @@ int32_t main(int32_t argc, char *argv[]) { syslog(LOG_INFO, "Started TDengine service successfully."); - for (int res = sem_wait(&exitSem); res != 0; res = sem_wait(&exitSem)) { + for (int res = tsem_wait(&exitSem); res != 0; res = tsem_wait(&exitSem)) { if (res != EINTR) { syslog(LOG_ERR, "failed to wait exit semphore: %d", res); break; @@ -157,5 +157,5 @@ static void signal_handler(int32_t signum, siginfo_t *sigInfo, void *context) { sigaction(SIGUSR2, &act, NULL); // inform main thread to exit - sem_post(&exitSem); + tsem_post(&exitSem); } diff --git a/src/dnode/src/dnodeTelemetry.c b/src/dnode/src/dnodeTelemetry.c index 892fd1d903..8ed4a9518b 100644 --- a/src/dnode/src/dnodeTelemetry.c +++ b/src/dnode/src/dnodeTelemetry.c @@ -36,7 +36,7 @@ #include "dnodeInt.h" #include "dnodeTelemetry.h" -static sem_t tsExitSem; +static tsem_t tsExitSem; static pthread_t tsTelemetryThread; #define TELEMETRY_SERVER "telemetry.taosdata.com" @@ -266,7 +266,7 @@ int32_t dnodeInitTelemetry() { return 0; } - if (sem_init(&tsExitSem, 0, 0) == -1) { + if (tsem_init(&tsExitSem, 0, 0) == -1) { // just log the error, it is ok for telemetry to fail dTrace("failed to create semaphore for telemetry, reason:%s", strerror(errno)); return 0; @@ -291,8 +291,8 @@ void dnodeCleanupTelemetry() { } 
if (tsTelemetryThread) { - sem_post(&tsExitSem); + tsem_post(&tsExitSem); pthread_join(tsTelemetryThread, NULL); - sem_destroy(&tsExitSem); + tsem_destroy(&tsExitSem); } } diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c index cb53bb5e60..d66ebf9772 100644 --- a/src/dnode/src/dnodeVRead.c +++ b/src/dnode/src/dnodeVRead.c @@ -202,16 +202,17 @@ static void *dnodeProcessReadQueue(void *param) { break; } - dDebug("%p, msg:%s will be processed in vread queue, qtype:%d", pReadMsg->rpcMsg.ahandle, - taosMsg[pReadMsg->rpcMsg.msgType], type); + dDebug("%p, msg:%s will be processed in vread queue, qtype:%d, msg:%p", pReadMsg->rpcMsg.ahandle, + taosMsg[pReadMsg->rpcMsg.msgType], type, pReadMsg); + int32_t code = vnodeProcessRead(pVnode, pReadMsg); if (type == TAOS_QTYPE_RPC && code != TSDB_CODE_QRY_NOT_READY) { dnodeSendRpcReadRsp(pVnode, pReadMsg, code); } else { if (code == TSDB_CODE_QRY_HAS_RSP) { - dnodeSendRpcReadRsp(pVnode, pReadMsg, TSDB_CODE_SUCCESS); - } else { + dnodeSendRpcReadRsp(pVnode, pReadMsg, pReadMsg->rpcMsg.code); + } else { // code == TSDB_CODE_NOT_READY, do not return msg to client dnodeDispatchNonRspMsg(pVnode, pReadMsg, code); } } diff --git a/src/inc/query.h b/src/inc/query.h index ec1e458b62..0c18f85dc3 100644 --- a/src/inc/query.h +++ b/src/inc/query.h @@ -76,6 +76,9 @@ void* qGetResultRetrieveMsg(qinfo_t qinfo); */ int32_t qKillQuery(qinfo_t qinfo); +int32_t qQueryCompleted(qinfo_t qinfo); + + /** * destroy query info structure * @param qHandle diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index ff05c840da..3dea8da18a 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -232,7 +232,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size); #define TSDB_NODE_NAME_LEN 64 #define TSDB_TABLE_NAME_LEN 193 // it is a null-terminated string #define TSDB_DB_NAME_LEN 33 -#define TSDB_TABLE_ID_LEN (TSDB_ACCT_LEN + TSDB_DB_NAME_LEN + TSDB_TABLE_NAME_LEN) +#define TSDB_TABLE_FNAME_LEN (TSDB_ACCT_LEN + TSDB_DB_NAME_LEN + TSDB_TABLE_NAME_LEN) #define TSDB_COL_NAME_LEN 65 #define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 64 #define TSDB_MAX_SQL_LEN TSDB_PAYLOAD_SIZE @@ -242,6 +242,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size); #define TSDB_MAX_BYTES_PER_ROW 16384 #define TSDB_MAX_TAGS_LEN 16384 #define TSDB_MAX_TAGS 128 +#define TSDB_MAX_TAG_CONDITIONS 1024 #define TSDB_AUTH_LEN 16 #define TSDB_KEY_LEN 16 @@ -393,6 +394,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size); #define TSDB_PORT_DNODESHELL 0 #define TSDB_PORT_DNODEDNODE 5 #define TSDB_PORT_SYNC 10 +#define TSDB_PORT_HTTP 11 #define TAOS_QTYPE_RPC 0 #define TAOS_QTYPE_FWD 1 @@ -416,6 +418,19 @@ typedef enum { TSDB_MOD_MAX } EModuleType; + typedef enum { + TSDB_CHECK_ITEM_NETWORK, + TSDB_CHECK_ITEM_MEM, + TSDB_CHECK_ITEM_CPU, + TSDB_CHECK_ITEM_DISK, + TSDB_CHECK_ITEM_OS, + TSDB_CHECK_ITEM_ACCESS, + TSDB_CHECK_ITEM_VERSION, + TSDB_CHECK_ITEM_DATAFILE, + TSDB_CHECK_ITEM_MAX + } ECheckItemType; + + #ifdef __cplusplus } #endif diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index ece08ae173..9af4cee28a 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -219,6 +219,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_DUP_JOIN_KEY, 0, 0x0705, "Duplicated TAOS_DEFINE_ERROR(TSDB_CODE_QRY_EXCEED_TAGS_LIMIT, 0, 0x0706, "Tag conditon too many") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_NOT_READY, 0, 0x0707, "Query not ready") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_HAS_RSP, 0, 0x0708, "Query should response") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_IN_EXEC, 0, 
0x0709, "Multiple retrieval of this query") // grant TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, 0, 0x0800, "License expired") diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 6d43568ecd..265bf47d6d 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -246,13 +246,13 @@ typedef struct { uint64_t uid; uint64_t superTableUid; uint64_t createdTime; - char tableId[TSDB_TABLE_ID_LEN]; - char superTableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; + char superTableId[TSDB_TABLE_FNAME_LEN]; char data[]; } SMDCreateTableMsg; typedef struct { - char tableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN]; int8_t igExists; int8_t getMeta; @@ -265,12 +265,12 @@ typedef struct { } SCMCreateTableMsg; typedef struct { - char tableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; int8_t igNotExists; } SCMDropTableMsg; typedef struct { - char tableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN]; int16_t type; /* operation type */ int16_t numOfCols; /* number of schema */ @@ -297,7 +297,7 @@ typedef struct { typedef struct { char clientVersion[TSDB_VERSION_LEN]; char msgVersion[TSDB_VERSION_LEN]; - char db[TSDB_TABLE_ID_LEN]; + char db[TSDB_TABLE_FNAME_LEN]; } SCMConnectMsg; typedef struct { @@ -347,14 +347,14 @@ typedef struct { int32_t vgId; int32_t sid; uint64_t uid; - char tableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; } SMDDropTableMsg; typedef struct { int32_t contLen; int32_t vgId; uint64_t uid; - char tableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; } SMDDropSTableMsg; typedef struct { @@ -527,7 +527,7 @@ typedef struct { } SCMCreateDbMsg, SCMAlterDbMsg; typedef struct { - char db[TSDB_TABLE_ID_LEN]; + char db[TSDB_TABLE_FNAME_LEN]; uint8_t ignoreNotExists; } SCMDropDbMsg, SCMUseDbMsg; @@ -637,7 +637,7 @@ typedef struct { } SMDCreateVnodeMsg, SMDAlterVnodeMsg; typedef struct { - char tableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; int16_t createFlag; char tags[]; } SCMTableInfoMsg; @@ -664,7 +664,7 @@ typedef struct { typedef struct STableMetaMsg { int32_t contLen; - char tableId[TSDB_TABLE_ID_LEN]; // table id + char tableId[TSDB_TABLE_FNAME_LEN]; // table id uint8_t numOfTags; uint8_t precision; uint8_t tableType; @@ -685,7 +685,7 @@ typedef struct SMultiTableMeta { typedef struct { int32_t dataLen; - char name[TSDB_TABLE_ID_LEN]; + char name[TSDB_TABLE_FNAME_LEN]; char data[TSDB_MAX_TAGS_LEN + TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * TSDB_MAX_TAGS]; } STagData; @@ -771,7 +771,7 @@ typedef struct { uint64_t uid; uint64_t stime; // stream starting time int32_t status; - char tableId[TSDB_TABLE_ID_LEN]; + char tableId[TSDB_TABLE_FNAME_LEN]; } SMDAlterStreamMsg; typedef struct { diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index bab17322f0..4776d1cda7 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -167,9 +167,14 @@ typedef struct SDataBlockInfo { } SDataBlockInfo; typedef struct { - size_t numOfTables; + void *pTable; + TSKEY lastKey; +} STableKeyInfo; + +typedef struct { + size_t numOfTables; SArray *pGroupList; - SHashObj *map; // speedup acquire the tableQueryInfo from STableId + SHashObj *map; // speedup acquire the tableQueryInfo by table uid } STableGroupInfo; /** @@ -177,24 +182,24 @@ typedef struct { * * @param tsdb tsdb handle * @param pCond query condition, including time window, result set order, and basic required columns for each block - * @param tableqinfoGroupInfo tableId list in the 
form of set, seperated into different groups according to group by condition + * @param tableInfoGroup table object list in the form of set, grouped into different sets according to the + * group by condition * @param qinfo query info handle from query processor * @return */ -TsdbQueryHandleT *tsdbQueryTables(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableqinfoGroupInfo, void *qinfo); +TsdbQueryHandleT *tsdbQueryTables(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfoGroup, void *qinfo); /** * Get the last row of the given query time window for all the tables in STableGroupInfo object. * Note that only one data block with only row will be returned while invoking retrieve data block function for * all tables in this group. * - * @param tsdb tsdb handle - * @param pCond query condition, including time window, result set order, and basic required columns for each - * block - * @param tableqinfoGroupInfo tableId list. + * @param tsdb tsdb handle + * @param pCond query condition, including time window, result set order, and basic required columns for each block + * @param tableInfo table list. * @return */ -TsdbQueryHandleT tsdbQueryLastRow(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableqinfoGroupInfo, void *qinfo); +TsdbQueryHandleT tsdbQueryLastRow(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfo, void *qinfo); /** * get the queried table object list @@ -260,7 +265,7 @@ SArray *tsdbRetrieveDataBlock(TsdbQueryHandleT *pQueryHandle, SArray *pColumnIdL * @param stableid. super table sid * @param pTagCond. tag query condition */ -int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T *tsdb, uint64_t uid, const char *pTagCond, size_t len, +int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T *tsdb, uint64_t uid, TSKEY key, const char *pTagCond, size_t len, int16_t tagNameRelType, const char *tbnameCond, STableGroupInfo *pGroupList, SColIndex *pColIndex, int32_t numOfCols); @@ -278,7 +283,7 @@ void tsdbDestroyTableGroup(STableGroupInfo *pGroupList); * @param pGroupInfo the generated result * @return */ -int32_t tsdbGetOneTableGroup(TSDB_REPO_T *tsdb, uint64_t uid, STableGroupInfo *pGroupInfo); +int32_t tsdbGetOneTableGroup(TSDB_REPO_T *tsdb, uint64_t uid, TSKEY startKey, STableGroupInfo *pGroupInfo); /** * diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 750335a037..692b5e49a0 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -128,6 +128,9 @@ static int32_t shellRunSingleCommand(TAOS *con, char *command) { if (regex_match(command, "^[ \t]*(quit|q|exit)[ \t;]*$", REG_EXTENDED | REG_ICASE)) { taos_close(con); write_history(); +#ifdef WINDOWS + exit(EXIT_SUCCESS); +#endif return -1; } @@ -307,7 +310,7 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { if (error_no == 0) { printf("Query OK, %d row(s) in set (%.6fs)\n", numOfRows, (et - st) / 1E6); } else { - printf("Query interrupted (%s), %d row(s) in set (%.6fs)\n", taos_errstr(con), numOfRows, (et - st) / 1E6); + printf("Query interrupted (%s), %d row(s) in set (%.6fs)\n", taos_errstr(pSql), numOfRows, (et - st) / 1E6); } } else { int num_rows_affacted = taos_affected_rows(pSql); @@ -368,6 +371,18 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { tt = (time_t)(val / 1000); } +/* comment out as it make testcases like select_with_tags.sim fail. + but in windows, this may cause the call to localtime crash if tt < 0, + need to find a better solution. 
+ if (tt < 0) { + tt = 0; + } + */ + +#ifdef WINDOWS + if (tt < 0) tt = 0; +#endif + struct tm* ptm = localtime(&tt); size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm); @@ -736,7 +751,9 @@ void read_history() { FILE *f = fopen(f_history, "r"); if (f == NULL) { - fprintf(stderr, "Opening file %s\n", f_history); +#ifndef WINDOWS + fprintf(stderr, "Failed to open file %s\n", f_history); +#endif return; } @@ -761,7 +778,9 @@ void write_history() { FILE *f = fopen(f_history, "w"); if (f == NULL) { - fprintf(stderr, "Opening file %s\n", f_history); +#ifndef WINDOWS + fprintf(stderr, "Failed to open file %s for write\n", f_history); +#endif return; } diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index 7297f23931..279d3b9cdd 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -222,6 +222,6 @@ void *shellLoopQuery(void *arg) { return NULL; } -void get_history_path(char *history) { sprintf(history, "%s/%s", ".", HISTORY_FILE); } +void get_history_path(char *history) { sprintf(history, "C:/TDengine/%s", HISTORY_FILE); } void exitShell() { exit(EXIT_SUCCESS); } diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 9d46ac5055..192cb3145c 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -250,7 +250,7 @@ typedef struct DemoArguments { static struct argp argp = {options, parse_opt, 0, 0}; void parse_args(int argc, char *argv[], SDemoArguments *arguments) { - argp_parse(&argp, argc, argv, 0, 0, &arguments); + argp_parse(&argp, argc, argv, 0, 0, arguments); if (arguments->abort) { #ifndef _ALPINE error(10, 0, "ABORTED"); @@ -306,7 +306,7 @@ typedef struct DemoArguments { printf("%s%s\n", indent, "-R"); printf("%s%s%s\n", indent, indent, "rate, Out of order data's rate--if order=1 Default 10, min: 0, max: 50."); printf("%s%s\n", indent, "-D"); - printf("%s%s%s\n", indent, indent, "Delete data methods——0: don't delete, 1: delete by table, 2: delete by stable, 3: delete by database."); + printf("%s%s%s\n", indent, indent, "Delete data methods 0: don't delete, 1: delete by table, 2: delete by stable, 3: delete by database."); } void parse_args(int argc, char *argv[], SDemoArguments *arguments) { @@ -440,9 +440,9 @@ typedef struct { char* cols; bool use_metric; - sem_t mutex_sem; + tsem_t mutex_sem; int notFinished; - sem_t lock_sem; + tsem_t lock_sem; } info; typedef struct { @@ -459,9 +459,9 @@ typedef struct { int data_of_order; int data_of_rate; - sem_t *mutex_sem; - int *notFinished; - sem_t *lock_sem; + tsem_t *mutex_sem; + int *notFinished; + tsem_t *lock_sem; } sTable; /* ******************************* Global @@ -729,9 +729,9 @@ int main(int argc, char *argv[]) { t_info->end_table_id = i < b ? 
last + a : last + a - 1; last = t_info->end_table_id + 1; - sem_init(&(t_info->mutex_sem), 0, 1); + tsem_init(&(t_info->mutex_sem), 0, 1); t_info->notFinished = t_info->end_table_id - t_info->start_table_id + 1; - sem_init(&(t_info->lock_sem), 0, 0); + tsem_init(&(t_info->lock_sem), 0, 0); if (query_mode == SYNC) { pthread_create(pids + i, NULL, syncWrite, t_info); @@ -762,8 +762,8 @@ int main(int argc, char *argv[]) { for (int i = 0; i < threads; i++) { info *t_info = infos + i; taos_close(t_info->taos); - sem_destroy(&(t_info->mutex_sem)); - sem_destroy(&(t_info->lock_sem)); + tsem_destroy(&(t_info->mutex_sem)); + tsem_destroy(&(t_info->lock_sem)); } free(pids); @@ -1021,8 +1021,8 @@ void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntable for (int i = 0; i < threads; i++) { info *t_info = infos + i; - sem_destroy(&(t_info->mutex_sem)); - sem_destroy(&(t_info->lock_sem)); + tsem_destroy(&(t_info->mutex_sem)); + tsem_destroy(&(t_info->lock_sem)); } free(pids); @@ -1272,7 +1272,7 @@ void *asyncWrite(void *sarg) { taos_query_a(winfo->taos, "show databases", callBack, tb_info); } - sem_wait(&(winfo->lock_sem)); + tsem_wait(&(winfo->lock_sem)); free(tb_infos); return NULL; @@ -1292,10 +1292,10 @@ void callBack(void *param, TAOS_RES *res, int code) { // If finished; if (tb_info->counter >= tb_info->target) { - sem_wait(tb_info->mutex_sem); + tsem_wait(tb_info->mutex_sem); (*(tb_info->notFinished))--; - if (*(tb_info->notFinished) == 0) sem_post(tb_info->lock_sem); - sem_post(tb_info->mutex_sem); + if (*(tb_info->notFinished) == 0) tsem_post(tb_info->lock_sem); + tsem_post(tb_info->mutex_sem); return; } diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c index 7f9be7eb54..f6249986fd 100644 --- a/src/mnode/src/mnodeDb.c +++ b/src/mnode/src/mnodeDb.c @@ -193,7 +193,7 @@ void mnodeDecDbRef(SDbObj *pDb) { } SDbObj *mnodeGetDbByTableId(char *tableId) { - char db[TSDB_TABLE_ID_LEN], *pos; + char db[TSDB_TABLE_FNAME_LEN], *pos; // tableId format should be : acct.db.table pos = strstr(tableId, TS_PATH_DELIMITER); @@ -1046,7 +1046,7 @@ static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg) { if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(pDrop->db); if (pMsg->pDb == NULL) { if (pDrop->ignoreNotExists) { - mDebug("db:%s, db is not exist, think drop success", pDrop->db); + mDebug("db:%s, db is not exist, treat as success", pDrop->db); return TSDB_CODE_SUCCESS; } else { mError("db:%s, failed to drop, invalid db", pDrop->db); diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index 353dd59671..01a824baa7 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -35,8 +35,8 @@ #include "mnodeVgroup.h" #include "mnodeWrite.h" -#define CONN_KEEP_TIME (tsShellActivityTimer * 3) -#define CONN_CHECK_TIME (tsShellActivityTimer * 2) +#define CONN_KEEP_TIME (tsShellActivityTimer * 3000) +#define CONN_CHECK_TIME (tsShellActivityTimer * 2000) #define QUERY_ID_SIZE 20 #define QUERY_STREAM_SAVE_SIZE 20 @@ -73,13 +73,13 @@ int32_t mnodeInitProfile() { void mnodeCleanupProfile() { if (tsMnodeConnCache != NULL) { - mInfo("conn cache is cleanup"); taosCacheCleanup(tsMnodeConnCache); tsMnodeConnCache = NULL; } } SConnObj *mnodeCreateConn(char *user, uint32_t ip, uint16_t port) { +#if 0 int32_t connSize = taosHashGetSize(tsMnodeConnCache->pHashTable); if (connSize > tsMaxShellConns) { mError("failed to create conn for user:%s ip:%s:%u, conns:%d larger than maxShellConns:%d, ", user, taosIpStr(ip), @@ -87,6 +87,7 @@ SConnObj *mnodeCreateConn(char 
*user, uint32_t ip, uint16_t port) { terrno = TSDB_CODE_MND_TOO_MANY_SHELL_CONNS; return NULL; } +#endif int32_t connId = atomic_add_fetch_32(&tsConnIndex, 1); if (connId == 0) atomic_add_fetch_32(&tsConnIndex, 1); @@ -100,7 +101,7 @@ SConnObj *mnodeCreateConn(char *user, uint32_t ip, uint16_t port) { tstrncpy(connObj.user, user, sizeof(connObj.user)); SConnObj *pConn = taosCachePut(tsMnodeConnCache, &connId, sizeof(int32_t), &connObj, sizeof(connObj), CONN_KEEP_TIME); - + mDebug("connId:%d, is created, user:%s ip:%s:%u", connId, user, taosIpStr(ip), port); return pConn; } @@ -112,7 +113,7 @@ void mnodeReleaseConn(SConnObj *pConn) { SConnObj *mnodeAccquireConn(int32_t connId, char *user, uint32_t ip, uint16_t port) { uint64_t expireTime = CONN_KEEP_TIME * 1000 + (uint64_t)taosGetTimestampMs(); - SConnObj *pConn = taosCacheUpdateExpireTimeByName(tsMnodeConnCache, &connId, sizeof(int32_t), expireTime); + SConnObj *pConn = taosCacheAcquireByKey(tsMnodeConnCache, &connId, sizeof(int32_t)); if (pConn == NULL) { mDebug("connId:%d, is already destroyed, user:%s ip:%s:%u", connId, user, taosIpStr(ip), port); return NULL; diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index efca748e9f..3651aa8aad 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -874,7 +874,7 @@ void *sdbOpenTable(SSdbTableDesc *pDesc) { if (pTable->keyType == SDB_KEY_STRING || pTable->keyType == SDB_KEY_VAR_STRING) { hashFp = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); } - pTable->iHandle = taosHashInit(pTable->hashSessions, hashFp, true); + pTable->iHandle = taosHashInit(pTable->hashSessions, hashFp, true, true); tsSdbObj.numOfTables++; tsSdbObj.tableList[pTable->tableId] = pTable; diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index 995bfbe840..8a84b66a33 100644 --- a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -302,7 +302,7 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) { SAcctObj *pAcct = pUser->pAcct; if (pConnectMsg->db[0]) { - char dbName[TSDB_TABLE_ID_LEN * 3] = {0}; + char dbName[TSDB_TABLE_FNAME_LEN * 3] = {0}; sprintf(dbName, "%x%s%s", pAcct->acctId, TS_PATH_DELIMITER, pConnectMsg->db); SDbObj *pDb = mnodeGetDb(dbName); if (pDb == NULL) { @@ -377,7 +377,8 @@ static bool mnodeCheckShowFinished(SShowObj *pShow) { } static bool mnodeAccquireShowObj(SShowObj *pShow) { - SShowObj **ppShow = taosCacheAcquireByKey(tsMnodeShowCache, &pShow, sizeof(int64_t)); + uint64_t handleVal = (uint64_t)pShow; + SShowObj **ppShow = taosCacheAcquireByKey(tsMnodeShowCache, &handleVal, sizeof(int64_t)); if (ppShow) { mDebug("%p, show is accquired from cache, data:%p, index:%d", pShow, ppShow, pShow->index); return true; @@ -389,7 +390,8 @@ static bool mnodeAccquireShowObj(SShowObj *pShow) { static void* mnodePutShowObj(SShowObj *pShow) { if (tsMnodeShowCache != NULL) { pShow->index = atomic_add_fetch_32(&tsShowObjIndex, 1); - SShowObj **ppShow = taosCachePut(tsMnodeShowCache, &pShow, sizeof(int64_t), &pShow, sizeof(int64_t), 6); + uint64_t handleVal = (uint64_t)pShow; + SShowObj **ppShow = taosCachePut(tsMnodeShowCache, &handleVal, sizeof(int64_t), &pShow, sizeof(int64_t), 6000); pShow->ppShow = (void**)ppShow; mDebug("%p, show is put into cache, data:%p index:%d", pShow, ppShow, pShow->index); return pShow; diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 7b928fe885..dbc1bffa5c 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -215,7 +215,7 @@ static int32_t mnodeChildTableActionEncode(SSdbOper 
*pOper) { assert(pTable != NULL && pOper->rowData != NULL); int32_t len = strlen(pTable->info.tableId); - if (len >= TSDB_TABLE_ID_LEN) return TSDB_CODE_MND_INVALID_TABLE_ID; + if (len >= TSDB_TABLE_FNAME_LEN) return TSDB_CODE_MND_INVALID_TABLE_ID; memcpy(pOper->rowData, pTable->info.tableId, len); memset(pOper->rowData + len, 0, 1); @@ -246,7 +246,7 @@ static int32_t mnodeChildTableActionDecode(SSdbOper *pOper) { if (pTable == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY; int32_t len = strlen(pOper->rowData); - if (len >= TSDB_TABLE_ID_LEN) { + if (len >= TSDB_TABLE_FNAME_LEN) { free(pTable); return TSDB_CODE_MND_INVALID_TABLE_ID; } @@ -348,7 +348,7 @@ static int32_t mnodeInitChildTables() { .tableId = SDB_TABLE_CTABLE, .tableName = "ctables", .hashSessions = TSDB_DEFAULT_CTABLES_HASH_SIZE, - .maxRowSize = sizeof(SChildTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_ID_LEN + TSDB_CQ_SQL_SIZE, + .maxRowSize = sizeof(SChildTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_FNAME_LEN + TSDB_CQ_SQL_SIZE, .refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj, .keyType = SDB_KEY_VAR_STRING, .insertFp = mnodeChildTableActionInsert, @@ -387,7 +387,7 @@ static void mnodeAddTableIntoStable(SSuperTableObj *pStable, SChildTableObj *pCt atomic_add_fetch_32(&pStable->numOfTables, 1); if (pStable->vgHash == NULL) { - pStable->vgHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false); + pStable->vgHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, false); } if (pStable->vgHash != NULL) { @@ -479,7 +479,7 @@ static int32_t mnodeSuperTableActionEncode(SSdbOper *pOper) { assert(pOper->pObj != NULL && pOper->rowData != NULL); int32_t len = strlen(pStable->info.tableId); - if (len >= TSDB_TABLE_ID_LEN) len = TSDB_CODE_MND_INVALID_TABLE_ID; + if (len >= TSDB_TABLE_FNAME_LEN) len = TSDB_CODE_MND_INVALID_TABLE_ID; memcpy(pOper->rowData, pStable->info.tableId, len); memset(pOper->rowData + len, 0, 1); @@ -503,7 +503,7 @@ static int32_t mnodeSuperTableActionDecode(SSdbOper *pOper) { if (pStable == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY; int32_t len = strlen(pOper->rowData); - if (len >= TSDB_TABLE_ID_LEN){ + if (len >= TSDB_TABLE_FNAME_LEN){ free(pStable); return TSDB_CODE_MND_INVALID_TABLE_ID; } @@ -539,7 +539,7 @@ static int32_t mnodeInitSuperTables() { .tableId = SDB_TABLE_STABLE, .tableName = "stables", .hashSessions = TSDB_DEFAULT_STABLES_HASH_SIZE, - .maxRowSize = sizeof(SSuperTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_ID_LEN, + .maxRowSize = sizeof(SSuperTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_FNAME_LEN, .refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj, .keyType = SDB_KEY_VAR_STRING, .insertFp = mnodeSuperTableActionInsert, @@ -751,7 +751,7 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) { if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pDrop->tableId); if (pMsg->pTable == NULL) { if (pDrop->igNotExists) { - mDebug("app:%p:%p, table:%s, table is not exist, think drop success", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); + mDebug("app:%p:%p, table:%s, table is not exist, treat as success", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); return TSDB_CODE_SUCCESS; } else { mError("app:%p:%p, table:%s, failed to drop table, table not exist", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId); @@ -1464,7 +1464,7 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) { // reserve 
space int32_t contLen = sizeof(SCMSTableVgroupRspMsg) + 32 * sizeof(SCMVgroupInfo) + sizeof(SVgroupsInfo); for (int32_t i = 0; i < numOfTable; ++i) { - char *stableName = (char*)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_ID_LEN) * i; + char *stableName = (char*)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_FNAME_LEN) * i; SSuperTableObj *pTable = mnodeGetSuperTable(stableName); if (pTable != NULL && pTable->vgHash != NULL) { contLen += (taosHashGetSize(pTable->vgHash) * sizeof(SCMVgroupInfo) + sizeof(SVgroupsInfo)); @@ -1481,7 +1481,7 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) { char *msg = (char *)pRsp + sizeof(SCMSTableVgroupRspMsg); for (int32_t i = 0; i < numOfTable; ++i) { - char * stableName = (char *)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_ID_LEN)*i; + char * stableName = (char *)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_FNAME_LEN)*i; SSuperTableObj *pTable = mnodeGetSuperTable(stableName); if (pTable == NULL) { mError("app:%p:%p, stable:%s, not exist while get stable vgroup info", pMsg->rpcMsg.ahandle, pMsg, stableName); @@ -1828,7 +1828,7 @@ static int32_t mnodeSendDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) { return TSDB_CODE_MND_OUT_OF_MEMORY; } - tstrncpy(pDrop->tableId, pTable->info.tableId, TSDB_TABLE_ID_LEN); + tstrncpy(pDrop->tableId, pTable->info.tableId, TSDB_TABLE_FNAME_LEN); pDrop->vgId = htonl(pTable->vgId); pDrop->contLen = htonl(sizeof(SMDDropTableMsg)); pDrop->sid = htonl(pTable->sid); @@ -2079,7 +2079,7 @@ static int32_t mnodeDoGetChildTableMeta(SMnodeMsg *pMsg, STableMetaMsg *pMeta) { pMeta->sid = htonl(pTable->sid); pMeta->precision = pDb->cfg.precision; pMeta->tableType = pTable->info.type; - tstrncpy(pMeta->tableId, pTable->info.tableId, TSDB_TABLE_ID_LEN); + tstrncpy(pMeta->tableId, pTable->info.tableId, TSDB_TABLE_FNAME_LEN); if (pTable->info.type == TSDB_CHILD_TABLE) { pMeta->sversion = htons(pTable->superTable->sversion); @@ -2448,7 +2448,7 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { pMultiMeta->numOfTables = 0; for (int32_t t = 0; t < pInfo->numOfTables; ++t) { - char * tableId = (char *)(pInfo->tableIds + t * TSDB_TABLE_ID_LEN); + char * tableId = (char *)(pInfo->tableIds + t * TSDB_TABLE_FNAME_LEN); SChildTableObj *pTable = mnodeGetChildTable(tableId); if (pTable == NULL) continue; diff --git a/src/os/CMakeLists.txt b/src/os/CMakeLists.txt index b4ad4ad915..4e44d29a02 100644 --- a/src/os/CMakeLists.txt +++ b/src/os/CMakeLists.txt @@ -1,15 +1,11 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) -IF (TD_LINUX_64) - ADD_SUBDIRECTORY(src/linux64) -ELSEIF (TD_LINUX_32) - ADD_SUBDIRECTORY(src/linux32) -ELSEIF (TD_DARWIN_64) +IF (TD_LINUX) + ADD_SUBDIRECTORY(src/linux) +ELSEIF (TD_DARWIN) ADD_SUBDIRECTORY(src/darwin) -ELSEIF (TD_WINDOWS_64) - ADD_SUBDIRECTORY(src/windows) -ELSEIF (TD_WINDOWS_32) +ELSEIF (TD_WINDOWS) ADD_SUBDIRECTORY(src/windows) ENDIF () diff --git a/src/os/inc/os.h b/src/os/inc/os.h index 700b29ce98..4953416bde 100644 --- a/src/os/inc/os.h +++ b/src/os/inc/os.h @@ -24,6 +24,10 @@ extern "C" { #include "osDarwin.h" #endif +#ifdef _TD_ARM_64_ +#include "osArm64.h" +#endif + #ifdef _TD_LINUX_64 #include "osLinux64.h" #endif @@ -41,6 +45,7 @@ extern "C" { #endif #include "osAtomic.h" +#include "osCommon.h" #include "osDef.h" #include "osDir.h" #include "osFile.h" diff --git a/src/os/inc/osArm64.h b/src/os/inc/osArm64.h new file mode 100644 index 0000000000..3ae08b45f4 --- /dev/null +++ b/src/os/inc/osArm64.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2019 TAOS Data, 
Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_OS_ARM64_H +#define TDENGINE_OS_ARM64_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/os/inc/osCommon.h b/src/os/inc/osCommon.h new file mode 100644 index 0000000000..70d2b2c0c2 --- /dev/null +++ b/src/os/inc/osCommon.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_OS_COMMON_H +#define TDENGINE_OS_COMMON_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef TAOS_OS_DEF_ZU + #define PRIzu "zu" +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/os/inc/osSemphone.h b/src/os/inc/osSemphone.h index c4fc98988a..fd88d2d798 100644 --- a/src/os/inc/osSemphone.h +++ b/src/os/inc/osSemphone.h @@ -32,6 +32,7 @@ extern "C" { bool taosCheckPthreadValid(pthread_t thread); int64_t taosGetPthreadId(); void taosResetPthread(pthread_t *thread); +bool taosComparePthread(pthread_t first, pthread_t second); #ifdef __cplusplus } diff --git a/src/os/inc/osSocket.h b/src/os/inc/osSocket.h index 58f95c3c2d..ecc69ec3d3 100644 --- a/src/os/inc/osSocket.h +++ b/src/os/inc/osSocket.h @@ -65,6 +65,10 @@ void taosBlockSIGPIPE(); // TAOS_OS_FUNC_SOCKET_SETSOCKETOPT int taosSetSockOpt(SOCKET socketfd, int level, int optname, void *optval, int optlen); +// TAOS_OS_FUNC_SOCKET_INET +uint32_t taosInetAddr(char *ipAddr); +const char *taosInetNtoa(struct in_addr ipInt); + #ifdef __cplusplus } #endif diff --git a/src/os/inc/osWindows.h b/src/os/inc/osWindows.h index caab61536e..994e3b991c 100644 --- a/src/os/inc/osWindows.h +++ b/src/os/inc/osWindows.h @@ -97,6 +97,9 @@ typedef SOCKET eventfd_t; #define TAOS_OS_DEF_EPOLL #define TAOS_EPOLL_WAIT_TIME 100 +#define TAOS_OS_DEF_ZU + #define PRIzu "ld" + #define TAOS_OS_FUNC_STRING_WCHAR int twcslen(const wchar_t *wcs); #define TAOS_OS_FUNC_STRING_GETLINE @@ -161,9 +164,25 @@ int gettimeofday(struct timeval *ptv, void *pTimeZone); #define MSG_NOSIGNAL 0 #define SO_NO_CHECK 0x1234 #define SOL_TCP 0x1234 -#define TCP_KEEPCNT 0x1234 -#define TCP_KEEPIDLE 0x1234 -#define TCP_KEEPINTVL 0x1234 + +#ifndef TCP_KEEPCNT + #define TCP_KEEPCNT 0x1234 +#endif + +#ifndef TCP_KEEPIDLE + #define TCP_KEEPIDLE 0x1234 +#endif + +#ifndef TCP_KEEPINTVL + #define TCP_KEEPINTVL 0x1234 +#endif + +#ifdef _MSC_VER +#if _MSC_VER >= 1900 + #define TAOS_OS_FUNC_SOCKET_INET +#endif +#endif + #define SHUT_RDWR SD_BOTH #define SHUT_RD SD_RECEIVE #define SHUT_WR SD_SEND diff --git a/src/os/src/detail/osMemory.c b/src/os/src/detail/osMemory.c index 3bbe806369..dfd320be89 100644 --- a/src/os/src/detail/osMemory.c +++ b/src/os/src/detail/osMemory.c @@ -42,7 +42,7 @@ static bool random_alloc_fail(size_t size, const char* file, uint32_t line) { } if (fpAllocLog != NULL) { - fprintf(fpAllocLog, "%s:%d: memory allocation of %zu bytes will fail.\n", file, line, size); + fprintf(fpAllocLog, "%s:%d: memory allocation of %" PRIzu " bytes will fail.\n", file, line, size); } return true; @@ -159,7 +159,7 @@ static void* malloc_detect_leak(size_t size, const char* file, uint32_t line) { } if (size > UINT32_MAX && fpAllocLog != NULL) { - fprintf(fpAllocLog, "%s:%d: size too large: %zu.\n", file, line, size); + fprintf(fpAllocLog, "%s:%d: size too large: %" PRIzu ".\n", file, line, size); } blk->file = file; @@ -207,7 +207,7 @@ static void* realloc_detect_leak(void* ptr, size_t size, const char* file, uint3 } if (size > UINT32_MAX && fpAllocLog != NULL) { - fprintf(fpAllocLog, "%s:%d: size too large: %zu.\n", file, line, size); + fprintf(fpAllocLog, "%s:%d: size too large: %" PRIzu ".\n", file, line, size); } blk = (SMemBlock*)p; @@ -295,7 +295,7 @@ static void dump_memory_leak() { atomic_store_ptr(&lock, 0); - fprintf(fpAllocLog, "\nnumber of blocks: %zu, total bytes: %zu\n", numOfBlk, totalSize); + fprintf(fpAllocLog, "\nnumber of blocks: %" PRIzu ", total bytes: %" PRIzu "\n", numOfBlk, totalSize); fflush(fpAllocLog); } diff --git 
a/src/os/src/detail/osSemphone.c b/src/os/src/detail/osSemphone.c index 1f1ef268c6..74f8859029 100644 --- a/src/os/src/detail/osSemphone.c +++ b/src/os/src/detail/osSemphone.c @@ -21,5 +21,6 @@ bool taosCheckPthreadValid(pthread_t thread) { return thread != 0; } int64_t taosGetPthreadId() { return (int64_t)pthread_self(); } void taosResetPthread(pthread_t *thread) { *thread = 0; } +bool taosComparePthread(pthread_t first, pthread_t second) { return first == second; } #endif \ No newline at end of file diff --git a/src/os/src/detail/osSocket.c b/src/os/src/detail/osSocket.c index c8ad6a5acc..8a51c389e9 100644 --- a/src/os/src/detail/osSocket.c +++ b/src/os/src/detail/osSocket.c @@ -57,4 +57,16 @@ int taosSetSockOpt(SOCKET socketfd, int level, int optname, void *optval, int op return setsockopt(socketfd, level, optname, optval, (socklen_t)optlen); } +#endif + +#ifndef TAOS_OS_FUNC_SOCKET_INET + +uint32_t taosInetAddr(char *ipAddr) { + return inet_addr(ipAddr); +} + +const char *taosInetNtoa(struct in_addr ipInt) { + return inet_ntoa(ipInt); +} + #endif \ No newline at end of file diff --git a/src/os/src/detail/osTime.c b/src/os/src/detail/osTime.c index ced1643d2b..57634e468a 100644 --- a/src/os/src/detail/osTime.c +++ b/src/os/src/detail/osTime.c @@ -61,8 +61,15 @@ int64_t user_mktime64(const unsigned int year0, const unsigned int mon0, res = res*24; res = ((res + hour) * 60 + min) * 60 + sec; +#ifdef _MSC_VER +#if _MSC_VER >= 1900 + int64_t timezone = _timezone; +#endif +#endif + return (res + timezone); } + // ==== mktime() kernel code =================// static int64_t m_deltaUtc = 0; void deltaToUtcInitOnce() { diff --git a/src/os/src/linux64/CMakeLists.txt b/src/os/src/linux/CMakeLists.txt similarity index 100% rename from src/os/src/linux64/CMakeLists.txt rename to src/os/src/linux/CMakeLists.txt diff --git a/src/os/src/linux64/linuxEnv.c b/src/os/src/linux/linuxEnv.c similarity index 100% rename from src/os/src/linux64/linuxEnv.c rename to src/os/src/linux/linuxEnv.c diff --git a/src/os/src/windows/w64Atomic.c b/src/os/src/windows/w64Atomic.c index 0425f4ed3f..9fc3eae672 100644 --- a/src/os/src/windows/w64Atomic.c +++ b/src/os/src/windows/w64Atomic.c @@ -43,7 +43,11 @@ long interlocked_add_fetch_32(long volatile* ptr, long val) { } __int64 interlocked_add_fetch_64(__int64 volatile* ptr, __int64 val) { +#ifdef _WIN64 return _InterlockedExchangeAdd64(ptr, val) + val; +#else + return _InterlockedExchangeAdd(ptr, val) + val; +#endif } // and diff --git a/src/os/src/windows/w64Dir.c b/src/os/src/windows/w64Dir.c index 7816dac0d6..c486cd0d40 100644 --- a/src/os/src/windows/w64Dir.c +++ b/src/os/src/windows/w64Dir.c @@ -23,7 +23,7 @@ void taosRemoveDir(char *rootDir) { int taosMkDir(const char *path, mode_t mode) { uError("%s not implemented yet", __FUNCTION__); - return -1; + return 0; } void taosMvDir(char* destDir, char *srcDir) { diff --git a/src/os/src/windows/w64Env.c b/src/os/src/windows/w64Env.c index c6046ecae7..5544c4ba39 100644 --- a/src/os/src/windows/w64Env.c +++ b/src/os/src/windows/w64Env.c @@ -21,8 +21,9 @@ extern void taosWinSocketInit(); void osInit() { + taosSetCoreDump(); if (configDir[0] == 0) { - strcpy(configDir, "~/TDengine/cfg"); + strcpy(configDir, "C:/TDengine/cfg"); } strcpy(tsVnodeDir, "C:/TDengine/data"); diff --git a/src/os/src/windows/w64Lz4.c b/src/os/src/windows/w64Lz4.c index 96556c1f1c..631a22e572 100644 --- a/src/os/src/windows/w64Lz4.c +++ b/src/os/src/windows/w64Lz4.c @@ -21,9 +21,29 @@ #include "tulog.h" #include "tutil.h" +unsigned char 
_MyBitScanForward64(unsigned long *ret, uint64_t x) { + unsigned long x0 = (unsigned long)x, top, bottom; + _BitScanForward(&top, (unsigned long)(x >> 32)); + _BitScanForward(&bottom, x0); + *ret = x0 ? bottom : 32 + top; + return x != 0; +} + +unsigned char _MyBitScanReverse64(unsigned long *ret, uint64_t x) { + unsigned long x1 = (unsigned long)(x >> 32), top, bottom; + _BitScanReverse(&top, x1); + _BitScanReverse(&bottom, (unsigned long)x); + *ret = x1 ? top + 32 : bottom; + return x != 0; +} + int32_t BUILDIN_CLZL(uint64_t val) { unsigned long r = 0; +#ifdef _WIN64 _BitScanReverse64(&r, val); +#else + _MyBitScanReverse64(&r, val); +#endif return (int)(r >> 3); } @@ -35,7 +55,11 @@ int32_t BUILDIN_CLZ(uint32_t val) { int32_t BUILDIN_CTZL(uint64_t val) { unsigned long r = 0; +#ifdef _WIN64 _BitScanForward64(&r, val); +#else + _MyBitScanForward64(&r, val); +#endif return (int)(r >> 3); } @@ -43,4 +67,4 @@ int32_t BUILDIN_CTZ(uint32_t val) { unsigned long r = 0; _BitScanForward(&r, val); return (int)(r >> 3); -} +} \ No newline at end of file diff --git a/src/os/src/windows/w64Semphone.c b/src/os/src/windows/w64Semphone.c index 7ed6228228..ded7e41843 100644 --- a/src/os/src/windows/w64Semphone.c +++ b/src/os/src/windows/w64Semphone.c @@ -32,3 +32,7 @@ int64_t taosGetPthreadId() { return (int64_t)pthread_self(); #endif } + +bool taosComparePthread(pthread_t first, pthread_t second) { + return first.p == second.p; +} diff --git a/src/os/src/windows/w64Socket.c b/src/os/src/windows/w64Socket.c index dd8961da40..8fd198ba80 100644 --- a/src/os/src/windows/w64Socket.c +++ b/src/os/src/windows/w64Socket.c @@ -62,4 +62,24 @@ int taosSetSockOpt(SOCKET socketfd, int level, int optname, void *optval, int op } return setsockopt(socketfd, level, optname, optval, optlen); -} \ No newline at end of file +} + +#ifdef TAOS_OS_FUNC_SOCKET_INET + +uint32_t taosInetAddr(char *ipAddr) { + uint32_t value; + int ret = inet_pton(AF_INET, ipAddr, &value); + if (ret <= 0) { + return INADDR_NONE; + } else { + return value; + } +} + +const char *taosInetNtoa(struct in_addr ipInt) { + // not thread safe, only for debug usage while print log + static char tmpDstStr[16]; + return inet_ntop(AF_INET, &ipInt, tmpDstStr, INET6_ADDRSTRLEN); +} + +#endif \ No newline at end of file diff --git a/src/os/src/windows/w64Sysinfo.c b/src/os/src/windows/w64Sysinfo.c index 27869e1eec..eb252a0b86 100644 --- a/src/os/src/windows/w64Sysinfo.c +++ b/src/os/src/windows/w64Sysinfo.c @@ -16,11 +16,22 @@ #define _DEFAULT_SOURCE #include "os.h" #include "taosdef.h" -#include "tglobal.h" #include "tconfig.h" +#include "tglobal.h" #include "ttimer.h" #include "tulog.h" #include "tutil.h" +#if (_WIN64) +#include +#include +#include +#include +#include +#include +#pragma comment(lib, "Mswsock.lib ") +#endif + +#include static void taosGetSystemTimezone() { // get and set default timezone @@ -43,7 +54,7 @@ static void taosGetSystemLocale() { if (cfg_locale && cfg_locale->cfgStatus < TAOS_CFG_CSTATUS_DEFAULT) { char *locale = setlocale(LC_CTYPE, "chs"); if (locale != NULL) { - tstrncpy(tsLocale, locale, TSDB_LOCALE_LEN);; + tstrncpy(tsLocale, locale, TSDB_LOCALE_LEN); cfg_locale->cfgStatus = TAOS_CFG_CSTATUS_DEFAULT; uInfo("locale not configured, set to default:%s", tsLocale); } @@ -69,11 +80,64 @@ void taosGetSystemInfo() { taosGetSystemLocale(); } -bool taosGetDisk() { return true; } +bool taosGetDisk() { + const double unit = 1024 * 1024 * 1024; + BOOL fResult; + unsigned _int64 i64FreeBytesToCaller; + unsigned _int64 i64TotalBytes; + unsigned 
_int64 i64FreeBytes; + char dir[4] = {'C', ':', '\\', '\0'}; + int drive_type; + + if (tscEmbedded) { + drive_type = GetDriveTypeA(dir); + if (drive_type == DRIVE_FIXED) { + fResult = GetDiskFreeSpaceExA(dir, (PULARGE_INTEGER)&i64FreeBytesToCaller, (PULARGE_INTEGER)&i64TotalBytes, + (PULARGE_INTEGER)&i64FreeBytes); + if (fResult) { + tsTotalDataDirGB = tsTotalLogDirGB = tsTotalTmpDirGB = (float)(i64TotalBytes / unit); + tsAvailDataDirGB = tsAvailLogDirGB = tsAvailTmpDirectorySpace = (float)(i64FreeBytes / unit); + } + } + } + return true; +} + +bool taosReadProcIO(int64_t *readbyte, int64_t *writebyte) { + IO_COUNTERS io_counter; + if (GetProcessIoCounters(GetCurrentProcess(), &io_counter)) { + if (readbyte) *readbyte = io_counter.ReadTransferCount; + if (writebyte) *writebyte = io_counter.WriteTransferCount; + return true; + } + return false; +} bool taosGetProcIO(float *readKB, float *writeKB) { - *readKB = 0; - *writeKB = 0; + static int64_t lastReadbyte = -1; + static int64_t lastWritebyte = -1; + + int64_t curReadbyte = 0; + int64_t curWritebyte = 0; + + if (!taosReadProcIO(&curReadbyte, &curWritebyte)) { + return false; + } + + if (lastReadbyte == -1 || lastWritebyte == -1) { + lastReadbyte = curReadbyte; + lastWritebyte = curWritebyte; + return false; + } + + *readKB = (float)((double)(curReadbyte - lastReadbyte) / 1024); + *writeKB = (float)((double)(curWritebyte - lastWritebyte) / 1024); + if (*readKB < 0) *readKB = 0; + if (*writeKB < 0) *writeKB = 0; + + lastReadbyte = curReadbyte; + lastWritebyte = curWritebyte; + return true; } @@ -89,12 +153,33 @@ bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) { } bool taosGetProcMemory(float *memoryUsedMB) { - *memoryUsedMB = 0; + unsigned bytes_used = 0; +#if defined(_WIN32) && defined(_MSC_VER) + PROCESS_MEMORY_COUNTERS pmc; + HANDLE cur_proc = GetCurrentProcess(); + + if (GetProcessMemoryInfo(cur_proc, &pmc, sizeof(pmc))) { + bytes_used = (unsigned)(pmc.WorkingSetSize + pmc.PagefileUsage); + } +#endif + + *memoryUsedMB = (float)bytes_used / 1024 / 1024; + return true; } bool taosGetSysMemory(float *memoryUsedMB) { - *memoryUsedMB = 0; + MEMORYSTATUSEX memsStat; + float nMemFree; + float nMemTotal; + + memsStat.dwLength = sizeof(memsStat); + if (!GlobalMemoryStatusEx(&memsStat)) { + return false; + } + nMemFree = memsStat.ullAvailPhys / (1024.0f * 1024.0f); + nMemTotal = memsStat.ullTotalPhys / (1024.0f * 1024.0f); + *memoryUsedMB = nMemTotal - nMemFree; return true; } @@ -103,16 +188,46 @@ int taosSystem(const char *cmd) { return -1; } -int flock(int fd, int option) { - return 0; +int flock(int fd, int option) { return 0; } + +int fsync(int filedes) { return 0; } + +int sigaction(int sig, struct sigaction *d, void *p) { return 0; } + +LONG WINAPI FlCrashDump(PEXCEPTION_POINTERS ep) { + typedef BOOL(WINAPI * FxMiniDumpWriteDump)(IN HANDLE hProcess, IN DWORD ProcessId, IN HANDLE hFile, + IN MINIDUMP_TYPE DumpType, + IN CONST PMINIDUMP_EXCEPTION_INFORMATION ExceptionParam, + IN CONST PMINIDUMP_USER_STREAM_INFORMATION UserStreamParam, + IN CONST PMINIDUMP_CALLBACK_INFORMATION CallbackParam); + + HMODULE dll = LoadLibrary("dbghelp.dll"); + if (dll == NULL) return EXCEPTION_CONTINUE_SEARCH; + FxMiniDumpWriteDump mdwd = (FxMiniDumpWriteDump)(GetProcAddress(dll, "MiniDumpWriteDump")); + if (mdwd == NULL) { + FreeLibrary(dll); + return EXCEPTION_CONTINUE_SEARCH; + } + + TCHAR path[MAX_PATH]; + DWORD len = GetModuleFileName(NULL, path, _countof(path)); + path[len - 3] = 'd'; + path[len - 2] = 'm'; + path[len - 1] = 'p'; + + HANDLE 
file = CreateFile(path, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); + + MINIDUMP_EXCEPTION_INFORMATION mei; + mei.ThreadId = GetCurrentThreadId(); + mei.ExceptionPointers = ep; + mei.ClientPointers = FALSE; + + (*mdwd)(GetCurrentProcess(), GetCurrentProcessId(), file, MiniDumpWithHandleData, &mei, NULL, NULL); + + CloseHandle(file); + FreeLibrary(dll); + + return EXCEPTION_CONTINUE_SEARCH; } -int fsync(int filedes) { - return 0; -} - -int sigaction(int sig, struct sigaction *d, void *p) { - return 0; -} - -void taosSetCoreDump() {} \ No newline at end of file +void taosSetCoreDump() { SetUnhandledExceptionFilter(&FlCrashDump); } \ No newline at end of file diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index e367911695..5ef3c9a66e 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -70,7 +70,7 @@ bool httpInitContexts() { void httpCleanupContexts() { if (tsHttpServer.contextCache != NULL) { SCacheObj *cache = tsHttpServer.contextCache; - httpInfo("context cache is cleanuping, size:%zu", taosHashGetSize(cache->pHashTable)); + httpInfo("context cache is cleanuping, size:%" PRIzu "", taosHashGetSize(cache->pHashTable)); taosCacheCleanup(tsHttpServer.contextCache); tsHttpServer.contextCache = NULL; } @@ -108,7 +108,8 @@ HttpContext *httpCreateContext(int32_t fd) { pContext->lastAccessTime = taosGetTimestampSec(); pContext->state = HTTP_CONTEXT_STATE_READY; - HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &pContext, sizeof(int64_t), &pContext, sizeof(int64_t), 3); + uint64_t handleVal = (uint64_t)pContext; + HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &handleVal, sizeof(int64_t), &pContext, sizeof(int64_t), 3000); pContext->ppContext = ppContext; httpDebug("context:%p, fd:%d, is created, data:%p", pContext, fd, ppContext); @@ -119,7 +120,8 @@ HttpContext *httpCreateContext(int32_t fd) { } HttpContext *httpGetContext(void *ptr) { - HttpContext **ppContext = taosCacheAcquireByKey(tsHttpServer.contextCache, &ptr, sizeof(HttpContext *)); + uint64_t handleVal = (uint64_t)ptr; + HttpContext **ppContext = taosCacheAcquireByKey(tsHttpServer.contextCache, &handleVal, sizeof(HttpContext *)); if (ppContext) { HttpContext *pContext = *ppContext; @@ -167,8 +169,8 @@ bool httpInitContext(HttpContext *pContext) { memset(pParser, 0, sizeof(HttpParser)); pParser->pCur = pParser->pLast = pParser->buffer; - httpDebug("context:%p, fd:%d, ip:%s, thread:%s, accessTimes:%d, parsed:%d", - pContext, pContext->fd, pContext->ipstr, pContext->pThread->label, pContext->accessTimes, pContext->parsed); + httpDebug("context:%p, fd:%d, ip:%s, accessTimes:%d, parsed:%d", pContext, pContext->fd, pContext->ipstr, + pContext->accessTimes, pContext->parsed); return true; } diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c index 8ee92be31c..f4aca91cba 100644 --- a/src/plugins/http/src/httpServer.c +++ b/src/plugins/http/src/httpServer.c @@ -302,7 +302,7 @@ static void *httpAcceptHttpConnection(void *arg) { #if 0 if (totalFds > tsHttpCacheSessions * 100) { httpError("fd:%d, ip:%s:%u, totalFds:%d larger than httpCacheSessions:%d*100, refuse connection", connFd, - inet_ntoa(clientAddr.sin_addr), htons(clientAddr.sin_port), totalFds, tsHttpCacheSessions); + taosInetNtoa(clientAddr.sin_addr), htons(clientAddr.sin_port), totalFds, tsHttpCacheSessions); taosCloseSocket(connFd); continue; } @@ -316,14 +316,14 @@ static void *httpAcceptHttpConnection(void *arg) { 
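/*
 * Editorial sketch (not part of the patch): httpCreateContext()/httpGetContext() above and
 * mnodePutShowObj()/mnodeAccquireShowObj() earlier now widen the object pointer to a
 * uint64_t before handing it to the cache as a key of sizeof(int64_t) bytes. On a 64-bit
 * build the old form (&pContext, sizeof(int64_t)) happened to read the same 8 pointer
 * bytes, but on a 32-bit build (which this patch appears to target for Windows) it read
 * 4 pointer bytes plus 4 bytes of adjacent stack, so the key could differ between put and
 * acquire. The helper below illustrates the widening step only; demoMakeKey is an invented
 * name, not part of the TDengine cache API.
 */
#include <stdint.h>
#include <string.h>

static void demoMakeKey(const void *obj, char keyOut[8]) {
  uint64_t handleVal = (uint64_t)(uintptr_t)obj;  /* widen first: all 8 key bytes defined */
  memcpy(keyOut, &handleVal, sizeof(handleVal));  /* identical on 32- and 64-bit builds   */
}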
pContext = httpCreateContext(connFd); if (pContext == NULL) { - httpError("fd:%d, ip:%s:%u, no enough resource to allocate http context", connFd, inet_ntoa(clientAddr.sin_addr), + httpError("fd:%d, ip:%s:%u, no enough resource to allocate http context", connFd, taosInetNtoa(clientAddr.sin_addr), htons(clientAddr.sin_port)); taosCloseSocket(connFd); continue; } pContext->pThread = pThread; - sprintf(pContext->ipstr, "%s:%u", inet_ntoa(clientAddr.sin_addr), htons(clientAddr.sin_port)); + sprintf(pContext->ipstr, "%s:%u", taosInetNtoa(clientAddr.sin_addr), htons(clientAddr.sin_port)); struct epoll_event event; event.events = EPOLLIN | EPOLLPRI | EPOLLWAKEUP | EPOLLERR | EPOLLHUP | EPOLLRDHUP; diff --git a/src/plugins/http/src/httpSession.c b/src/plugins/http/src/httpSession.c index fce85df45e..f19679e072 100644 --- a/src/plugins/http/src/httpSession.c +++ b/src/plugins/http/src/httpSession.c @@ -34,7 +34,7 @@ void httpCreateSession(HttpContext *pContext, void *taos) { session.refCount = 1; int32_t len = snprintf(session.id, HTTP_SESSION_ID_LEN, "%s.%s", pContext->user, pContext->pass); - pContext->session = taosCachePut(server->sessionCache, session.id, len, &session, sizeof(HttpSession), tsHttpSessionExpire); + pContext->session = taosCachePut(server->sessionCache, session.id, len, &session, sizeof(HttpSession), tsHttpSessionExpire * 1000); // void *temp = pContext->session; // taosCacheRelease(server->sessionCache, (void **)&temp, false); @@ -107,7 +107,7 @@ static void httpDestroySession(void *data) { void httpCleanUpSessions() { if (tsHttpServer.sessionCache != NULL) { SCacheObj *cache = tsHttpServer.sessionCache; - httpInfo("session cache is cleanuping, size:%zu", taosHashGetSize(cache->pHashTable)); + httpInfo("session cache is cleanuping, size:%" PRIzu "", taosHashGetSize(cache->pHashTable)); taosCacheCleanup(tsHttpServer.sessionCache); tsHttpServer.sessionCache = NULL; } diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c index a01cb07a98..e0e0d1aa8b 100644 --- a/src/plugins/http/src/httpSql.c +++ b/src/plugins/http/src/httpSql.c @@ -210,6 +210,7 @@ void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int num } } + // todo refactor if (tscResultsetFetchCompleted(result)) { isContinue = false; } diff --git a/src/plugins/monitor/src/monitorMain.c b/src/plugins/monitor/src/monitorMain.c index 6c7ecbb951..d76bb4bd82 100644 --- a/src/plugins/monitor/src/monitorMain.c +++ b/src/plugins/monitor/src/monitorMain.c @@ -35,7 +35,7 @@ #define SQL_LENGTH 1024 #define LOG_LEN_STR 100 -#define IP_LEN_STR 18 +#define IP_LEN_STR TSDB_EP_LEN #define CHECK_INTERVAL 1000 typedef enum { @@ -192,7 +192,7 @@ static void dnodeBuildMonitorSql(char *sql, int32_t cmd) { snprintf(sql, SQL_LENGTH, "create table if not exists %s.slowquery(ts timestamp, username " "binary(%d), created_time timestamp, time bigint, sql binary(%d))", - tsMonitorDbName, TSDB_TABLE_ID_LEN - 1, TSDB_SLOW_QUERY_SQL_LEN); + tsMonitorDbName, TSDB_TABLE_FNAME_LEN - 1, TSDB_SLOW_QUERY_SQL_LEN); } else if (cmd == MONITOR_CMD_CREATE_TB_LOG) { snprintf(sql, SQL_LENGTH, "create table if not exists %s.log(ts timestamp, level tinyint, " @@ -234,17 +234,22 @@ static void monitorInitDatabaseCb(void *param, TAOS_RES *result, int32_t code) { } void monitorStopSystem() { - monitorInfo("monitor module is stopped"); - monitorExecuteSQLFp = NULL; + if (tsMonitorConn.state == MONITOR_STATE_STOPPED) return; tsMonitorConn.state = MONITOR_STATE_STOPPED; + monitorExecuteSQLFp = NULL; + + monitorInfo("monitor module 
is stopped"); + if (tsMonitorConn.initTimer != NULL) { taosTmrStopA(&(tsMonitorConn.initTimer)); } if (tsMonitorConn.timer != NULL) { taosTmrStopA(&(tsMonitorConn.timer)); } - - taos_close(tsMonitorConn.conn); + if (tsMonitorConn.conn != NULL) { + taos_close(tsMonitorConn.conn); + tsMonitorConn.conn = NULL; + } } void monitorCleanUpSystem() { diff --git a/src/query/inc/qAst.h b/src/query/inc/qAst.h index 01b4c16ac1..ec568a6cdb 100644 --- a/src/query/inc/qAst.h +++ b/src/query/inc/qAst.h @@ -48,7 +48,7 @@ typedef struct tQueryInfo { SSchema sch; // schema of tags char* q; __compar_fn_t compare; // filter function - void* param; // STSchema + bool indexed; // indexed columns } tQueryInfo; typedef struct SExprTraverseSupp { diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index bd2e0a4470..b5487561b2 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -52,10 +52,10 @@ typedef struct SWindowStatus { typedef struct SWindowResult { uint16_t numOfRows; // number of rows of current time window + SWindowStatus status; // this result status: closed or opened SPosInfo pos; // Position of current result in disk-based output buffer SResultInfo* resultInfo; // For each result column, there is a resultInfo STimeWindow window; // The time window that current result covers. - SWindowStatus status; // this result status: closed or opened } SWindowResult; /** @@ -122,6 +122,7 @@ typedef struct SQueryCostInfo { uint32_t discardBlocks; uint64_t elapsedTime; uint64_t computTime; + uint64_t internalSupSize; } SQueryCostInfo; typedef struct SQuery { @@ -184,10 +185,8 @@ enum { typedef struct SQInfo { void* signature; - int32_t pointsInterpo; - int32_t code; // error code to returned to client -// sem_t dataReady; - + int32_t code; // error code to returned to client + int64_t owner; // if it is in execution void* tsdb; int32_t vgId; STableGroupInfo tableGroupInfo; // table id list < only includes the STable list> diff --git a/src/query/inc/qExtbuffer.h b/src/query/inc/qExtbuffer.h index 36fc0c9820..0bdcf5c45e 100644 --- a/src/query/inc/qExtbuffer.h +++ b/src/query/inc/qExtbuffer.h @@ -29,7 +29,7 @@ extern "C" { #define MAX_TMPFILE_PATH_LENGTH PATH_MAX #define INITIAL_ALLOCATION_BUFFER_SIZE 64 -#define DEFAULT_PAGE_SIZE (4096L) // 16k larger than the SHistoInfo +#define DEFAULT_PAGE_SIZE (1024L) // 16k larger than the SHistoInfo typedef enum EXT_BUFFER_FLUSH_MODEL { /* diff --git a/src/query/inc/qFill.h b/src/query/inc/qFill.h index db6a69c2c5..6b8dcb0bf9 100644 --- a/src/query/inc/qFill.h +++ b/src/query/inc/qFill.h @@ -30,6 +30,11 @@ typedef struct { int16_t flag; // column flag: TAG COLUMN|NORMAL COLUMN union {int64_t i; double d;} fillVal; } SFillColInfo; + +typedef struct { + SSchema col; + char* tagVal; +} SFillTagColInfo; typedef struct SFillInfo { TSKEY start; // start timestamp @@ -44,7 +49,8 @@ typedef struct SFillInfo { int32_t numOfTags; // number of tags int32_t numOfCols; // number of columns, including the tags columns int32_t rowSize; // size of each row - char ** pTags; // tags value for current interpolation +// char ** pTags; // tags value for current interpolation + SFillTagColInfo* pTags; // tags value for filling gap int64_t slidingTime; // sliding value to determine the number of result for a given time window char * prevValues; // previous row of data, to generate the interpolation results char * nextValues; // next row of data diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index 15cfeee6b2..2b2a967edf 100644 --- a/src/query/inc/sql.y +++ 
b/src/query/inc/sql.y @@ -26,7 +26,12 @@ #include #include #include +#include "qSqlparser.h" +#include "tcmdtype.h" +#include "tstoken.h" +#include "ttokendef.h" #include "tutil.h" +#include "tvariant.h" } %syntax_error { @@ -254,7 +259,7 @@ alter_db_optr(Y) ::= alter_db_optr(Z) keep(X). { Y = Z; Y.keep = X; } alter_db_optr(Y) ::= alter_db_optr(Z) blocks(X). { Y = Z; Y.numOfBlocks = strtol(X.z, NULL, 10); } alter_db_optr(Y) ::= alter_db_optr(Z) comp(X). { Y = Z; Y.compressionLevel = strtol(X.z, NULL, 10); } alter_db_optr(Y) ::= alter_db_optr(Z) wal(X). { Y = Z; Y.walLevel = strtol(X.z, NULL, 10); } -alter_db_optr(Y) ::= alter_db_optr(Z) fsync(X). { Y = Z; Y.fsyncPeriod = strtod(X.z, NULL, 10); } +alter_db_optr(Y) ::= alter_db_optr(Z) fsync(X). { Y = Z; Y.fsyncPeriod = strtol(X.z, NULL, 10); } %type typename {TAOS_FIELD} typename(A) ::= ids(X). { @@ -422,8 +427,35 @@ as(X) ::= . { X.n = 0; } from(A) ::= FROM tablelist(X). {A = X;} %type tablelist {tVariantList*} -tablelist(A) ::= ids(X) cpxName(Y). { toTSDBType(X.type); X.n += Y.n; A = tVariantListAppendToken(NULL, &X, -1);} -tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z). { toTSDBType(X.type); X.n += Z.n; A = tVariantListAppendToken(Y, &X, -1); } +tablelist(A) ::= ids(X) cpxName(Y). { + toTSDBType(X.type); + X.n += Y.n; + A = tVariantListAppendToken(NULL, &X, -1); + A = tVariantListAppendToken(A, &X, -1); // table alias name +} + +tablelist(A) ::= ids(X) cpxName(Y) ids(Z). { + toTSDBType(X.type); + toTSDBType(Z.type); + X.n += Y.n; + A = tVariantListAppendToken(NULL, &X, -1); + A = tVariantListAppendToken(A, &Z, -1); +} + +tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z). { + toTSDBType(X.type); + X.n += Z.n; + A = tVariantListAppendToken(Y, &X, -1); + A = tVariantListAppendToken(A, &X, -1); +} + +tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z) ids(F). 
{ + toTSDBType(X.type); + toTSDBType(F.type); + X.n += Z.n; + A = tVariantListAppendToken(Y, &X, -1); + A = tVariantListAppendToken(A, &F, -1); +} // The value of interval should be the form of "number+[a,s,m,h,d,n,y]" or "now" %type tmvar {SSQLToken} diff --git a/src/query/src/qAst.c b/src/query/src/qAst.c index e2d609a62c..afa3618a96 100644 --- a/src/query/src/qAst.c +++ b/src/query/src/qAst.c @@ -427,8 +427,9 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr if (ret != 0) { break; } - - taosArrayPush(result, SL_GET_NODE_DATA(pNode)); + + STableKeyInfo info = {.pTable = *(void**)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; + taosArrayPush(result, &info); } } else if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL) { // greater equal bool comp = true; @@ -445,7 +446,8 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr if (ret == 0 && optr == TSDB_RELATION_GREATER) { continue; } else { - taosArrayPush(result, SL_GET_NODE_DATA(pNode)); + STableKeyInfo info = {.pTable = *(void**)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; + taosArrayPush(result, &info); comp = false; } } @@ -458,8 +460,9 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr if (comp) { continue; } - - taosArrayPush(result, SL_GET_NODE_DATA(pNode)); + + STableKeyInfo info = {.pTable = *(void**)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; + taosArrayPush(result, &info); } tSkipListDestroyIter(iter); @@ -472,8 +475,9 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr if (comp) { continue; } - - taosArrayPush(result, SL_GET_NODE_DATA(pNode)); + + STableKeyInfo info = {.pTable = *(void**)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; + taosArrayPush(result, &info); } } else { @@ -496,12 +500,14 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr if (ret == 0 && optr == TSDB_RELATION_LESS) { continue; } else { - taosArrayPush(result, SL_GET_NODE_DATA(pNode)); + STableKeyInfo info = {.pTable = *(void**)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; + taosArrayPush(result, &info); comp = false; // no need to compare anymore } } } } + free(cond.start); free(cond.end); tSkipListDestroyIter(iter); @@ -689,7 +695,8 @@ static void tQueryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, } if (addToResult) { - taosArrayPush(res, pData); + STableKeyInfo info = {.pTable = *(void**)pData, .lastKey = TSKEY_INITIAL_VAL}; + taosArrayPush(res, &info); } } @@ -716,7 +723,7 @@ void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, S } tQueryInfo *pQueryInfo = pExpr->_node.info; - if (pQueryInfo->sch.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX && pQueryInfo->optr != TSDB_RELATION_LIKE) { + if (pQueryInfo->indexed && pQueryInfo->optr != TSDB_RELATION_LIKE) { tQueryIndexColumn(pSkipList, pQueryInfo, result); } else { tQueryIndexlessColumn(pSkipList, pQueryInfo, result, param->nodeFilterFn); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index be3d476be5..c8efee03cd 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -130,6 +130,9 @@ static void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv); (tw)->ekey = (tw)->skey + ((_q)->intervalTime - 1); \ } while (0) +#define SET_STABLE_QUERY_OVER(_q) ((_q)->tableIndex = (int32_t)((_q)->tableqinfoGroupInfo.numOfTables)) +#define IS_STASBLE_QUERY_OVER(_q) ((_q)->tableIndex >= 
(int32_t)((_q)->tableqinfoGroupInfo.numOfTables)) + // todo move to utility static int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *group); @@ -400,8 +403,16 @@ static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWin // more than the capacity, reallocate the resources if (pWindowResInfo->size >= pWindowResInfo->capacity) { - int64_t newCap = (int64_t)(pWindowResInfo->capacity * 1.5f); + int64_t newCap = 0; + if (pWindowResInfo->capacity > 10000) { + newCap = (int64_t)(pWindowResInfo->capacity * 1.25); + } else { + newCap = (int64_t)(pWindowResInfo->capacity * 1.5); + } + char *t = realloc(pWindowResInfo->pResult, newCap * sizeof(SWindowResult)); + pRuntimeEnv->summary.internalSupSize += (newCap - pWindowResInfo->capacity) * sizeof(SWindowResult); + if (t == NULL) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } @@ -411,6 +422,8 @@ static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWin int32_t inc = (int32_t)newCap - pWindowResInfo->capacity; memset(&pWindowResInfo->pResult[pWindowResInfo->capacity], 0, sizeof(SWindowResult) * inc); + pRuntimeEnv->summary.internalSupSize += (pQuery->numOfOutput * sizeof(SResultInfo) + pRuntimeEnv->interBufSize) * inc; + for (int32_t i = pWindowResInfo->capacity; i < newCap; ++i) { createQueryResultInfo(pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, pRuntimeEnv->interBufSize); } @@ -1347,6 +1360,11 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl if ((pQuery->limit.limit >= 0) && (pQuery->limit.limit + pQuery->limit.offset) <= numOfRes) { setQueryStatus(pQuery, QUERY_COMPLETED); } + + if (((pTableQInfo->lastKey > pTableQInfo->win.ekey) && QUERY_IS_ASC_QUERY(pQuery)) || + ((pTableQInfo->lastKey < pTableQInfo->win.ekey) && (!QUERY_IS_ASC_QUERY(pQuery)))) { + setQueryStatus(pQuery, QUERY_COMPLETED); + } } } @@ -1818,10 +1836,14 @@ static void doExchangeTimeWindow(SQInfo* pQInfo) { for(int32_t i = 0; i < t; ++i) { SArray* p1 = GET_TABLEGROUP(pQInfo, i); + SArray* tableKeyGroup = taosArrayGetP(pQInfo->tableGroupInfo.pGroupList, i); size_t len = taosArrayGetSize(p1); for(int32_t j = 0; j < len; ++j) { STableQueryInfo* pTableQueryInfo = (STableQueryInfo*) taosArrayGetP(p1, j); SWAP(pTableQueryInfo->win.skey, pTableQueryInfo->win.ekey, TSKEY); + + STableKeyInfo* pInfo = taosArrayGet(tableKeyGroup, j); + pInfo->lastKey = pTableQueryInfo->win.skey; } } } @@ -2220,7 +2242,7 @@ static void ensureOutputBufferSimple(SQueryRuntimeEnv* pRuntimeEnv, int32_t capa static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pBlockInfo) { // in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block SQuery* pQuery = pRuntimeEnv->pQuery; - if (!QUERY_IS_INTERVAL_QUERY(pQuery) && !pRuntimeEnv->groupbyNormalCol && !isFixedOutputQuery(pRuntimeEnv)) { + if (!QUERY_IS_INTERVAL_QUERY(pQuery) && !pRuntimeEnv->groupbyNormalCol && !isFixedOutputQuery(pRuntimeEnv) && !isTSCompQuery(pQuery)) { SResultRec *pRec = &pQuery->rec; if (pQuery->rec.capacity - pQuery->rec.rows < pBlockInfo->rows) { @@ -2290,13 +2312,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; - while (true) { - if (!tsdbNextDataBlock(pQueryHandle)) { - if (terrno != TSDB_CODE_SUCCESS) { - longjmp(pRuntimeEnv->env, terrno); - } - break; - } + while (tsdbNextDataBlock(pQueryHandle)) { 
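/*
 * Editorial sketch (not part of the patch): doSetTimeWindowFromKey() above now grows the
 * window-result array by 1.25x once it already holds more than 10000 entries and by 1.5x
 * below that threshold, trading a few extra realloc() calls for less over-allocation on
 * large interval queries, and it charges the extra bytes to summary.internalSupSize.
 * The growth rule in isolation, with an invented helper name:
 */
#include <stdint.h>

static int64_t demoNextCapacity(int64_t capacity) {
  /* smaller growth factor once the buffer is already large, as in the hunk above */
  return (capacity > 10000) ? (int64_t)(capacity * 1.25) : (int64_t)(capacity * 1.5);
}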
summary->totalBlocks += 1; if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) { @@ -2316,6 +2332,11 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { continue; } + if (terrno != TSDB_CODE_SUCCESS) { // load data block failed, abort query + longjmp(pRuntimeEnv->env, terrno); + break; + } + // query start position can not move into tableApplyFunctionsOnBlock due to limit/offset condition pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : blockInfo.rows - 1; int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, binarySearchForKey, pDataBlock); @@ -2330,6 +2351,10 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { } } + if (terrno != TSDB_CODE_SUCCESS) { + longjmp(pRuntimeEnv->env, terrno); + } + // if the result buffer is not full, set the query complete if (!Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)) { setQueryStatus(pQuery, QUERY_COMPLETED); @@ -2657,6 +2682,10 @@ int32_t mergeIntoGroupResult(SQInfo *pQInfo) { qDebug("QInfo:%p no result in group %d, continue", pQInfo, pQInfo->groupIndex - 1); } + if (pQInfo->groupIndex == numOfGroups && pQInfo->offset == pQInfo->numOfGroupResultPages) { + SET_STABLE_QUERY_OVER(pQInfo); + } + qDebug("QInfo:%p merge res data into group, index:%d, total group:%d, elapsed time:%" PRId64 "ms", pQInfo, pQInfo->groupIndex - 1, numOfGroups, taosGetTimestampMs() - st); @@ -2675,7 +2704,7 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) { // check if all results has been sent to client int32_t numOfGroup = (int32_t)(GET_NUM_OF_TABLEGROUP(pQInfo)); if (pQInfo->numOfGroupResultPages == 0 && pQInfo->groupIndex == numOfGroup) { - pQInfo->tableIndex = (int32_t)pQInfo->tableqinfoGroupInfo.numOfTables; // set query completed + SET_STABLE_QUERY_OVER(pQInfo); return; } } @@ -2699,7 +2728,6 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) { memcpy(pDest + offset * bytes, pData->data + pRuntimeEnv->offset[i] * pData->num, bytes * pData->num); } -// rows += pData->num; offset += (int32_t)pData->num; } @@ -2785,14 +2813,20 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { setWindowResultInfo(pResultInfo, pQuery, pRuntimeEnv->stableQuery, buf); resetMergeResultBuf(pQuery, pRuntimeEnv->pCtx, pResultInfo); + // todo add windowRes iterator int64_t lastTimestamp = -1; int64_t startt = taosGetTimestampMs(); while (1) { + if (IS_QUERY_KILLED(pQInfo)) { + qDebug("QInfo:%p it is already killed, abort", pQInfo); + longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); + } + int32_t pos = pTree->pNode[0].index; SWindowResInfo *pWindowResInfo = &pTableList[pos]->windowResInfo; - SWindowResult * pWindowRes = getWindowResult(pWindowResInfo, cs.position[pos]); + SWindowResult *pWindowRes = getWindowResult(pWindowResInfo, cs.position[pos]); tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes->pos.pageId); char *b = getPosInResultPage(pRuntimeEnv, PRIMARYKEY_TIMESTAMP_COL_INDEX, pWindowRes, page); @@ -2829,6 +2863,9 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { lastTimestamp = ts; + // move to the next element of current entry + int32_t currentPageId = pWindowRes->pos.pageId; + cs.position[pos] += 1; if (cs.position[pos] >= pWindowResInfo->size) { cs.position[pos] = -1; @@ -2837,6 +2874,12 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { if (--numOfTables == 0) { break; } + } else { + // current page is not needed anymore + SWindowResult *pNextWindowRes = getWindowResult(pWindowResInfo, cs.position[pos]); + if 
(pNextWindowRes->pos.pageId != currentPageId) { + releaseResBufPage(pRuntimeEnv->pResultBuf, page); + } } } @@ -2930,7 +2973,7 @@ static void updateTableQueryInfoForReverseScan(SQuery *pQuery, STableQueryInfo * return; } - // order has change already! + // order has changed already int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); // TODO validate the assertion @@ -2939,9 +2982,13 @@ static void updateTableQueryInfoForReverseScan(SQuery *pQuery, STableQueryInfo * // } else { // assert(pTableQueryInfo->win.ekey <= pTableQueryInfo->lastKey + step); // } - - pTableQueryInfo->win.ekey = pTableQueryInfo->lastKey + step; - + + if (pTableQueryInfo->lastKey == pTableQueryInfo->win.skey) { + // do nothing, no results + } else { + pTableQueryInfo->win.ekey = pTableQueryInfo->lastKey + step; + } + SWAP(pTableQueryInfo->win.skey, pTableQueryInfo->win.ekey, TSKEY); pTableQueryInfo->lastKey = pTableQueryInfo->win.skey; @@ -3003,16 +3050,26 @@ void disableFuncInReverseScan(SQInfo *pQInfo) { } } } - +} + +static void setupQueryRangeForReverseScan(SQInfo* pQInfo) { + SQuery* pQuery = pQInfo->runtimeEnv.pQuery; int32_t numOfGroups = (int32_t)(GET_NUM_OF_TABLEGROUP(pQInfo)); - + for(int32_t i = 0; i < numOfGroups; ++i) { SArray *group = GET_TABLEGROUP(pQInfo, i); - + SArray *tableKeyGroup = taosArrayGetP(pQInfo->tableGroupInfo.pGroupList, i); + size_t t = taosArrayGetSize(group); for (int32_t j = 0; j < t; ++j) { STableQueryInfo *pCheckInfo = taosArrayGetP(group, j); updateTableQueryInfoForReverseScan(pQuery, pCheckInfo); + + // update the last key in tableKeyInfo list + STableKeyInfo *pTableKeyInfo = taosArrayGet(tableKeyGroup, j); + pTableKeyInfo->lastKey = pCheckInfo->lastKey; + + assert(pCheckInfo->pTable == pTableKeyInfo->pTable); } } } @@ -3261,20 +3318,20 @@ static void setEnvBeforeReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatusI TIME_WINDOW_COPY(cond.twindow, pQuery->window); + setQueryStatus(pQuery, QUERY_NOT_COMPLETED); + switchCtxOrder(pRuntimeEnv); + disableFuncInReverseScan(pQInfo); + setupQueryRangeForReverseScan(pQInfo); + // clean unused handle if (pRuntimeEnv->pSecQueryHandle != NULL) { tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle); } - // add ref for table pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo); if (pRuntimeEnv->pSecQueryHandle == NULL) { longjmp(pRuntimeEnv->env, terrno); } - - setQueryStatus(pQuery, QUERY_NOT_COMPLETED); - switchCtxOrder(pRuntimeEnv); - disableFuncInReverseScan(pQInfo); } static void clearEnvAfterReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatusInfo *pStatus) { @@ -3299,6 +3356,13 @@ static void clearEnvAfterReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatus pQuery->window = pTableQueryInfo->win; } +static void restoreTimeWindow(STableGroupInfo* pTableGroupInfo, STsdbQueryCond* pCond) { + assert(pTableGroupInfo->numOfTables == 1); + SArray* pTableKeyGroup = taosArrayGetP(pTableGroupInfo->pGroupList, 0); + STableKeyInfo* pKeyInfo = taosArrayGet(pTableKeyGroup, 0); + pKeyInfo->lastKey = pCond->twindow.skey; +} + void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) { SQInfo *pQInfo = (SQInfo *) GET_QINFO_ADDR(pRuntimeEnv); SQuery *pQuery = pRuntimeEnv->pQuery; @@ -3347,6 +3411,7 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) { tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle); } + restoreTimeWindow(&pQInfo->tableGroupInfo, &cond); pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableGroupInfo, 
pQInfo); if (pRuntimeEnv->pSecQueryHandle == NULL) { longjmp(pRuntimeEnv->env, terrno); @@ -3795,7 +3860,7 @@ static void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBloc } } -bool queryHasRemainResults(SQueryRuntimeEnv* pRuntimeEnv) { +bool queryHasRemainResForTableQuery(SQueryRuntimeEnv* pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; SFillInfo *pFillInfo = pRuntimeEnv->pFillInfo; @@ -3804,8 +3869,7 @@ bool queryHasRemainResults(SQueryRuntimeEnv* pRuntimeEnv) { } if (pQuery->fillType != TSDB_FILL_NONE && !isPointInterpoQuery(pQuery)) { - // There are results not returned to client yet, so filling operation applied to the remain result is required - // in the first place. + // There are results not returned to client yet, so filling applied to the remain result is required firstly. int32_t remain = taosNumOfRemainRows(pFillInfo); if (remain > 0) { return true; @@ -3859,14 +3923,14 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data data += sizeof(STableIdInfo); } - // all data returned, set query over + // Check if query is completed or not for stable query or normal table query respectively. if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { if (pQInfo->runtimeEnv.stableQuery) { - if (pQInfo->tableIndex >= pQInfo->tableqinfoGroupInfo.numOfTables) { + if (IS_STASBLE_QUERY_OVER(pQInfo)) { setQueryStatus(pQuery, QUERY_OVER); } } else { - if (!queryHasRemainResults(&pQInfo->runtimeEnv)) { + if (!queryHasRemainResForTableQuery(&pQInfo->runtimeEnv)) { setQueryStatus(pQuery, QUERY_OVER); } } @@ -3912,7 +3976,7 @@ int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int ret = 0; } - if (!queryHasRemainResults(pRuntimeEnv)) { + if (!queryHasRemainResForTableQuery(pRuntimeEnv)) { return ret; } } @@ -3926,6 +3990,8 @@ static void queryCostStatis(SQInfo *pQInfo) { " load data block:%d, total rows:%"PRId64 ", check rows:%"PRId64, pQInfo, pSummary->elapsedTime, pSummary->totalBlocks, pSummary->loadBlockStatis, pSummary->loadBlocks, pSummary->totalRows, pSummary->totalCheckedRows); + + qDebug("QInfo:%p :cost summary: internal size:%"PRId64, pQInfo, pSummary->internalSupSize); } static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBlockInfo) { @@ -3978,14 +4044,7 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { TsdbQueryHandleT pQueryHandle = pRuntimeEnv->pQueryHandle; SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; - while (true) { - if (!tsdbNextDataBlock(pQueryHandle)) { - if (terrno != TSDB_CODE_SUCCESS) { - longjmp(pRuntimeEnv->env, terrno); - } - break; - } - + while (tsdbNextDataBlock(pQueryHandle)) { if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) { finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); @@ -4005,6 +4064,10 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { break; } } + + if (terrno != TSDB_CODE_SUCCESS) { + longjmp(pRuntimeEnv->env, terrno); + } } static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { @@ -4029,14 +4092,7 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { STableQueryInfo *pTableQueryInfo = pQuery->current; SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; - while (true) { - if (!tsdbNextDataBlock(pRuntimeEnv->pQueryHandle)) { - if (terrno != TSDB_CODE_SUCCESS) { - longjmp(pRuntimeEnv->env, terrno); - } - break; - } - + while (tsdbNextDataBlock(pRuntimeEnv->pQueryHandle)) { 
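/* advance block by block while skipping over the time interval; a failed read surfaces through terrno and is re-checked once the loop exits */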
tsdbRetrieveDataBlockInfo(pRuntimeEnv->pQueryHandle, &blockInfo); if (QUERY_IS_ASC_QUERY(pQuery)) { @@ -4132,6 +4188,11 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { } } + // check for error + if (terrno != TSDB_CODE_SUCCESS) { + longjmp(pRuntimeEnv->env, terrno); + } + return true; } @@ -4348,14 +4409,7 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); - while (true) { - if (!tsdbNextDataBlock(pQueryHandle)) { - if (terrno != TSDB_CODE_SUCCESS) { - longjmp(pRuntimeEnv->env, terrno); - } - break; - } - + while (tsdbNextDataBlock(pQueryHandle)) { summary->totalBlocks += 1; if (IS_QUERY_KILLED(pQInfo)) { @@ -4389,6 +4443,10 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { pQInfo, blockInfo.uid, blockInfo.tid, blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, pQuery->current->lastKey); } + if (terrno != TSDB_CODE_SUCCESS) { + longjmp(pRuntimeEnv->env, terrno); + } + updateWindowResNumOfRes(pRuntimeEnv); int64_t et = taosGetTimestampMs(); @@ -4420,9 +4478,11 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) { // todo refactor SArray *g1 = taosArrayInit(1, POINTER_BYTES); - SArray *tx = taosArrayInit(1, POINTER_BYTES); + SArray *tx = taosArrayInit(1, sizeof(STableKeyInfo)); + + STableKeyInfo info = {.pTable = pCheckInfo->pTable, .lastKey = pCheckInfo->lastKey}; + taosArrayPush(tx, &info); - taosArrayPush(tx, &pCheckInfo->pTable); taosArrayPush(g1, &tx); STableGroupInfo gp = {.numOfTables = 1, .pGroupList = g1}; @@ -4478,7 +4538,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { while (pQInfo->groupIndex < numOfGroups) { SArray* group = taosArrayGetP(pQInfo->tableGroupInfo.pGroupList, pQInfo->groupIndex); - qDebug("QInfo:%p last_row query on group:%d, total group:%zu, current group:%p", pQInfo, pQInfo->groupIndex, + qDebug("QInfo:%p last_row query on group:%d, total group:%" PRIzu ", current group:%p", pQInfo, pQInfo->groupIndex, numOfGroups, group); STsdbQueryCond cond = { @@ -4552,7 +4612,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { while (pQInfo->groupIndex < numOfGroups) { SArray* group = taosArrayGetP(pQInfo->tableGroupInfo.pGroupList, pQInfo->groupIndex); - qDebug("QInfo:%p group by normal columns group:%d, total group:%zu", pQInfo, pQInfo->groupIndex, numOfGroups); + qDebug("QInfo:%p group by normal columns group:%d, total group:%" PRIzu "", pQInfo, pQInfo->groupIndex, numOfGroups); STsdbQueryCond cond = { .colList = pQuery->colList, @@ -4574,7 +4634,9 @@ static void sequentialTableProcess(SQInfo *pQInfo) { pRuntimeEnv->pQueryHandle = NULL; } + // no need to update the lastkey for each table pRuntimeEnv->pQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &gp, pQInfo); + taosArrayDestroy(g1); taosArrayDestroy(tx); if (pRuntimeEnv->pQueryHandle == NULL) { @@ -4675,7 +4737,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { // the limitation of output result is reached, set the query completed if (limitResults(pRuntimeEnv)) { - pQInfo->tableIndex = (int32_t)pQInfo->tableqinfoGroupInfo.numOfTables; + SET_STABLE_QUERY_OVER(pQInfo); break; } @@ -4700,8 +4762,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { taosArrayPush(pQInfo->arrTableIdInfo, &tidInfo); // if the buffer is full or group by each table, we need to jump out of the loop - if (Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL) /*|| - isGroupbyEachTable(pQuery->pGroupbyExpr, pSupporter->pSidSet)*/) { + if (Q_STATUS_EQUAL(pQuery->status, 
QUERY_RESBUF_FULL)) { break; } @@ -4743,7 +4804,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { } qDebug( - "QInfo %p numOfTables:%"PRIu64", index:%d, numOfGroups:%zu, %"PRId64" points returned, total:%"PRId64", offset:%" PRId64, + "QInfo %p numOfTables:%"PRIu64", index:%d, numOfGroups:%" PRIzu ", %"PRId64" points returned, total:%"PRId64", offset:%" PRId64, pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQInfo->tableIndex, numOfGroups, pQuery->rec.rows, pQuery->rec.total, pQuery->limit.offset); } @@ -4765,7 +4826,7 @@ static void doSaveContext(SQInfo *pQInfo) { .colList = pQuery->colList, .numOfCols = pQuery->numOfCols, }; - + TIME_WINDOW_COPY(cond.twindow, pQuery->window); // clean unused handle @@ -4773,15 +4834,16 @@ static void doSaveContext(SQInfo *pQInfo) { tsdbCleanupQueryHandle(pRuntimeEnv->pSecQueryHandle); } + setQueryStatus(pQuery, QUERY_NOT_COMPLETED); + switchCtxOrder(pRuntimeEnv); + disableFuncInReverseScan(pQInfo); + setupQueryRangeForReverseScan(pQInfo); + pRuntimeEnv->prevGroupId = INT32_MIN; pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo); if (pRuntimeEnv->pSecQueryHandle == NULL) { longjmp(pRuntimeEnv->env, terrno); } - - setQueryStatus(pQuery, QUERY_NOT_COMPLETED); - switchCtxOrder(pRuntimeEnv); - disableFuncInReverseScan(pQInfo); } static void doRestoreContext(SQInfo *pQInfo) { @@ -5065,15 +5127,13 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); clearFirstNTimeWindow(pRuntimeEnv, pQInfo->groupIndex); } - - pQInfo->pointsInterpo += numOfFilled; } static void tableQueryImpl(SQInfo *pQInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQuery * pQuery = pRuntimeEnv->pQuery; - if (queryHasRemainResults(pRuntimeEnv)) { + if (queryHasRemainResForTableQuery(pRuntimeEnv)) { if (pQuery->fillType != TSDB_FILL_NONE) { /* @@ -5482,7 +5542,7 @@ static int32_t buildAirthmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTable qDebug("qmsg:%p create arithmetic expr from binary string: %s", pQueryMsg, pArithExprInfo->base.arg[0].argValue.pz); tExprNode* pExprNode = NULL; - TRY(TSDB_MAX_TAGS) { + TRY(TSDB_MAX_TAG_CONDITIONS) { pExprNode = exprTreeFromBinary(pArithExprInfo->base.arg[0].argValue.pz, pArithExprInfo->base.arg[0].argBytes); } CATCH( code ) { CLEANUP_EXECUTE(); @@ -5854,7 +5914,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList, pQInfo->tableqinfoGroupInfo.pGroupList = taosArrayInit(numOfGroups, POINTER_BYTES); pQInfo->tableqinfoGroupInfo.numOfTables = pTableGroupInfo->numOfTables; pQInfo->tableqinfoGroupInfo.map = taosHashInit(pTableGroupInfo->numOfTables, - taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false); + taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, false); } int tableIndex = 0; @@ -5875,8 +5935,8 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList, } for(int32_t j = 0; j < s; ++j) { - void* pTable = taosArrayGetP(pa, j); - STableId* id = TSDB_TABLEID(pTable); + STableKeyInfo* info = taosArrayGet(pa, j); + STableId* id = TSDB_TABLEID(info->pTable); STableIdInfo* pTableId = taosArraySearch(pTableIdList, id, compareTableIdInfo); if (pTableId != NULL ) { @@ -5886,10 +5946,11 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList, } void* buf = (char*)pQInfo->pBuf + index * sizeof(STableQueryInfo); - STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, pTable, window, buf); + 
STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, info->pTable, window, buf); if (item == NULL) { goto _cleanup; } + item->groupIndex = i; taosArrayPush(p1, &item); taosHashPut(pQInfo->tableqinfoGroupInfo.map, &id->tid, sizeof(id->tid), &item, POINTER_BYTES); @@ -5918,6 +5979,7 @@ _cleanup_query: taosArrayDestroy(pGroupbyExpr->columnInfo); free(pGroupbyExpr); } + taosTFree(pTagCols); for (int32_t i = 0; i < numOfOutput; ++i) { SExprInfo* pExprInfo = &pExprs[i]; @@ -5925,6 +5987,7 @@ _cleanup_query: tExprTreeDestroy(&pExprInfo->pExpr, NULL); } } + taosTFree(pExprs); _cleanup: @@ -6006,8 +6069,6 @@ static void freeQInfo(SQInfo *pQInfo) { } SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - setQueryKilled(pQInfo); - qDebug("QInfo:%p start to free QInfo", pQInfo); for (int32_t col = 0; col < pQuery->numOfOutput; ++col) { taosTFree(pQuery->sdata[col]); @@ -6212,7 +6273,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi STableIdInfo *id = taosArrayGet(pTableIdList, 0); qDebug("qmsg:%p query normal table, uid:%"PRId64", tid:%d", pQueryMsg, id->uid, id->tid); - if ((code = tsdbGetOneTableGroup(tsdb, id->uid, &tableGroupInfo)) != TSDB_CODE_SUCCESS) { + if ((code = tsdbGetOneTableGroup(tsdb, id->uid, pQueryMsg->window.skey, &tableGroupInfo)) != TSDB_CODE_SUCCESS) { goto _over; } } else if (TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_MULTITABLE_QUERY|TSDB_QUERY_TYPE_STABLE_QUERY)) { @@ -6229,8 +6290,9 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi } qDebug("qmsg:%p query stable, uid:%"PRId64", tid:%d", pQueryMsg, id->uid, id->tid); - code = tsdbQuerySTableByTagCond(tsdb, id->uid, tagCond, pQueryMsg->tagCondLen, pQueryMsg->tagNameRelType, tbnameCond, &tableGroupInfo, pGroupColIndex, - numOfGroupByCols); + code = tsdbQuerySTableByTagCond(tsdb, id->uid, pQueryMsg->window.skey, tagCond, pQueryMsg->tagCondLen, + pQueryMsg->tagNameRelType, tbnameCond, &tableGroupInfo, pGroupColIndex, numOfGroupByCols); + if (code != TSDB_CODE_SUCCESS) { qError("qmsg:%p failed to query stable, reason: %s", pQueryMsg, tstrerror(code)); goto _over; @@ -6241,11 +6303,11 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi goto _over; } - qDebug("qmsg:%p query on %zu tables in one group from client", pQueryMsg, tableGroupInfo.numOfTables); + qDebug("qmsg:%p query on %" PRIzu " tables in one group from client", pQueryMsg, tableGroupInfo.numOfTables); } int64_t el = taosGetTimestampUs() - st; - qDebug("qmsg:%p tag filter completed, numOfTables:%zu, elapsed time:%"PRId64"us", pQueryMsg, tableGroupInfo.numOfTables, el); + qDebug("qmsg:%p tag filter completed, numOfTables:%" PRIzu ", elapsed time:%"PRId64"us", pQueryMsg, tableGroupInfo.numOfTables, el); } else { assert(0); } @@ -6310,14 +6372,22 @@ static bool doBuildResCheck(SQInfo* pQInfo) { pthread_mutex_unlock(&pQInfo->lock); + // clear qhandle owner + assert(pQInfo->owner == taosGetPthreadId()); + pQInfo->owner = 0; + return buildRes; } bool qTableQuery(qinfo_t qinfo) { SQInfo *pQInfo = (SQInfo *)qinfo; + assert(pQInfo && pQInfo->signature == pQInfo); + int64_t threadId = taosGetPthreadId(); - if (pQInfo == NULL || pQInfo->signature != pQInfo) { - qDebug("QInfo:%p has been freed, no need to execute", pQInfo); + int64_t curOwner = 0; + if ((curOwner = atomic_val_compare_exchange_64(&pQInfo->owner, 0, threadId)) != 0) { + qError("QInfo:%p qhandle is now executed by thread:%p", pQInfo, (void*) curOwner); + pQInfo->code = TSDB_CODE_QRY_IN_EXEC; return 
false; } @@ -6356,7 +6426,7 @@ bool qTableQuery(qinfo_t qinfo) { if (IS_QUERY_KILLED(pQInfo)) { qDebug("QInfo:%p query is killed", pQInfo); } else if (pQuery->rec.rows == 0) { - qDebug("QInfo:%p over, %zu tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQuery->rec.total); + qDebug("QInfo:%p over, %" PRIzu " tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQuery->rec.total); } else { qDebug("QInfo:%p query paused, %" PRId64 " rows returned, numOfTotal:%" PRId64 " rows", pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows); @@ -6372,6 +6442,7 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex return TSDB_CODE_QRY_INVALID_QHANDLE; } + *buildRes = false; SQuery *pQuery = pQInfo->runtimeEnv.pQuery; if (IS_QUERY_KILLED(pQInfo)) { qDebug("QInfo:%p query is killed, code:%d", pQInfo, pQInfo->code); @@ -6395,34 +6466,6 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex return code; } -bool qHasMoreResultsToRetrieve(qinfo_t qinfo) { - SQInfo *pQInfo = (SQInfo *)qinfo; - - if (!isValidQInfo(pQInfo) || pQInfo->code != TSDB_CODE_SUCCESS) { - qDebug("QInfo:%p invalid qhandle or error occurs, abort query, code:%x", pQInfo, pQInfo->code); - return false; - } - - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - - bool ret = false; - if (Q_STATUS_EQUAL(pQuery->status, QUERY_OVER)) { - ret = false; - } else if (Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)) { - ret = true; - } else if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { - ret = true; - } else { - assert(0); - } - - if (ret) { - qDebug("QInfo:%p has more results waits for client retrieve", pQInfo); - } - - return ret; -} - int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *contLen, bool* continueExec) { SQInfo *pQInfo = (SQInfo *)qinfo; @@ -6448,38 +6491,44 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co (*pRsp)->numOfRows = htonl((int32_t)pQuery->rec.rows); - int32_t code = pQInfo->code; - if (code == TSDB_CODE_SUCCESS) { - (*pRsp)->offset = htobe64(pQuery->limit.offset); + if (pQInfo->code == TSDB_CODE_SUCCESS) { + (*pRsp)->offset = htobe64(pQuery->limit.offset); (*pRsp)->useconds = htobe64(pRuntimeEnv->summary.elapsedTime); } else { - (*pRsp)->useconds = 0; - (*pRsp)->offset = 0; + (*pRsp)->offset = 0; + (*pRsp)->useconds = htobe64(pRuntimeEnv->summary.elapsedTime); } (*pRsp)->precision = htons(pQuery->precision); - if (pQuery->rec.rows > 0 && code == TSDB_CODE_SUCCESS) { - code = doDumpQueryResult(pQInfo, (*pRsp)->data); + if (pQuery->rec.rows > 0 && pQInfo->code == TSDB_CODE_SUCCESS) { + doDumpQueryResult(pQInfo, (*pRsp)->data); } else { setQueryStatus(pQuery, QUERY_OVER); - code = pQInfo->code; } pQInfo->rspContext = NULL; - pQInfo->dataReady = QUERY_RESULT_NOT_READY; + pQInfo->dataReady = QUERY_RESULT_NOT_READY; if (IS_QUERY_KILLED(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER)) { - (*pRsp)->completed = 1; // notify no more result to client - } - - if (qHasMoreResultsToRetrieve(pQInfo)) { - *continueExec = true; - } else { // failed to dump result, free qhandle immediately *continueExec = false; - qKillQuery(pQInfo); + (*pRsp)->completed = 1; // notify no more result to client + } else { + *continueExec = true; + qDebug("QInfo:%p has more results waits for client retrieve", pQInfo); } - return code; + return pQInfo->code; +} + +int32_t qQueryCompleted(qinfo_t qinfo) { + SQInfo *pQInfo = (SQInfo 
*)qinfo; + + if (pQInfo == NULL || !isValidQInfo(pQInfo)) { + return TSDB_CODE_QRY_INVALID_QHANDLE; + } + + SQuery* pQuery = pQInfo->runtimeEnv.pQuery; + return IS_QUERY_KILLED(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER); } int32_t qKillQuery(qinfo_t qinfo) { @@ -6490,6 +6539,13 @@ int32_t qKillQuery(qinfo_t qinfo) { } setQueryKilled(pQInfo); + + // Wait for the query executing thread being stopped/ + // Once the query is stopped, the owner of qHandle will be cleared immediately. + while (pQInfo->owner != 0) { + taosMsleep(100); + } + return TSDB_CODE_SUCCESS; } @@ -6584,7 +6640,7 @@ static void buildTagQueryResult(SQInfo* pQInfo) { *(int64_t*) pQuery->sdata[0]->data = num; count = 1; - pQInfo->tableIndex = (int32_t)num; //set query completed + SET_STABLE_QUERY_OVER(pQInfo); qDebug("QInfo:%p create count(tbname) query, res:%d rows:1", pQInfo, count); } else { // return only the tags|table name etc. count = 0; @@ -6656,7 +6712,7 @@ void freeqinfoFn(void *qhandle) { } void* qOpenQueryMgmt(int32_t vgId) { - const int32_t REFRESH_HANDLE_INTERVAL = 30; // every 30 seconds, refresh handle pool + const int32_t REFRESH_HANDLE_INTERVAL = 60; // every 30 seconds, refresh handle pool char cacheName[128] = {0}; sprintf(cacheName, "qhandle_%d", vgId); @@ -6686,9 +6742,9 @@ void qQueryMgmtNotifyClosed(void* pQMgmt) { SQueryMgmt* pQueryMgmt = pQMgmt; qDebug("vgId:%d, set querymgmt closed, wait for all queries cancelled", pQueryMgmt->vgId); - pthread_mutex_lock(&pQueryMgmt->lock); +// pthread_mutex_lock(&pQueryMgmt->lock); pQueryMgmt->closed = true; - pthread_mutex_unlock(&pQueryMgmt->lock); +// pthread_mutex_unlock(&pQueryMgmt->lock); taosCacheRefresh(pQueryMgmt->qinfoPool, queryMgmtKillQueryFn); } @@ -6718,7 +6774,7 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { return NULL; } - const int32_t DEFAULT_QHANDLE_LIFE_SPAN = tsShellActivityTimer * 2; + const int32_t DEFAULT_QHANDLE_LIFE_SPAN = tsShellActivityTimer * 2 * 1000; SQueryMgmt *pQueryMgmt = pMgmt; if (pQueryMgmt->qinfoPool == NULL) { @@ -6726,16 +6782,16 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { return NULL; } - pthread_mutex_lock(&pQueryMgmt->lock); +// pthread_mutex_lock(&pQueryMgmt->lock); if (pQueryMgmt->closed) { - pthread_mutex_unlock(&pQueryMgmt->lock); +// pthread_mutex_unlock(&pQueryMgmt->lock); qError("QInfo:%p failed to add qhandle into cache, since qMgmt is colsing", (void *)qInfo); return NULL; } else { uint64_t handleVal = (uint64_t) qInfo; void** handle = taosCachePut(pQueryMgmt->qinfoPool, &handleVal, sizeof(int64_t), &qInfo, POINTER_BYTES, DEFAULT_QHANDLE_LIFE_SPAN); - pthread_mutex_unlock(&pQueryMgmt->lock); +// pthread_mutex_unlock(&pQueryMgmt->lock); return handle; } @@ -6758,7 +6814,6 @@ void** qAcquireQInfo(void* pMgmt, uint64_t key) { void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle) { SQueryMgmt *pQueryMgmt = pMgmt; - if (pQueryMgmt->qinfoPool == NULL) { return NULL; } diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index fcc17a0c9e..a147780144 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -12,15 +12,14 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ -#include "qExtbuffer.h" #include "os.h" +#include "qExtbuffer.h" #include "queryLog.h" #include "taos.h" #include "taosdef.h" #include "taosmsg.h" #include "tsqlfunction.h" #include "tulog.h" -#include "tutil.h" #define COLMODEL_GET_VAL(data, schema, allrow, rowId, colId) \ (data + (schema)->pFields[colId].offset * (allrow) + (rowId) * (schema)->pFields[colId].field.bytes) diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c index ac44feb576..d9fe67e1b7 100644 --- a/src/query/src/qFill.c +++ b/src/query/src/qFill.c @@ -42,19 +42,38 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_ pFillInfo->slidingUnit = slidingUnit; pFillInfo->pData = malloc(POINTER_BYTES * numOfCols); - - int32_t rowsize = 0; - for (int32_t i = 0; i < numOfCols; ++i) { - int32_t bytes = pFillInfo->pFillCol[i].col.bytes; - pFillInfo->pData[i] = calloc(1, bytes * capacity); - - rowsize += bytes; - } - if (numOfTags > 0) { - pFillInfo->pTags = calloc(1, pFillInfo->numOfTags * POINTER_BYTES + rowsize); + pFillInfo->pTags = calloc(pFillInfo->numOfTags, sizeof(SFillTagColInfo)); + for(int32_t i = 0; i < numOfTags; ++i) { + pFillInfo->pTags[i].col.colId = -2; + } } - + + int32_t rowsize = 0; + int32_t k = 0; + for (int32_t i = 0; i < numOfCols; ++i) { + SFillColInfo* pColInfo = &pFillInfo->pFillCol[i]; + pFillInfo->pData[i] = calloc(1, pColInfo->col.bytes * capacity); + + if (pColInfo->flag == TSDB_COL_TAG) { + bool exists = false; + for(int32_t j = 0; j < k; ++j) { + if (pFillInfo->pTags[j].col.colId == pColInfo->col.colId) { + exists = true; + break; + } + } + + if (!exists) { + pFillInfo->pTags[k].col.colId = pColInfo->col.colId; + pFillInfo->pTags[k].tagVal = calloc(1, pColInfo->col.bytes); + + k += 1; + } + } + rowsize += pColInfo->col.bytes; + } + pFillInfo->rowSize = rowsize; pFillInfo->capacityInRows = capacity; @@ -129,16 +148,21 @@ void taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, tFilePage** pInput) void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, tFilePage* pInput) { assert(pFillInfo->numOfRows == pInput->num); - int32_t t = 0; - + for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - - char* s = pInput->data + pCol->col.offset * pInput->num; - memcpy(pFillInfo->pData[i], s, pInput->num * pCol->col.bytes); - - if (pCol->flag == TSDB_COL_TAG) { // copy the tag value - memcpy(pFillInfo->pTags[t++], pFillInfo->pData[i], pCol->col.bytes); + + char* data = pInput->data + pCol->col.offset * pInput->num; + memcpy(pFillInfo->pData[i], data, pInput->num * pCol->col.bytes); + + if (pCol->flag == TSDB_COL_TAG) { // copy the tag value to tag value buffer + for (int32_t j = 0; j < pFillInfo->numOfTags; ++j) { + SFillTagColInfo* pTag = &pFillInfo->pTags[j]; + if (pTag->col.colId == pCol->col.colId) { + memcpy(pTag->tagVal, data, pCol->col.bytes); + break; + } + } } } } @@ -224,22 +248,31 @@ int taosDoLinearInterpolation(int32_t type, SPoint* point1, SPoint* point2, SPoi return 0; } -static void setTagsValue(SFillInfo* pColInfo, tFilePage** data, char** pTags, int32_t start, int32_t num) { - for (int32_t j = 0, i = start; i < pColInfo->numOfCols; ++i, ++j) { - SFillColInfo* pCol = &pColInfo->pFillCol[i]; - - char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, num); - assignVal(val1, pTags[j], pCol->col.bytes, pCol->col.type); +static void setTagsValue(SFillInfo* pFillInfo, tFilePage** data, int32_t num) { + for(int32_t j = 0; j < pFillInfo->numOfCols; ++j) { + SFillColInfo* pCol = &pFillInfo->pFillCol[j]; + if 
(pCol->flag == TSDB_COL_NORMAL) { + continue; + } + + char* val1 = elePtrAt(data[j]->data, pCol->col.bytes, num); + + for(int32_t i = 0; i < pFillInfo->numOfTags; ++i) { + SFillTagColInfo* pTag = &pFillInfo->pTags[i]; + if (pTag->col.colId == pCol->col.colId) { + assignVal(val1, pTag->tagVal, pCol->col.bytes, pCol->col.type); + break; + } + } } } -static void doInterpoResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* num, char** srcData, - int64_t ts, char** pTags, bool outOfBound) { +static void doFillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* num, char** srcData, int64_t ts, + bool outOfBound) { char* prevValues = pFillInfo->prevValues; char* nextValues = pFillInfo->nextValues; SPoint point1, point2, point; - int32_t step = GET_FORWARD_DIRECTION_FACTOR(pFillInfo->order); char* val = elePtrAt(data[0]->data, TSDB_KEYSIZE, *num); @@ -279,7 +312,7 @@ static void doInterpoResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* } } - setTagsValue(pFillInfo, data, pTags, numOfValCols, *num); + setTagsValue(pFillInfo, data, *num); } else if (pFillInfo->fillType == TSDB_FILL_LINEAR) { // TODO : linear interpolation supports NULL value if (prevValues != NULL && !outOfBound) { @@ -304,7 +337,7 @@ static void doInterpoResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* taosDoLinearInterpolation(type, &point1, &point2, &point); } - setTagsValue(pFillInfo, data, pTags, numOfValCols, *num); + setTagsValue(pFillInfo, data, *num); } else { for (int32_t i = 1; i < numOfValCols; ++i) { @@ -319,7 +352,7 @@ static void doInterpoResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* } } - setTagsValue(pFillInfo, data, pTags, numOfValCols, *num); + setTagsValue(pFillInfo, data, *num); } } else { /* fill the default value */ @@ -330,7 +363,7 @@ static void doInterpoResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* assignVal(val1, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type); } - setTagsValue(pFillInfo, data, pTags, numOfValCols, *num); + setTagsValue(pFillInfo, data, *num); } pFillInfo->start += (pFillInfo->slidingTime * step); @@ -364,17 +397,14 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu char** nextValues = &pFillInfo->nextValues; int32_t numOfTags = pFillInfo->numOfTags; - char** pTags = pFillInfo->pTags; - int32_t step = GET_FORWARD_DIRECTION_FACTOR(pFillInfo->order); - if (numOfRows == 0) { /* * These data are generated according to fill strategy, since the current timestamp is out of time window of * real result set. Note that we need to keep the direct previous result rows, to generated the filled data. 
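The tag values cached in pFillInfo->pTags are attached to every generated row via setTagsValue().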
*/ while (num < outputRows) { - doInterpoResultImpl(pFillInfo, data, &num, srcData, pFillInfo->start, pTags, true); + doFillResultImpl(pFillInfo, data, &num, srcData, pFillInfo->start, true); } pFillInfo->numOfTotal += pFillInfo->numOfCurrent; @@ -401,12 +431,11 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu while (((pFillInfo->start < ts && FILL_IS_ASC_FILL(pFillInfo)) || (pFillInfo->start > ts && !FILL_IS_ASC_FILL(pFillInfo))) && num < outputRows) { - doInterpoResultImpl(pFillInfo, data, &num, srcData, ts, pTags, false); + doFillResultImpl(pFillInfo, data, &num, srcData, ts, false); } /* output buffer is full, abort */ - if ((num == outputRows && FILL_IS_ASC_FILL(pFillInfo)) || - (num < 0 && !FILL_IS_ASC_FILL(pFillInfo))) { + if ((num == outputRows && FILL_IS_ASC_FILL(pFillInfo)) || (num < 0 && !FILL_IS_ASC_FILL(pFillInfo))) { pFillInfo->numOfTotal += pFillInfo->numOfCurrent; return outputRows; } @@ -415,10 +444,12 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu initBeforeAfterDataBuf(pFillInfo, prevValues); // assign rows to dst buffer - int32_t i = 0; - for (; i < pFillInfo->numOfCols - numOfTags; ++i) { + for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - + if (pCol->flag == TSDB_COL_TAG) { + continue; + } + char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, num); char* src = elePtrAt(srcData[i], pCol->col.bytes, pFillInfo->rowIdx); @@ -440,10 +471,12 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu } // set the tag value for final result - setTagsValue(pFillInfo, data, pTags, pFillInfo->numOfCols - numOfTags, num); + setTagsValue(pFillInfo, data, num); pFillInfo->start += (pFillInfo->slidingTime * step); pFillInfo->rowIdx += 1; + + pFillInfo->numOfCurrent +=1; num += 1; } diff --git a/src/query/src/qResultbuf.c b/src/query/src/qResultbuf.c index a300cc6382..51ff892b33 100644 --- a/src/query/src/qResultbuf.c +++ b/src/query/src/qResultbuf.c @@ -6,7 +6,8 @@ #include "queryLog.h" #include "taoserror.h" -#define GET_DATA_PAYLOAD(_p) ((tFilePage*)(((char*)(_p)->pData) + POINTER_BYTES)) +#define GET_DATA_PAYLOAD(_p) ((char *)(_p)->pData + POINTER_BYTES) +#define NO_IN_MEM_AVAILABLE_PAGES(_b) (listNEles((_b)->lruList) >= (_b)->inMemPages) int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t rowSize, int32_t pagesize, int32_t inMemBufSize, const void* handle) { @@ -25,7 +26,7 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t ro pResBuf->comp = true; pResBuf->file = NULL; pResBuf->handle = handle; - pResBuf->fileSize = 0; + pResBuf->fileSize = 0; // at least more than 2 pages must be in memory assert(inMemBufSize >= pagesize * 2); @@ -34,9 +35,9 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t ro pResBuf->lruList = tdListNew(POINTER_BYTES); // init id hash table - pResBuf->groupSet = taosHashInit(10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false); + pResBuf->groupSet = taosHashInit(10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, false); pResBuf->assistBuf = malloc(pResBuf->pageSize + 2); // EXTRA BYTES - pResBuf->all = taosHashInit(10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false); + pResBuf->all = taosHashInit(10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, false); char path[PATH_MAX] = {0}; taosGetTmpfilePath("qbuf", path); @@ -186,8 +187,6 @@ static char* loadPageFromDisk(SDiskbasedResultBuf* pResultBuf, 
SPageInfo* pg) { return (char*)GET_DATA_PAYLOAD(pg); } -#define NO_AVAILABLE_PAGES(_b) ((_b)->numOfPages >= (_b)->inMemPages) - static SIDList addNewGroup(SDiskbasedResultBuf* pResultBuf, int32_t groupId) { assert(taosHashGet(pResultBuf->groupSet, (const char*) &groupId, sizeof(int32_t)) == NULL); @@ -211,11 +210,12 @@ static SPageInfo* registerPage(SDiskbasedResultBuf* pResultBuf, int32_t groupId, pResultBuf->numOfPages += 1; SPageInfo* ppi = malloc(sizeof(SPageInfo));//{ .info = PAGE_INFO_INITIALIZER, .pageId = pageId, .pn = NULL}; - ppi->info = PAGE_INFO_INITIALIZER; - ppi->pageId = pageId; - ppi->pData = NULL; - ppi->pn = NULL; - ppi->used = true; + + ppi->pageId = pageId; + ppi->pData = NULL; + ppi->info = PAGE_INFO_INITIALIZER; + ppi->used = true; + ppi->pn = NULL; return *(SPageInfo**) taosArrayPush(list, &ppi); } @@ -246,6 +246,8 @@ static char* evicOneDataPage(SDiskbasedResultBuf* pResultBuf) { // all pages are referenced by user, try to allocate new space if (pn == NULL) { int32_t prev = pResultBuf->inMemPages; + + // increase by 50% of previous mem pages pResultBuf->inMemPages = (int32_t)(pResultBuf->inMemPages * 1.5f); qWarn("%p in memory buf page not sufficient, expand from %d to %d, page size:%d", pResultBuf, prev, @@ -281,7 +283,7 @@ tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32 pResultBuf->statis.getPages += 1; char* availablePage = NULL; - if (NO_AVAILABLE_PAGES(pResultBuf)) { + if (NO_IN_MEM_AVAILABLE_PAGES(pResultBuf)) { availablePage = evicOneDataPage(pResultBuf); } @@ -311,7 +313,7 @@ tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32 ((void**)pi->pData)[0] = pi; pi->used = true; - return GET_DATA_PAYLOAD(pi); + return (void *)(GET_DATA_PAYLOAD(pi)); } tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) { @@ -325,7 +327,7 @@ tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) { // no need to update the LRU list if only one page exists if (pResultBuf->numOfPages == 1) { (*pi)->used = true; - return GET_DATA_PAYLOAD(*pi); + return (void *)(GET_DATA_PAYLOAD(*pi)); } SPageInfo** pInfo = (SPageInfo**) ((*pi)->pn->data); @@ -334,13 +336,13 @@ tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) { lruListMoveToFront(pResultBuf->lruList, (*pi)); (*pi)->used = true; - return GET_DATA_PAYLOAD(*pi); + return (void *)(GET_DATA_PAYLOAD(*pi)); } else { // not in memory assert((*pi)->pData == NULL && (*pi)->pn == NULL && (*pi)->info.length >= 0 && (*pi)->info.offset >= 0); char* availablePage = NULL; - if (NO_AVAILABLE_PAGES(pResultBuf)) { + if (NO_IN_MEM_AVAILABLE_PAGES(pResultBuf)) { availablePage = evicOneDataPage(pResultBuf); } @@ -353,8 +355,10 @@ tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) { ((void**)((*pi)->pData))[0] = (*pi); lruListPushFront(pResultBuf->lruList, *pi); + (*pi)->used = true; + loadPageFromDisk(pResultBuf, *pi); - return GET_DATA_PAYLOAD(*pi); + return (void *)(GET_DATA_PAYLOAD(*pi)); } } @@ -396,12 +400,13 @@ void destroyResultBuf(SDiskbasedResultBuf* pResultBuf) { } if (pResultBuf->file != NULL) { - qDebug("QInfo:%p disk-based output buffer closed, total:%" PRId64 " bytes, file size:%"PRId64" bytes", - pResultBuf->handle, pResultBuf->totalBufSize, pResultBuf->fileSize); + qDebug("QInfo:%p res output buffer closed, total:%" PRId64 " bytes, inmem size:%dbytes, file size:%"PRId64" bytes", + pResultBuf->handle, pResultBuf->totalBufSize, listNEles(pResultBuf->lruList) * pResultBuf->pageSize, + pResultBuf->fileSize); 
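/* the in-memory footprint is reported as listNEles(lruList) * pageSize before the backing file is closed */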
fclose(pResultBuf->file); } else { - qDebug("QInfo:%p disk-based output buffer closed, total:%" PRId64 " bytes, no file created", pResultBuf->handle, + qDebug("QInfo:%p res output buffer closed, total:%" PRId64 " bytes, no file created", pResultBuf->handle, pResultBuf->totalBufSize); } diff --git a/src/query/src/qTokenizer.c b/src/query/src/qTokenizer.c index d730b3cab0..227aded5f1 100644 --- a/src/query/src/qTokenizer.c +++ b/src/query/src/qTokenizer.c @@ -25,7 +25,7 @@ // All the keywords of the SQL language are stored in a hash table typedef struct SKeyword { const char* name; // The keyword name - uint16_t type; // type + uint16_t type; // type uint8_t len; // length } SKeyword; @@ -257,7 +257,7 @@ static void* KeywordHashTable = NULL; static void doInitKeywordsTable(void) { int numOfEntries = tListLen(keywordTable); - KeywordHashTable = taosHashInit(numOfEntries, MurmurHash3_32, false); + KeywordHashTable = taosHashInit(numOfEntries, MurmurHash3_32, true, false); for (int32_t i = 0; i < numOfEntries; i++) { keywordTable[i].len = (uint8_t)strlen(keywordTable[i].name); void* ptr = &keywordTable[i]; diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c index ab7678a22b..25eb33ff7d 100644 --- a/src/query/src/qTsbuf.c +++ b/src/query/src/qTsbuf.c @@ -72,6 +72,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { // invalid file if (header.magic != TS_COMP_FILE_MAGIC) { + tsBufDestroy(pTSBuf); return NULL; } diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 7175262270..a01eb33ec7 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -37,7 +37,7 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun pWindowResInfo->type = type; _hash_fn_t fn = taosGetDefaultHashFunction(type); - pWindowResInfo->hashList = taosHashInit(threshold, fn, false); + pWindowResInfo->hashList = taosHashInit(threshold, fn, true, false); if (pWindowResInfo->hashList == NULL) { return TSDB_CODE_QRY_OUT_OF_MEMORY; } @@ -46,12 +46,17 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun pWindowResInfo->size = 0; pWindowResInfo->prevSKey = TSKEY_INITIAL_VAL; + pRuntimeEnv->summary.internalSupSize += sizeof(SWindowResult) * threshold; + // use the pointer arraylist pWindowResInfo->pResult = calloc(threshold, sizeof(SWindowResult)); if (pWindowResInfo->pResult == NULL) { return TSDB_CODE_QRY_OUT_OF_MEMORY; } + pRuntimeEnv->summary.internalSupSize += sizeof(SWindowResult) * threshold; + pRuntimeEnv->summary.internalSupSize += (pRuntimeEnv->pQuery->numOfOutput * sizeof(SResultInfo) + pRuntimeEnv->interBufSize) * pWindowResInfo->capacity; + for (int32_t i = 0; i < pWindowResInfo->capacity; ++i) { int32_t code = createQueryResultInfo(pRuntimeEnv->pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, pRuntimeEnv->interBufSize); if (code != TSDB_CODE_SUCCESS) { @@ -104,7 +109,7 @@ void resetTimeWindowInfo(SQueryRuntimeEnv *pRuntimeEnv, SWindowResInfo *pWindowR pWindowResInfo->size = 0; _hash_fn_t fn = taosGetDefaultHashFunction(pWindowResInfo->type); - pWindowResInfo->hashList = taosHashInit(pWindowResInfo->capacity, fn, false); + pWindowResInfo->hashList = taosHashInit(pWindowResInfo->capacity, fn, true, false); pWindowResInfo->startTime = TSKEY_INITIAL_VAL; pWindowResInfo->prevSKey = TSKEY_INITIAL_VAL; diff --git a/src/query/src/sql.c b/src/query/src/sql.c index fe26f61725..75ef2f3218 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -126,17 +126,17 @@ typedef union { #define ParseARG_FETCH SSqlInfo* 
pInfo = yypParser->pInfo #define ParseARG_STORE yypParser->pInfo = pInfo #define YYFALLBACK 1 -#define YYNSTATE 243 -#define YYNRULE 226 +#define YYNSTATE 245 +#define YYNRULE 228 #define YYNTOKEN 207 -#define YY_MAX_SHIFT 242 -#define YY_MIN_SHIFTREDUCE 405 -#define YY_MAX_SHIFTREDUCE 630 -#define YY_ERROR_ACTION 631 -#define YY_ACCEPT_ACTION 632 -#define YY_NO_ACTION 633 -#define YY_MIN_REDUCE 634 -#define YY_MAX_REDUCE 859 +#define YY_MAX_SHIFT 244 +#define YY_MIN_SHIFTREDUCE 407 +#define YY_MAX_SHIFTREDUCE 634 +#define YY_ERROR_ACTION 635 +#define YY_ACCEPT_ACTION 636 +#define YY_NO_ACTION 637 +#define YY_MIN_REDUCE 638 +#define YY_MAX_REDUCE 865 /************* End control #defines *******************************************/ /* Define the yytestcase() macro to be a no-op if is not already defined @@ -202,64 +202,64 @@ typedef union { ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (552) +#define YY_ACTTAB_COUNT (554) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 103, 446, 135, 673, 632, 242, 126, 515, 135, 447, - /* 10 */ 135, 158, 847, 41, 43, 11, 35, 36, 846, 157, - /* 20 */ 847, 29, 134, 446, 197, 39, 37, 40, 38, 155, - /* 30 */ 103, 447, 139, 34, 33, 217, 216, 32, 31, 30, - /* 40 */ 41, 43, 767, 35, 36, 32, 31, 30, 29, 756, - /* 50 */ 446, 197, 39, 37, 40, 38, 182, 802, 447, 192, - /* 60 */ 34, 33, 21, 21, 32, 31, 30, 406, 407, 408, - /* 70 */ 409, 410, 411, 412, 413, 414, 415, 416, 417, 241, - /* 80 */ 41, 43, 228, 35, 36, 194, 843, 58, 29, 21, - /* 90 */ 842, 197, 39, 37, 40, 38, 166, 167, 753, 753, - /* 100 */ 34, 33, 168, 56, 32, 31, 30, 778, 841, 16, - /* 110 */ 235, 208, 234, 233, 207, 206, 205, 232, 204, 231, - /* 120 */ 230, 229, 203, 215, 151, 753, 732, 586, 719, 720, - /* 130 */ 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, - /* 140 */ 731, 43, 8, 35, 36, 61, 113, 21, 29, 153, - /* 150 */ 240, 197, 39, 37, 40, 38, 239, 238, 95, 775, - /* 160 */ 34, 33, 165, 99, 32, 31, 30, 169, 35, 36, - /* 170 */ 214, 213, 592, 29, 595, 103, 197, 39, 37, 40, - /* 180 */ 38, 220, 756, 753, 236, 34, 33, 175, 12, 32, - /* 190 */ 31, 30, 162, 599, 179, 178, 590, 767, 593, 103, - /* 200 */ 596, 161, 162, 599, 756, 17, 590, 148, 593, 152, - /* 210 */ 596, 154, 26, 88, 87, 142, 185, 567, 568, 16, - /* 220 */ 235, 147, 234, 233, 159, 160, 219, 232, 196, 231, - /* 230 */ 230, 229, 801, 76, 159, 160, 162, 599, 547, 228, - /* 240 */ 590, 3, 593, 17, 596, 74, 78, 83, 86, 77, - /* 250 */ 26, 39, 37, 40, 38, 80, 59, 754, 21, 34, - /* 260 */ 33, 544, 60, 32, 31, 30, 18, 140, 159, 160, - /* 270 */ 181, 737, 539, 736, 27, 734, 735, 150, 682, 184, - /* 280 */ 738, 126, 740, 741, 739, 674, 531, 141, 126, 528, - /* 290 */ 42, 529, 558, 530, 752, 591, 46, 594, 34, 33, - /* 300 */ 42, 598, 32, 31, 30, 116, 117, 68, 64, 67, - /* 310 */ 588, 598, 143, 50, 73, 72, 597, 170, 171, 130, - /* 320 */ 128, 91, 90, 89, 98, 47, 597, 144, 559, 616, - /* 330 */ 51, 26, 14, 13, 42, 145, 600, 521, 520, 201, - /* 340 */ 13, 46, 22, 22, 48, 598, 589, 10, 9, 535, - /* 350 */ 533, 536, 534, 85, 84, 146, 137, 133, 856, 138, - /* 360 */ 597, 136, 755, 812, 811, 163, 808, 807, 164, 777, - /* 370 */ 747, 218, 794, 100, 793, 769, 114, 115, 26, 684, - /* 380 */ 112, 202, 131, 183, 24, 211, 681, 212, 855, 532, - /* 390 */ 70, 854, 93, 852, 118, 702, 554, 25, 23, 132, - /* 400 */ 671, 79, 52, 186, 669, 81, 82, 667, 666, 172, - /* 410 */ 127, 664, 190, 663, 662, 661, 660, 652, 129, 658, - /* 420 */ 49, 656, 654, 766, 
781, 782, 795, 104, 195, 44, - /* 430 */ 193, 191, 189, 187, 210, 105, 75, 28, 221, 199, - /* 440 */ 222, 223, 53, 225, 224, 149, 226, 62, 65, 703, - /* 450 */ 227, 237, 630, 173, 174, 629, 177, 665, 628, 119, - /* 460 */ 176, 92, 121, 125, 120, 751, 122, 123, 659, 124, - /* 470 */ 108, 106, 107, 109, 110, 111, 94, 1, 2, 621, - /* 480 */ 180, 184, 541, 55, 57, 555, 101, 156, 188, 198, - /* 490 */ 19, 63, 5, 560, 102, 4, 6, 601, 20, 15, - /* 500 */ 7, 488, 484, 200, 482, 481, 480, 477, 450, 209, - /* 510 */ 66, 45, 22, 69, 71, 517, 516, 514, 471, 54, - /* 520 */ 469, 461, 467, 463, 465, 459, 457, 487, 486, 485, - /* 530 */ 483, 479, 478, 476, 46, 448, 421, 419, 634, 633, - /* 540 */ 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, - /* 550 */ 96, 97, + /* 0 */ 105, 448, 137, 677, 636, 244, 128, 517, 137, 449, + /* 10 */ 137, 160, 853, 41, 43, 11, 35, 36, 852, 159, + /* 20 */ 853, 29, 136, 448, 199, 39, 37, 40, 38, 157, + /* 30 */ 105, 449, 141, 34, 33, 219, 218, 32, 31, 30, + /* 40 */ 41, 43, 771, 35, 36, 32, 31, 30, 29, 760, + /* 50 */ 448, 199, 39, 37, 40, 38, 184, 808, 449, 194, + /* 60 */ 34, 33, 21, 21, 32, 31, 30, 408, 409, 410, + /* 70 */ 411, 412, 413, 414, 415, 416, 417, 418, 419, 243, + /* 80 */ 41, 43, 230, 35, 36, 196, 849, 60, 29, 21, + /* 90 */ 848, 199, 39, 37, 40, 38, 168, 169, 757, 757, + /* 100 */ 34, 33, 170, 56, 32, 31, 30, 782, 847, 16, + /* 110 */ 237, 210, 236, 235, 209, 208, 207, 234, 206, 233, + /* 120 */ 232, 231, 205, 217, 153, 757, 736, 590, 723, 724, + /* 130 */ 725, 726, 727, 728, 729, 730, 731, 732, 733, 734, + /* 140 */ 735, 43, 8, 35, 36, 63, 115, 21, 29, 155, + /* 150 */ 242, 199, 39, 37, 40, 38, 241, 240, 97, 779, + /* 160 */ 34, 33, 167, 101, 32, 31, 30, 171, 35, 36, + /* 170 */ 216, 215, 596, 29, 599, 105, 199, 39, 37, 40, + /* 180 */ 38, 222, 760, 757, 238, 34, 33, 177, 12, 32, + /* 190 */ 31, 30, 164, 603, 181, 180, 594, 771, 597, 105, + /* 200 */ 600, 163, 164, 603, 760, 17, 594, 150, 597, 154, + /* 210 */ 600, 156, 26, 90, 89, 144, 187, 571, 572, 16, + /* 220 */ 237, 149, 236, 235, 161, 162, 221, 234, 198, 233, + /* 230 */ 232, 231, 807, 78, 161, 162, 164, 603, 549, 230, + /* 240 */ 594, 3, 597, 17, 600, 76, 80, 85, 88, 79, + /* 250 */ 26, 39, 37, 40, 38, 82, 61, 758, 21, 34, + /* 260 */ 33, 546, 62, 32, 31, 30, 18, 142, 161, 162, + /* 270 */ 183, 741, 541, 740, 27, 738, 739, 152, 686, 186, + /* 280 */ 742, 128, 744, 745, 743, 678, 533, 143, 128, 530, + /* 290 */ 42, 531, 562, 532, 756, 595, 46, 598, 34, 33, + /* 300 */ 42, 602, 32, 31, 30, 118, 119, 70, 66, 69, + /* 310 */ 592, 602, 145, 50, 75, 74, 601, 172, 173, 132, + /* 320 */ 130, 93, 92, 91, 100, 47, 601, 146, 563, 620, + /* 330 */ 51, 26, 14, 13, 42, 147, 604, 523, 522, 203, + /* 340 */ 13, 46, 22, 22, 48, 602, 593, 10, 9, 537, + /* 350 */ 535, 538, 536, 87, 86, 148, 139, 135, 862, 140, + /* 360 */ 601, 138, 759, 818, 817, 165, 814, 813, 166, 781, + /* 370 */ 751, 220, 800, 786, 788, 773, 102, 799, 26, 116, + /* 380 */ 114, 117, 688, 185, 204, 133, 24, 213, 685, 534, + /* 390 */ 214, 861, 95, 72, 860, 858, 558, 120, 706, 25, + /* 400 */ 23, 134, 52, 188, 675, 81, 673, 83, 84, 671, + /* 410 */ 670, 174, 192, 129, 668, 667, 666, 665, 664, 656, + /* 420 */ 49, 131, 662, 660, 658, 770, 57, 58, 801, 44, + /* 430 */ 197, 195, 193, 191, 189, 28, 106, 212, 77, 223, + /* 440 */ 224, 225, 226, 201, 53, 227, 228, 229, 239, 64, + /* 450 */ 67, 634, 151, 175, 176, 633, 178, 179, 632, 669, + /* 460 */ 186, 625, 94, 96, 123, 127, 2, 122, 707, 755, + /* 470 */ 121, 124, 125, 111, 107, 108, 126, 109, 110, 
112, + /* 480 */ 663, 113, 182, 1, 543, 55, 59, 559, 103, 158, + /* 490 */ 564, 5, 190, 104, 6, 65, 490, 605, 4, 19, + /* 500 */ 20, 15, 200, 7, 202, 486, 484, 483, 482, 479, + /* 510 */ 452, 211, 68, 45, 22, 71, 73, 519, 518, 516, + /* 520 */ 54, 473, 471, 463, 469, 465, 467, 461, 459, 489, + /* 530 */ 488, 487, 485, 481, 480, 478, 46, 450, 423, 421, + /* 540 */ 638, 637, 637, 637, 637, 637, 637, 637, 637, 637, + /* 550 */ 637, 637, 98, 99, }; static const YYCODETYPE yy_lookahead[] = { /* 0 */ 211, 1, 262, 215, 208, 209, 218, 5, 262, 9, @@ -299,25 +299,25 @@ static const YYCODETYPE yy_lookahead[] = { /* 340 */ 104, 104, 104, 104, 123, 110, 37, 129, 130, 5, /* 350 */ 5, 7, 7, 72, 73, 262, 262, 262, 248, 262, /* 360 */ 125, 262, 248, 243, 243, 243, 243, 243, 243, 211, - /* 370 */ 244, 243, 269, 211, 269, 246, 211, 211, 106, 211, + /* 370 */ 244, 243, 269, 211, 211, 246, 211, 269, 106, 211, /* 380 */ 250, 211, 211, 246, 211, 211, 211, 211, 211, 105, /* 390 */ 211, 211, 59, 211, 211, 211, 110, 211, 211, 211, /* 400 */ 211, 211, 120, 265, 211, 211, 211, 211, 211, 211, /* 410 */ 211, 211, 265, 211, 211, 211, 211, 211, 211, 211, - /* 420 */ 122, 211, 211, 259, 212, 212, 212, 258, 114, 119, - /* 430 */ 118, 113, 112, 111, 75, 257, 84, 124, 83, 212, - /* 440 */ 49, 80, 212, 53, 82, 212, 81, 216, 216, 226, - /* 450 */ 79, 75, 5, 136, 5, 5, 5, 212, 5, 225, - /* 460 */ 136, 213, 220, 219, 224, 246, 223, 221, 212, 222, - /* 470 */ 254, 256, 255, 253, 252, 251, 213, 217, 214, 87, - /* 480 */ 127, 107, 100, 108, 104, 100, 99, 1, 99, 101, - /* 490 */ 104, 72, 115, 100, 99, 99, 115, 100, 104, 99, - /* 500 */ 99, 9, 5, 101, 5, 5, 5, 5, 76, 15, - /* 510 */ 72, 16, 104, 130, 130, 5, 5, 100, 5, 99, - /* 520 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - /* 530 */ 5, 5, 5, 5, 104, 76, 59, 58, 0, 273, - /* 540 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 550 */ 21, 21, 273, 273, 273, 273, 273, 273, 273, 273, + /* 420 */ 122, 211, 211, 211, 211, 259, 212, 212, 212, 119, + /* 430 */ 114, 118, 113, 112, 111, 124, 258, 75, 84, 83, + /* 440 */ 49, 80, 82, 212, 212, 53, 81, 79, 75, 216, + /* 450 */ 216, 5, 212, 136, 5, 5, 136, 5, 5, 212, + /* 460 */ 107, 87, 213, 213, 220, 219, 214, 224, 226, 246, + /* 470 */ 225, 223, 221, 253, 257, 256, 222, 255, 254, 252, + /* 480 */ 212, 251, 127, 217, 100, 108, 104, 100, 99, 1, + /* 490 */ 100, 115, 99, 99, 115, 72, 9, 100, 99, 104, + /* 500 */ 104, 99, 101, 99, 101, 5, 5, 5, 5, 5, + /* 510 */ 76, 15, 72, 16, 104, 130, 130, 5, 5, 100, + /* 520 */ 99, 5, 5, 5, 5, 5, 5, 5, 5, 5, + /* 530 */ 5, 5, 5, 5, 5, 5, 104, 76, 59, 58, + /* 540 */ 0, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 550 */ 273, 273, 21, 21, 273, 273, 273, 273, 273, 273, /* 560 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, /* 570 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, /* 580 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, @@ -337,83 +337,84 @@ static const YYCODETYPE yy_lookahead[] = { /* 720 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, /* 730 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, /* 740 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 750 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 750 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, + /* 760 */ 273, }; -#define YY_SHIFT_COUNT (242) +#define YY_SHIFT_COUNT (244) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (538) +#define YY_SHIFT_MAX (540) static const unsigned short int yy_shift_ofst[] = { /* 0 */ 144, 24, 134, 191, 235, 49, 49, 49, 49, 49, /* 10 */ 49, 0, 22, 235, 284, 284, 284, 106, 
49, 49, - /* 20 */ 49, 49, 49, 161, 4, 4, 552, 201, 235, 235, + /* 20 */ 49, 49, 49, 161, 4, 4, 554, 201, 235, 235, /* 30 */ 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, /* 40 */ 235, 235, 235, 235, 235, 284, 284, 2, 2, 2, - /* 50 */ 2, 2, 2, 43, 2, 225, 49, 49, 101, 101, - /* 60 */ 157, 49, 49, 49, 49, 49, 49, 49, 49, 49, + /* 50 */ 2, 2, 2, 43, 2, 225, 49, 49, 49, 49, + /* 60 */ 101, 101, 157, 49, 49, 49, 49, 49, 49, 49, /* 70 */ 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, /* 80 */ 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, - /* 90 */ 49, 49, 49, 49, 49, 49, 49, 49, 272, 333, - /* 100 */ 333, 286, 286, 333, 282, 298, 310, 314, 312, 318, - /* 110 */ 320, 322, 313, 272, 333, 333, 359, 359, 333, 352, - /* 120 */ 355, 391, 361, 362, 390, 365, 371, 333, 376, 333, - /* 130 */ 376, 552, 552, 27, 67, 67, 67, 127, 152, 226, - /* 140 */ 226, 226, 181, 265, 265, 265, 265, 241, 255, 39, - /* 150 */ 60, 8, 8, 96, 172, 192, 228, 229, 236, 167, - /* 160 */ 290, 309, 142, 221, 209, 237, 238, 239, 185, 218, - /* 170 */ 344, 345, 281, 447, 317, 449, 450, 324, 451, 453, - /* 180 */ 392, 353, 374, 382, 375, 380, 385, 387, 486, 389, - /* 190 */ 393, 395, 386, 377, 394, 381, 397, 396, 400, 388, - /* 200 */ 401, 402, 419, 492, 497, 499, 500, 501, 502, 432, - /* 210 */ 494, 438, 495, 383, 384, 408, 510, 511, 417, 420, - /* 220 */ 408, 513, 515, 516, 517, 518, 519, 520, 521, 522, - /* 230 */ 523, 524, 525, 526, 527, 528, 430, 459, 529, 530, - /* 240 */ 477, 479, 538, + /* 90 */ 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, + /* 100 */ 272, 333, 333, 286, 286, 333, 282, 298, 310, 316, + /* 110 */ 313, 319, 321, 323, 311, 272, 333, 333, 362, 362, + /* 120 */ 333, 354, 356, 391, 361, 360, 392, 365, 368, 333, + /* 130 */ 373, 333, 373, 554, 554, 27, 67, 67, 67, 127, + /* 140 */ 152, 226, 226, 226, 181, 265, 265, 265, 265, 241, + /* 150 */ 255, 39, 60, 8, 8, 96, 172, 192, 228, 229, + /* 160 */ 236, 167, 290, 309, 142, 221, 209, 237, 238, 239, + /* 170 */ 185, 218, 344, 345, 281, 446, 317, 449, 450, 320, + /* 180 */ 452, 453, 374, 355, 353, 384, 377, 382, 387, 389, + /* 190 */ 488, 393, 390, 394, 395, 376, 396, 379, 397, 399, + /* 200 */ 402, 401, 404, 403, 423, 487, 500, 501, 502, 503, + /* 210 */ 504, 434, 496, 440, 497, 385, 386, 410, 512, 513, + /* 220 */ 419, 421, 410, 516, 517, 518, 519, 520, 521, 522, + /* 230 */ 523, 524, 525, 526, 527, 528, 529, 530, 432, 461, + /* 240 */ 531, 532, 479, 481, 540, }; -#define YY_REDUCE_COUNT (132) +#define YY_REDUCE_COUNT (134) #define YY_REDUCE_MIN (-260) -#define YY_REDUCE_MAX (264) +#define YY_REDUCE_MAX (268) static const short yy_reduce_ofst[] = { /* 0 */ -204, -101, 44, -260, -252, -211, -181, -149, -148, -122, /* 10 */ -64, -104, -61, -254, -199, -66, -44, -49, -48, -36, /* 20 */ -12, 15, 47, -212, 63, 70, 13, -247, -240, -230, /* 30 */ -176, -172, -154, -138, -53, 5, 25, 50, 65, 73, /* 40 */ 93, 94, 95, 97, 99, 110, 114, 120, 121, 122, - /* 50 */ 123, 124, 125, 126, 128, 129, 158, 162, 103, 105, - /* 60 */ 130, 165, 166, 168, 170, 171, 173, 174, 175, 176, + /* 50 */ 123, 124, 125, 126, 128, 129, 158, 162, 163, 165, + /* 60 */ 103, 108, 130, 168, 170, 171, 173, 174, 175, 176, /* 70 */ 177, 179, 180, 182, 183, 184, 186, 187, 188, 189, /* 80 */ 190, 193, 194, 195, 196, 197, 198, 199, 200, 202, - /* 90 */ 203, 204, 205, 206, 207, 208, 210, 211, 137, 212, - /* 100 */ 213, 138, 147, 214, 164, 169, 178, 215, 217, 216, - /* 110 */ 220, 222, 224, 219, 227, 230, 231, 232, 233, 223, - /* 120 */ 234, 240, 242, 243, 246, 247, 244, 245, 248, 256, - /* 130 */ 263, 260, 264, + /* 90 */ 203, 
204, 205, 206, 207, 208, 210, 211, 212, 213, + /* 100 */ 137, 214, 215, 138, 147, 216, 166, 178, 217, 219, + /* 110 */ 222, 224, 220, 227, 230, 223, 231, 232, 233, 234, + /* 120 */ 240, 242, 245, 243, 244, 248, 251, 254, 246, 247, + /* 130 */ 249, 268, 250, 266, 252, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 631, 683, 672, 849, 849, 631, 631, 631, 631, 631, - /* 10 */ 631, 779, 649, 849, 631, 631, 631, 631, 631, 631, - /* 20 */ 631, 631, 631, 685, 685, 685, 774, 631, 631, 631, - /* 30 */ 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, - /* 40 */ 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, - /* 50 */ 631, 631, 631, 631, 631, 631, 631, 631, 798, 798, - /* 60 */ 772, 631, 631, 631, 631, 631, 631, 631, 631, 631, - /* 70 */ 631, 631, 631, 631, 631, 631, 631, 631, 631, 670, - /* 80 */ 631, 668, 631, 631, 631, 631, 631, 631, 631, 631, - /* 90 */ 631, 631, 631, 631, 631, 657, 631, 631, 631, 651, - /* 100 */ 651, 631, 631, 651, 805, 809, 803, 791, 799, 790, - /* 110 */ 786, 785, 813, 631, 651, 651, 680, 680, 651, 701, - /* 120 */ 699, 697, 689, 695, 691, 693, 687, 651, 678, 651, - /* 130 */ 678, 718, 733, 631, 814, 848, 804, 832, 831, 844, - /* 140 */ 838, 837, 631, 836, 835, 834, 833, 631, 631, 631, - /* 150 */ 631, 840, 839, 631, 631, 631, 631, 631, 631, 631, - /* 160 */ 631, 631, 816, 810, 806, 631, 631, 631, 631, 631, - /* 170 */ 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, - /* 180 */ 631, 631, 771, 631, 631, 780, 631, 631, 631, 631, - /* 190 */ 631, 631, 800, 631, 792, 631, 631, 631, 631, 631, - /* 200 */ 631, 748, 631, 631, 631, 631, 631, 631, 631, 631, - /* 210 */ 631, 631, 631, 631, 631, 853, 631, 631, 631, 742, - /* 220 */ 851, 631, 631, 631, 631, 631, 631, 631, 631, 631, - /* 230 */ 631, 631, 631, 631, 631, 631, 704, 631, 655, 653, - /* 240 */ 631, 647, 631, + /* 0 */ 635, 687, 676, 855, 855, 635, 635, 635, 635, 635, + /* 10 */ 635, 783, 653, 855, 635, 635, 635, 635, 635, 635, + /* 20 */ 635, 635, 635, 689, 689, 689, 778, 635, 635, 635, + /* 30 */ 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, + /* 40 */ 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, + /* 50 */ 635, 635, 635, 635, 635, 635, 635, 785, 787, 635, + /* 60 */ 804, 804, 776, 635, 635, 635, 635, 635, 635, 635, + /* 70 */ 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, + /* 80 */ 635, 674, 635, 672, 635, 635, 635, 635, 635, 635, + /* 90 */ 635, 635, 635, 635, 635, 635, 635, 661, 635, 635, + /* 100 */ 635, 655, 655, 635, 635, 655, 811, 815, 809, 797, + /* 110 */ 805, 796, 792, 791, 819, 635, 655, 655, 684, 684, + /* 120 */ 655, 705, 703, 701, 693, 699, 695, 697, 691, 655, + /* 130 */ 682, 655, 682, 722, 737, 635, 820, 854, 810, 838, + /* 140 */ 837, 850, 844, 843, 635, 842, 841, 840, 839, 635, + /* 150 */ 635, 635, 635, 846, 845, 635, 635, 635, 635, 635, + /* 160 */ 635, 635, 635, 635, 822, 816, 812, 635, 635, 635, + /* 170 */ 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, + /* 180 */ 635, 635, 635, 635, 775, 635, 635, 784, 635, 635, + /* 190 */ 635, 635, 635, 635, 806, 635, 798, 635, 635, 635, + /* 200 */ 635, 635, 635, 752, 635, 635, 635, 635, 635, 635, + /* 210 */ 635, 635, 635, 635, 635, 635, 635, 859, 635, 635, + /* 220 */ 635, 746, 857, 635, 635, 635, 635, 635, 635, 635, + /* 230 */ 635, 635, 635, 635, 635, 635, 635, 635, 708, 635, + /* 240 */ 659, 657, 635, 651, 635, }; /********** End of lemon-generated parsing tables *****************************/ @@ -1154,84 +1155,86 @@ static const char *const yyRuleName[] = { /* 145 */ "as ::=", /* 146 */ "from ::= FROM tablelist", /* 147 */ 
"tablelist ::= ids cpxName", - /* 148 */ "tablelist ::= tablelist COMMA ids cpxName", - /* 149 */ "tmvar ::= VARIABLE", - /* 150 */ "interval_opt ::= INTERVAL LP tmvar RP", - /* 151 */ "interval_opt ::=", - /* 152 */ "fill_opt ::=", - /* 153 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", - /* 154 */ "fill_opt ::= FILL LP ID RP", - /* 155 */ "sliding_opt ::= SLIDING LP tmvar RP", - /* 156 */ "sliding_opt ::=", - /* 157 */ "orderby_opt ::=", - /* 158 */ "orderby_opt ::= ORDER BY sortlist", - /* 159 */ "sortlist ::= sortlist COMMA item sortorder", - /* 160 */ "sortlist ::= item sortorder", - /* 161 */ "item ::= ids cpxName", - /* 162 */ "sortorder ::= ASC", - /* 163 */ "sortorder ::= DESC", - /* 164 */ "sortorder ::=", - /* 165 */ "groupby_opt ::=", - /* 166 */ "groupby_opt ::= GROUP BY grouplist", - /* 167 */ "grouplist ::= grouplist COMMA item", - /* 168 */ "grouplist ::= item", - /* 169 */ "having_opt ::=", - /* 170 */ "having_opt ::= HAVING expr", - /* 171 */ "limit_opt ::=", - /* 172 */ "limit_opt ::= LIMIT signed", - /* 173 */ "limit_opt ::= LIMIT signed OFFSET signed", - /* 174 */ "limit_opt ::= LIMIT signed COMMA signed", - /* 175 */ "slimit_opt ::=", - /* 176 */ "slimit_opt ::= SLIMIT signed", - /* 177 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", - /* 178 */ "slimit_opt ::= SLIMIT signed COMMA signed", - /* 179 */ "where_opt ::=", - /* 180 */ "where_opt ::= WHERE expr", - /* 181 */ "expr ::= LP expr RP", - /* 182 */ "expr ::= ID", - /* 183 */ "expr ::= ID DOT ID", - /* 184 */ "expr ::= ID DOT STAR", - /* 185 */ "expr ::= INTEGER", - /* 186 */ "expr ::= MINUS INTEGER", - /* 187 */ "expr ::= PLUS INTEGER", - /* 188 */ "expr ::= FLOAT", - /* 189 */ "expr ::= MINUS FLOAT", - /* 190 */ "expr ::= PLUS FLOAT", - /* 191 */ "expr ::= STRING", - /* 192 */ "expr ::= NOW", - /* 193 */ "expr ::= VARIABLE", - /* 194 */ "expr ::= BOOL", - /* 195 */ "expr ::= ID LP exprlist RP", - /* 196 */ "expr ::= ID LP STAR RP", - /* 197 */ "expr ::= expr AND expr", - /* 198 */ "expr ::= expr OR expr", - /* 199 */ "expr ::= expr LT expr", - /* 200 */ "expr ::= expr GT expr", - /* 201 */ "expr ::= expr LE expr", - /* 202 */ "expr ::= expr GE expr", - /* 203 */ "expr ::= expr NE expr", - /* 204 */ "expr ::= expr EQ expr", - /* 205 */ "expr ::= expr PLUS expr", - /* 206 */ "expr ::= expr MINUS expr", - /* 207 */ "expr ::= expr STAR expr", - /* 208 */ "expr ::= expr SLASH expr", - /* 209 */ "expr ::= expr REM expr", - /* 210 */ "expr ::= expr LIKE expr", - /* 211 */ "expr ::= expr IN LP exprlist RP", - /* 212 */ "exprlist ::= exprlist COMMA expritem", - /* 213 */ "exprlist ::= expritem", - /* 214 */ "expritem ::= expr", - /* 215 */ "expritem ::=", - /* 216 */ "cmd ::= RESET QUERY CACHE", - /* 217 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", - /* 218 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", - /* 219 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", - /* 220 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", - /* 221 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", - /* 222 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 223 */ "cmd ::= KILL CONNECTION INTEGER", - /* 224 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", - /* 225 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", + /* 148 */ "tablelist ::= ids cpxName ids", + /* 149 */ "tablelist ::= tablelist COMMA ids cpxName", + /* 150 */ "tablelist ::= tablelist COMMA ids cpxName ids", + /* 151 */ "tmvar ::= VARIABLE", + /* 152 */ "interval_opt ::= INTERVAL LP tmvar RP", + /* 153 */ "interval_opt ::=", 
+ /* 154 */ "fill_opt ::=", + /* 155 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", + /* 156 */ "fill_opt ::= FILL LP ID RP", + /* 157 */ "sliding_opt ::= SLIDING LP tmvar RP", + /* 158 */ "sliding_opt ::=", + /* 159 */ "orderby_opt ::=", + /* 160 */ "orderby_opt ::= ORDER BY sortlist", + /* 161 */ "sortlist ::= sortlist COMMA item sortorder", + /* 162 */ "sortlist ::= item sortorder", + /* 163 */ "item ::= ids cpxName", + /* 164 */ "sortorder ::= ASC", + /* 165 */ "sortorder ::= DESC", + /* 166 */ "sortorder ::=", + /* 167 */ "groupby_opt ::=", + /* 168 */ "groupby_opt ::= GROUP BY grouplist", + /* 169 */ "grouplist ::= grouplist COMMA item", + /* 170 */ "grouplist ::= item", + /* 171 */ "having_opt ::=", + /* 172 */ "having_opt ::= HAVING expr", + /* 173 */ "limit_opt ::=", + /* 174 */ "limit_opt ::= LIMIT signed", + /* 175 */ "limit_opt ::= LIMIT signed OFFSET signed", + /* 176 */ "limit_opt ::= LIMIT signed COMMA signed", + /* 177 */ "slimit_opt ::=", + /* 178 */ "slimit_opt ::= SLIMIT signed", + /* 179 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", + /* 180 */ "slimit_opt ::= SLIMIT signed COMMA signed", + /* 181 */ "where_opt ::=", + /* 182 */ "where_opt ::= WHERE expr", + /* 183 */ "expr ::= LP expr RP", + /* 184 */ "expr ::= ID", + /* 185 */ "expr ::= ID DOT ID", + /* 186 */ "expr ::= ID DOT STAR", + /* 187 */ "expr ::= INTEGER", + /* 188 */ "expr ::= MINUS INTEGER", + /* 189 */ "expr ::= PLUS INTEGER", + /* 190 */ "expr ::= FLOAT", + /* 191 */ "expr ::= MINUS FLOAT", + /* 192 */ "expr ::= PLUS FLOAT", + /* 193 */ "expr ::= STRING", + /* 194 */ "expr ::= NOW", + /* 195 */ "expr ::= VARIABLE", + /* 196 */ "expr ::= BOOL", + /* 197 */ "expr ::= ID LP exprlist RP", + /* 198 */ "expr ::= ID LP STAR RP", + /* 199 */ "expr ::= expr AND expr", + /* 200 */ "expr ::= expr OR expr", + /* 201 */ "expr ::= expr LT expr", + /* 202 */ "expr ::= expr GT expr", + /* 203 */ "expr ::= expr LE expr", + /* 204 */ "expr ::= expr GE expr", + /* 205 */ "expr ::= expr NE expr", + /* 206 */ "expr ::= expr EQ expr", + /* 207 */ "expr ::= expr PLUS expr", + /* 208 */ "expr ::= expr MINUS expr", + /* 209 */ "expr ::= expr STAR expr", + /* 210 */ "expr ::= expr SLASH expr", + /* 211 */ "expr ::= expr REM expr", + /* 212 */ "expr ::= expr LIKE expr", + /* 213 */ "expr ::= expr IN LP exprlist RP", + /* 214 */ "exprlist ::= exprlist COMMA expritem", + /* 215 */ "exprlist ::= expritem", + /* 216 */ "expritem ::= expr", + /* 217 */ "expritem ::=", + /* 218 */ "cmd ::= RESET QUERY CACHE", + /* 219 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", + /* 220 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", + /* 221 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 222 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + /* 223 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", + /* 224 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", + /* 225 */ "cmd ::= KILL CONNECTION INTEGER", + /* 226 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", + /* 227 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", }; #endif /* NDEBUG */ @@ -1837,84 +1840,86 @@ static const struct { { 263, 0 }, /* (145) as ::= */ { 250, -2 }, /* (146) from ::= FROM tablelist */ { 264, -2 }, /* (147) tablelist ::= ids cpxName */ - { 264, -4 }, /* (148) tablelist ::= tablelist COMMA ids cpxName */ - { 265, -1 }, /* (149) tmvar ::= VARIABLE */ - { 252, -4 }, /* (150) interval_opt ::= INTERVAL LP tmvar RP */ - { 252, 0 }, /* (151) interval_opt ::= */ - { 253, 0 }, /* (152) fill_opt ::= */ - { 253, -6 }, /* (153) 
fill_opt ::= FILL LP ID COMMA tagitemlist RP */ - { 253, -4 }, /* (154) fill_opt ::= FILL LP ID RP */ - { 254, -4 }, /* (155) sliding_opt ::= SLIDING LP tmvar RP */ - { 254, 0 }, /* (156) sliding_opt ::= */ - { 256, 0 }, /* (157) orderby_opt ::= */ - { 256, -3 }, /* (158) orderby_opt ::= ORDER BY sortlist */ - { 266, -4 }, /* (159) sortlist ::= sortlist COMMA item sortorder */ - { 266, -2 }, /* (160) sortlist ::= item sortorder */ - { 268, -2 }, /* (161) item ::= ids cpxName */ - { 269, -1 }, /* (162) sortorder ::= ASC */ - { 269, -1 }, /* (163) sortorder ::= DESC */ - { 269, 0 }, /* (164) sortorder ::= */ - { 255, 0 }, /* (165) groupby_opt ::= */ - { 255, -3 }, /* (166) groupby_opt ::= GROUP BY grouplist */ - { 270, -3 }, /* (167) grouplist ::= grouplist COMMA item */ - { 270, -1 }, /* (168) grouplist ::= item */ - { 257, 0 }, /* (169) having_opt ::= */ - { 257, -2 }, /* (170) having_opt ::= HAVING expr */ - { 259, 0 }, /* (171) limit_opt ::= */ - { 259, -2 }, /* (172) limit_opt ::= LIMIT signed */ - { 259, -4 }, /* (173) limit_opt ::= LIMIT signed OFFSET signed */ - { 259, -4 }, /* (174) limit_opt ::= LIMIT signed COMMA signed */ - { 258, 0 }, /* (175) slimit_opt ::= */ - { 258, -2 }, /* (176) slimit_opt ::= SLIMIT signed */ - { 258, -4 }, /* (177) slimit_opt ::= SLIMIT signed SOFFSET signed */ - { 258, -4 }, /* (178) slimit_opt ::= SLIMIT signed COMMA signed */ - { 251, 0 }, /* (179) where_opt ::= */ - { 251, -2 }, /* (180) where_opt ::= WHERE expr */ - { 262, -3 }, /* (181) expr ::= LP expr RP */ - { 262, -1 }, /* (182) expr ::= ID */ - { 262, -3 }, /* (183) expr ::= ID DOT ID */ - { 262, -3 }, /* (184) expr ::= ID DOT STAR */ - { 262, -1 }, /* (185) expr ::= INTEGER */ - { 262, -2 }, /* (186) expr ::= MINUS INTEGER */ - { 262, -2 }, /* (187) expr ::= PLUS INTEGER */ - { 262, -1 }, /* (188) expr ::= FLOAT */ - { 262, -2 }, /* (189) expr ::= MINUS FLOAT */ - { 262, -2 }, /* (190) expr ::= PLUS FLOAT */ - { 262, -1 }, /* (191) expr ::= STRING */ - { 262, -1 }, /* (192) expr ::= NOW */ - { 262, -1 }, /* (193) expr ::= VARIABLE */ - { 262, -1 }, /* (194) expr ::= BOOL */ - { 262, -4 }, /* (195) expr ::= ID LP exprlist RP */ - { 262, -4 }, /* (196) expr ::= ID LP STAR RP */ - { 262, -3 }, /* (197) expr ::= expr AND expr */ - { 262, -3 }, /* (198) expr ::= expr OR expr */ - { 262, -3 }, /* (199) expr ::= expr LT expr */ - { 262, -3 }, /* (200) expr ::= expr GT expr */ - { 262, -3 }, /* (201) expr ::= expr LE expr */ - { 262, -3 }, /* (202) expr ::= expr GE expr */ - { 262, -3 }, /* (203) expr ::= expr NE expr */ - { 262, -3 }, /* (204) expr ::= expr EQ expr */ - { 262, -3 }, /* (205) expr ::= expr PLUS expr */ - { 262, -3 }, /* (206) expr ::= expr MINUS expr */ - { 262, -3 }, /* (207) expr ::= expr STAR expr */ - { 262, -3 }, /* (208) expr ::= expr SLASH expr */ - { 262, -3 }, /* (209) expr ::= expr REM expr */ - { 262, -3 }, /* (210) expr ::= expr LIKE expr */ - { 262, -5 }, /* (211) expr ::= expr IN LP exprlist RP */ - { 271, -3 }, /* (212) exprlist ::= exprlist COMMA expritem */ - { 271, -1 }, /* (213) exprlist ::= expritem */ - { 272, -1 }, /* (214) expritem ::= expr */ - { 272, 0 }, /* (215) expritem ::= */ - { 209, -3 }, /* (216) cmd ::= RESET QUERY CACHE */ - { 209, -7 }, /* (217) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - { 209, -7 }, /* (218) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - { 209, -7 }, /* (219) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - { 209, -7 }, /* (220) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - { 209, -8 }, /* (221) cmd 
::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - { 209, -9 }, /* (222) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - { 209, -3 }, /* (223) cmd ::= KILL CONNECTION INTEGER */ - { 209, -5 }, /* (224) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - { 209, -5 }, /* (225) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + { 264, -3 }, /* (148) tablelist ::= ids cpxName ids */ + { 264, -4 }, /* (149) tablelist ::= tablelist COMMA ids cpxName */ + { 264, -5 }, /* (150) tablelist ::= tablelist COMMA ids cpxName ids */ + { 265, -1 }, /* (151) tmvar ::= VARIABLE */ + { 252, -4 }, /* (152) interval_opt ::= INTERVAL LP tmvar RP */ + { 252, 0 }, /* (153) interval_opt ::= */ + { 253, 0 }, /* (154) fill_opt ::= */ + { 253, -6 }, /* (155) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + { 253, -4 }, /* (156) fill_opt ::= FILL LP ID RP */ + { 254, -4 }, /* (157) sliding_opt ::= SLIDING LP tmvar RP */ + { 254, 0 }, /* (158) sliding_opt ::= */ + { 256, 0 }, /* (159) orderby_opt ::= */ + { 256, -3 }, /* (160) orderby_opt ::= ORDER BY sortlist */ + { 266, -4 }, /* (161) sortlist ::= sortlist COMMA item sortorder */ + { 266, -2 }, /* (162) sortlist ::= item sortorder */ + { 268, -2 }, /* (163) item ::= ids cpxName */ + { 269, -1 }, /* (164) sortorder ::= ASC */ + { 269, -1 }, /* (165) sortorder ::= DESC */ + { 269, 0 }, /* (166) sortorder ::= */ + { 255, 0 }, /* (167) groupby_opt ::= */ + { 255, -3 }, /* (168) groupby_opt ::= GROUP BY grouplist */ + { 270, -3 }, /* (169) grouplist ::= grouplist COMMA item */ + { 270, -1 }, /* (170) grouplist ::= item */ + { 257, 0 }, /* (171) having_opt ::= */ + { 257, -2 }, /* (172) having_opt ::= HAVING expr */ + { 259, 0 }, /* (173) limit_opt ::= */ + { 259, -2 }, /* (174) limit_opt ::= LIMIT signed */ + { 259, -4 }, /* (175) limit_opt ::= LIMIT signed OFFSET signed */ + { 259, -4 }, /* (176) limit_opt ::= LIMIT signed COMMA signed */ + { 258, 0 }, /* (177) slimit_opt ::= */ + { 258, -2 }, /* (178) slimit_opt ::= SLIMIT signed */ + { 258, -4 }, /* (179) slimit_opt ::= SLIMIT signed SOFFSET signed */ + { 258, -4 }, /* (180) slimit_opt ::= SLIMIT signed COMMA signed */ + { 251, 0 }, /* (181) where_opt ::= */ + { 251, -2 }, /* (182) where_opt ::= WHERE expr */ + { 262, -3 }, /* (183) expr ::= LP expr RP */ + { 262, -1 }, /* (184) expr ::= ID */ + { 262, -3 }, /* (185) expr ::= ID DOT ID */ + { 262, -3 }, /* (186) expr ::= ID DOT STAR */ + { 262, -1 }, /* (187) expr ::= INTEGER */ + { 262, -2 }, /* (188) expr ::= MINUS INTEGER */ + { 262, -2 }, /* (189) expr ::= PLUS INTEGER */ + { 262, -1 }, /* (190) expr ::= FLOAT */ + { 262, -2 }, /* (191) expr ::= MINUS FLOAT */ + { 262, -2 }, /* (192) expr ::= PLUS FLOAT */ + { 262, -1 }, /* (193) expr ::= STRING */ + { 262, -1 }, /* (194) expr ::= NOW */ + { 262, -1 }, /* (195) expr ::= VARIABLE */ + { 262, -1 }, /* (196) expr ::= BOOL */ + { 262, -4 }, /* (197) expr ::= ID LP exprlist RP */ + { 262, -4 }, /* (198) expr ::= ID LP STAR RP */ + { 262, -3 }, /* (199) expr ::= expr AND expr */ + { 262, -3 }, /* (200) expr ::= expr OR expr */ + { 262, -3 }, /* (201) expr ::= expr LT expr */ + { 262, -3 }, /* (202) expr ::= expr GT expr */ + { 262, -3 }, /* (203) expr ::= expr LE expr */ + { 262, -3 }, /* (204) expr ::= expr GE expr */ + { 262, -3 }, /* (205) expr ::= expr NE expr */ + { 262, -3 }, /* (206) expr ::= expr EQ expr */ + { 262, -3 }, /* (207) expr ::= expr PLUS expr */ + { 262, -3 }, /* (208) expr ::= expr MINUS expr */ + { 262, -3 }, /* (209) expr ::= expr STAR expr */ + { 262, -3 }, /* (210) expr ::= expr SLASH expr */ 
+ { 262, -3 }, /* (211) expr ::= expr REM expr */ + { 262, -3 }, /* (212) expr ::= expr LIKE expr */ + { 262, -5 }, /* (213) expr ::= expr IN LP exprlist RP */ + { 271, -3 }, /* (214) exprlist ::= exprlist COMMA expritem */ + { 271, -1 }, /* (215) exprlist ::= expritem */ + { 272, -1 }, /* (216) expritem ::= expr */ + { 272, 0 }, /* (217) expritem ::= */ + { 209, -3 }, /* (218) cmd ::= RESET QUERY CACHE */ + { 209, -7 }, /* (219) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + { 209, -7 }, /* (220) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + { 209, -7 }, /* (221) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + { 209, -7 }, /* (222) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + { 209, -8 }, /* (223) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + { 209, -9 }, /* (224) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + { 209, -3 }, /* (225) cmd ::= KILL CONNECTION INTEGER */ + { 209, -5 }, /* (226) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + { 209, -5 }, /* (227) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -2465,29 +2470,59 @@ static void yy_reduce( {yymsp[-1].minor.yy498 = yymsp[0].minor.yy498;} break; case 147: /* tablelist ::= ids cpxName */ -{ toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yylhsminor.yy498 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1);} +{ + toTSDBType(yymsp[-1].minor.yy0.type); + yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; + yylhsminor.yy498 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + yylhsminor.yy498 = tVariantListAppendToken(yylhsminor.yy498, &yymsp[-1].minor.yy0, -1); // table alias name +} yymsp[-1].minor.yy498 = yylhsminor.yy498; break; - case 148: /* tablelist ::= tablelist COMMA ids cpxName */ -{ toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yylhsminor.yy498 = tVariantListAppendToken(yymsp[-3].minor.yy498, &yymsp[-1].minor.yy0, -1); } + case 148: /* tablelist ::= ids cpxName ids */ +{ + toTSDBType(yymsp[-2].minor.yy0.type); + toTSDBType(yymsp[0].minor.yy0.type); + yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; + yylhsminor.yy498 = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); + yylhsminor.yy498 = tVariantListAppendToken(yylhsminor.yy498, &yymsp[0].minor.yy0, -1); +} + yymsp[-2].minor.yy498 = yylhsminor.yy498; + break; + case 149: /* tablelist ::= tablelist COMMA ids cpxName */ +{ + toTSDBType(yymsp[-1].minor.yy0.type); + yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; + yylhsminor.yy498 = tVariantListAppendToken(yymsp[-3].minor.yy498, &yymsp[-1].minor.yy0, -1); + yylhsminor.yy498 = tVariantListAppendToken(yylhsminor.yy498, &yymsp[-1].minor.yy0, -1); +} yymsp[-3].minor.yy498 = yylhsminor.yy498; break; - case 149: /* tmvar ::= VARIABLE */ + case 150: /* tablelist ::= tablelist COMMA ids cpxName ids */ +{ + toTSDBType(yymsp[-2].minor.yy0.type); + toTSDBType(yymsp[0].minor.yy0.type); + yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; + yylhsminor.yy498 = tVariantListAppendToken(yymsp[-4].minor.yy498, &yymsp[-2].minor.yy0, -1); + yylhsminor.yy498 = tVariantListAppendToken(yylhsminor.yy498, &yymsp[0].minor.yy0, -1); +} + yymsp[-4].minor.yy498 = yylhsminor.yy498; + break; + case 151: /* tmvar ::= VARIABLE */ {yylhsminor.yy0 = yymsp[0].minor.yy0;} yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 150: /* interval_opt ::= INTERVAL LP tmvar RP */ - case 155: /* sliding_opt ::= SLIDING LP tmvar RP */ yytestcase(yyruleno==155); + case 152: 
/* interval_opt ::= INTERVAL LP tmvar RP */ + case 157: /* sliding_opt ::= SLIDING LP tmvar RP */ yytestcase(yyruleno==157); {yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; } break; - case 151: /* interval_opt ::= */ - case 156: /* sliding_opt ::= */ yytestcase(yyruleno==156); + case 153: /* interval_opt ::= */ + case 158: /* sliding_opt ::= */ yytestcase(yyruleno==158); {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; } break; - case 152: /* fill_opt ::= */ + case 154: /* fill_opt ::= */ {yymsp[1].minor.yy498 = 0; } break; - case 153: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + case 155: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ { tVariant A = {0}; toTSDBType(yymsp[-3].minor.yy0.type); @@ -2497,33 +2532,33 @@ static void yy_reduce( yymsp[-5].minor.yy498 = yymsp[-1].minor.yy498; } break; - case 154: /* fill_opt ::= FILL LP ID RP */ + case 156: /* fill_opt ::= FILL LP ID RP */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-3].minor.yy498 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); } break; - case 157: /* orderby_opt ::= */ - case 165: /* groupby_opt ::= */ yytestcase(yyruleno==165); + case 159: /* orderby_opt ::= */ + case 167: /* groupby_opt ::= */ yytestcase(yyruleno==167); {yymsp[1].minor.yy498 = 0;} break; - case 158: /* orderby_opt ::= ORDER BY sortlist */ - case 166: /* groupby_opt ::= GROUP BY grouplist */ yytestcase(yyruleno==166); + case 160: /* orderby_opt ::= ORDER BY sortlist */ + case 168: /* groupby_opt ::= GROUP BY grouplist */ yytestcase(yyruleno==168); {yymsp[-2].minor.yy498 = yymsp[0].minor.yy498;} break; - case 159: /* sortlist ::= sortlist COMMA item sortorder */ + case 161: /* sortlist ::= sortlist COMMA item sortorder */ { yylhsminor.yy498 = tVariantListAppend(yymsp[-3].minor.yy498, &yymsp[-1].minor.yy134, yymsp[0].minor.yy46); } yymsp[-3].minor.yy498 = yylhsminor.yy498; break; - case 160: /* sortlist ::= item sortorder */ + case 162: /* sortlist ::= item sortorder */ { yylhsminor.yy498 = tVariantListAppend(NULL, &yymsp[-1].minor.yy134, yymsp[0].minor.yy46); } yymsp[-1].minor.yy498 = yylhsminor.yy498; break; - case 161: /* item ::= ids cpxName */ + case 163: /* item ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; @@ -2532,196 +2567,196 @@ static void yy_reduce( } yymsp[-1].minor.yy134 = yylhsminor.yy134; break; - case 162: /* sortorder ::= ASC */ + case 164: /* sortorder ::= ASC */ {yymsp[0].minor.yy46 = TSDB_ORDER_ASC; } break; - case 163: /* sortorder ::= DESC */ + case 165: /* sortorder ::= DESC */ {yymsp[0].minor.yy46 = TSDB_ORDER_DESC;} break; - case 164: /* sortorder ::= */ + case 166: /* sortorder ::= */ {yymsp[1].minor.yy46 = TSDB_ORDER_ASC;} break; - case 167: /* grouplist ::= grouplist COMMA item */ + case 169: /* grouplist ::= grouplist COMMA item */ { yylhsminor.yy498 = tVariantListAppend(yymsp[-2].minor.yy498, &yymsp[0].minor.yy134, -1); } yymsp[-2].minor.yy498 = yylhsminor.yy498; break; - case 168: /* grouplist ::= item */ + case 170: /* grouplist ::= item */ { yylhsminor.yy498 = tVariantListAppend(NULL, &yymsp[0].minor.yy134, -1); } yymsp[0].minor.yy498 = yylhsminor.yy498; break; - case 169: /* having_opt ::= */ - case 179: /* where_opt ::= */ yytestcase(yyruleno==179); - case 215: /* expritem ::= */ yytestcase(yyruleno==215); + case 171: /* having_opt ::= */ + case 181: /* where_opt ::= */ yytestcase(yyruleno==181); + case 217: /* expritem ::= */ yytestcase(yyruleno==217); {yymsp[1].minor.yy64 = 0;} break; - case 170: /* having_opt 
::= HAVING expr */ - case 180: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==180); + case 172: /* having_opt ::= HAVING expr */ + case 182: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==182); {yymsp[-1].minor.yy64 = yymsp[0].minor.yy64;} break; - case 171: /* limit_opt ::= */ - case 175: /* slimit_opt ::= */ yytestcase(yyruleno==175); + case 173: /* limit_opt ::= */ + case 177: /* slimit_opt ::= */ yytestcase(yyruleno==177); {yymsp[1].minor.yy216.limit = -1; yymsp[1].minor.yy216.offset = 0;} break; - case 172: /* limit_opt ::= LIMIT signed */ - case 176: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==176); + case 174: /* limit_opt ::= LIMIT signed */ + case 178: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==178); {yymsp[-1].minor.yy216.limit = yymsp[0].minor.yy207; yymsp[-1].minor.yy216.offset = 0;} break; - case 173: /* limit_opt ::= LIMIT signed OFFSET signed */ - case 177: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ yytestcase(yyruleno==177); + case 175: /* limit_opt ::= LIMIT signed OFFSET signed */ + case 179: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ yytestcase(yyruleno==179); {yymsp[-3].minor.yy216.limit = yymsp[-2].minor.yy207; yymsp[-3].minor.yy216.offset = yymsp[0].minor.yy207;} break; - case 174: /* limit_opt ::= LIMIT signed COMMA signed */ - case 178: /* slimit_opt ::= SLIMIT signed COMMA signed */ yytestcase(yyruleno==178); + case 176: /* limit_opt ::= LIMIT signed COMMA signed */ + case 180: /* slimit_opt ::= SLIMIT signed COMMA signed */ yytestcase(yyruleno==180); {yymsp[-3].minor.yy216.limit = yymsp[0].minor.yy207; yymsp[-3].minor.yy216.offset = yymsp[-2].minor.yy207;} break; - case 181: /* expr ::= LP expr RP */ + case 183: /* expr ::= LP expr RP */ {yymsp[-2].minor.yy64 = yymsp[-1].minor.yy64; } break; - case 182: /* expr ::= ID */ + case 184: /* expr ::= ID */ {yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_ID);} yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 183: /* expr ::= ID DOT ID */ + case 185: /* expr ::= ID DOT ID */ {yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ID);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 184: /* expr ::= ID DOT STAR */ + case 186: /* expr ::= ID DOT STAR */ {yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ALL);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 185: /* expr ::= INTEGER */ + case 187: /* expr ::= INTEGER */ {yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_INTEGER);} yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 186: /* expr ::= MINUS INTEGER */ - case 187: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==187); + case 188: /* expr ::= MINUS INTEGER */ + case 189: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==189); {yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_INTEGER);} yymsp[-1].minor.yy64 = yylhsminor.yy64; break; - case 188: /* expr ::= FLOAT */ + case 190: /* expr ::= FLOAT */ {yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_FLOAT);} yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 189: /* expr ::= MINUS FLOAT */ - case 190: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==190); + case 191: /* expr ::= MINUS FLOAT */ + case 192: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==192); {yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = 
TK_FLOAT; yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_FLOAT);} yymsp[-1].minor.yy64 = yylhsminor.yy64; break; - case 191: /* expr ::= STRING */ + case 193: /* expr ::= STRING */ {yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_STRING);} yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 192: /* expr ::= NOW */ + case 194: /* expr ::= NOW */ {yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_NOW); } yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 193: /* expr ::= VARIABLE */ + case 195: /* expr ::= VARIABLE */ {yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_VARIABLE);} yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 194: /* expr ::= BOOL */ + case 196: /* expr ::= BOOL */ {yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_BOOL);} yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 195: /* expr ::= ID LP exprlist RP */ + case 197: /* expr ::= ID LP exprlist RP */ { yylhsminor.yy64 = tSQLExprCreateFunction(yymsp[-1].minor.yy290, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } yymsp[-3].minor.yy64 = yylhsminor.yy64; break; - case 196: /* expr ::= ID LP STAR RP */ + case 198: /* expr ::= ID LP STAR RP */ { yylhsminor.yy64 = tSQLExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } yymsp[-3].minor.yy64 = yylhsminor.yy64; break; - case 197: /* expr ::= expr AND expr */ + case 199: /* expr ::= expr AND expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_AND);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 198: /* expr ::= expr OR expr */ + case 200: /* expr ::= expr OR expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_OR); } yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 199: /* expr ::= expr LT expr */ + case 201: /* expr ::= expr LT expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_LT);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 200: /* expr ::= expr GT expr */ + case 202: /* expr ::= expr GT expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_GT);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 201: /* expr ::= expr LE expr */ + case 203: /* expr ::= expr LE expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_LE);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 202: /* expr ::= expr GE expr */ + case 204: /* expr ::= expr GE expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_GE);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 203: /* expr ::= expr NE expr */ + case 205: /* expr ::= expr NE expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_NE);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 204: /* expr ::= expr EQ expr */ + case 206: /* expr ::= expr EQ expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_EQ);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 205: /* expr ::= expr PLUS expr */ + case 207: /* expr ::= expr PLUS expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_PLUS); } yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 206: /* expr ::= expr MINUS expr */ + case 208: /* expr ::= expr MINUS expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_MINUS); } yymsp[-2].minor.yy64 = yylhsminor.yy64; break; 
- case 207: /* expr ::= expr STAR expr */ + case 209: /* expr ::= expr STAR expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_STAR); } yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 208: /* expr ::= expr SLASH expr */ + case 210: /* expr ::= expr SLASH expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_DIVIDE);} yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 209: /* expr ::= expr REM expr */ + case 211: /* expr ::= expr REM expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_REM); } yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 210: /* expr ::= expr LIKE expr */ + case 212: /* expr ::= expr LIKE expr */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_LIKE); } yymsp[-2].minor.yy64 = yylhsminor.yy64; break; - case 211: /* expr ::= expr IN LP exprlist RP */ + case 213: /* expr ::= expr IN LP exprlist RP */ {yylhsminor.yy64 = tSQLExprCreate(yymsp[-4].minor.yy64, (tSQLExpr*)yymsp[-1].minor.yy290, TK_IN); } yymsp[-4].minor.yy64 = yylhsminor.yy64; break; - case 212: /* exprlist ::= exprlist COMMA expritem */ + case 214: /* exprlist ::= exprlist COMMA expritem */ {yylhsminor.yy290 = tSQLExprListAppend(yymsp[-2].minor.yy290,yymsp[0].minor.yy64,0);} yymsp[-2].minor.yy290 = yylhsminor.yy290; break; - case 213: /* exprlist ::= expritem */ + case 215: /* exprlist ::= expritem */ {yylhsminor.yy290 = tSQLExprListAppend(0,yymsp[0].minor.yy64,0);} yymsp[0].minor.yy290 = yylhsminor.yy290; break; - case 214: /* expritem ::= expr */ + case 216: /* expritem ::= expr */ {yylhsminor.yy64 = yymsp[0].minor.yy64;} yymsp[0].minor.yy64 = yylhsminor.yy64; break; - case 216: /* cmd ::= RESET QUERY CACHE */ + case 218: /* cmd ::= RESET QUERY CACHE */ { setDCLSQLElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} break; - case 217: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + case 219: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy523, NULL, TSDB_ALTER_TABLE_ADD_COLUMN); setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 218: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + case 220: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -2732,14 +2767,14 @@ static void yy_reduce( setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 219: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + case 221: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy523, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN); setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 220: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + case 222: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -2750,7 +2785,7 @@ static void yy_reduce( setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 221: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + case 223: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -2764,7 +2799,7 @@ static void yy_reduce( setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 
222: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + case 224: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; @@ -2776,13 +2811,13 @@ static void yy_reduce( setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 223: /* cmd ::= KILL CONNECTION INTEGER */ + case 225: /* cmd ::= KILL CONNECTION INTEGER */ {setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} break; - case 224: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ + case 226: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} break; - case 225: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ + case 227: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} break; default: diff --git a/src/query/tests/resultBufferTest.cpp b/src/query/tests/resultBufferTest.cpp index e9611a3232..7b946d8589 100644 --- a/src/query/tests/resultBufferTest.cpp +++ b/src/query/tests/resultBufferTest.cpp @@ -130,7 +130,6 @@ void recyclePageTest() { ASSERT_TRUE(t4 == pBufPage4); ASSERT_TRUE(pageId == 4); releaseResBufPage(pResultBuf, t4); - releaseResBufPage(pResultBuf, t4); tFilePage* pBufPage5 = getNewDataBuf(pResultBuf, groupId, &pageId); tFilePage* t5 = getResBufPage(pResultBuf, pageId); diff --git a/src/query/tests/tsBufTest.cpp b/src/query/tests/tsBufTest.cpp index 28b1d9cefe..f8738eec9c 100644 --- a/src/query/tests/tsBufTest.cpp +++ b/src/query/tests/tsBufTest.cpp @@ -47,6 +47,8 @@ void simpleTest() { EXPECT_EQ(pTSBuf->block.numOfElem, num); tsBufDestroy(pTSBuf); + + free(list); } // one large list of ts, the ts list need to be split into several small blocks @@ -71,6 +73,7 @@ void largeTSTest() { EXPECT_EQ(pTSBuf->block.numOfElem, num); tsBufDestroy(pTSBuf); + free(list); } void multiTagsTest() { @@ -208,6 +211,8 @@ void loadDataTest() { int64_t e = taosGetTimestampUs(); printf("end:%" PRIu64 ", elapsed:%" PRIu64 ", total obj:%d\n", e, e - s, x); + tsBufDestroy(pTSBuf); + tsBufDestroy(pNewBuf); } void randomIncTsTest() {} @@ -338,6 +343,8 @@ void TSTraverse() { } } } + + tsBufDestroy(pTSBuf); } void performanceTest() {} @@ -352,9 +359,12 @@ void invalidFileTest() { STSBuf* pNewBuf = tsBufCreateFromFile("/tmp/test", true); EXPECT_TRUE(pNewBuf == NULL); + tsBufDestroy(pNewBuf); pNewBuf = tsBufCreateFromFile("/tmp/911", true); EXPECT_TRUE(pNewBuf == NULL); + + tsBufDestroy(pNewBuf); } void mergeDiffVnodeBufferTest() { diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 70e5713ee6..d0c57a34d0 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -260,7 +260,7 @@ void *rpcOpen(const SRpcInit *pInit) { } if (pRpc->connType == TAOS_CONN_SERVER) { - pRpc->hash = taosHashInit(pRpc->sessions, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true); + pRpc->hash = taosHashInit(pRpc->sessions, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, true); if (pRpc->hash == NULL) { tError("%s failed to init string hash", pRpc->label); rpcClose(pRpc); @@ -541,7 +541,7 @@ void rpcCancelRequest(void *handle) { if (pContext->signature != pContext) return; if (pContext->pConn) { - tDebug("%s, app trys to cancel request", pContext->pConn->info); + tDebug("%s, app tries to cancel request", pContext->pConn->info); pContext->pConn->pReqMsg = NULL; 
rpcCloseConn(pContext->pConn); pContext->pConn = NULL; diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index 9da11831e5..3475e0f317 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -254,11 +254,11 @@ static void *taosAcceptTcpConnection(void *arg) { pFdObj->ip = caddr.sin_addr.s_addr; pFdObj->port = htons(caddr.sin_port); tDebug("%s new TCP connection from %s:%hu, fd:%d FD:%p numOfFds:%d", pServerObj->label, - inet_ntoa(caddr.sin_addr), pFdObj->port, connFd, pFdObj, pThreadObj->numOfFds); + taosInetNtoa(caddr.sin_addr), pFdObj->port, connFd, pFdObj, pThreadObj->numOfFds); } else { taosCloseSocket(connFd); tError("%s failed to malloc FdObj(%s) for connection from:%s:%hu", pServerObj->label, strerror(errno), - inet_ntoa(caddr.sin_addr), htons(caddr.sin_port)); + taosInetNtoa(caddr.sin_addr), htons(caddr.sin_port)); } // pick up next thread for next connection diff --git a/src/rpc/test/CMakeLists.txt b/src/rpc/test/CMakeLists.txt index 286c8e1680..383ce1b0f6 100644 --- a/src/rpc/test/CMakeLists.txt +++ b/src/rpc/test/CMakeLists.txt @@ -3,7 +3,7 @@ PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc) -IF (TD_LINUX_64) +IF (TD_LINUX) LIST(APPEND CLIENT_SRC ./rclient.c) ADD_EXECUTABLE(rclient ${CLIENT_SRC}) TARGET_LINK_LIBRARIES(rclient trpc) diff --git a/src/rpc/test/rclient.c b/src/rpc/test/rclient.c index 6ec2d82445..7a963e9ce4 100644 --- a/src/rpc/test/rclient.c +++ b/src/rpc/test/rclient.c @@ -26,8 +26,8 @@ typedef struct { int num; int numOfReqs; int msgSize; - sem_t rspSem; - sem_t *pOverSem; + tsem_t rspSem; + tsem_t *pOverSem; pthread_t thread; void *pRpc; } SInfo; @@ -39,7 +39,7 @@ static void processResponse(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { if (pEpSet) pInfo->epSet = *pEpSet; rpcFreeCont(pMsg->pCont); - sem_post(&pInfo->rspSem); + tsem_post(&pInfo->rspSem); } static int tcount = 0; @@ -60,7 +60,7 @@ static void *sendRequest(void *param) { rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg); if ( pInfo->num % 20000 == 0 ) tInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num); - sem_wait(&pInfo->rspSem); + tsem_wait(&pInfo->rspSem); } tDebug("thread:%d, it is over", pInfo->index); @@ -171,7 +171,7 @@ int main(int argc, char *argv[]) { pInfo->epSet = epSet; pInfo->numOfReqs = numOfReqs; pInfo->msgSize = msgSize; - sem_init(&pInfo->rspSem, 0, 0); + tsem_init(&pInfo->rspSem, 0, 0); pInfo->pRpc = pRpc; pthread_create(&pInfo->thread, &thattr, sendRequest, pInfo); pInfo++; diff --git a/src/rpc/test/rsclient.c b/src/rpc/test/rsclient.c index 6e6961784b..a152d8e4a5 100644 --- a/src/rpc/test/rsclient.c +++ b/src/rpc/test/rsclient.c @@ -27,8 +27,8 @@ typedef struct { int num; int numOfReqs; int msgSize; - sem_t rspSem; - sem_t *pOverSem; + tsem_t rspSem; + tsem_t *pOverSem; pthread_t thread; void *pRpc; } SInfo; @@ -171,7 +171,7 @@ int main(int argc, char *argv[]) { pInfo->epSet = epSet; pInfo->numOfReqs = numOfReqs; pInfo->msgSize = msgSize; - sem_init(&pInfo->rspSem, 0, 0); + tsem_init(&pInfo->rspSem, 0, 0); pInfo->pRpc = pRpc; pthread_create(&pInfo->thread, &thattr, sendRequest, pInfo); pInfo++; diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 7c4b57350a..eaa073348a 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -32,7 +32,7 @@ // global configurable int tsMaxSyncNum = 2; int tsSyncTcpThreads = 2; -int tsMaxWatchFiles = 100; +int tsMaxWatchFiles = 500; int tsMaxFwdInfo = 200; int tsSyncTimer = 1; //int sDebugFlag = 135; @@ -96,7 +96,7 @@ static void syncModuleInitFunc() { return; } - 
vgIdHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true); + vgIdHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, true); if (vgIdHash == NULL) { taosTmrCleanUp(syncTmrCtrl); taosCloseTcpThreadPool(tsTcpPool); @@ -516,7 +516,7 @@ static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) int ret = strcmp(pPeer->fqdn, tsNodeFqdn); if (pPeer->nodeId == 0 || (ret > 0) || (ret == 0 && pPeer->port > tsSyncPort)) { sDebug("%s, start to check peer connection", pPeer->id); - taosTmrReset(syncCheckPeerConnection, 100, pPeer, syncTmrCtrl, &pPeer->timer); + taosTmrReset(syncCheckPeerConnection, 100 + (pNode->vgId*10)%100, pPeer, syncTmrCtrl, &pPeer->timer); } syncAddNodeRef(pNode); @@ -815,7 +815,7 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) taosTmrStopA(&pPeer->timer); if (tsSyncNum >= tsMaxSyncNum) { sInfo("%s, %d syncs are in process, try later", pPeer->id, tsSyncNum); - taosTmrReset(syncTryRecoverFromMaster, 500, pPeer, syncTmrCtrl, &pPeer->timer); + taosTmrReset(syncTryRecoverFromMaster, 500 + (pNode->vgId*10)%200, pPeer, syncTmrCtrl, &pPeer->timer); return; } diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c index f881b680f5..1dd1cda343 100644 --- a/src/sync/src/syncRetrieve.c +++ b/src/sync/src/syncRetrieve.c @@ -57,13 +57,14 @@ static int syncAddIntoWatchList(SSyncPeer *pPeer, char *name) } } - *wd = inotify_add_watch(pPeer->notifyFd, name, IN_MODIFY); + *wd = inotify_add_watch(pPeer->notifyFd, name, IN_MODIFY | IN_DELETE); if (*wd == -1) { sError("%s, failed to add %s(%s)", pPeer->id, name, strerror(errno)); return -1; + } else { + sDebug("%s, monitor %s, wd:%d watchNum:%d", pPeer->id, name, *wd, pPeer->watchNum); } - pPeer->watchNum++; pPeer->watchNum = (pPeer->watchNum +1) % tsMaxWatchFiles; return 0; @@ -75,16 +76,24 @@ static int syncAreFilesModified(SSyncPeer *pPeer) char buf[2048]; int len = read(pPeer->notifyFd, buf, sizeof(buf)); - if (len <0 && errno != EAGAIN) { + if (len < 0 && errno != EAGAIN) { sError("%s, failed to read notify FD(%s)", pPeer->id, strerror(errno)); return -1; } int code = 0; - if (len >0) { - sDebug("%s, processed file is changed", pPeer->id); - pPeer->fileChanged = 1; - code = 1; + if (len > 0) { + const struct inotify_event *event; + char *ptr; + for (ptr = buf; ptr < buf + len; ptr += sizeof(struct inotify_event) + event->len) { + event = (const struct inotify_event *) ptr; + if ((event->mask & IN_MODIFY) || (event->mask & IN_DELETE)) { + sDebug("%s, processed file is changed", pPeer->id); + pPeer->fileChanged = 1; + code = 1; + break; + } + } } return code; diff --git a/src/sync/src/tarbitrator.c b/src/sync/src/tarbitrator.c index 3c6db88a9c..3538391a94 100644 --- a/src/sync/src/tarbitrator.c +++ b/src/sync/src/tarbitrator.c @@ -31,7 +31,7 @@ static void arbSignalHandler(int32_t signum, siginfo_t *sigInfo, void *context); static void arbProcessIncommingConnection(int connFd, uint32_t sourceIp); static void arbProcessBrokenLink(void *param); static int arbProcessPeerMsg(void *param, void *buffer); -static sem_t tsArbSem; +static tsem_t tsArbSem; static ttpool_h tsArbTcpPool; typedef struct { @@ -61,7 +61,7 @@ int main(int argc, char *argv[]) { } } - if (sem_init(&tsArbSem, 0, 0) != 0) { + if (tsem_init(&tsArbSem, 0, 0) != 0) { printf("failed to create exit semphore\n"); exit(EXIT_FAILURE); } @@ -98,7 +98,7 @@ int main(int argc, char *argv[]) { sInfo("TAOS arbitrator: %s:%d is running", tsNodeFqdn, tsServerPort); - for (int res = 
sem_wait(&tsArbSem); res != 0; res = sem_wait(&tsArbSem)) { + for (int res = tsem_wait(&tsArbSem); res != 0; res = tsem_wait(&tsArbSem)) { if (res != EINTR) break; } @@ -185,6 +185,6 @@ static void arbSignalHandler(int32_t signum, siginfo_t *sigInfo, void *context) sInfo("shut down signal is %d, sender PID:%d", signum, sigInfo->si_pid); // inform main thread to exit - sem_post(&tsArbSem); + tsem_post(&tsArbSem); } diff --git a/src/sync/test/CMakeLists.txt b/src/sync/test/CMakeLists.txt index a309539024..27614454f9 100644 --- a/src/sync/test/CMakeLists.txt +++ b/src/sync/test/CMakeLists.txt @@ -1,7 +1,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) -IF (TD_LINUX_64) +IF (TD_LINUX) INCLUDE_DIRECTORIES(../inc) LIST(APPEND CLIENT_SRC ./syncClient.c) diff --git a/src/sync/test/syncClient.c b/src/sync/test/syncClient.c index cd873b758b..16053d1088 100644 --- a/src/sync/test/syncClient.c +++ b/src/sync/test/syncClient.c @@ -25,8 +25,8 @@ typedef struct { int num; int numOfReqs; int msgSize; - sem_t rspSem; - sem_t *pOverSem; + tsem_t rspSem; + tsem_t *pOverSem; pthread_t thread; void *pRpc; } SInfo; @@ -38,7 +38,7 @@ void processResponse(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { if (pEpSet) pInfo->epSet = *pEpSet; rpcFreeCont(pMsg->pCont); - sem_post(&pInfo->rspSem); + tsem_post(&pInfo->rspSem); } int tcount = 0; @@ -59,7 +59,7 @@ void *sendRequest(void *param) { rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg); if ( pInfo->num % 20000 == 0 ) uInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num); - sem_wait(&pInfo->rspSem); + tsem_wait(&pInfo->rspSem); } uDebug("thread:%d, it is over", pInfo->index); @@ -169,7 +169,7 @@ int main(int argc, char *argv[]) { pInfo->epSet = epSet; pInfo->numOfReqs = numOfReqs; pInfo->msgSize = msgSize; - sem_init(&pInfo->rspSem, 0, 0); + tsem_init(&pInfo->rspSem, 0, 0); pInfo->pRpc = pRpc; pthread_create(&pInfo->thread, &thattr, sendRequest, pInfo); pInfo++; diff --git a/src/tsdb/inc/tsdbMain.h b/src/tsdb/inc/tsdbMain.h index 6119f7086c..7936ea423f 100644 --- a/src/tsdb/inc/tsdbMain.h +++ b/src/tsdb/inc/tsdbMain.h @@ -151,22 +151,22 @@ typedef struct { // ------------------ tsdbFile.c extern const char* tsdbFileSuffix[]; typedef enum { -#ifdef TSDB_IDX - TSDB_FILE_TYPE_IDX = 0, - TSDB_FILE_TYPE_HEAD, -#else TSDB_FILE_TYPE_HEAD = 0, -#endif TSDB_FILE_TYPE_DATA, TSDB_FILE_TYPE_LAST, - TSDB_FILE_TYPE_MAX, -#ifdef TSDB_IDX - TSDB_FILE_TYPE_NIDX, -#endif + TSDB_FILE_TYPE_STAT, TSDB_FILE_TYPE_NHEAD, - TSDB_FILE_TYPE_NLAST + TSDB_FILE_TYPE_NDATA, + TSDB_FILE_TYPE_NLAST, + TSDB_FILE_TYPE_NSTAT } TSDB_FILE_TYPE; +#ifndef TDINTERNAL +#define TSDB_FILE_TYPE_MAX (TSDB_FILE_TYPE_LAST+1) +#else +#define TSDB_FILE_TYPE_MAX (TSDB_FILE_TYPE_STAT+1) +#endif + typedef struct { uint32_t magic; uint32_t len; @@ -186,6 +186,7 @@ typedef struct { typedef struct { int fileId; + int state; // 0 for health, 1 for problem SFile files[TSDB_FILE_TYPE_MAX]; } SFileGroup; @@ -281,9 +282,6 @@ typedef struct { TSKEY minKey; TSKEY maxKey; SFileGroup fGroup; -#ifdef TSDB_IDX - SFile nIdxF; -#endif SFile nHeadF; SFile nLastF; } SHelperFile; @@ -477,6 +475,7 @@ int tsdbUpdateFileHeader(SFile* pFile); int tsdbEncodeSFileInfo(void** buf, const STsdbFileInfo* pInfo); void* tsdbDecodeSFileInfo(void* buf, STsdbFileInfo* pInfo); void tsdbRemoveFileGroup(STsdbRepo* pRepo, SFileGroup* pFGroup); +void tsdbGetFileInfoImpl(char* fname, uint32_t* magic, int32_t* size); void tsdbGetFidKeyRange(int daysPerFile, int8_t precision, int fileId, TSKEY *minKey, TSKEY *maxKey); // 
------------------ tsdbRWHelper.c @@ -497,10 +496,6 @@ void tsdbGetFidKeyRange(int daysPerFile, int8_t precision, int fileId, TS #define helperState(h) (h)->state #define TSDB_NLAST_FILE_OPENED(h) ((h)->files.nLastF.fd > 0) #define helperFileId(h) ((h)->files.fGroup.fileId) -#ifdef TSDB_IDX -#define helperIdxF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_IDX])) -#define helperNewIdxF(h) (&((h)->files.nIdxF)) -#endif #define helperHeadF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_HEAD])) #define helperDataF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_DATA])) #define helperLastF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_LAST])) @@ -512,7 +507,7 @@ int tsdbInitWriteHelper(SRWHelper* pHelper, STsdbRepo* pRepo); void tsdbDestroyHelper(SRWHelper* pHelper); void tsdbResetHelper(SRWHelper* pHelper); int tsdbSetAndOpenHelperFile(SRWHelper* pHelper, SFileGroup* pGroup); -int tsdbCloseHelperFile(SRWHelper* pHelper, bool hasError); +int tsdbCloseHelperFile(SRWHelper* pHelper, bool hasError, SFileGroup* pGroup); int tsdbSetHelperTable(SRWHelper* pHelper, STable* pTable, STsdbRepo* pRepo); int tsdbCommitTableData(SRWHelper* pHelper, SCommitIter* pCommitIter, SDataCols* pDataCols, TSKEY maxKey); int tsdbMoveLastBlockIfNeccessary(SRWHelper* pHelper); diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index ee3bad9835..d960dfb7ba 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -13,24 +13,24 @@ * along with this program. If not, see . */ #define _DEFAULT_SOURCE +#include + +#define TAOS_RANDOM_FILE_FAIL_TEST + #include "os.h" #include "talgo.h" #include "tchecksum.h" #include "tsdbMain.h" #include "tutil.h" -#define TAOS_RANDOM_FILE_FAIL_TEST -#ifdef TSDB_IDX -const char *tsdbFileSuffix[] = {".idx", ".head", ".data", ".last", "", ".i", ".h", ".l"}; -#else -const char *tsdbFileSuffix[] = {".head", ".data", ".last", "", ".h", ".l"}; -#endif +const char *tsdbFileSuffix[] = {".head", ".data", ".last", ".stat", ".h", ".d", ".l", ".s"}; -static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type); -static void tsdbDestroyFile(SFile *pFile); -static int compFGroup(const void *arg1, const void *arg2); -static int keyFGroupCompFunc(const void *key, const void *fgroup); +static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type); +static void tsdbDestroyFile(SFile *pFile); +static int compFGroup(const void *arg1, const void *arg2); +static int keyFGroupCompFunc(const void *key, const void *fgroup); +static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo); // ---------------- INTERNAL FUNCTIONS ---------------- STsdbFileH *tsdbNewFileH(STsdbCfg *pCfg) { @@ -73,12 +73,14 @@ void tsdbFreeFileH(STsdbFileH *pFileH) { int tsdbOpenFileH(STsdbRepo *pRepo) { ASSERT(pRepo != NULL && pRepo->tsdbFileH != NULL); - char *tDataDir = NULL; - DIR * dir = NULL; - int fid = 0; - int vid = 0; + char * tDataDir = NULL; + DIR * dir = NULL; + int fid = 0; + int vid = 0; + regex_t regex1, regex2; + int code = 0; - SFileGroup fileGroup = {0}; + SFileGroup fileGroup = {0}; STsdbFileH *pFileH = pRepo->tsdbFileH; tDataDir = tsdbGetDataDirName(pRepo->rootDir); @@ -94,28 +96,60 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { goto _err; } + code = regcomp(®ex1, "^v[0-9]+f[0-9]+\\.(head|data|last|stat)$", REG_EXTENDED); + if (code != 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + goto _err; + } + + code = regcomp(®ex2, "^v[0-9]+f[0-9]+\\.(h|d|l|s)$", REG_EXTENDED); + if (code != 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + goto _err; + } + struct dirent *dp = NULL; while 
((dp = readdir(dir)) != NULL) { - if (strncmp(dp->d_name, ".", 1) == 0 || strncmp(dp->d_name, "..", 2) == 0) continue; - sscanf(dp->d_name, "v%df%d", &vid, &fid); + if (strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0) continue; - if (tsdbSearchFGroup(pRepo->tsdbFileH, fid, TD_EQ) != NULL) continue; + code = regexec(®ex1, dp->d_name, 0, NULL, 0); + if (code == 0) { + sscanf(dp->d_name, "v%df%d", &vid, &fid); + if (vid != REPO_ID(pRepo)) { + tsdbError("vgId:%d invalid file %s exists, ignore it", REPO_ID(pRepo), dp->d_name); + continue; + } - memset((void *)(&fileGroup), 0, sizeof(SFileGroup)); - fileGroup.fileId = fid; - for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) { - if (tsdbInitFile(&fileGroup.files[type], pRepo, fid, type) < 0) { - tsdbError("vgId:%d failed to init file fid %d type %d", REPO_ID(pRepo), fid, type); + if (tsdbSearchFGroup(pFileH, fid, TD_EQ) != NULL) continue; + memset((void *)(&fileGroup), 0, sizeof(SFileGroup)); + fileGroup.fileId = fid; + + tsdbInitFileGroup(&fileGroup, pRepo); + } else if (code == REG_NOMATCH) { + code = regexec(®ex2, dp->d_name, 0, NULL, 0); + if (code == 0) { + tsdbDebug("vgId:%d invalid file %s exists, remove it", REPO_ID(pRepo), dp->d_name); + char *fname = malloc(strlen(tDataDir) + strlen(dp->d_name) + 2); + if (fname == NULL) goto _err; + sprintf(fname, "%s/%s", tDataDir, dp->d_name); + remove(fname); + free(fname); + } else if (code == REG_NOMATCH) { + tsdbError("vgId:%d invalid file %s exists, ignore it", REPO_ID(pRepo), dp->d_name); + continue; + } else { goto _err; } + } else { + goto _err; } - tsdbDebug("vgId:%d file group %d init", REPO_ID(pRepo), fid); - pFileH->pFGroup[pFileH->nFGroups++] = fileGroup; qsort((void *)(pFileH->pFGroup), pFileH->nFGroups, sizeof(SFileGroup), compFGroup); } + regfree(®ex1); + regfree(®ex2); taosTFree(tDataDir); closedir(dir); return 0; @@ -123,6 +157,9 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { _err: for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) tsdbDestroyFile(&fileGroup.files[type]); + regfree(®ex1); + regfree(®ex2); + taosTFree(tDataDir); if (dir != NULL) closedir(dir); tsdbCloseFileH(pRepo); @@ -387,6 +424,35 @@ void tsdbRemoveFileGroup(STsdbRepo *pRepo, SFileGroup *pFGroup) { } } +void tsdbGetFileInfoImpl(char *fname, uint32_t *magic, int32_t *size) { + char buf[TSDB_FILE_HEAD_SIZE] = "\0"; + uint32_t version = 0; + STsdbFileInfo info = {0}; + + int fd = open(fname, O_RDONLY); + if (fd < 0) goto _err; + + if (taosTRead(fd, buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) goto _err; + + if (!taosCheckChecksumWhole((uint8_t *)buf, TSDB_FILE_HEAD_SIZE)) goto _err; + + void *pBuf = (void *)buf; + pBuf = taosDecodeFixedU32(pBuf, &version); + pBuf = tsdbDecodeSFileInfo(pBuf, &info); + + off_t offset = lseek(fd, 0, SEEK_END); + if (offset < 0) goto _err; + close(fd); + + *magic = info.magic; + *size = (int32_t)offset; + +_err: + if (fd >= 0) close(fd); + *magic = TSDB_FILE_INIT_MAGIC; + *size = 0; +} + // ---------------- LOCAL FUNCTIONS ---------------- static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type) { uint32_t version; @@ -413,6 +479,10 @@ static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type) { pBuf = taosDecodeFixedU32(pBuf, &version); pBuf = tsdbDecodeSFileInfo(pBuf, &(pFile->info)); + if (pFile->info.size == TSDB_FILE_HEAD_SIZE) { + pFile->info.size = lseek(pFile->fd, 0, SEEK_END); + } + if (version != TSDB_FILE_VERSION) { tsdbError("vgId:%d file %s version %u is not the same as program version %u which may cause problem", 
REPO_ID(pRepo), pFile->fname, version, TSDB_FILE_VERSION); @@ -450,3 +520,14 @@ static int keyFGroupCompFunc(const void *key, const void *fgroup) { return fid > pFGroup->fileId ? 1 : -1; } } + +static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo) { + for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) { + if (tsdbInitFile(&pFGroup->files[type], pRepo, pFGroup->fileId, type) < 0) { + memset(&pFGroup->files[type].info, 0, sizeof(STsdbFileInfo)); + pFGroup->files[type].info.magic = TSDB_FILE_INIT_MAGIC; + pFGroup->state = 1; + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + } + } +} diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 3acad05504..bc979cca84 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -71,6 +71,8 @@ static void tsdbStopStream(STsdbRepo *pRepo); // Function declaration int32_t tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg) { + taosRemoveDir(rootDir); + if (mkdir(rootDir, 0755) < 0) { tsdbError("vgId:%d failed to create rootDir %s since %s", pCfg->tsdbId, rootDir, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); @@ -95,6 +97,8 @@ TSDB_REPO_T *tsdbOpenRepo(char *rootDir, STsdbAppH *pAppH) { STsdbCfg config = {0}; STsdbRepo *pRepo = NULL; + terrno = TSDB_CODE_SUCCESS; + if (tsdbLoadConfig(rootDir, &config) < 0) { tsdbError("failed to open repo in rootDir %s since %s", rootDir, tstrerror(terrno)); return NULL; @@ -212,9 +216,9 @@ uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_ char *sdup = strdup(pRepo->rootDir); char *prefix = dirname(sdup); int prefixLen = (int)strlen(prefix); - taosTFree(sdup); if (name[0] == 0) { // get the file from index or after, but not larger than eindex + taosTFree(sdup); int fid = (*index) / TSDB_FILE_TYPE_MAX; if (pFileH->nFGroups == 0 || fid > pFileH->pFGroup[pFileH->nFGroups - 1].fileId) { @@ -242,21 +246,22 @@ uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_ } } strcpy(name, fname + prefixLen); - } else { // get the named file at the specified index. If not there, return 0 - if (*index == TSDB_META_FILE_INDEX) { // get meta file - fname = tsdbGetMetaFileName(pRepo->rootDir); - magic = TSDB_META_FILE_MAGIC(pRepo->tsdbMeta); - } else { - int fid = (*index) / TSDB_FILE_TYPE_MAX; - SFileGroup *pFGroup = tsdbSearchFGroup(pFileH, fid, TD_EQ); - if (pFGroup == NULL) { // not found - return 0; - } - - SFile *pFile = &pFGroup->files[(*index) % TSDB_FILE_TYPE_MAX]; - fname = strdup(pFile->fname); - magic = pFile->info.magic; + } else { // get the named file at the specified index. 
If not there, return 0 + fname = malloc(prefixLen + strlen(name) + 2); + sprintf(fname, "%s/%s", prefix, name); + if (access(fname, F_OK) != 0) { + taosFree(fname); + taosFree(sdup); + return 0; } + if (*index == TSDB_META_FILE_INDEX) { // get meta file + tsdbGetStoreInfo(fname, &magic, size); + } else { + tsdbGetFileInfoImpl(fname, &magic, size); + } + taosFree(fname); + taosFree(sdup); + return magic; } if (stat(fname, &fState) < 0) { @@ -799,6 +804,7 @@ static int tsdbRestoreInfo(STsdbRepo *pRepo) { tsdbInitFileGroupIter(pFileH, &iter, TSDB_ORDER_DESC); while ((pFGroup = tsdbGetFileGroupNext(&iter)) != NULL) { + if (pFGroup->state) continue; if (tsdbSetAndOpenHelperFile(&rhelper, pFGroup) < 0) goto _err; if (tsdbLoadCompIdx(&rhelper, NULL) < 0) goto _err; for (int i = 1; i < pMeta->maxTables; i++) { diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 02c3ee4d85..2df8ff26bd 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -232,7 +232,7 @@ void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) { } pNode->next = pNode->prev = NULL; - tdListAppend(pRepo->mem->extraBuffList, pNode); + tdListAppendNode(pRepo->mem->extraBuffList, pNode); ptr = (void *)(pNode->data); tsdbTrace("vgId:%d allocate %d bytes from SYSTEM buffer block", REPO_ID(pRepo), bytes); } else { // allocate from TSDB buffer pool @@ -679,15 +679,10 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe } taosTFree(dataDir); - tsdbCloseHelperFile(pHelper, 0); + tsdbCloseHelperFile(pHelper, 0, pGroup); pthread_rwlock_wrlock(&(pFileH->fhlock)); -#ifdef TSDB_IDX - rename(helperNewIdxF(pHelper)->fname, helperIdxF(pHelper)->fname); - pGroup->files[TSDB_FILE_TYPE_IDX].info = helperNewIdxF(pHelper)->info; -#endif - rename(helperNewHeadF(pHelper)->fname, helperHeadF(pHelper)->fname); pGroup->files[TSDB_FILE_TYPE_HEAD].info = helperNewHeadF(pHelper)->info; @@ -706,7 +701,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe _err: taosTFree(dataDir); - tsdbCloseHelperFile(pHelper, 1); + tsdbCloseHelperFile(pHelper, 1, NULL); return -1; } diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index eb1f1fc340..a84bb69777 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -443,7 +443,7 @@ STsdbMeta *tsdbNewMeta(STsdbCfg *pCfg) { goto _err; } - pMeta->uidMap = taosHashInit((size_t)(TSDB_INIT_NTABLES * 1.1), taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false); + pMeta->uidMap = taosHashInit((size_t)(TSDB_INIT_NTABLES * 1.1), taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); if (pMeta->uidMap == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto _err; diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c index d63bf8ab78..12199f491f 100644 --- a/src/tsdb/src/tsdbRWHelper.c +++ b/src/tsdb/src/tsdbRWHelper.c @@ -14,13 +14,15 @@ */ #define _DEFAULT_SOURCE + +#define TAOS_RANDOM_FILE_FAIL_TEST + #include "os.h" #include "talgo.h" #include "tchecksum.h" #include "tcoding.h" #include "tscompression.h" #include "tsdbMain.h" -#define TAOS_RANDOM_FILE_FAIL_TEST #define TSDB_GET_COMPCOL_LEN(nCols) (sizeof(SCompData) + sizeof(SCompCol) * (nCols) + sizeof(TSCKSUM)) #define TSDB_KEY_COL_OFFSET 0 @@ -91,7 +93,7 @@ void tsdbResetHelper(SRWHelper *pHelper) { tsdbResetHelperTableImpl(pHelper); // Reset the file part - tsdbCloseHelperFile(pHelper, false); + tsdbCloseHelperFile(pHelper, false, NULL); tsdbResetHelperFileImpl(pHelper); pHelper->state = TSDB_HELPER_CLEAR_STATE; @@ -110,31 
+112,16 @@ int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) { // Set the files pHelper->files.fGroup = *pGroup; if (helperType(pHelper) == TSDB_WRITE_HELPER) { -#ifdef TSDB_IDX - tsdbGetDataFileName(pHelper->pRepo, pGroup->fileId, TSDB_FILE_TYPE_NIDX, helperNewIdxF(pHelper)->fname); -#endif tsdbGetDataFileName(pHelper->pRepo, pGroup->fileId, TSDB_FILE_TYPE_NHEAD, helperNewHeadF(pHelper)->fname); tsdbGetDataFileName(pHelper->pRepo, pGroup->fileId, TSDB_FILE_TYPE_NLAST, helperNewLastF(pHelper)->fname); } // Open the files -#ifdef TSDB_IDX - if (tsdbOpenFile(helperIdxF(pHelper), O_RDONLY) < 0) return -1; -#endif if (tsdbOpenFile(helperHeadF(pHelper), O_RDONLY) < 0) return -1; if (helperType(pHelper) == TSDB_WRITE_HELPER) { if (tsdbOpenFile(helperDataF(pHelper), O_RDWR) < 0) return -1; if (tsdbOpenFile(helperLastF(pHelper), O_RDWR) < 0) return -1; -#ifdef TSDB_IDX - // Create and open .i file - pFile = helperNewIdxF(pHelper); - if (tsdbOpenFile(pFile, O_WRONLY | O_CREAT) < 0) return -1; - pFile->info.size = TSDB_FILE_HEAD_SIZE; - pFile->info.magic = TSDB_FILE_INIT_MAGIC; - if (tsdbUpdateFileHeader(pFile) < 0) return -1; -#endif - // Create and open .h pFile = helperNewHeadF(pHelper); if (tsdbOpenFile(pFile, O_WRONLY | O_CREAT) < 0) return -1; @@ -161,14 +148,9 @@ int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) { return 0; } -int tsdbCloseHelperFile(SRWHelper *pHelper, bool hasError) { +int tsdbCloseHelperFile(SRWHelper *pHelper, bool hasError, SFileGroup *pGroup) { SFile *pFile = NULL; -#ifdef TSDB_IDX - pFile = helperIdxF(pHelper); - tsdbCloseFile(pFile); -#endif - pFile = helperHeadF(pHelper); tsdbCloseFile(pFile); @@ -177,10 +159,11 @@ int tsdbCloseHelperFile(SRWHelper *pHelper, bool hasError) { if (helperType(pHelper) == TSDB_WRITE_HELPER) { if (!hasError) { tsdbUpdateFileHeader(pFile); - fsync(pFile->fd); } else { - // TODO: shrink back to origin + ASSERT(pGroup != NULL); + taosFtruncate(pFile->fd, pGroup->files[TSDB_FILE_TYPE_DATA].info.size); } + fsync(pFile->fd); } tsdbCloseFile(pFile); } @@ -190,27 +173,16 @@ int tsdbCloseHelperFile(SRWHelper *pHelper, bool hasError) { if (helperType(pHelper) == TSDB_WRITE_HELPER && !TSDB_NLAST_FILE_OPENED(pHelper)) { if (!hasError) { tsdbUpdateFileHeader(pFile); - fsync(pFile->fd); } else { - // TODO: shrink back to origin + ASSERT(pGroup != NULL); + taosFtruncate(pFile->fd, pGroup->files[TSDB_FILE_TYPE_LAST].info.size); } + fsync(pFile->fd); } tsdbCloseFile(pFile); } if (helperType(pHelper) == TSDB_WRITE_HELPER) { -#ifdef TSDB_IDX - pFile = helperNewIdxF(pHelper); - if (pFile->fd > 0) { - if (!hasError) { - tsdbUpdateFileHeader(pFile); - fsync(pFile->fd); - } - tsdbCloseFile(pFile); - if (hasError) (void)remove(pFile->fname); - } -#endif - pFile = helperNewHeadF(pHelper); if (pFile->fd > 0) { if (!hasError) { @@ -412,10 +384,6 @@ int tsdbWriteCompInfo(SRWHelper *pHelper) { return -1; } -#ifdef TSDB_IDX - pFile = helperNewIdxF(pHelper); -#endif - if (taosTSizeof(pHelper->pWIdx) < pFile->info.len + sizeof(SCompIdx) + 12) { pHelper->pWIdx = taosTRealloc(pHelper->pWIdx, taosTSizeof(pHelper->pWIdx) == 0 ? 
1024 : taosTSizeof(pHelper->pWIdx) * 2); if (pHelper->pWIdx == NULL) { @@ -426,6 +394,9 @@ int tsdbWriteCompInfo(SRWHelper *pHelper) { void *pBuf = POINTER_SHIFT(pHelper->pWIdx, pFile->info.len); pFile->info.len += tsdbEncodeSCompIdx(&pBuf, &(pHelper->curCompIdx)); + + pFile->info.size += pIdx->len; + // ASSERT(pFile->info.size == lseek(pFile->fd, 0, SEEK_CUR)); } return 0; @@ -435,11 +406,7 @@ int tsdbWriteCompIdx(SRWHelper *pHelper) { ASSERT(helperType(pHelper) == TSDB_WRITE_HELPER); off_t offset = 0; -#ifdef TSDB_IDX - SFile *pFile = helperNewIdxF(pHelper); -#else SFile *pFile = helperNewHeadF(pHelper); -#endif pFile->info.len += sizeof(TSCKSUM); if (taosTSizeof(pHelper->pWIdx) < pFile->info.len) { @@ -460,7 +427,7 @@ int tsdbWriteCompIdx(SRWHelper *pHelper) { return -1; } - pFile->info.offset = offset; + ASSERT(offset == pFile->info.size); if (taosTWrite(pFile->fd, (void *)pHelper->pWIdx, pFile->info.len) < (int)pFile->info.len) { tsdbError("vgId:%d failed to write %d bytes to file %s since %s", REPO_ID(pHelper->pRepo), pFile->info.len, @@ -469,16 +436,16 @@ int tsdbWriteCompIdx(SRWHelper *pHelper) { return -1; } + pFile->info.offset = offset; + pFile->info.size += pFile->info.len; + // ASSERT(pFile->info.size == lseek(pFile->fd, 0, SEEK_CUR)); + return 0; } int tsdbLoadCompIdx(SRWHelper *pHelper, void *target) { ASSERT(pHelper->state == TSDB_HELPER_FILE_SET_AND_OPEN); -#ifdef TSDB_IDX - SFile *pFile = helperIdxF(pHelper); -#else SFile *pFile = helperHeadF(pHelper); -#endif int fd = pFile->fd; if (!helperHasState(pHelper, TSDB_HELPER_IDX_LOAD)) { @@ -608,14 +575,14 @@ int tsdbLoadCompData(SRWHelper *pHelper, SCompBlock *pCompBlock, void *target) { } if (taosTRead(pFile->fd, (void *)pHelper->pCompData, tsize) < tsize) { - tsdbError("vgId:%d failed to read %zu bytes from file %s since %s", REPO_ID(pHelper->pRepo), tsize, pFile->fname, + tsdbError("vgId:%d failed to read %" PRIzu " bytes from file %s since %s", REPO_ID(pHelper->pRepo), tsize, pFile->fname, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); return -1; } if (!taosCheckChecksumWhole((uint8_t *)pHelper->pCompData, (uint32_t)tsize)) { - tsdbError("vgId:%d file %s is broken, offset %" PRId64 " size %zu", REPO_ID(pHelper->pRepo), pFile->fname, + tsdbError("vgId:%d file %s is broken, offset %" PRId64 " size %" PRIzu "", REPO_ID(pHelper->pRepo), pFile->fname, (int64_t)pCompBlock->offset, tsize); terrno = TSDB_CODE_TDB_FILE_CORRUPTED; return -1; @@ -847,6 +814,9 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa (int)(pCompBlock->numOfRows), pCompBlock->len, pCompBlock->numOfCols, pCompBlock->keyFirst, pCompBlock->keyLast); + pFile->info.size += pCompBlock->len; + // ASSERT(pFile->info.size == lseek(pFile->fd, 0, SEEK_CUR)); + return 0; _err: @@ -1052,10 +1022,6 @@ static void tsdbResetHelperFileImpl(SRWHelper *pHelper) { helperLastF(pHelper)->fd = -1; helperNewHeadF(pHelper)->fd = -1; helperNewLastF(pHelper)->fd = -1; -#ifdef TSDB_IDX - helperIdxF(pHelper)->fd = -1; - helperNewIdxF(pHelper)->fd = -1; -#endif } static int tsdbInitHelperFile(SRWHelper *pHelper) { @@ -1064,7 +1030,7 @@ static int tsdbInitHelperFile(SRWHelper *pHelper) { } static void tsdbDestroyHelperFile(SRWHelper *pHelper) { - tsdbCloseHelperFile(pHelper, false); + tsdbCloseHelperFile(pHelper, false, NULL); tsdbResetHelperFileImpl(pHelper); taosTZfree(pHelper->idxH.pIdxArray); taosTZfree(pHelper->pWIdx); @@ -1687,4 +1653,4 @@ static int tsdbWriteBlockToProperFile(SRWHelper *pHelper, SDataCols *pDataCols, if 
(tsdbWriteBlockToFile(pHelper, pFile, pDataCols, pCompBlock, isLast, true) < 0) return -1; return 0; -} \ No newline at end of file +} diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 17b0239e3b..ac3a6dac07 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -16,7 +16,6 @@ #include "os.h" #include "tulog.h" #include "talgo.h" -#include "tutil.h" #include "tcompare.h" #include "exception.h" @@ -172,6 +171,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab if (pQueryHandle == NULL) { goto out_of_memory; } + pQueryHandle->order = pCond->order; pQueryHandle->window = pCond->twindow; pQueryHandle->pTsdb = tsdb; @@ -183,6 +183,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab pQueryHandle->qinfo = qinfo; pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock; pQueryHandle->allocSize = 0; + pQueryHandle->locateStart = false; if (tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb) != 0) { goto out_of_memory; @@ -193,6 +194,12 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab size_t sizeOfGroup = taosArrayGetSize(groupList->pGroupList); assert(sizeOfGroup >= 1 && pCond != NULL && pCond->numOfCols > 0); + if (ASCENDING_TRAVERSE(pCond->order)) { + assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey); + } else { + assert(pQueryHandle->window.skey >= pQueryHandle->window.ekey); + } + // allocate buffer in order to load data blocks from file int32_t numOfCols = pCond->numOfCols; @@ -200,6 +207,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab if (pQueryHandle->statis == NULL) { goto out_of_memory; } + pQueryHandle->pColumns = taosArrayInit(numOfCols, sizeof(SColumnInfoData)); // todo: use list instead of array? 
if (pQueryHandle->pColumns == NULL) { goto out_of_memory; @@ -221,8 +229,9 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab if (pQueryHandle->pTableCheckInfo == NULL) { goto out_of_memory; } + STsdbMeta* pMeta = tsdbGetMeta(tsdb); - assert(pMeta != NULL); + assert(pMeta != NULL && sizeOfGroup >= 1 && pCond != NULL && pCond->numOfCols > 0); for (int32_t i = 0; i < sizeOfGroup; ++i) { SArray* group = *(SArray**) taosArrayGet(groupList->pGroupList, i); @@ -231,26 +240,36 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab assert(gsize > 0); for (int32_t j = 0; j < gsize; ++j) { - STable* pTable = (STable*) taosArrayGetP(group, j); + STableKeyInfo* pKeyInfo = (STableKeyInfo*) taosArrayGet(group, j); STableCheckInfo info = { - .lastKey = pQueryHandle->window.skey, - //.tableId = pTable->tableId, - .pTableObj = pTable, + .lastKey = pKeyInfo->lastKey, + .pTableObj = pKeyInfo->pTable, }; - info.tableId = pTable->tableId; + info.tableId = ((STable*)(pKeyInfo->pTable))->tableId; assert(info.pTableObj != NULL && (info.pTableObj->type == TSDB_NORMAL_TABLE || info.pTableObj->type == TSDB_CHILD_TABLE || info.pTableObj->type == TSDB_STREAM_TABLE)); + info.tableId.tid = info.pTableObj->tableId.tid; + info.tableId.uid = info.pTableObj->tableId.uid; + + if (ASCENDING_TRAVERSE(pQueryHandle->order)) { + assert(info.lastKey >= pQueryHandle->window.skey); + } else { + assert(info.lastKey <= pQueryHandle->window.skey); + } + taosArrayPush(pQueryHandle->pTableCheckInfo, &info); + tsdbDebug("%p check table uid:%"PRId64", tid:%d from lastKey:%"PRId64" %p", pQueryHandle, info.tableId.uid, + info.tableId.tid, info.lastKey, qinfo); } } taosArraySort(pQueryHandle->pTableCheckInfo, tsdbCheckInfoCompar); pQueryHandle->defaultLoadColumn = getDefaultLoadColumns(pQueryHandle, true); - tsdbDebug("%p total numOfTable:%zu in query, %p", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo), pQueryHandle->qinfo); + tsdbDebug("%p total numOfTable:%" PRIzu " in query, %p", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo), pQueryHandle->qinfo); tsdbInitDataBlockLoadInfo(&pQueryHandle->dataBlockLoadInfo); tsdbInitCompBlockLoadInfo(&pQueryHandle->compBlockLoadInfo); @@ -316,17 +335,23 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh assert(pCheckInfo->iter == NULL && pCheckInfo->iiter == NULL); - // TODO: add uid check - if (pHandle->mem && pCheckInfo->tableId.tid < pHandle->mem->maxTables && - pHandle->mem->tData[pCheckInfo->tableId.tid] != NULL) { - pCheckInfo->iter = tSkipListCreateIterFromVal(pHandle->mem->tData[pCheckInfo->tableId.tid]->pData, - (const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order); + STableData* pMem = NULL; + STableData* pIMem = NULL; + + if (pHandle->mem && pCheckInfo->tableId.tid < pHandle->mem->maxTables) { + pMem = pHandle->mem->tData[pCheckInfo->tableId.tid]; + if (pMem != NULL && pMem->uid == pCheckInfo->tableId.uid) { // check uid + pCheckInfo->iter = + tSkipListCreateIterFromVal(pMem->pData, (const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order); + } } - if (pHandle->imem && pCheckInfo->tableId.tid < pHandle->imem->maxTables && - pHandle->imem->tData[pCheckInfo->tableId.tid] != NULL) { - pCheckInfo->iiter = tSkipListCreateIterFromVal(pHandle->imem->tData[pCheckInfo->tableId.tid]->pData, - (const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order); + if (pHandle->imem && pCheckInfo->tableId.tid < pHandle->imem->maxTables) { + pIMem = 
pHandle->imem->tData[pCheckInfo->tableId.tid]; + if (pIMem != NULL && pIMem->uid == pCheckInfo->tableId.uid) { // check uid + pCheckInfo->iiter = + tSkipListCreateIterFromVal(pIMem->pData, (const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order); + } } // both iterators are NULL, no data in buffer right now @@ -346,8 +371,17 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh SDataRow row = *(SDataRow *)SL_GET_NODE_DATA(node); TSKEY key = dataRowKey(row); // first timestamp in buffer - tsdbDebug("%p uid:%" PRId64", tid:%d check data in mem from skey:%" PRId64 ", order:%d, %p", pHandle, - pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pHandle->qinfo); + tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 + "-%" PRId64 ", lastKey:%" PRId64 ", %p", + pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pMem->keyFirst, pMem->keyLast, + pCheckInfo->lastKey, pHandle->qinfo); + + if (ASCENDING_TRAVERSE(order)) { + assert(pCheckInfo->lastKey <= key); + } else { + assert(pCheckInfo->lastKey >= key); + } + } else { tsdbDebug("%p uid:%"PRId64", tid:%d no data in mem, %p", pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pHandle->qinfo); @@ -359,8 +393,16 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh SDataRow row = *(SDataRow *)SL_GET_NODE_DATA(node); TSKEY key = dataRowKey(row); // first timestamp in buffer - tsdbDebug("%p uid:%" PRId64", tid:%d check data in imem from skey:%" PRId64 ", order:%d, %p", pHandle, - pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pHandle->qinfo); + tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 + "-%" PRId64 ", lastKey:%" PRId64 ", %p", + pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pIMem->keyFirst, pIMem->keyLast, + pCheckInfo->lastKey, pHandle->qinfo); + + if (ASCENDING_TRAVERSE(order)) { + assert(pCheckInfo->lastKey <= key); + } else { + assert(pCheckInfo->lastKey >= key); + } } else { tsdbDebug("%p uid:%"PRId64", tid:%d no data in imem, %p", pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pHandle->qinfo); @@ -556,6 +598,8 @@ static int32_t binarySearchForBlock(SCompBlock* pBlock, int32_t numOfBlocks, TSK static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlocks) { // load all the comp offset value for all tables in this file + int32_t code = TSDB_CODE_SUCCESS; + *numOfBlocks = 0; size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo); @@ -563,7 +607,10 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i); pCheckInfo->numOfBlocks = 0; - tsdbSetHelperTable(&pQueryHandle->rhelper, pCheckInfo->pTableObj, pQueryHandle->pTsdb); + if (tsdbSetHelperTable(&pQueryHandle->rhelper, pCheckInfo->pTableObj, pQueryHandle->pTsdb) != TSDB_CODE_SUCCESS) { + code = terrno; + break; + } SCompIdx* compIndex = &pQueryHandle->rhelper.curCompIdx; @@ -576,7 +623,11 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo assert(compIndex->len > 0); char* t = realloc(pCheckInfo->pCompInfo, compIndex->len); - assert(t != NULL); + if (t == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + code = TSDB_CODE_TDB_OUT_OF_MEMORY; + break; + } pCheckInfo->pCompInfo = (SCompInfo*) t; pCheckInfo->compSize = compIndex->len; @@ -618,7 
+669,7 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo (*numOfBlocks) += pCheckInfo->numOfBlocks; } - return TSDB_CODE_SUCCESS; + return code; } #define GET_FILE_DATA_BLOCK_INFO(_checkInfo, _block) \ @@ -629,14 +680,19 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo .uid = (_checkInfo)->tableId.uid}) -static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo, int32_t slotIndex) { +static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo, int32_t slotIndex) { STsdbRepo *pRepo = pQueryHandle->pTsdb; - bool blockLoaded = false; int64_t st = taosGetTimestampUs(); if (pCheckInfo->pDataCols == NULL) { STsdbMeta* pMeta = tsdbGetMeta(pRepo); + pCheckInfo->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock); + if (pCheckInfo->pDataCols == NULL) { + tsdbError("%p failed to malloc buf, %p", pQueryHandle, pQueryHandle->qinfo); + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return terrno; + } } STSchema* pSchema = tsdbGetTableSchema(pCheckInfo->pTableObj); @@ -645,17 +701,18 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo tdInitDataCols(pQueryHandle->rhelper.pDataCols[1], pSchema); int16_t* colIds = pQueryHandle->defaultLoadColumn->pData; + int32_t ret = tsdbLoadBlockDataCols(&(pQueryHandle->rhelper), pBlock, pCheckInfo->pCompInfo, colIds, (int)(QH_GET_NUM_OF_COLS(pQueryHandle))); - if (ret == TSDB_CODE_SUCCESS) { - SDataBlockLoadInfo* pBlockLoadInfo = &pQueryHandle->dataBlockLoadInfo; - - pBlockLoadInfo->fileGroup = pQueryHandle->pFileGroup; - pBlockLoadInfo->slot = pQueryHandle->cur.slot; - pBlockLoadInfo->tid = pCheckInfo->pTableObj->tableId.tid; - - blockLoaded = true; + if (ret != TSDB_CODE_SUCCESS) { + return terrno; } + SDataBlockLoadInfo* pBlockLoadInfo = &pQueryHandle->dataBlockLoadInfo; + + pBlockLoadInfo->fileGroup = pQueryHandle->pFileGroup; + pBlockLoadInfo->slot = pQueryHandle->cur.slot; + pBlockLoadInfo->tid = pCheckInfo->pTableObj->tableId.tid; + SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0]; assert(pCols->numOfRows != 0 && pCols->numOfRows <= pBlock->numOfRows); @@ -666,12 +723,14 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo tsdbDebug("%p load file block into buffer, index:%d, brange:%"PRId64"-%"PRId64" , rows:%d, elapsed time:%"PRId64 " us, %p", pQueryHandle, slotIndex, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfRows, elapsedTime, pQueryHandle->qinfo); - return blockLoaded; + + return TSDB_CODE_SUCCESS; } -static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo){ +static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo){ SQueryFilePos* cur = &pQueryHandle->cur; SDataBlockInfo binfo = GET_FILE_DATA_BLOCK_INFO(pCheckInfo, pBlock); + int32_t code = TSDB_CODE_SUCCESS; /*bool hasData = */ initTableMemIterator(pQueryHandle, pCheckInfo); SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order); @@ -699,10 +758,14 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* cur->mixBlock = true; cur->blockCompleted = false; - return; + return code; + } + + // return error, add test cases + if ((code = doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot)) != TSDB_CODE_SUCCESS) { + return code; } - 
doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot); doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock); } else { /* @@ -721,16 +784,20 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* cur->lastKey = binfo.window.ekey + (ASCENDING_TRAVERSE(pQueryHandle->order)? 1:-1); pCheckInfo->lastKey = cur->lastKey; } + + return code; } -static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo) { +static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo, bool* exists) { SQueryFilePos* cur = &pQueryHandle->cur; + int32_t code = TSDB_CODE_SUCCESS; if (ASCENDING_TRAVERSE(pQueryHandle->order)) { // query ended in/started from current block if (pQueryHandle->window.ekey < pBlock->keyLast || pCheckInfo->lastKey > pBlock->keyFirst) { - if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot)) { - return false; + if ((code = doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot)) != TSDB_CODE_SUCCESS) { + *exists = false; + return code; } SDataCols* pTSCol = pQueryHandle->rhelper.pDataCols[0]; @@ -746,12 +813,13 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock assert(pCheckInfo->lastKey <= pBlock->keyLast); doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock); } else { // the whole block is loaded in to buffer - handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo); + code = handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo); } } else { //desc order, query ended in current block if (pQueryHandle->window.ekey > pBlock->keyFirst || pCheckInfo->lastKey < pBlock->keyLast) { - if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot)) { - return false; + if ((code = doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot)) != TSDB_CODE_SUCCESS) { + *exists = false; + return code; } SDataCols* pTSCol = pQueryHandle->rhelper.pDataCols[0]; @@ -764,11 +832,12 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock assert(pCheckInfo->lastKey >= pBlock->keyFirst); doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock); } else { - handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo); + code = handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo); } } - return pQueryHandle->realNumOfRows > 0; + *exists = pQueryHandle->realNumOfRows > 0; + return code; } static int doBinarySearchKey(char* pValue, int num, TSKEY key, int order) { @@ -1524,9 +1593,7 @@ static int32_t getDataBlocksInFilesImpl(STsdbQueryHandle* pQueryHandle, bool* ex cur->fid = pQueryHandle->pFileGroup->fileId; STableBlockInfo* pBlockInfo = &pQueryHandle->pDataBlockInfo[cur->slot]; - *exists = loadFileDataBlock(pQueryHandle, pBlockInfo->compBlock, pBlockInfo->pTableCheckInfo); - - return TSDB_CODE_SUCCESS; + return loadFileDataBlock(pQueryHandle, pBlockInfo->compBlock, pBlockInfo->pTableCheckInfo, exists); } static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists) { @@ -1551,7 +1618,7 @@ static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists STableCheckInfo* pCheckInfo = pBlockInfo->pTableCheckInfo; // current block is done, try next - if (!cur->mixBlock || cur->blockCompleted) { + if ((!cur->mixBlock) || cur->blockCompleted) { if ((cur->slot == pQueryHandle->numOfBlocks - 1 && ASCENDING_TRAVERSE(pQueryHandle->order)) || (cur->slot == 0 && !ASCENDING_TRAVERSE(pQueryHandle->order))) { // all data blocks in current file has been 
checked already, try next file if exists @@ -1565,15 +1632,14 @@ static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists cur->blockCompleted = false; STableBlockInfo* pNext = &pQueryHandle->pDataBlockInfo[cur->slot]; - *exists = loadFileDataBlock(pQueryHandle, pNext->compBlock, pNext->pTableCheckInfo); - - return TSDB_CODE_SUCCESS; + return loadFileDataBlock(pQueryHandle, pNext->compBlock, pNext->pTableCheckInfo, exists); } } else { - handleDataMergeIfNeeded(pQueryHandle, pBlockInfo->compBlock, pCheckInfo); + tsdbDebug("%p continue in current data block, index:%d, %p", pQueryHandle, cur->slot, pQueryHandle->qinfo); + int32_t code = handleDataMergeIfNeeded(pQueryHandle, pBlockInfo->compBlock, pCheckInfo); *exists = pQueryHandle->realNumOfRows > 0; - return TSDB_CODE_SUCCESS; + return code; } } } @@ -1611,8 +1677,11 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { return false; } - /*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo(pHandle, &blockInfo); + tsdbRetrieveDataBlockInfo(pHandle, &blockInfo); /*SArray *pDataBlock = */tsdbRetrieveDataBlock(pHandle, pQueryHandle->defaultLoadColumn); + if (terrno != TSDB_CODE_SUCCESS) { + return false; + } if (pQueryHandle->cur.win.ekey == pQueryHandle->window.skey) { // data already retrieve, discard other data rows and return @@ -1670,9 +1739,9 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { STableCheckInfo* pCheckInfo = (STableCheckInfo*) taosArrayGet(pQueryHandle->pTableCheckInfo, j); STableCheckInfo info = { .lastKey = pSecQueryHandle->window.skey, - //.tableId = pCheckInfo->tableId, .pTableObj = pCheckInfo->pTableObj, }; + info.tableId = pCheckInfo->tableId; taosArrayPush(pSecQueryHandle->pTableCheckInfo, &info); @@ -1682,8 +1751,10 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { tsdbInitCompBlockLoadInfo(&pSecQueryHandle->compBlockLoadInfo); pSecQueryHandle->defaultLoadColumn = taosArrayClone(pQueryHandle->defaultLoadColumn); - bool ret = tsdbNextDataBlock((void*) pSecQueryHandle); - assert(ret); + if (!tsdbNextDataBlock((void*) pSecQueryHandle)) { + tsdbCleanupQueryHandle(pSecQueryHandle); + return false; + } tsdbRetrieveDataBlockInfo((void*) pSecQueryHandle, &blockInfo); tsdbRetrieveDataBlock((void*) pSecQueryHandle, pSecQueryHandle->defaultLoadColumn); @@ -1724,19 +1795,22 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { if (pQueryHandle->checkFiles) { bool exists = true; + int32_t code = getDataBlocksInFiles(pQueryHandle, &exists); if (code != TSDB_CODE_SUCCESS) { + pQueryHandle->activeIndex = 0; + pQueryHandle->checkFiles = false; + return false; } if (exists) { - elapsedTime = taosGetTimestampUs() - stime; - pQueryHandle->cost.checkForNextTime += elapsedTime; + pQueryHandle->cost.checkForNextTime += (taosGetTimestampUs() - stime); return exists; } pQueryHandle->activeIndex = 0; - pQueryHandle->checkFiles = false; + pQueryHandle->checkFiles = false; } // TODO: opt by consider the scan order @@ -2004,7 +2078,9 @@ SArray* tsdbRetrieveDataBlock(TsdbQueryHandleT* pQueryHandle, SArray* pIdList) { return pHandle->pColumns; } else { // only load the file block SCompBlock* pBlock = pBlockInfo->compBlock; - doLoadFileDataBlock(pHandle, pBlock, pCheckInfo, pHandle->cur.slot); + if (doLoadFileDataBlock(pHandle, pBlock, pCheckInfo, pHandle->cur.slot) != TSDB_CODE_SUCCESS) { + return NULL; + } // todo refactor int32_t numOfRows = copyDataFromFileBlock(pHandle, pHandle->outputCapacity, 0, 0, pBlock->numOfRows - 1); @@ -2032,7 +2108,9 @@ static int32_t getAllTableList(STable* pSuperTable, SArray* 
list) { SSkipListNode* pNode = tSkipListIterGet(iter); STable** pTable = (STable**) SL_GET_NODE_DATA((SSkipListNode*) pNode); - taosArrayPush(list, pTable); + + STableKeyInfo info = {.pTable = *pTable, .lastKey = TSKEY_INITIAL_VAL}; + taosArrayPush(list, &info); } tSkipListDestroyIter(iter); @@ -2070,7 +2148,7 @@ void filterPrepare(void* expr, void* param) { pInfo->sch = *pSchema; pInfo->optr = pExpr->_node.optr; pInfo->compare = getComparFunc(pSchema->type, pInfo->optr); - pInfo->param = pTSSchema; + pInfo->indexed = pTSSchema->columns->colId == pInfo->sch.colId; if (pInfo->optr == TSDB_RELATION_IN) { pInfo->q = (char*) pCond->arr; @@ -2088,8 +2166,8 @@ typedef struct STableGroupSupporter { int32_t tableGroupComparFn(const void *p1, const void *p2, const void *param) { STableGroupSupporter* pTableGroupSupp = (STableGroupSupporter*) param; - STable* pTable1 = *(STable**) p1; - STable* pTable2 = *(STable**) p2; + STable* pTable1 = ((STableKeyInfo*) p1)->pTable; + STable* pTable2 = ((STableKeyInfo*) p2)->pTable; for (int32_t i = 0; i < pTableGroupSupp->numOfCols; ++i) { SColIndex* pColIndex = &pTableGroupSupp->pCols[i]; @@ -2139,12 +2217,14 @@ int32_t tableGroupComparFn(const void *p1, const void *p2, const void *param) { return 0; } -void createTableGroupImpl(SArray* pGroups, SArray* pTableList, size_t numOfTables, STableGroupSupporter* pSupp, +void createTableGroupImpl(SArray* pGroups, SArray* pTableList, size_t numOfTables, TSKEY skey, STableGroupSupporter* pSupp, __ext_compar_fn_t compareFn) { STable* pTable = taosArrayGetP(pTableList, 0); - SArray* g = taosArrayInit(16, POINTER_BYTES); - taosArrayPush(g, &pTable); + SArray* g = taosArrayInit(16, sizeof(STableKeyInfo)); + + STableKeyInfo info = {.pTable = pTable, .lastKey = skey}; + taosArrayPush(g, &info); tsdbRefTable(pTable); for (int32_t i = 1; i < numOfTables; ++i) { @@ -2158,18 +2238,21 @@ void createTableGroupImpl(SArray* pGroups, SArray* pTableList, size_t numOfTable assert((*p)->type == TSDB_CHILD_TABLE); if (ret == 0) { - taosArrayPush(g, p); + STableKeyInfo info1 = {.pTable = *p, .lastKey = skey}; + taosArrayPush(g, &info1); } else { taosArrayPush(pGroups, &g); // current group is ended, start a new group - g = taosArrayInit(16, POINTER_BYTES); - taosArrayPush(g, p); + g = taosArrayInit(16, sizeof(STableKeyInfo)); + + STableKeyInfo info1 = {.pTable = *p, .lastKey = skey}; + taosArrayPush(g, &info1); } } taosArrayPush(pGroups, &g); } -SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pCols, int32_t numOfOrderCols) { +SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pCols, int32_t numOfOrderCols, TSKEY skey) { assert(pTableList != NULL); SArray* pTableGroup = taosArrayInit(1, POINTER_BYTES); @@ -2180,25 +2263,28 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC } if (numOfOrderCols == 0 || size == 1) { // no group by tags clause or only one table - SArray* sa = taosArrayInit(size, POINTER_BYTES); - for(int32_t i = 0; i < size; ++i) { - STable** pTable = taosArrayGet(pTableList, i); - assert((*pTable)->type == TSDB_CHILD_TABLE); + SArray* sa = taosArrayInit(size, sizeof(STableKeyInfo)); - tsdbRefTable(*pTable); - taosArrayPush(sa, pTable); + for(int32_t i = 0; i < size; ++i) { + STableKeyInfo *pKeyInfo = taosArrayGet(pTableList, i); + assert(((STable*)pKeyInfo->pTable)->type == TSDB_CHILD_TABLE); + + tsdbRefTable(pKeyInfo->pTable); + + STableKeyInfo info = {.pTable = pKeyInfo->pTable, .lastKey = skey}; + taosArrayPush(sa, &info); } 
taosArrayPush(pTableGroup, &sa); - tsdbDebug("all %zu tables belong to one group", size); + tsdbDebug("all %" PRIzu " tables belong to one group", size); } else { STableGroupSupporter *pSupp = (STableGroupSupporter *) calloc(1, sizeof(STableGroupSupporter)); pSupp->numOfCols = numOfOrderCols; pSupp->pTagSchema = pTagSchema; pSupp->pCols = pCols; - taosqsort(pTableList->pData, size, POINTER_BYTES, pSupp, tableGroupComparFn); - createTableGroupImpl(pTableGroup, pTableList, size, pSupp, tableGroupComparFn); + taosqsort(pTableList->pData, size, sizeof(STableKeyInfo), pSupp, tableGroupComparFn); + createTableGroupImpl(pTableGroup, pTableList, size, skey, pSupp, tableGroupComparFn); taosTFree(pSupp); } @@ -2271,7 +2357,7 @@ static int32_t doQueryTableList(STable* pSTable, SArray* pRes, tExprNode* pExpr) return TSDB_CODE_SUCCESS; } -int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, const char* pTagCond, size_t len, +int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, TSKEY skey, const char* pTagCond, size_t len, int16_t tagNameRelType, const char* tbnameCond, STableGroupInfo* pGroupInfo, SColIndex* pColIndex, int32_t numOfCols) { if (tsdbRLockRepoMeta(tsdb) < 0) goto _error; @@ -2295,7 +2381,7 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, const char* pT } //NOTE: not add ref count for super table - SArray* res = taosArrayInit(8, POINTER_BYTES); + SArray* res = taosArrayInit(8, sizeof(STableKeyInfo)); STSchema* pTagSchema = tsdbGetTableTagSchema(pTable); // no tags and tbname condition, all child tables of this stable are involved @@ -2307,9 +2393,9 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, const char* pT } pGroupInfo->numOfTables = taosArrayGetSize(res); - pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols); + pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey); - tsdbDebug("%p no table name/tag condition, all tables belong to one group, numOfTables:%zu", tsdb, pGroupInfo->numOfTables); + tsdbDebug("%p no table name/tag condition, all tables belong to one group, numOfTables:%" PRIzu "", tsdb, pGroupInfo->numOfTables); taosArrayDestroy(res); if (tsdbUnlockRepoMeta(tsdb) < 0) goto _error; @@ -2319,7 +2405,7 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, const char* pT int32_t ret = TSDB_CODE_SUCCESS; tExprNode* expr = NULL; - TRY(TSDB_MAX_TAGS) { + TRY(TSDB_MAX_TAG_CONDITIONS) { expr = exprTreeFromTableName(tbnameCond); if (expr == NULL) { expr = exprTreeFromBinary(pTagCond, len); @@ -2344,15 +2430,17 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, const char* pT } CATCH( code ) { CLEANUP_EXECUTE(); terrno = code; + tsdbUnlockRepoMeta(tsdb); // unlock tsdb in any cases + goto _error; // TODO: more error handling } END_TRY doQueryTableList(pTable, res, expr); pGroupInfo->numOfTables = taosArrayGetSize(res); - pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols); + pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey); - tsdbDebug("%p stable tid:%d, uid:%"PRIu64" query, numOfTables:%zu, belong to %zu groups", tsdb, pTable->tableId.tid, + tsdbDebug("%p stable tid:%d, uid:%"PRIu64" query, numOfTables:%" PRIzu ", belong to %" PRIzu " groups", tsdb, pTable->tableId.tid, pTable->tableId.uid, pGroupInfo->numOfTables, taosArrayGetSize(pGroupInfo->pGroupList)); taosArrayDestroy(res); @@ -2364,7 +2452,7 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t 
uid, const char* pT return terrno; } -int32_t tsdbGetOneTableGroup(TSDB_REPO_T* tsdb, uint64_t uid, STableGroupInfo* pGroupInfo) { +int32_t tsdbGetOneTableGroup(TSDB_REPO_T* tsdb, uint64_t uid, TSKEY startKey, STableGroupInfo* pGroupInfo) { if (tsdbRLockRepoMeta(tsdb) < 0) goto _error; STable* pTable = tsdbGetTableByUid(tsdbGetMeta(tsdb), uid); @@ -2381,9 +2469,11 @@ int32_t tsdbGetOneTableGroup(TSDB_REPO_T* tsdb, uint64_t uid, STableGroupInfo* p pGroupInfo->numOfTables = 1; pGroupInfo->pGroupList = taosArrayInit(1, POINTER_BYTES); - SArray* group = taosArrayInit(1, POINTER_BYTES); + SArray* group = taosArrayInit(1, sizeof(STableKeyInfo)); + + STableKeyInfo info = {.pTable = pTable, .lastKey = startKey}; + taosArrayPush(group, &info); - taosArrayPush(group, &pTable); taosArrayPush(pGroupInfo->pGroupList, &group); return TSDB_CODE_SUCCESS; @@ -2400,7 +2490,7 @@ int32_t tsdbGetTableGroupFromIdList(TSDB_REPO_T* tsdb, SArray* pTableIdList, STa assert(pTableIdList != NULL); size_t size = taosArrayGetSize(pTableIdList); pGroupInfo->pGroupList = taosArrayInit(1, POINTER_BYTES); - SArray* group = taosArrayInit(1, POINTER_BYTES); + SArray* group = taosArrayInit(1, sizeof(STableKeyInfo)); int32_t i = 0; for(; i < size; ++i) { @@ -2418,7 +2508,9 @@ int32_t tsdbGetTableGroupFromIdList(TSDB_REPO_T* tsdb, SArray* pTableIdList, STa } tsdbRefTable(pTable); - taosArrayPush(group, &pTable); + + STableKeyInfo info = {.pTable = pTable, .lastKey = id->key}; + taosArrayPush(group, &info); } if (tsdbUnlockRepoMeta(tsdb) < 0) { @@ -2507,4 +2599,4 @@ static int tsdbCheckInfoCompar(const void* key1, const void* key2) { ASSERT(false); return 0; } -} \ No newline at end of file +} diff --git a/src/util/inc/hash.h b/src/util/inc/hash.h index 137777f3cb..f289a4e8c3 100644 --- a/src/util/inc/hash.h +++ b/src/util/inc/hash.h @@ -20,7 +20,9 @@ extern "C" { #endif +#include "tarray.h" #include "hashfunc.h" +#include "tlockfree.h" #define HASH_MAX_CAPACITY (1024 * 1024 * 16) #define HASH_DEFAULT_LOAD_FACTOR (0.75) @@ -29,38 +31,45 @@ extern "C" { typedef void (*_hash_free_fn_t)(void *param); typedef struct SHashNode { - char *key; -// union { - struct SHashNode * prev; -// struct SHashEntry *prev1; -// }; -// + char *key; +// struct SHashNode *prev; struct SHashNode *next; uint32_t hashVal; // the hash value of key, if hashVal == HASH_VALUE_IN_TRASH, this node is moved to trash uint32_t keyLen; // length of the key - char data[]; + char *data; } SHashNode; -typedef struct SHashObj { - SHashNode **hashList; - size_t capacity; // number of slots - size_t size; // number of elements in hash table - _hash_fn_t hashFp; // hash function - _hash_free_fn_t freeFp; // hash node free callback function +typedef enum SHashLockTypeE { + HASH_NO_LOCK = 0, + HASH_ENTRY_LOCK = 1, +} SHashLockTypeE; -#if defined(LINUX) - pthread_rwlock_t *lock; -#else - pthread_mutex_t *lock; -#endif +typedef struct SHashEntry { + int32_t num; // number of elements in current entry + SRWLatch latch; // entry latch + SHashNode *next; +} SHashEntry; + +typedef struct SHashObj { + SHashEntry **hashList; + size_t capacity; // number of slots + size_t size; // number of elements in hash table + _hash_fn_t hashFp; // hash function + _hash_free_fn_t freeFp; // hash node free callback function + + SRWLatch lock; // read-write spin lock + SHashLockTypeE type; // lock type + bool enableUpdate; // enable update + SArray *pMemBlock; // memory block allocated for SHashEntry } SHashObj; typedef struct SHashMutableIterator { - SHashObj * pHashObj; + SHashObj *pHashObj; 
int32_t entryIndex; SHashNode *pCur; - SHashNode *pNext; // current node can be deleted for mutable iterator, so keep the next one before return current - int32_t num; // already check number of elements in hash table + SHashNode *pNext; // current node can be deleted for mutable iterator, so keep the next one before return current + size_t numOfChecked; // already check number of elements in hash table + size_t numOfEntries; // number of entries while the iterator is created } SHashMutableIterator; /** @@ -71,7 +80,7 @@ typedef struct SHashMutableIterator { * @param threadsafe thread safe or not * @return */ -SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool threadsafe); +SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, SHashLockTypeE type); /** * return the size of hash table @@ -101,13 +110,19 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da */ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen); +void *taosHashGetCB(SHashObj *pHashObj, const void *key, size_t keyLen, void(*fp)(void*)); + /** * remove item with the specified key * @param pHashObj * @param key * @param keyLen */ -void taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen); +int32_t taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen); + +int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLen, void* data, size_t dsize); + +int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), void *param); /** * clean up hash table @@ -115,13 +130,6 @@ void taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen); */ void taosHashCleanup(SHashObj *pHashObj); -/** - * Set the free callback function - * This function if set will be invoked right before freeing each hash node - * @param pHashObj - */ -void taosHashSetFreecb(SHashObj *pHashObj, _hash_free_fn_t freeFp); - /** * * @param pHashObj diff --git a/src/util/inc/tcache.h b/src/util/inc/tcache.h index 1e2aeae394..11121fcf3b 100644 --- a/src/util/inc/tcache.h +++ b/src/util/inc/tcache.h @@ -121,7 +121,7 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen * @param expireTime new expire time of data * @return */ -void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, void *key, size_t keyLen, uint64_t expireTime); +//void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, void *key, size_t keyLen, uint64_t expireTime); /** * Add one reference count for the exist data, and assign this data for a new owner. 
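
The hash.h hunks above swap the single table-wide pthread lock for a global `SRWLatch` plus a per-entry latch (`SHashEntry.latch`): lookups and inserts take the global latch in read mode so they only block a concurrent resize, while mutation of an individual bucket is serialized by that bucket's latch (the hash.c hunks further below implement this discipline). Callers now pass an `update` flag and a `SHashLockTypeE` to `taosHashInit`, removal reports success through a return code (`taosHashRemove`, `taosHashRemoveWithData`), and `taosHashGetCB` can run a callback on the found node while the entry is still latched. The sketch below only illustrates the new API as declared here; it is not code from this patch. The `SDemoPayload` struct and the uid key are invented for the example, and it assumes the TDengine util headers are on the include path.

```c
/*
 * Minimal, illustrative use of the reworked hash API declared above.
 * SDemoPayload and the uid key are hypothetical; the taos* calls follow the
 * signatures in this patch (hash.h / hashfunc.h from the TDengine tree).
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#include "hash.h"     /* taosHashInit/Put/Get/RemoveWithData/Cleanup */
#include "taosdef.h"  /* TSDB_DATA_TYPE_BIGINT */

typedef struct {
  int32_t tid;
  int64_t lastKey;
} SDemoPayload;

int demoHashUsage(void) {
  /* updates allowed; per-entry latch so readers only block a concurrent resize */
  SHashObj *pHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT),
                                 true, HASH_ENTRY_LOCK);
  if (pHash == NULL) return -1;

  uint64_t     uid = 100001;
  SDemoPayload payload = {.tid = 5, .lastKey = 1597000000000LL};

  /* the table stores its own copy of the payload, keyed by the 8-byte uid */
  taosHashPut(pHash, &uid, sizeof(uid), &payload, sizeof(payload));

  /* get returns a pointer to the stored copy, or NULL if the key is absent */
  SDemoPayload *p = taosHashGet(pHash, &uid, sizeof(uid));
  if (p != NULL) {
    printf("tid:%d lastKey:%" PRId64 "\n", p->tid, p->lastKey);
  }

  /* remove and copy the payload out in one step; 0 means the key was found */
  SDemoPayload removed = {0};
  if (taosHashRemoveWithData(pHash, &uid, sizeof(uid), &removed, sizeof(removed)) == 0) {
    printf("removed tid:%d, size now %zu\n", removed.tid, taosHashGetSize(pHash));
  }

  taosHashCleanup(pHash);
  return 0;
}
```

Passing `HASH_NO_LOCK` instead skips all latching, which appears to be what the tsdbMeta.c hunk earlier in this patch does for the uid map, presumably because the meta layer already serializes access with its own repo-level lock (see the `tsdbRLockRepoMeta` calls in tsdbRead.c).
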
diff --git a/src/util/inc/tkvstore.h b/src/util/inc/tkvstore.h index a6cc82e751..3b4e8e3757 100644 --- a/src/util/inc/tkvstore.h +++ b/src/util/inc/tkvstore.h @@ -58,6 +58,7 @@ int tdKVStoreStartCommit(SKVStore *pStore); int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLen); int tdDropKVStoreRecord(SKVStore *pStore, uint64_t uid); int tdKVStoreEndCommit(SKVStore *pStore); +void tsdbGetStoreInfo(char *fname, uint32_t *magic, int32_t *size); #ifdef __cplusplus } diff --git a/src/util/inc/tstoken.h b/src/util/inc/tstoken.h index 705c52af47..c1c6f2de7a 100644 --- a/src/util/inc/tstoken.h +++ b/src/util/inc/tstoken.h @@ -24,8 +24,6 @@ extern "C" { #include "tutil.h" #include "ttokendef.h" - - #define TSQL_TBNAME "TBNAME" #define TSQL_TBNAME_L "tbname" diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 37c9146a49..714f36f1cb 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -18,64 +18,52 @@ #include "tulog.h" #include "taosdef.h" -static FORCE_INLINE void __wr_lock(void *lock) { - if (lock == NULL) { +#define HASH_NEED_RESIZE(_h) ((_h)->size >= (_h)->capacity * HASH_DEFAULT_LOAD_FACTOR) + +#define DO_FREE_HASH_NODE(_n) \ + do { \ + taosTFree((_n)->data); \ + taosTFree(_n); \ + } while (0) + +#define FREE_HASH_NODE(_h, _n) \ + do { \ + if ((_h)->freeFp) { \ + (_h)->freeFp((_n)->data); \ + } \ + \ + DO_FREE_HASH_NODE(_n); \ + } while (0); + +static FORCE_INLINE void __wr_lock(void *lock, int32_t type) { + if (type == HASH_NO_LOCK) { + return; + } + taosWLockLatch(lock); +} + +static FORCE_INLINE void __rd_lock(void *lock, int32_t type) { + if (type == HASH_NO_LOCK) { return; } -#if defined(LINUX) - pthread_rwlock_wrlock(lock); -#else - pthread_mutex_lock(lock); -#endif + taosRLockLatch(lock); } -static FORCE_INLINE void __rd_lock(void *lock) { - if (lock == NULL) { +static FORCE_INLINE void __rd_unlock(void *lock, int32_t type) { + if (type == HASH_NO_LOCK) { return; } -#if defined(LINUX) - pthread_rwlock_rdlock(lock); -#else - pthread_mutex_lock(lock); -#endif + taosRUnLockLatch(lock); } -static FORCE_INLINE void __unlock(void *lock) { - if (lock == NULL) { +static FORCE_INLINE void __wr_unlock(void *lock, int32_t type) { + if (type == HASH_NO_LOCK) { return; } -#if defined(LINUX) - pthread_rwlock_unlock(lock); -#else - pthread_mutex_unlock(lock); -#endif -} - -static FORCE_INLINE int32_t __lock_init(void *lock) { - if (lock == NULL) { - return 0; - } - -#if defined(LINUX) - return pthread_rwlock_init(lock, NULL); -#else - return pthread_mutex_init(lock, NULL); -#endif -} - -static FORCE_INLINE void __lock_destroy(void *lock) { - if (lock == NULL) { - return; - } - -#if defined(LINUX) - pthread_rwlock_destroy(lock); -#else - pthread_mutex_destroy(lock); -#endif + taosWUnLockLatch(lock); } static FORCE_INLINE int32_t taosHashCapacity(int32_t length) { @@ -86,37 +74,17 @@ static FORCE_INLINE int32_t taosHashCapacity(int32_t length) { return i; } -/** - * Get SHashNode from hashlist, nodes from trash are not included. 
- * @param pHashObj Cache objection - * @param key key for hash - * @param keyLen key length - * @param hashVal hash value by hash function - * @return - */ -FORCE_INLINE SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const void *key, uint32_t keyLen, uint32_t *hashVal) { - uint32_t hash = (*pHashObj->hashFp)(key, keyLen); - - int32_t slot = HASH_INDEX(hash, pHashObj->capacity); - SHashNode *pNode = pHashObj->hashList[slot]; - +static FORCE_INLINE SHashNode *doSearchInEntryList(SHashEntry *pe, const void *key, size_t keyLen, uint32_t hashVal) { + SHashNode *pNode = pe->next; while (pNode) { if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { + assert(pNode->hashVal == hashVal); break; } - + pNode = pNode->next; } - - if (pNode) { - assert(HASH_INDEX(pNode->hashVal, pHashObj->capacity) == slot); - } - - // return the calculated hash value, to avoid calculating it again in other functions - if (hashVal != NULL) { - *hashVal = hash; - } - + return pNode; } @@ -147,7 +115,13 @@ static SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *p * @param dsize size of actual data * @return hash node */ -static SHashNode *doUpdateHashNode(SHashNode *pNode, const void *key, size_t keyLen, const void *pData, size_t dsize); +static FORCE_INLINE SHashNode *doUpdateHashNode(SHashNode *pNode, SHashNode *pNewNode) { + assert(pNode->keyLen == pNewNode->keyLen); + SWAP(pNode->key, pNewNode->key, void *); + SWAP(pNode->data, pNewNode->data, void *); + + return pNewNode; +} /** * insert the hash node at the front of the linked list @@ -155,7 +129,7 @@ static SHashNode *doUpdateHashNode(SHashNode *pNode, const void *key, size_t key * @param pHashObj * @param pNode */ -static void doAddToHashTable(SHashObj *pHashObj, SHashNode *pNode); +static void pushfrontNodeInEntryList(SHashEntry *pEntry, SHashNode *pNode); /** * Get the next element in hash table for iterator @@ -164,7 +138,7 @@ static void doAddToHashTable(SHashObj *pHashObj, SHashNode *pNode); */ static SHashNode *getNextHashNode(SHashMutableIterator *pIter); -SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool threadsafe) { +SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, SHashLockTypeE type) { if (capacity == 0 || fn == NULL) { return NULL; } @@ -180,151 +154,360 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool threadsafe) { assert((pHashObj->capacity & (pHashObj->capacity - 1)) == 0); pHashObj->hashFp = fn; + pHashObj->type = type; + pHashObj->enableUpdate = update; - pHashObj->hashList = (SHashNode **)calloc(pHashObj->capacity, POINTER_BYTES); + pHashObj->hashList = (SHashEntry **)calloc(pHashObj->capacity, sizeof(void *)); if (pHashObj->hashList == NULL) { free(pHashObj); uError("failed to allocate memory, reason:%s", strerror(errno)); return NULL; - } + } else { + pHashObj->pMemBlock = taosArrayInit(8, sizeof(void *)); - if (threadsafe) { -#if defined(LINUX) - pHashObj->lock = calloc(1, sizeof(pthread_rwlock_t)); -#else - pHashObj->lock = calloc(1, sizeof(pthread_mutex_t)); -#endif - } + void *p = calloc(pHashObj->capacity, sizeof(SHashEntry)); + for (int32_t i = 0; i < pHashObj->capacity; ++i) { + pHashObj->hashList[i] = (void *)((char *)p + i * sizeof(SHashEntry)); + } - if (__lock_init(pHashObj->lock) != 0) { - free(pHashObj->hashList); - free(pHashObj); - - uError("failed to init lock, reason:%s", strerror(errno)); - return NULL; + taosArrayPush(pHashObj->pMemBlock, &p); } return pHashObj; } -size_t taosHashGetSize(const SHashObj *pHashObj) { - if (pHashObj 
== NULL) { - return 0; - } - - return pHashObj->size; -} +size_t taosHashGetSize(const SHashObj *pHashObj) { return (pHashObj == NULL) ? 0 : pHashObj->size; } int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t size) { - __wr_lock(pHashObj->lock); - - uint32_t hashVal = 0; - SHashNode *pNode = doGetNodeFromHashTable(pHashObj, key, (uint32_t)keyLen, &hashVal); - - if (pNode == NULL) { // no data in hash table with the specified key, add it into hash table - taosHashTableResize(pHashObj); - - SHashNode *pNewNode = doCreateHashNode(key, keyLen, data, size, hashVal); - if (pNewNode == NULL) { - __unlock(pHashObj->lock); - - return -1; - } - - doAddToHashTable(pHashObj, pNewNode); - } else { - SHashNode *pNewNode = doUpdateHashNode(pNode, key, keyLen, data, size); - if (pNewNode == NULL) { - __unlock(pHashObj->lock); - return -1; - } - - if (pNewNode->prev) { - pNewNode->prev->next = pNewNode; - } else { - int32_t slot = HASH_INDEX(pNewNode->hashVal, pHashObj->capacity); - - assert(pHashObj->hashList[slot] == pNode); - pHashObj->hashList[slot] = pNewNode; - } - - if (pNewNode->next) { - (pNewNode->next)->prev = pNewNode; - } + uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen); + SHashNode *pNewNode = doCreateHashNode(key, keyLen, data, size, hashVal); + if (pNewNode == NULL) { + return -1; } - __unlock(pHashObj->lock); - return 0; + // need the resize process, write lock applied + if (HASH_NEED_RESIZE(pHashObj)) { + __wr_lock(&pHashObj->lock, pHashObj->type); + taosHashTableResize(pHashObj); + __wr_unlock(&pHashObj->lock, pHashObj->type); + } + + __rd_lock(&pHashObj->lock, pHashObj->type); + + int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); + SHashEntry *pe = pHashObj->hashList[slot]; + + if (pHashObj->type == HASH_ENTRY_LOCK) { + taosWLockLatch(&pe->latch); + } + + SHashNode *pNode = pe->next; + if (pe->num > 0) { + assert(pNode != NULL); + } else { + assert(pNode == NULL); + } + + while (pNode) { + if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { + assert(pNode->hashVal == hashVal); + break; + } + + pNode = pNode->next; + } + + if (pNode == NULL) { + // no data in hash table with the specified key, add it into hash table + pushfrontNodeInEntryList(pe, pNewNode); + + if (pe->num == 0) { + assert(pe->next == NULL); + } else { + assert(pe->next != NULL); + } + + if (pHashObj->type == HASH_ENTRY_LOCK) { + taosWUnLockLatch(&pe->latch); + } + + // enable resize + __rd_unlock(&pHashObj->lock, pHashObj->type); + atomic_add_fetch_64(&pHashObj->size, 1); + + return 0; + } else { + // not support the update operation, return error + if (pHashObj->enableUpdate) { + doUpdateHashNode(pNode, pNewNode); + } + + if (pHashObj->type == HASH_ENTRY_LOCK) { + taosWUnLockLatch(&pe->latch); + } + + // enable resize + __rd_unlock(&pHashObj->lock, pHashObj->type); + + DO_FREE_HASH_NODE(pNewNode); + return pHashObj->enableUpdate ? 
0 : -1; + } } void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) { - __rd_lock(pHashObj->lock); + return taosHashGetCB(pHashObj, key, keyLen, NULL); +} - uint32_t hashVal = 0; - SHashNode *pNode = doGetNodeFromHashTable(pHashObj, key, (int32_t)keyLen, &hashVal); - - __unlock(pHashObj->lock); - - if (pNode != NULL) { - assert(pNode->hashVal == hashVal); - - return pNode->data; - } else { +void *taosHashGetCB(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *)) { + if (pHashObj->size <= 0 || keyLen == 0 || key == NULL) { return NULL; } + + uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen); + + // only add the read lock to disable the resize process + __rd_lock(&pHashObj->lock, pHashObj->type); + + int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); + SHashEntry *pe = pHashObj->hashList[slot]; + + // no data, return directly + if (atomic_load_32(&pe->num) == 0) { + + __rd_unlock(&pHashObj->lock, pHashObj->type); + return NULL; + } + + char *data = NULL; + + // lock entry + if (pHashObj->type == HASH_ENTRY_LOCK) { + taosRLockLatch(&pe->latch); + } + + if (pe->num > 0) { + assert(pe->next != NULL); + } else { + assert(pe->next == NULL); + } + + SHashNode *pNode = doSearchInEntryList(pe, key, keyLen, hashVal); + if (pNode != NULL) { + if (fp != NULL) { + fp(pNode->data); + } + + data = pNode->data; + } + + if (pHashObj->type == HASH_ENTRY_LOCK) { + taosRUnLockLatch(&pe->latch); + } + + __rd_unlock(&pHashObj->lock, pHashObj->type); + return data; +} + +int32_t taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) { + return taosHashRemoveWithData(pHashObj, key, keyLen, NULL, 0); +} + +int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t dsize) { + if (pHashObj == NULL || pHashObj->size <= 0) { + return -1; + } + + uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen); + + // disable the resize process + __rd_lock(&pHashObj->lock, pHashObj->type); + + int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); + SHashEntry *pe = pHashObj->hashList[slot]; + + // no data, return directly + if (pe->num == 0) { + assert(pe->next == NULL); + __rd_unlock(&pHashObj->lock, pHashObj->type); + return -1; + } + + if (pHashObj->type == HASH_ENTRY_LOCK) { + taosWLockLatch(&pe->latch); + } + + if (pe->num == 0) { + assert(pe->next == NULL); + } else { + assert(pe->next != NULL); + } + + // double check after locked + if (pe->num == 0) { + assert(pe->next == NULL); + taosWUnLockLatch(&pe->latch); + + __rd_unlock(&pHashObj->lock, pHashObj->type); + return -1; + } + + SHashNode *pNode = pe->next; + SHashNode *pRes = NULL; + + // remove it + if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) { + pe->num -= 1; + pRes = pNode; + pe->next = pNode->next; + } else { + while (pNode->next != NULL) { + if (((pNode->next)->keyLen == keyLen) && (memcmp((pNode->next)->key, key, keyLen) == 0)) { + assert((pNode->next)->hashVal == hashVal); + break; + } + + pNode = pNode->next; + } + + + if (pNode->next != NULL) { + pe->num -= 1; + pRes = pNode->next; + pNode->next = pNode->next->next; + } + } + + if (pHashObj->type == HASH_ENTRY_LOCK) { + taosWUnLockLatch(&pe->latch); + } + + __rd_unlock(&pHashObj->lock, pHashObj->type); + + if (data != NULL && pRes != NULL) { + memcpy(data, pRes->data, dsize); + } + + if (pRes != NULL) { + atomic_sub_fetch_64(&pHashObj->size, 1); + FREE_HASH_NODE(pHashObj, pRes); + + if (pe->num == 0) { + assert(pe->next == NULL); + } else { + assert(pe->next != NULL); 
+ } + + return 0; + } else { + + if (pe->num == 0) { + assert(pe->next == NULL); + } else { + assert(pe->next != NULL); + } + + return -1; + } } -void taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) { - __wr_lock(pHashObj->lock); - - uint32_t val = 0; - SHashNode *pNode = doGetNodeFromHashTable(pHashObj, key, (uint32_t)keyLen, &val); - if (pNode == NULL) { - __unlock(pHashObj->lock); - return; +int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), void *param) { + if (pHashObj == NULL || pHashObj->size == 0) { + return 0; } - SHashNode *pNext = pNode->next; - if (pNode->prev == NULL) { - int32_t slot = HASH_INDEX(val, pHashObj->capacity); - assert(pHashObj->hashList[slot] == pNode); - - pHashObj->hashList[slot] = pNext; - } else { - pNode->prev->next = pNext; - } - - if (pNext != NULL) { - pNext->prev = pNode->prev; + // disable the resize process + __rd_lock(&pHashObj->lock, pHashObj->type); + + int32_t numOfEntries = (int32_t)pHashObj->capacity; + for (int32_t i = 0; i < numOfEntries; ++i) { + SHashEntry *pEntry = pHashObj->hashList[i]; + if (pEntry->num == 0) { + continue; + } + + if (pHashObj->type == HASH_ENTRY_LOCK) { + taosWLockLatch(&pEntry->latch); + } + + // todo remove the first node + SHashNode *pNode = NULL; + while((pNode = pEntry->next) != NULL) { + if (fp && (!fp(param, pNode->data))) { + pEntry->num -= 1; + atomic_sub_fetch_64(&pHashObj->size, 1); + + pEntry->next = pNode->next; + + if (pEntry->num == 0) { + assert(pEntry->next == NULL); + } else { + assert(pEntry->next != NULL); + } + + FREE_HASH_NODE(pHashObj, pNode); + } else { + break; + } + } + + // handle the following node + if (pNode != NULL) { + assert(pNode == pEntry->next); + SHashNode *pNext = NULL; + + while ((pNext = pNode->next) != NULL) { + // not qualified, remove it + if (fp && (!fp(param, pNext->data))) { + pNode->next = pNext->next; + pEntry->num -= 1; + atomic_sub_fetch_64(&pHashObj->size, 1); + + if (pEntry->num == 0) { + assert(pEntry->next == NULL); + } else { + assert(pEntry->next != NULL); + } + + FREE_HASH_NODE(pHashObj, pNext); + } else { + pNode = pNext; + } + } + } + + if (pHashObj->type == HASH_ENTRY_LOCK) { + taosWUnLockLatch(&pEntry->latch); + } } - pHashObj->size--; - - pNode->next = NULL; - pNode->prev = NULL; - - taosTFree(pNode); - __unlock(pHashObj->lock); + __rd_unlock(&pHashObj->lock, pHashObj->type); + return 0; } void taosHashCleanup(SHashObj *pHashObj) { - if (pHashObj == NULL) return; + if (pHashObj == NULL) { + return; + } SHashNode *pNode, *pNext; - __wr_lock(pHashObj->lock); + __wr_lock(&pHashObj->lock, pHashObj->type); if (pHashObj->hashList) { for (int32_t i = 0; i < pHashObj->capacity; ++i) { - pNode = pHashObj->hashList[i]; + SHashEntry *pEntry = pHashObj->hashList[i]; + if (pEntry->num == 0) { + assert(pEntry->next == 0); + continue; + } + + pNode = pEntry->next; + assert(pNode != NULL); while (pNode) { pNext = pNode->next; - if (pHashObj->freeFp) { - pHashObj->freeFp(pNode->data); - } + FREE_HASH_NODE(pHashObj, pNode); - free(pNode); pNode = pNext; } } @@ -332,20 +515,19 @@ void taosHashCleanup(SHashObj *pHashObj) { free(pHashObj->hashList); } - __unlock(pHashObj->lock); - __lock_destroy(pHashObj->lock); + __wr_unlock(&pHashObj->lock, pHashObj->type); - taosTFree(pHashObj->lock); - memset(pHashObj, 0, sizeof(SHashObj)); - free(pHashObj); -} - -void taosHashSetFreecb(SHashObj *pHashObj, _hash_free_fn_t freeFp) { - if (pHashObj == NULL || freeFp == NULL) { - return; + // destroy mem block + size_t memBlock = 
taosArrayGetSize(pHashObj->pMemBlock); + for (int32_t i = 0; i < memBlock; ++i) { + void *p = taosArrayGetP(pHashObj->pMemBlock, i); + taosTFree(p); } - pHashObj->freeFp = freeFp; + taosArrayDestroy(pHashObj->pMemBlock); + + memset(pHashObj, 0, sizeof(SHashObj)); + free(pHashObj); } SHashMutableIterator *taosHashCreateIter(SHashObj *pHashObj) { @@ -355,6 +537,9 @@ SHashMutableIterator *taosHashCreateIter(SHashObj *pHashObj) { } pIter->pHashObj = pHashObj; + + // keep it in local variable, in case the resize operation expand the size + pIter->numOfEntries = pHashObj->capacity; return pIter; } @@ -369,28 +554,42 @@ bool taosHashIterNext(SHashMutableIterator *pIter) { } // check the first one - if (pIter->num == 0) { + if (pIter->numOfChecked == 0) { assert(pIter->pCur == NULL && pIter->pNext == NULL); while (1) { - SHashNode *pEntry = pIter->pHashObj->hashList[pIter->entryIndex]; - if (pEntry == NULL) { + SHashEntry *pEntry = pIter->pHashObj->hashList[pIter->entryIndex]; + if (pEntry->num == 0) { + assert(pEntry->next == NULL); + pIter->entryIndex++; continue; } - pIter->pCur = pEntry; + if (pIter->pHashObj->type == HASH_ENTRY_LOCK) { + taosRLockLatch(&pEntry->latch); + } + + pIter->pCur = pEntry->next; if (pIter->pCur->next) { pIter->pNext = pIter->pCur->next; + + if (pIter->pHashObj->type == HASH_ENTRY_LOCK) { + taosRUnLockLatch(&pEntry->latch); + } } else { + if (pIter->pHashObj->type == HASH_ENTRY_LOCK) { + taosRUnLockLatch(&pEntry->latch); + } + pIter->pNext = getNextHashNode(pIter); } break; } - pIter->num++; + pIter->numOfChecked++; return true; } else { assert(pIter->pCur != NULL); @@ -400,7 +599,7 @@ bool taosHashIterNext(SHashMutableIterator *pIter) { return false; } - pIter->num++; + pIter->numOfChecked++; if (pIter->pCur->next) { pIter->pNext = pIter->pCur->next; @@ -432,19 +631,9 @@ int32_t taosHashGetMaxOverflowLinkLength(const SHashObj *pHashObj) { int32_t num = 0; for (int32_t i = 0; i < pHashObj->size; ++i) { - SHashNode *pEntry = pHashObj->hashList[i]; - if (pEntry == NULL) { - continue; - } - - int32_t j = 0; - while(pEntry != NULL) { - pEntry = pEntry->next; - j++; - } - - if (num < j) { - num = j; + SHashEntry *pEntry = pHashObj->hashList[i]; + if (num < pEntry->num) { + num = pEntry->num; } } @@ -452,146 +641,166 @@ int32_t taosHashGetMaxOverflowLinkLength(const SHashObj *pHashObj) { } void taosHashTableResize(SHashObj *pHashObj) { - if (pHashObj->size < pHashObj->capacity * HASH_DEFAULT_LOAD_FACTOR) { + if (!HASH_NEED_RESIZE(pHashObj)) { return; } - + // double the original capacity SHashNode *pNode = NULL; SHashNode *pNext = NULL; - - int32_t newSize = (int32_t)(pHashObj->capacity) << 1u; + + int32_t newSize = (int32_t)(pHashObj->capacity << 1u); if (newSize > HASH_MAX_CAPACITY) { -// uDebug("current capacity:%d, maximum capacity:%d, no resize applied due to limitation is reached", -// pHashObj->capacity, HASH_MAX_CAPACITY); + // uDebug("current capacity:%d, maximum capacity:%d, no resize applied due to limitation is reached", + // pHashObj->capacity, HASH_MAX_CAPACITY); return; } - int32_t pointerSize = POINTER_BYTES; - void *pNewEntry = realloc(pHashObj->hashList, pointerSize * newSize); - if (pNewEntry == NULL) {// todo handle error -// uDebug("cache resize failed due to out of memory, capacity remain:%d", pHashObj->capacity); + int64_t st = taosGetTimestampUs(); + void *pNewEntryList = realloc(pHashObj->hashList, sizeof(void*) * newSize); + if (pNewEntryList == NULL) { // todo handle error + // uDebug("cache resize failed due to out of memory, capacity remain:%d", 
pHashObj->capacity); return; } - - pHashObj->hashList = pNewEntry; - memset(&pHashObj->hashList[pHashObj->capacity], 0, POINTER_BYTES * (newSize - pHashObj->capacity)); - + + pHashObj->hashList = pNewEntryList; + + size_t inc = newSize - pHashObj->capacity; + void * p = calloc(inc, sizeof(SHashEntry)); + + for (int32_t i = 0; i < inc; ++i) { + pHashObj->hashList[i + pHashObj->capacity] = (void *)((char *)p + i * sizeof(SHashEntry)); + } + + taosArrayPush(pHashObj->pMemBlock, &p); + pHashObj->capacity = newSize; - for (int32_t i = 0; i < pHashObj->capacity; ++i) { - pNode = pHashObj->hashList[i]; - if (pNode != NULL) { - assert(pNode->prev == NULL); + SHashEntry *pe = pHashObj->hashList[i]; + + if (pe->num == 0) { + assert(pe->next == NULL); + } else { + assert(pe->next != NULL); } - - while (pNode) { + + if (pe->num == 0) { + assert(pe->next == NULL); + continue; + } + + while ((pNode = pe->next) != NULL) { int32_t j = HASH_INDEX(pNode->hashVal, pHashObj->capacity); - if (j == i) { // this key locates in the same slot, no need to relocate it - pNode = pNode->next; - } else { - pNext = pNode->next; - - if (pNode->prev == NULL) { // first node of the overflow linked list - pHashObj->hashList[i] = pNext; + if (j != i) { + pe->num -= 1; + pe->next = pNode->next; + + if (pe->num == 0) { + assert(pe->next == NULL); } else { - pNode->prev->next = pNext; + assert(pe->next != NULL); } - - if (pNext != NULL) { - pNext->prev = pNode->prev; - } - - // clear pointer - pNode->next = NULL; - pNode->prev = NULL; - - // added into new slot - SHashNode *pNew = pHashObj->hashList[j]; - if (pNew != NULL) { - assert(pNew->prev == NULL); - pNew->prev = pNode; - } - - pNode->next = pNew; - pHashObj->hashList[j] = pNode; - - // continue - pNode = pNext; + + SHashEntry *pNewEntry = pHashObj->hashList[j]; + pushfrontNodeInEntryList(pNewEntry, pNode); + } else { + break; } } + + if (pNode != NULL) { + while ((pNext = pNode->next) != NULL) { + int32_t j = HASH_INDEX(pNext->hashVal, pHashObj->capacity); + if (j != i) { + pe->num -= 1; + + pNode->next = pNext->next; + pNext->next = NULL; + + // added into new slot + SHashEntry *pNewEntry = pHashObj->hashList[j]; + + if (pNewEntry->num == 0) { + assert(pNewEntry->next == NULL); + } else { + assert(pNewEntry->next != NULL); + } + + pushfrontNodeInEntryList(pNewEntry, pNext); + } else { + pNode = pNext; + } + } + + if (pe->num == 0) { + assert(pe->next == NULL); + } else { + assert(pe->next != NULL); + } + + } + } -// uDebug("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms", pHashObj->capacity, -// ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0); + int64_t et = taosGetTimestampUs(); + + uDebug("hash table resize completed, new capacity:%"PRId64", load factor:%f, elapsed time:%fms", pHashObj->capacity, + ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0); } SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal) { - size_t totalSize = dsize + sizeof(SHashNode) + keyLen; - - SHashNode *pNewNode = calloc(1, totalSize); + SHashNode *pNewNode = calloc(1, sizeof(SHashNode)); + if (pNewNode == NULL) { uError("failed to allocate memory, reason:%s", strerror(errno)); return NULL; } - + + pNewNode->data = malloc(dsize + keyLen); memcpy(pNewNode->data, pData, dsize); - + pNewNode->key = pNewNode->data + dsize; memcpy(pNewNode->key, key, keyLen); + pNewNode->keyLen = (uint32_t)keyLen; - pNewNode->hashVal = hashVal; return pNewNode; } -SHashNode 
*doUpdateHashNode(SHashNode *pNode, const void *key, size_t keyLen, const void *pData, size_t dsize) { - size_t size = dsize + sizeof(SHashNode) + keyLen; - - SHashNode *pNewNode = (SHashNode *)realloc(pNode, size); - if (pNewNode == NULL) { - return NULL; - } - - memcpy(pNewNode->data, pData, dsize); - - pNewNode->key = pNewNode->data + dsize; - assert(memcmp(pNewNode->key, key, keyLen) == 0 && keyLen == pNewNode->keyLen); - - memcpy(pNewNode->key, key, keyLen); - return pNewNode; -} +void pushfrontNodeInEntryList(SHashEntry *pEntry, SHashNode *pNode) { + assert(pNode != NULL && pEntry != NULL); -void doAddToHashTable(SHashObj *pHashObj, SHashNode *pNode) { - assert(pNode != NULL); - - int32_t index = HASH_INDEX(pNode->hashVal, pHashObj->capacity); - - SHashNode* pEntry = pHashObj->hashList[index]; - if (pEntry != NULL) { - pEntry->prev = pNode; - - pNode->next = pEntry; - pNode->prev = NULL; - } - - pHashObj->hashList[index] = pNode; - pHashObj->size++; + pNode->next = pEntry->next; + pEntry->next = pNode; + + pEntry->num += 1; } SHashNode *getNextHashNode(SHashMutableIterator *pIter) { assert(pIter != NULL); - + pIter->entryIndex++; - while (pIter->entryIndex < pIter->pHashObj->capacity) { - SHashNode *pNode = pIter->pHashObj->hashList[pIter->entryIndex]; - if (pNode == NULL) { + SHashNode *p = NULL; + + while (pIter->entryIndex < pIter->numOfEntries) { + SHashEntry *pEntry = pIter->pHashObj->hashList[pIter->entryIndex]; + if (pEntry->num == 0) { pIter->entryIndex++; continue; } - - return pNode; + + if (pIter->pHashObj->type == HASH_ENTRY_LOCK) { + taosRLockLatch(&pEntry->latch); + } + + p = pEntry->next; + + if (pIter->pHashObj->type == HASH_ENTRY_LOCK) { + taosRUnLockLatch(&pEntry->latch); + } + + return p; } - + return NULL; } diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 7eca3b2637..bd903c8c23 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -30,14 +30,6 @@ static FORCE_INLINE void __cache_wr_lock(SCacheObj *pCacheObj) { #endif } -static FORCE_INLINE void __cache_rd_lock(SCacheObj *pCacheObj) { -#if defined(LINUX) - pthread_rwlock_rdlock(&pCacheObj->lock); -#else - pthread_mutex_lock(&pCacheObj->lock); -#endif -} - static FORCE_INLINE void __cache_unlock(SCacheObj *pCacheObj) { #if defined(LINUX) pthread_rwlock_unlock(&pCacheObj->lock); @@ -62,13 +54,6 @@ static FORCE_INLINE void __cache_lock_destroy(SCacheObj *pCacheObj) { #endif } -#if 0 -static FORCE_INLINE void taosFreeNode(void *data) { - SCacheDataNode *pNode = *(SCacheDataNode **)data; - free(pNode); -} -#endif - /** * @param key key of object for hash, usually a null-terminated string * @param keyLen length of key @@ -88,13 +73,6 @@ static SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const */ static void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode); -/** - * remove node in trash can - * @param pCacheObj - * @param pElem - */ -static void taosRemoveFromTrashCan(SCacheObj *pCacheObj, STrashElem *pElem); - /** * remove nodes in trash with refCount == 0 in cache * @param pNode @@ -107,111 +85,54 @@ static void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force); /** * release node * @param pCacheObj cache object - * @param pNode data node + * @param pNode data node */ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNode *pNode) { if (pNode->signature != (uint64_t)pNode) { uError("key:%s, %p data is invalid, or has been released", pNode->key, pNode); + assert(0); return; } - - taosHashRemove(pCacheObj->pHashTable, pNode->key, 
pNode->keySize); pCacheObj->totalSize -= pNode->size; - uDebug("cache:%s, key:%p, %p is destroyed from cache, totalNum:%d totalSize:%" PRId64 "bytes size:%dbytes", - pCacheObj->name, pNode->key, pNode->data, (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, - pNode->size); + int32_t size = (int32_t)taosHashGetSize(pCacheObj->pHashTable); + assert(size > 0); + + uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, num:%d size:%" PRId64 "bytes", + pCacheObj->name, pNode->key, pNode->data, pNode->size, size - 1, pCacheObj->totalSize); + + if (pCacheObj->freeFp) { + pCacheObj->freeFp(pNode->data); + } - if (pCacheObj->freeFp) pCacheObj->freeFp(pNode->data); free(pNode); } -/** - * move the old node into trash - * @param pCacheObj - * @param pNode - */ -static FORCE_INLINE void taosCacheMoveToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) { - taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize); - taosAddToTrash(pCacheObj, pNode); +static FORCE_INLINE void doRemoveElemInTrashcan(SCacheObj* pCacheObj, STrashElem *pElem) { + if (pElem->pData->signature != (uint64_t) pElem->pData) { + uError("key:sig:0x%" PRIx64 " %p data has been released, ignore", pElem->pData->signature, pElem->pData); + return; + } + + pCacheObj->numOfElemsInTrash--; + if (pElem->prev) { + pElem->prev->next = pElem->next; + } else { // pnode is the header, update header + pCacheObj->pTrash = pElem->next; + } + + if (pElem->next) { + pElem->next->prev = pElem->prev; + } } -/** - * update data in cache - * @param pCacheObj - * @param pNode - * @param key - * @param keyLen - * @param pData - * @param dataSize - * @return - */ -static SCacheDataNode *taosUpdateCacheImpl(SCacheObj *pCacheObj, SCacheDataNode *pNode, const char *key, int32_t keyLen, - const void *pData, uint32_t dataSize, uint64_t duration) { - SCacheDataNode *pNewNode = NULL; - - // only a node is not referenced by any other object, in-place update it - if (T_REF_VAL_GET(pNode) == 0) { - size_t newSize = sizeof(SCacheDataNode) + dataSize + keyLen + 1; - - pNewNode = (SCacheDataNode *)realloc(pNode, newSize); - if (pNewNode == NULL) { - return NULL; - } - - memset(pNewNode, 0, newSize); - pNewNode->signature = (uint64_t)pNewNode; - memcpy(pNewNode->data, pData, dataSize); - - pNewNode->key = (char *)pNewNode + sizeof(SCacheDataNode) + dataSize; - pNewNode->keySize = keyLen; - memcpy(pNewNode->key, key, keyLen); - - // update the timestamp information for updated key/value - pNewNode->addedTime = taosGetTimestampMs(); - pNewNode->lifespan = duration; - - T_REF_INC(pNewNode); - - // the address of this node may be changed, so the prev and next element should update the corresponding pointer - taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNewNode, sizeof(void *)); - } else { - taosCacheMoveToTrash(pCacheObj, pNode); - - pNewNode = taosCreateCacheNode(key, keyLen, pData, dataSize, duration); - if (pNewNode == NULL) { - return NULL; - } - - T_REF_INC(pNewNode); - - // addedTime new element to hashtable - taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNewNode, sizeof(void *)); +static FORCE_INLINE void doDestroyTrashcanElem(SCacheObj* pCacheObj, STrashElem *pElem) { + if (pCacheObj->freeFp) { + pCacheObj->freeFp(pElem->pData->data); } - - return pNewNode; -} -/** - * addedTime data into hash table - * @param key - * @param pData - * @param size - * @param pCacheObj - * @param keyLen - * @param pNode - * @return - */ -static FORCE_INLINE SCacheDataNode *taosAddToCacheImpl(SCacheObj *pCacheObj, const char *key, 
size_t keyLen, const void *pData, - size_t dataSize, uint64_t duration) { - SCacheDataNode *pNode = taosCreateCacheNode(key, keyLen, pData, dataSize, duration); - if (pNode == NULL) { - return NULL; - } - - T_REF_INC(pNode); - taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNode, sizeof(void *)); - return pNode; + free(pElem->pData); + free(pElem); } /** @@ -237,7 +158,7 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext return NULL; } - pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(keyType), false); + pCacheObj->pHashTable = taosHashInit(4096, taosGetDefaultHashFunction(keyType), false, HASH_ENTRY_LOCK); pCacheObj->name = strdup(cacheName); if (pCacheObj->pHashTable == NULL) { free(pCacheObj); @@ -269,60 +190,87 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext } void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const void *pData, size_t dataSize, int duration) { - SCacheDataNode *pNode; - - if (pCacheObj == NULL || pCacheObj->pHashTable == NULL) { + if (pCacheObj == NULL || pCacheObj->pHashTable == NULL || pCacheObj->deleting == 1) { return NULL; } - __cache_wr_lock(pCacheObj); - SCacheDataNode **pt = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen); - SCacheDataNode * pOld = (pt != NULL) ? (*pt) : NULL; - - if (pOld == NULL) { // do addedTime to cache - pNode = taosAddToCacheImpl(pCacheObj, key, keyLen, pData, dataSize, duration * 1000L); - if (NULL != pNode) { - pCacheObj->totalSize += pNode->size; - - uDebug("cache:%s, key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", totalNum:%d totalSize:%" PRId64 - "bytes size:%" PRId64 "bytes", - pCacheObj->name, key, pNode->data, pNode->addedTime, pNode->expireTime, - (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, (int64_t)dataSize); - } else { - uError("cache:%s, key:%p, failed to added into cache, out of memory", pCacheObj->name, key); - } - } else { // old data exists, update the node - pNode = taosUpdateCacheImpl(pCacheObj, pOld, key, (int32_t)keyLen, pData, (uint32_t)dataSize, duration * 1000L); - uDebug("cache:%s, key:%p, %p exist in cache, updated old:%p", pCacheObj->name, key, pNode->data, pOld); + SCacheDataNode *pNode1 = taosCreateCacheNode(key, keyLen, pData, dataSize, duration); + if (pNode1 == NULL) { + uError("cache:%s, key:%p, failed to added into cache, out of memory", pCacheObj->name, key); + return NULL; } - __cache_unlock(pCacheObj); + T_REF_INC(pNode1); - return (pNode != NULL) ? 
pNode->data : NULL; + int32_t succ = taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNode1, sizeof(void *)); + if (succ == 0) { + atomic_add_fetch_64(&pCacheObj->totalSize, pNode1->size); + uDebug("cache:%s, key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 + ", totalNum:%d totalSize:%" PRId64 "bytes size:%" PRId64 "bytes", + pCacheObj->name, key, pNode1->data, pNode1->addedTime, pNode1->expireTime, + (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, (int64_t)dataSize); + } else { // duplicated key exists + while (1) { + SCacheDataNode* p = NULL; + int32_t ret = taosHashRemoveWithData(pCacheObj->pHashTable, key, keyLen, (void*) &p, sizeof(void*)); + + // add to trashcan + if (ret == 0) { + if (T_REF_VAL_GET(p) == 0) { + if (pCacheObj->freeFp) { + pCacheObj->freeFp(p->data); + } + + taosTFree(p); + } else { + taosAddToTrash(pCacheObj, p); + uDebug("cache:%s, key:%p, %p exist in cache, updated old:%p", pCacheObj->name, key, pNode1->data, p); + } + } + + assert(T_REF_VAL_GET(pNode1) == 1); + + ret = taosHashPut(pCacheObj->pHashTable, key, keyLen, &pNode1, sizeof(void *)); + if (ret == 0) { + atomic_add_fetch_64(&pCacheObj->totalSize, pNode1->size); + + uDebug("cache:%s, key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 + ", totalNum:%d totalSize:%" PRId64 "bytes size:%" PRId64 "bytes", + pCacheObj->name, key, pNode1->data, pNode1->addedTime, pNode1->expireTime, + (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, (int64_t)dataSize); + + return pNode1->data; + + } else { + // failed, try again + } + } + } + + return pNode1->data; +} + +static void incRefFn(void* ptNode) { + assert(ptNode != NULL); + + SCacheDataNode** p = (SCacheDataNode**) ptNode; + assert(T_REF_VAL_GET(*p) >= 0); + + int32_t ret = T_REF_INC(*p); + assert(ret > 0); } void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen) { - if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) == 0) { + if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) == 0 || pCacheObj->deleting == 1) { return NULL; } - void *pData = NULL; - - __cache_rd_lock(pCacheObj); - - SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen); - - int32_t ref = 0; - if (ptNode != NULL) { - ref = T_REF_INC(*ptNode); - pData = (*ptNode)->data; - } - - __cache_unlock(pCacheObj); + SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGetCB(pCacheObj->pHashTable, key, keyLen, incRefFn); + void* pData = (ptNode != NULL)? 
(*ptNode)->data:NULL; if (pData != NULL) { atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1); - uDebug("cache:%s, key:%p, %p is retrieved from cache, refcnt:%d", pCacheObj->name, key, pData, ref); + uDebug("cache:%s, key:%p, %p is retrieved from cache, refcnt:%d", pCacheObj->name, key, pData, T_REF_VAL_GET(*ptNode)); } else { atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1); uDebug("cache:%s, key:%p, not in cache, retrieved failed", pCacheObj->name, key); @@ -332,34 +280,6 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen return pData; } -void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, void *key, size_t keyLen, uint64_t expireTime) { - if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) == 0) { - return NULL; - } - - __cache_rd_lock(pCacheObj); - - SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen); - if (ptNode != NULL) { - T_REF_INC(*ptNode); - (*ptNode)->expireTime = expireTime; // taosGetTimestampMs() + (*ptNode)->lifespan; - } - - __cache_unlock(pCacheObj); - - if (ptNode != NULL) { - atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1); - uDebug("cache:%s, key:%p, %p expireTime is updated in cache, refcnt:%d", pCacheObj->name, key, - (*ptNode)->data, T_REF_VAL_GET(*ptNode)); - } else { - atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1); - uDebug("cache:%s, key:%p, not in cache, retrieved failed", pCacheObj->name, key); - } - - atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1); - return (ptNode != NULL) ? (*ptNode)->data : NULL; -} - void *taosCacheAcquireByData(SCacheObj *pCacheObj, void *data) { if (pCacheObj == NULL || data == NULL) return NULL; @@ -420,15 +340,16 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { if (pCacheObj->extendLifespan && (!inTrashCan) && (!_remove)) { atomic_store_64(&pNode->expireTime, pNode->lifespan + taosGetTimestampMs()); - uDebug("cache:%s data:%p extend life time to %"PRId64 " before release", pCacheObj->name, pNode->data, pNode->expireTime); + uDebug("cache:%s data:%p extend expire time: %"PRId64, pCacheObj->name, pNode->data, pNode->expireTime); } if (_remove) { - __cache_wr_lock(pCacheObj); - // NOTE: once refcount is decrease, pNode may be freed by other thread immediately. - int32_t ref = T_REF_DEC(pNode); - uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, ref); + char* key = pNode->key; + char* d = pNode->data; + + int32_t ref = T_REF_VAL_GET(pNode); + uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, key, d, ref - 1); /* * If it is not referenced by other users, remove it immediately. Otherwise move this node to trashcan wait for all users @@ -437,50 +358,85 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { * NOTE: previous ref is 0, and current ref is still 0, remove it. If previous is not 0, there is another thread * that tries to do the same thing. 
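 * In other words, whichever caller manages to remove the node from the hash table
 * takes ownership of it: the node is either freed at once (its ref count is already
 * zero) or parked in the trashcan until the last user releases it. A concurrent
 * caller whose removal fails does nothing, since the node has already been handed over.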
*/ - if (pNode->inTrashCan) { + if (inTrashCan) { + ref = T_REF_DEC(pNode); + if (ref == 0) { assert(pNode->pTNodeHeader->pData == pNode); - taosRemoveFromTrashCan(pCacheObj, pNode->pTNodeHeader); + + __cache_wr_lock(pCacheObj); + doRemoveElemInTrashcan(pCacheObj, pNode->pTNodeHeader); + __cache_unlock(pCacheObj); + + doDestroyTrashcanElem(pCacheObj, pNode->pTNodeHeader); } } else { - if (ref > 0) { - assert(pNode->pTNodeHeader == NULL); - taosCacheMoveToTrash(pCacheObj, pNode); - } else { - taosCacheReleaseNode(pCacheObj, pNode); + // NOTE: remove it from hash in the first place, otherwise, the pNode may have been released by other thread + // when reaches here. + int32_t ret = taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize); + ref = T_REF_DEC(pNode); + + // successfully remove from hash table, if failed, this node must have been move to trash already, do nothing. + // note that the remove operation can be executed only once. + if (ret == 0) { + if (ref > 0) { + assert(pNode->pTNodeHeader == NULL); + + __cache_wr_lock(pCacheObj); + taosAddToTrash(pCacheObj, pNode); + __cache_unlock(pCacheObj); + } else { // ref == 0 + atomic_sub_fetch_64(&pCacheObj->totalSize, pNode->size); + + int32_t size = (int32_t)taosHashGetSize(pCacheObj->pHashTable); + uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, num:%d size:%" PRId64 "bytes", + pCacheObj->name, pNode->key, pNode->data, pNode->size, size, pCacheObj->totalSize); + + if (pCacheObj->freeFp) { + pCacheObj->freeFp(pNode->data); + } + + free(pNode); + } } } - __cache_unlock(pCacheObj); - } else { // NOTE: once refcount is decrease, pNode may be freed by other thread immediately. - int32_t ref = T_REF_DEC(pNode); + char* key = pNode->key; + char* p = pNode->data; - uDebug("cache:%s, key:%p, %p released, refcnt:%d, data in trancan:%d", pCacheObj->name, pNode->key, pNode->data, - ref, inTrashCan); + int32_t ref = T_REF_DEC(pNode); + uDebug("cache:%s, key:%p, %p released, refcnt:%d, data in trancan:%d", pCacheObj->name, key, p, ref, inTrashCan); } } -void taosCacheEmpty(SCacheObj *pCacheObj) { - SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable); - - __cache_wr_lock(pCacheObj); - while (taosHashIterNext(pIter)) { - if (pCacheObj->deleting == 1) { - break; - } - - SCacheDataNode *pNode = *(SCacheDataNode **) taosHashIterGet(pIter); - if (T_REF_VAL_GET(pNode) == 0) { - taosCacheReleaseNode(pCacheObj, pNode); - } else { - taosCacheMoveToTrash(pCacheObj, pNode); - } +typedef struct SHashTravSupp { + SCacheObj* pCacheObj; + int64_t time; + __cache_free_fn_t fp; +} SHashTravSupp; + +static bool travHashTableEmptyFn(void* param, void* data) { + SHashTravSupp* ps = (SHashTravSupp*) param; + SCacheObj* pCacheObj= ps->pCacheObj; + + SCacheDataNode *pNode = *(SCacheDataNode **) data; + + if (T_REF_VAL_GET(pNode) == 0) { + taosCacheReleaseNode(pCacheObj, pNode); + } else { // do add to trashcan + taosAddToTrash(pCacheObj, pNode); } - __cache_unlock(pCacheObj); - - taosHashDestroyIter(pIter); + + // this node should be remove from hash table + return false; +} + +void taosCacheEmpty(SCacheObj *pCacheObj) { + SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = NULL, .time = taosGetTimestampMs()}; + + taosHashCondTraverse(pCacheObj->pHashTable, travHashTableEmptyFn, &sup); taosTrashCanEmpty(pCacheObj, false); } @@ -496,8 +452,7 @@ void taosCacheCleanup(SCacheObj *pCacheObj) { doCleanupDataCache(pCacheObj); } -SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *pData, size_t size, - 
uint64_t duration) { +SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *pData, size_t size, uint64_t duration) { size_t totalSize = size + sizeof(SCacheDataNode) + keyLen; SCacheDataNode *pNewNode = calloc(1, totalSize); @@ -543,36 +498,10 @@ void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) { pNode->pTNodeHeader = pElem; pCacheObj->numOfElemsInTrash++; - uDebug("key:%p, %p move to trash, numOfElem in trash:%d", pNode->key, pNode->data, pCacheObj->numOfElemsInTrash); + uDebug("%s key:%p, %p move to trash, numOfElem in trash:%d", pCacheObj->name, pNode->key, pNode->data, + pCacheObj->numOfElemsInTrash); } -void taosRemoveFromTrashCan(SCacheObj *pCacheObj, STrashElem *pElem) { - if (pElem->pData->signature != (uint64_t)pElem->pData) { - uError("key:sig:0x%" PRIx64 " %p data has been released, ignore", pElem->pData->signature, pElem->pData); - return; - } - - pCacheObj->numOfElemsInTrash--; - if (pElem->prev) { - pElem->prev->next = pElem->next; - } else { /* pnode is the header, update header */ - pCacheObj->pTrash = pElem->next; - } - - if (pElem->next) { - pElem->next->prev = pElem->prev; - } - - pElem->pData->signature = 0; - if (pCacheObj->freeFp) { - pCacheObj->freeFp(pElem->pData->data); - } - - free(pElem->pData); - free(pElem); -} - -// TODO add another lock when scanning trashcan void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) { __cache_wr_lock(pCacheObj); @@ -580,8 +509,8 @@ void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) { if (pCacheObj->pTrash != NULL) { uError("key:inconsistency data in cache, numOfElem in trash:%d", pCacheObj->numOfElemsInTrash); } - pCacheObj->pTrash = NULL; + pCacheObj->pTrash = NULL; __cache_unlock(pCacheObj); return; } @@ -597,10 +526,12 @@ void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) { if (force || (T_REF_VAL_GET(pElem->pData) == 0)) { uDebug("key:%p, %p removed from trash. numOfElem in trash:%d", pElem->pData->key, pElem->pData->data, pCacheObj->numOfElemsInTrash - 1); - STrashElem *p = pElem; + STrashElem *p = pElem; pElem = pElem->next; - taosRemoveFromTrashCan(pCacheObj, p); + + doRemoveElemInTrashcan(pCacheObj, p); + doDestroyTrashcanElem(pCacheObj, p); } else { pElem = pElem->next; } @@ -610,27 +541,13 @@ void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) { } void doCleanupDataCache(SCacheObj *pCacheObj) { - __cache_wr_lock(pCacheObj); - - SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable); - while (taosHashIterNext(pIter)) { - SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter); - - int32_t c = T_REF_VAL_GET(pNode); - if (c <= 0) { - taosCacheReleaseNode(pCacheObj, pNode); - } else { - uDebug("cache:%s key:%p, %p will not remove from cache, refcnt:%d", pCacheObj->name, pNode->key, - pNode->data, T_REF_VAL_GET(pNode)); - } - } - taosHashDestroyIter(pIter); + SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = NULL, .time = taosGetTimestampMs()}; + taosHashCondTraverse(pCacheObj->pHashTable, travHashTableEmptyFn, &sup); // todo memory leak if there are object with refcount greater than 0 in hash table? 
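// by the time taosHashCleanup runs, the traversal above has emptied the table:
// travHashTableEmptyFn returns false for every element (a taosHashCondTraverse predicate
// keeps an element by returning true and removes it by returning false), so unreferenced
// nodes are released on the spot and referenced ones are moved to the trashcan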
taosHashCleanup(pCacheObj->pHashTable); - __cache_unlock(pCacheObj); - taosTrashCanEmpty(pCacheObj, true); + __cache_lock_destroy(pCacheObj); taosTFree(pCacheObj->name); @@ -638,26 +555,31 @@ void doCleanupDataCache(SCacheObj *pCacheObj) { free(pCacheObj); } -static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t fp) { - SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable); +bool travHashTableFn(void* param, void* data) { + SHashTravSupp* ps = (SHashTravSupp*) param; + SCacheObj* pCacheObj= ps->pCacheObj; - __cache_wr_lock(pCacheObj); - while (taosHashIterNext(pIter)) { - SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter); + SCacheDataNode* pNode = *(SCacheDataNode **) data; + if ((int64_t)pNode->expireTime < ps->time && T_REF_VAL_GET(pNode) <= 0) { + taosCacheReleaseNode(pCacheObj, pNode); - if (pNode->expireTime < (uint64_t)time && T_REF_VAL_GET(pNode) <= 0) { - taosCacheReleaseNode(pCacheObj, pNode); - continue; - } - - if (fp) { - fp(pNode->data); - } + // this node should be remove from hash table + return false; } - __cache_unlock(pCacheObj); + if (ps->fp) { + (ps->fp)(pNode->data); + } - taosHashDestroyIter(pIter); + // do not remove element in hash table + return true; +} + +static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t fp) { + assert(pCacheObj != NULL); + + SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = fp, .time = time}; + taosHashCondTraverse(pCacheObj->pHashTable, travHashTableFn, &sup); } void* taosCacheTimedRefresh(void *handle) { diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c index ccf4ea7317..704af2017e 100644 --- a/src/util/src/tconfig.c +++ b/src/util/src/tconfig.c @@ -133,7 +133,7 @@ static void taosReadDirectoryConfig(SGlobalCfg *cfg, char *input_value) { } static void taosReadIpStrConfig(SGlobalCfg *cfg, char *input_value) { - uint32_t value = inet_addr(input_value); + uint32_t value = taosInetAddr(input_value); char * option = (char *)cfg->ptr; if (value == INADDR_NONE) { uError("config option:%s, input value:%s, is not a valid ip address, use default value:%s", diff --git a/src/util/src/tkvstore.c b/src/util/src/tkvstore.c index 9657d82773..9fab4a5936 100644 --- a/src/util/src/tkvstore.c +++ b/src/util/src/tkvstore.c @@ -14,6 +14,9 @@ */ #define _DEFAULT_SOURCE + +#define TAOS_RANDOM_FILE_FAIL_TEST + #include "os.h" #include "hash.h" #include "taoserror.h" @@ -21,7 +24,6 @@ #include "tcoding.h" #include "tkvstore.h" #include "tulog.h" -#define TAOS_RANDOM_FILE_FAIL_TEST #define TD_KVSTORE_HEADER_SIZE 512 #define TD_KVSTORE_MAJOR_VERSION 1 @@ -330,6 +332,31 @@ int tdKVStoreEndCommit(SKVStore *pStore) { return 0; } +void tsdbGetStoreInfo(char *fname, uint32_t *magic, int32_t *size) { + char buf[TD_KVSTORE_HEADER_SIZE] = "\0"; + SStoreInfo info = {0}; + + int fd = open(fname, O_RDONLY); + if (fd < 0) goto _err; + + if (taosTRead(fd, buf, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) goto _err; + if (!taosCheckChecksumWhole((uint8_t *)buf, TD_KVSTORE_HEADER_SIZE)) goto _err; + + void *pBuf = (void *)buf; + pBuf = tdDecodeStoreInfo(pBuf, &info); + off_t offset = lseek(fd, 0, SEEK_END); + if (offset < 0) goto _err; + close(fd); + + *magic = info.magic; + *size = (int32_t)offset; + +_err: + if (fd >= 0) close(fd); + *magic = TD_KVSTORE_INIT_MAGIC; + *size = 0; +} + static int tdLoadKVStoreHeader(int fd, char *fname, SStoreInfo *pInfo, uint32_t *version) { char buf[TD_KVSTORE_HEADER_SIZE] = "\0"; @@ -433,7 +460,7 @@ static SKVStore *tdNewKVStore(char *fname, 
iterFunc iFunc, afterFunc aFunc, void pStore->iFunc = iFunc; pStore->aFunc = aFunc; pStore->appH = appH; - pStore->map = taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false); + pStore->map = taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); if (pStore->map == NULL) { terrno = TSDB_CODE_COM_OUT_OF_MEMORY; goto _err; @@ -509,7 +536,7 @@ static int tdRestoreKVStore(SKVStore *pStore) { ssize_t tsize = taosTRead(pStore->fd, tbuf, sizeof(SKVRecord)); if (tsize == 0) break; if (tsize < sizeof(SKVRecord)) { - uError("failed to read %zu bytes from file %s at offset %" PRId64 "since %s", sizeof(SKVRecord), pStore->fname, + uError("failed to read %" PRIzu " bytes from file %s at offset %" PRId64 "since %s", sizeof(SKVRecord), pStore->fname, pStore->info.size, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); goto _err; diff --git a/src/util/src/version.c b/src/util/src/version.c index d12c2f339e..d541b1afd3 100644 --- a/src/util/src/version.c +++ b/src/util/src/version.c @@ -1,7 +1,7 @@ -char version[12] = "2.0.0.0"; +char version[12] = "2.0.0.6"; char compatible_version[12] = "2.0.0.0"; -char gitinfo[48] = "8df8b7d9930342dd34ba13df160a7d538fad7bc7"; -char gitinfoOfInternal[48] = "bad4f040145fba581d1ab0c5dd718a5ede3e209f"; -char buildinfo[64] = "Built by root at 2020-08-03 17:23"; +char gitinfo[48] = "e9a20fafbe9e3b0b12cbdf55604163b4b9a41b41"; +char gitinfoOfInternal[48] = "dd679db0b9edeedad68574c1e031544711a9831f"; +char buildinfo[64] = "Built by at 2020-08-12 07:59"; -void libtaos_2_0_0_0_Linux_x64() {}; +void libtaos_2_0_0_6_Linux_x64() {}; diff --git a/src/util/tests/CMakeLists.txt b/src/util/tests/CMakeLists.txt index 8687a8005d..09523cbfb4 100644 --- a/src/util/tests/CMakeLists.txt +++ b/src/util/tests/CMakeLists.txt @@ -10,6 +10,6 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR) INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR}) AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) - ADD_EXECUTABLE(utilTest ${SOURCE_LIST}) + ADD_EXECUTABLE(utilTest ./cacheTest.cpp ./hashTest.cpp) TARGET_LINK_LIBRARIES(utilTest tutil common osdetail gtest pthread gcov) ENDIF() diff --git a/src/util/tests/hashTest.cpp b/src/util/tests/hashTest.cpp index f9f636db90..bc3fed74c4 100644 --- a/src/util/tests/hashTest.cpp +++ b/src/util/tests/hashTest.cpp @@ -10,7 +10,7 @@ namespace { // the simple test code for basic operations void simpleTest() { - auto* hashTable = (SHashObj*) taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false); + SHashObj* hashTable = (SHashObj*) taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_ENTRY_LOCK); ASSERT_EQ(taosHashGetSize(hashTable), 0); // put 400 elements in the hash table @@ -47,7 +47,7 @@ void simpleTest() { } void stringKeyTest() { - auto* hashTable = (SHashObj*) taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false); + auto* hashTable = (SHashObj*) taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK); ASSERT_EQ(taosHashGetSize(hashTable), 0); char key[128] = {0}; @@ -97,7 +97,7 @@ void functionTest() { * a single threads situation */ void noLockPerformanceTest() { - auto* hashTable = (SHashObj*) taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false); + auto* hashTable = (SHashObj*) taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK); ASSERT_EQ(taosHashGetSize(hashTable), 0); char key[128] = {0}; diff --git a/src/vnode/src/vnodeMain.c 
b/src/vnode/src/vnodeMain.c index 6abd255dc4..0a5e292f6d 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -50,7 +50,7 @@ int32_t vnodeInitResources() { vnodeInitWriteFp(); vnodeInitReadFp(); - tsDnodeVnodesHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true); + tsDnodeVnodesHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, true); if (tsDnodeVnodesHash == NULL) { vError("failed to init vnode list"); return TSDB_CODE_VND_OUT_OF_MEMORY; @@ -251,10 +251,17 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { appH.cqCreateFunc = cqCreate; appH.cqDropFunc = cqDrop; sprintf(temp, "%s/tsdb", rootDir); + + terrno = 0; pVnode->tsdb = tsdbOpenRepo(temp, &appH); if (pVnode->tsdb == NULL) { vnodeCleanUp(pVnode); return terrno; + } else if (terrno != TSDB_CODE_SUCCESS && pVnode->syncCfg.replica <= 1) { + vError("vgId:%d, failed to open tsdb, replica:%d reason:%s", pVnode->vgId, pVnode->syncCfg.replica, + tstrerror(terrno)); + vnodeCleanUp(pVnode); + return terrno; } sprintf(temp, "%s/wal", rootDir); diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 066770e1bb..c41b245794 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -14,7 +14,7 @@ */ #define _DEFAULT_SOURCE -#include +//#include #include "os.h" #include "tglobal.h" @@ -66,7 +66,7 @@ int32_t vnodeProcessRead(void *param, SReadMsg *pReadMsg) { return (*vnodeProcessReadMsgFp[msgType])(pVnode, pReadMsg); } -static void vnodePutItemIntoReadQueue(SVnodeObj *pVnode, void *qhandle) { +static void vnodePutItemIntoReadQueue(SVnodeObj *pVnode, void **qhandle) { SReadMsg *pRead = (SReadMsg *)taosAllocateQitem(sizeof(SReadMsg)); pRead->rpcMsg.msgType = TSDB_MSG_TYPE_QUERY; pRead->pCont = qhandle; @@ -74,26 +74,30 @@ static void vnodePutItemIntoReadQueue(SVnodeObj *pVnode, void *qhandle) { pRead->rpcMsg.handle = NULL; atomic_add_fetch_32(&pVnode->refCount, 1); + + vDebug("QInfo:%p add to vread queue for exec query, msg:%p", *qhandle, pRead); taosWriteQitem(pVnode->rqueue, TAOS_QTYPE_QUERY, pRead); } -static int32_t vnodeDumpQueryResult(SRspRet *pRet, void* pVnode, void* handle, bool* freeHandle) { +static int32_t vnodeDumpQueryResult(SRspRet *pRet, void* pVnode, void** handle, bool* freeHandle) { bool continueExec = false; int32_t code = TSDB_CODE_SUCCESS; - if ((code = qDumpRetrieveResult(handle, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len, &continueExec)) == TSDB_CODE_SUCCESS) { + if ((code = qDumpRetrieveResult(*handle, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len, &continueExec)) == TSDB_CODE_SUCCESS) { if (continueExec) { - vDebug("QInfo:%p add to query task queue for exec", handle); - vnodePutItemIntoReadQueue(pVnode, handle); - pRet->qhandle = handle; *freeHandle = false; + vnodePutItemIntoReadQueue(pVnode, handle); + pRet->qhandle = *handle; } else { - vDebug("QInfo:%p exec completed", handle); *freeHandle = true; + vDebug("QInfo:%p exec completed, free handle:%d", *handle, *freeHandle); } } else { - pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); - memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); + SRetrieveTableRsp* pRsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); + memset(pRsp, 0, sizeof(SRetrieveTableRsp)); + pRsp->completed = true; + + pRet->rsp = pRsp; *freeHandle = true; } @@ -158,7 +162,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { // current connect is broken if (code == TSDB_CODE_SUCCESS) { handle = 
qRegisterQInfo(pVnode->qMgmt, (uint64_t) pQInfo); - if (handle == NULL) { // failed to register qhandle + if (handle == NULL) { // failed to register qhandle, todo add error test case vError("vgId:%d QInfo:%p register qhandle failed, return to app, code:%s", pVnode->vgId, (void *)pQInfo, tstrerror(pRsp->code)); pRsp->code = TSDB_CODE_QRY_INVALID_QHANDLE; @@ -179,41 +183,40 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { } if (handle != NULL) { - vDebug("vgId:%d, QInfo:%p, dnode query msg disposed, register qhandle and return to app", vgId, *handle); - - vnodePutItemIntoReadQueue(pVnode, *handle); - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); + vDebug("vgId:%d, QInfo:%p, dnode query msg disposed, create qhandle and returns to app", vgId, *handle); + vnodePutItemIntoReadQueue(pVnode, handle); } - } else { assert(pCont != NULL); + void** qhandle = (void**) pCont; - handle = qAcquireQInfo(pVnode->qMgmt, (uint64_t) pCont); - if (handle == NULL) { - vWarn("QInfo:%p invalid qhandle in continuing exec query, conn:%p", (void*) pCont, pReadMsg->rpcMsg.handle); - code = TSDB_CODE_QRY_INVALID_QHANDLE; + vDebug("vgId:%d, QInfo:%p, dnode continues to exec query", pVnode->vgId, *qhandle); + + bool freehandle = false; + bool buildRes = qTableQuery(*qhandle); // do execute query + + // build query rsp, the retrieve request has reached here already + if (buildRes) { + // update the connection info according to the retrieve connection + pReadMsg->rpcMsg.handle = qGetResultRetrieveMsg(*qhandle); + assert(pReadMsg->rpcMsg.handle != NULL); + + vDebug("vgId:%d, QInfo:%p, start to build retrieval rsp after query paused, %p", pVnode->vgId, *qhandle, + pReadMsg->rpcMsg.handle); + + // set the real rsp error code + pReadMsg->rpcMsg.code = vnodeDumpQueryResult(&pReadMsg->rspRet, pVnode, qhandle, &freehandle); + + // NOTE: set return code to be TSDB_CODE_QRY_HAS_RSP to notify dnode to return msg to client + code = TSDB_CODE_QRY_HAS_RSP; } else { - vDebug("vgId:%d, QInfo:%p, dnode continue exec query", pVnode->vgId, (void*) pCont); + freehandle = qQueryCompleted(*qhandle); + } - bool freehandle = false; - bool buildRes = qTableQuery(*handle); // do execute query - - // build query rsp - if (buildRes) { - // update the connection info according to the retrieve connection - pReadMsg->rpcMsg.handle = qGetResultRetrieveMsg(*handle); - assert(pReadMsg->rpcMsg.handle != NULL); - - vDebug("vgId:%d, QInfo:%p, start to build result rsp after query paused, %p", pVnode->vgId, *handle, pReadMsg->rpcMsg.handle); - code = vnodeDumpQueryResult(&pReadMsg->rspRet, pVnode, *handle, &freehandle); - - // todo test the error code case - if (code == TSDB_CODE_SUCCESS) { - code = TSDB_CODE_QRY_HAS_RSP; - } - } - - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, freehandle); + // NOTE: if the qhandle is not put into vread queue or query is completed, free the qhandle. + // If the building of result is not required, simply free it. 
Otherwise, mandatorily free the qhandle + if (freehandle || (!buildRes)) { + qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, freehandle); } } @@ -225,8 +228,8 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { SRspRet *pRet = &pReadMsg->rspRet; SRetrieveTableMsg *pRetrieve = pCont; - pRetrieve->qhandle = htobe64(pRetrieve->qhandle); pRetrieve->free = htons(pRetrieve->free); + pRetrieve->qhandle = htobe64(pRetrieve->qhandle); vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed, free:%d, conn:%p", pVnode->vgId, (void*) pRetrieve->qhandle, pRetrieve->free, pReadMsg->rpcMsg.handle); @@ -236,18 +239,27 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { void** handle = qAcquireQInfo(pVnode->qMgmt, pRetrieve->qhandle); if (handle == NULL || (*handle) != (void*) pRetrieve->qhandle) { code = TSDB_CODE_QRY_INVALID_QHANDLE; - vDebug("vgId:%d, invalid qhandle in fetch result, QInfo:%p", pVnode->vgId, (void*) pRetrieve->qhandle); + vDebug("vgId:%d, invalid qhandle in retrieving result, QInfo:%p", pVnode->vgId, (void*) pRetrieve->qhandle); vnodeBuildNoResultQueryRsp(pRet); return code; } if (pRetrieve->free == 1) { - vDebug("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, *handle); + vWarn("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, *handle); qKillQuery(*handle); qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); vnodeBuildNoResultQueryRsp(pRet); + code = TSDB_CODE_TSC_QUERY_CANCELLED; + return code; + } + + // register the qhandle to connect to quit query immediate if connection is broken + if (vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) { + vError("vgId:%d, QInfo:%p, retrieve discarded since link is broken, %p", pVnode->vgId, *handle, pReadMsg->rpcMsg.handle); + code = TSDB_CODE_RPC_NETWORK_UNAVAIL; + qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); return code; } @@ -259,16 +271,22 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { //TODO handle malloc failure pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); + freeHandle = true; } else { // result is not ready, return immediately if (!buildRes) { qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false); return TSDB_CODE_QRY_NOT_READY; } - code = vnodeDumpQueryResult(pRet, pVnode, *handle, &freeHandle); + code = vnodeDumpQueryResult(pRet, pVnode, handle, &freeHandle); + } + + // If qhandle is not added into vread queue, the query should be completed already or paused with error. 
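// (freeHandle stays false only when vnodeDumpQueryResult re-queued the query for further
// execution; a completed query or a failed retrieve leaves it set to true)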
+ // Here free qhandle immediately + if (freeHandle) { + qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true); } - qReleaseQInfo(pVnode->qMgmt, (void**) &handle, freeHandle); return code; } diff --git a/src/wal/src/walMain.c b/src/wal/src/walMain.c index 4ac8a096c6..bebad69f32 100644 --- a/src/wal/src/walMain.c +++ b/src/wal/src/walMain.c @@ -14,6 +14,9 @@ */ #define _DEFAULT_SOURCE + +#define TAOS_RANDOM_FILE_FAIL_TEST + #include "os.h" #include "tlog.h" #include "tchecksum.h" @@ -22,7 +25,6 @@ #include "taoserror.h" #include "twal.h" #include "tqueue.h" -#define TAOS_RANDOM_FILE_FAIL_TEST #define walPrefix "wal" diff --git a/src/wal/test/CMakeLists.txt b/src/wal/test/CMakeLists.txt index 6fdb03710e..6c232ce4b9 100644 --- a/src/wal/test/CMakeLists.txt +++ b/src/wal/test/CMakeLists.txt @@ -1,7 +1,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) -IF (TD_LINUX_64) +IF (TD_LINUX) INCLUDE_DIRECTORIES(../inc) LIST(APPEND WALTEST_SRC ./waltest.c) diff --git a/tests/comparisonTest/tdengine/CMakeLists.txt b/tests/comparisonTest/tdengine/CMakeLists.txt index 2555bdce91..990612b8c3 100644 --- a/tests/comparisonTest/tdengine/CMakeLists.txt +++ b/tests/comparisonTest/tdengine/CMakeLists.txt @@ -1,7 +1,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) -IF (TD_LINUX_64) +IF (TD_LINUX) add_executable(tdengineTest tdengineTest.c) target_link_libraries(tdengineTest taos_static tutil common pthread) ENDIF() diff --git a/tests/examples/JDBC/JDBCDemo/pom.xml b/tests/examples/JDBC/JDBCDemo/pom.xml index d015c62772..50313a0a0c 100644 --- a/tests/examples/JDBC/JDBCDemo/pom.xml +++ b/tests/examples/JDBC/JDBCDemo/pom.xml @@ -63,7 +63,7 @@ com.taosdata.jdbc taos-jdbcdriver - 1.0.1 + 2.0.2 diff --git a/tests/examples/JDBC/SpringJdbcTemplate/pom.xml b/tests/examples/JDBC/SpringJdbcTemplate/pom.xml index 45abc5354a..b796d52d28 100644 --- a/tests/examples/JDBC/SpringJdbcTemplate/pom.xml +++ b/tests/examples/JDBC/SpringJdbcTemplate/pom.xml @@ -41,7 +41,7 @@ com.taosdata.jdbc taos-jdbcdriver - 1.0.3 + 2.0.2 diff --git a/tests/examples/JDBC/springbootdemo/pom.xml b/tests/examples/JDBC/springbootdemo/pom.xml index 74522979c0..5f31d36d6e 100644 --- a/tests/examples/JDBC/springbootdemo/pom.xml +++ b/tests/examples/JDBC/springbootdemo/pom.xml @@ -63,7 +63,7 @@ com.taosdata.jdbc taos-jdbcdriver - 1.0.3 + 2.0.2 diff --git a/tests/examples/c/makefile b/tests/examples/c/makefile index f9653c9c96..7293a22c11 100644 --- a/tests/examples/c/makefile +++ b/tests/examples/c/makefile @@ -4,22 +4,24 @@ ROOT=./ TARGET=exe LFLAGS = '-Wl,-rpath,/usr/local/taos/driver/' -ltaos -lpthread -lm -lrt -CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion -Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX -msse4.2 -Wno-unused-function -D_M_X64 \ - -I/usr/local/taos/include -std=gnu99 +CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion \ + -Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX \ + -msse4.2 -Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 all: $(TARGET) exe: - gcc $(CFLAGS) ./asyncdemo.c -o $(ROOT)/asyncdemo $(LFLAGS) - gcc $(CFLAGS) ./demo.c -o $(ROOT)/demo $(LFLAGS) - gcc $(CFLAGS) ./prepare.c -o $(ROOT)/prepare $(LFLAGS) - gcc $(CFLAGS) ./stream.c -o $(ROOT)/stream $(LFLAGS) + gcc $(CFLAGS) ./asyncdemo.c -o $(ROOT)asyncdemo $(LFLAGS) + gcc $(CFLAGS) ./demo.c -o $(ROOT)demo $(LFLAGS) + gcc $(CFLAGS) ./prepare.c -o $(ROOT)prepare $(LFLAGS) + gcc $(CFLAGS) ./stream.c -o $(ROOT)stream $(LFLAGS) gcc $(CFLAGS) ./subscribe.c 
-o $(ROOT)subscribe $(LFLAGS) gcc $(CFLAGS) ./apitest.c -o $(ROOT)apitest $(LFLAGS) clean: - rm $(ROOT)/asyncdemo - rm $(ROOT)/demo - rm $(ROOT)/prepare - rm $(ROOT)/stream - rm $(ROOT)/subscribe + rm $(ROOT)asyncdemo + rm $(ROOT)demo + rm $(ROOT)prepare + rm $(ROOT)stream + rm $(ROOT)subscribe + rm $(ROOT)apitest diff --git a/tests/examples/c/prepare.c b/tests/examples/c/prepare.c index 3b968aca07..7a70b744ee 100644 --- a/tests/examples/c/prepare.c +++ b/tests/examples/c/prepare.c @@ -195,10 +195,15 @@ int main(int argc, char *argv[]) taos_print_row(temp, row, fields, num_fields); printf("%s\n", temp); } + if (rows == 2) { + printf("two rows are fetched as expectation\n"); + } else { + printf("expect two rows, but %d rows are fetched\n", rows); + } taos_free_result(result); taos_stmt_close(stmt); - return getchar(); + return 0; } diff --git a/tests/examples/c/subscribe.c b/tests/examples/c/subscribe.c index db5ad34ee7..8368af18f7 100644 --- a/tests/examples/c/subscribe.c +++ b/tests/examples/c/subscribe.c @@ -56,7 +56,7 @@ void check_row_count(int line, TAOS_RES* res, int expected) { void do_query(TAOS* taos, const char* sql) { - TAOS_RES* res = taos_query(taos, "drop database if exists test;"); + TAOS_RES* res = taos_query(taos, sql); taos_free_result(res); } diff --git a/tests/perftest-scripts/influxdbTestWriteLoop.sh b/tests/perftest-scripts/influxdbTestWriteLoop.sh index b2c35791b4..89eabbd7ec 100755 --- a/tests/perftest-scripts/influxdbTestWriteLoop.sh +++ b/tests/perftest-scripts/influxdbTestWriteLoop.sh @@ -21,7 +21,6 @@ function runTest { done done - for r in ${!rowsPerRequest[@]}; do if [ "$r" == "1" ] || [ "$r" == "100" ] || [ "$r" == "1000" ]; then NUM_OF_FILES=$clients @@ -29,7 +28,6 @@ function runTest { NUM_OF_FILES=100 fi - printf "$r, " for c in `seq 1 $clients`; do totalRPR=0 OUTPUT_FILE=influxdbTestWrite-RPR${rowsPerRequest[$r]}-clients$c.out diff --git a/tests/pytest/client/client.py b/tests/pytest/client/client.py index 6fd2a2b6bd..b40511094b 100644 --- a/tests/pytest/client/client.py +++ b/tests/pytest/client/client.py @@ -27,13 +27,7 @@ class TDTestCase: ret = tdSql.query('select database()') tdSql.checkData(0, 0, "db") - - ret = tdSql.query('select server_version()') - tdSql.checkData(0, 0, "2.0.0.0") - - ret = tdSql.query('select client_version()') - tdSql.checkData(0, 0, "2.0.0.0") - + ret = tdSql.query('select server_status()') tdSql.checkData(0, 0, 1) diff --git a/tests/pytest/client/version.py b/tests/pytest/client/version.py new file mode 100644 index 0000000000..bc2e58f106 --- /dev/null +++ b/tests/pytest/client/version.py @@ -0,0 +1,41 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + ret = tdSql.query('select server_version()') + tdSql.checkData(0, 0, "2.0.0.6") + + ret = tdSql.query('select client_version()') + tdSql.checkData(0, 0, "2.0.0.6") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index cbe82b2c8c..fd5aa4ecf0 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -158,6 +158,7 @@ python3 ./test.py -f alter/alter_table_crash.py # client python3 ./test.py -f client/client.py +python3 ./test.py -f client/version.py # Misc python3 testCompress.py @@ -182,3 +183,6 @@ python3 ./test.py -f functions/function_stddev.py python3 ./test.py -f functions/function_sum.py python3 ./test.py -f functions/function_top.py python3 ./test.py -f functions/function_twa.py + +# tools +python3 test.py -f tools/taosdemo.py diff --git a/tests/pytest/regressiontest.sh b/tests/pytest/regressiontest.sh index 61ec491f5d..b69ee37a55 100755 --- a/tests/pytest/regressiontest.sh +++ b/tests/pytest/regressiontest.sh @@ -152,6 +152,7 @@ python3 ./test.py -f alter/alter_table_crash.py # client python3 ./test.py -f client/client.py +python3 ./test.py -f client/version.py # Misc python3 testCompress.py @@ -176,3 +177,6 @@ python3 ./test.py -f functions/function_stddev.py python3 ./test.py -f functions/function_sum.py python3 ./test.py -f functions/function_top.py python3 ./test.py -f functions/function_twa.py + +# tools +python3 test.py -f tools/taosdemo.py \ No newline at end of file diff --git a/tests/pytest/stream/new.py b/tests/pytest/stream/new.py index b8503f0b4e..eac93dc0e6 100644 --- a/tests/pytest/stream/new.py +++ b/tests/pytest/stream/new.py @@ -54,7 +54,7 @@ class TDTestCase: tdSql.execute("insert into tb%d values(now + %ds, %d, %d)" % (i, j, j, j)) tdLog.info("=============== step5") - tdLog.sleep(30) + tdLog.sleep(40) tdSql.waitedQuery("select * from st order by ts desc", 1, 120) v = tdSql.getData(0, 3) if v <= 51: diff --git a/tests/pytest/tag_lite/too_many_tag_condtions.py b/tests/pytest/tag_lite/too_many_tag_condtions.py new file mode 100644 index 0000000000..a40de405ff --- /dev/null +++ b/tests/pytest/tag_lite/too_many_tag_condtions.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + tdLog.info('======================== dnode1 start') + tbPrefix = "ta_cr_tb" + mtPrefix = "ta_cr_mt" + tbNum = 2 + rowNum = 10 + totalNum = 200 + tagCondsLimit = 1024 + tdLog.info('=============== step1: create tbl and prepare data') + i = 0 + i = 2 + mt = "%s%d" % (mtPrefix, i) + tb = "%s%d" % (tbPrefix, i) + + sql ='create table %s (ts timestamp, tbcol int) TAGS(tgcol int)'% (mt) + 
tdLog.info(sql) + tdSql.execute(sql) + for i in range(0, tbNum): + tblName = "%s%d"%(tbPrefix, i) + sql = 'create table %s using %s TAGS(%d)'%(tblName, mt, i) + tdSql.execute(sql) + for j in range(0, rowNum): + sql = "insert into %s values(now, %d)"%(tblName, j) + tdSql.execute(sql) + + sqlPrefix = "select * from %s where "%(mt) + for i in range(2, 2048, 1): + conds = "tgcol=1 and "* (i - 1) + conds = "%stgcol=1"%(conds) + sql = "%s%s"%(sqlPrefix, conds) + if i >= tagCondsLimit: + tdSql.error(sql) + else: + tdSql.query(sql) + #tdSql.checkRows(1) + + for i in range(2, 2048, 1): + conds = "" + for j in range(0, i - 1): + conds = conds + "tgcol=%d or "%(j%tbNum) + conds += "tgcol=%d"%(i%tbNum) + sql = sqlPrefix + conds + if i >= tagCondsLimit: + tdSql.error(sql) + else: + tdSql.query(sql) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemo.py b/tests/pytest/tools/taosdemo.py new file mode 100644 index 0000000000..54d33c90f3 --- /dev/null +++ b/tests/pytest/tools/taosdemo.py @@ -0,0 +1,44 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.numberOfTables = 10000 + self.numberOfRecords = 100 + + def run(self): + tdSql.prepare() + + os.system("yes | taosdemo -t %d -n %d" % (self.numberOfTables, self.numberOfRecords)) + + tdSql.execute("use test") + tdSql.query("select count(*) from meters") + tdSql.checkData(0, 0, self.numberOfTables * self.numberOfRecords) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/script/general/http/autocreate.sim b/tests/script/general/http/autocreate.sim index d6efbdbd65..6a005b028a 100644 --- a/tests/script/general/http/autocreate.sim +++ b/tests/script/general/http/autocreate.sim @@ -18,8 +18,8 @@ sql create table if not exists db.win_cpu(ts timestamp,f_percent_dpc_time double print =============== step2 - auto create -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'import into db.win_cpu_windows_1_processor using db.win_cpu tags('windows','1','Processor') values(1564641722000,0.000000,95.598305,0.000000,0.000000,0.000000,0.000000);' 127.0.0.1:6041/rest/sql -print curl 127.0.0.1:6041/rest/sql -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'import into db.win_cpu_windows_1_processor using db.win_cpu tags('windows','1','Processor') values(1564641722000,0.000000,95.598305,0.000000,0.000000,0.000000,0.000000);' 127.0.0.1:7111/rest/sql +print curl 127.0.0.1:7111/rest/sql -----> $system_content #if 
$system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then # return -1 #endi diff --git a/tests/script/general/http/grafana.sim b/tests/script/general/http/grafana.sim index 74b1e5637a..cea804cfbb 100644 --- a/tests/script/general/http/grafana.sim +++ b/tests/script/general/http/grafana.sim @@ -52,50 +52,50 @@ sql insert into t3 values('2017-12-25 21:27:41', 3) print =============== step2 - login -system_content curl 127.0.0.1:6041/grafana/ +system_content curl 127.0.0.1:7111/grafana/ print 1-> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then return -1 endi -system_content curl 127.0.0.1:6041/grafana/xx +system_content curl 127.0.0.1:7111/grafana/xx print 2-> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then return -1 endi -system_content curl 127.0.0.1:6041/grafana/login/xx/xx/ +system_content curl 127.0.0.1:7111/grafana/login/xx/xx/ print 3-> $system_content if $system_content != @{"status":"error","code":849,"desc":"Invalid user"}@ then return -1 endi -system_content curl 127.0.0.1:6041/grafana/root/1/123/1/1/3 +system_content curl 127.0.0.1:7111/grafana/root/1/123/1/1/3 print 4-> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:6041/grafana/login/1/root/1/ +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/grafana/login/1/root/1/ print 5-> $system_content if $system_content != @{"status":"error","code":849,"desc":"Invalid user"}@ then return -1 endi -system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:6041/grafana/root/1/login +system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:7111/grafana/root/1/login print 6-> $system_content if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then return -1 endi -system_content curl -H 'Authorization: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:6041/grafana/root/1/login +system_content curl -H 'Authorization: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:7111/grafana/root/1/login print 7-> $system_content if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then return -1 endi sleep 3000 -system_content curl 127.0.0.1:6041/grafana/login/root/taosdata +system_content curl 
127.0.0.1:7111/grafana/login/root/taosdata print 8-> $system_content if $system_content != @{"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"}@ then return -1 @@ -103,7 +103,7 @@ endi print =============== step3 - heartbeat -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:6041/grafana/d1/table_gc +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/grafana/d1/table_gc print 9-> $system_content if $system_content != @{"message":"Grafana server receive a quest from you!"}@ then return -1 @@ -111,19 +111,19 @@ endi print =============== step4 - search -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:6041/grafana/heartbeat +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/grafana/heartbeat print 10-> $system_content if $system_content != @{"message":"Grafana server receive a quest from you!"}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:6041/grafana/d1/table_invalid/search +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/grafana/d1/table_invalid/search print 11-> $system_content if $system_content != @{"message":"Grafana server receive a quest from you!"}@ then return -1 endi -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' 127.0.0.1:6041/grafana/d1/m1/search +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' 127.0.0.1:7111/grafana/d1/m1/search print 12-> $system_content if $system_content != @{"message":"Grafana server receive a quest from you!"}@ then return -1 @@ -131,49 +131,49 @@ endi print =============== step5 - query -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"taosd","sql":"select first(v1) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"},{"refId":"B","alias":"system","sql":"select first(v2) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"}]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"taosd","sql":"select first(v1) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"},{"refId":"B","alias":"system","sql":"select first(v2) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"}]' 127.0.0.1:7111/grafana/query print 13-> $system_content if $system_content != @[{"refId":"A","target":"taosd","datapoints":[[2,1514208480000]]},{"refId":"B","target":"system","datapoints":[[5.10000,1514208480000]]}]@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select first(v1) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"},{"refId":"B","alias":"","sql":"select first(v2) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"}]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 
'[{"refId":"A","alias":"","sql":"select first(v1) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"},{"refId":"B","alias":"","sql":"select first(v2) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"}]' 127.0.0.1:7111/grafana/query print 14-> $system_content if $system_content != @[{"refId":"A","target":"A","datapoints":[[2,1514208480000]]},{"refId":"B","target":"B","datapoints":[[5.10000,1514208480000]]}]@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select count(v1) from d1.m1"},{"refId":"B","alias":"","sql":"select count(v2) from d1.m1"}]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select count(v1) from d1.m1"},{"refId":"B","alias":"","sql":"select count(v2) from d1.m1"}]' 127.0.0.1:7111/grafana/query print 15-> $system_content if $system_content != @[{"refId":"A","target":"A","datapoints":[[3,"-"]]},{"refId":"B","target":"B","datapoints":[[3,"-"]]}]@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select count(v1) from d1.m1"},{"refId":"B","alias":"","sql":"select count(v2) from d1.m1"}]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select count(v1) from d1.m1"},{"refId":"B","alias":"","sql":"select count(v2) from d1.m1"}]' 127.0.0.1:7111/grafana/query print 15-> $system_content if $system_content != @[{"refId":"A","target":"A","datapoints":[[3,"-"]]},{"refId":"B","target":"B","datapoints":[[3,"-"]]}]@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select count(i) from d1.mt group by a"},{"refId":"B","alias":"","sql":"select sum(i) from d1.mt group by b"}]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select count(i) from d1.mt group by a"},{"refId":"B","alias":"","sql":"select sum(i) from d1.mt group by b"}]' 127.0.0.1:7111/grafana/query print 16-> $system_content if $system_content != @[{"refId":"A","target":"{a:1,}","datapoints":[[5,"-"]]},{"refId":"A","target":"{a:2,}","datapoints":[[4,"-"]]},{"refId":"A","target":"{a:3,}","datapoints":[[3,"-"]]},{"refId":"B","target":"{b:a}","datapoints":[[5,"-"]]},{"refId":"B","target":"{b:b}","datapoints":[[8,"-"]]},{"refId":"B","target":"{b:c}","datapoints":[[9,"-"]]}]@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"count","sql":"select count(i) from d1.mt group by a"},{"refId":"B","alias":"sum-","sql":"select sum(i) from d1.mt group by b"}]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"count","sql":"select count(i) from d1.mt group by a"},{"refId":"B","alias":"sum-","sql":"select sum(i) from d1.mt group by b"}]' 127.0.0.1:7111/grafana/query print 17-> $system_content if $system_content != 
@[{"refId":"A","target":"count{a:1,}","datapoints":[[5,"-"]]},{"refId":"A","target":"count{a:2,}","datapoints":[[4,"-"]]},{"refId":"A","target":"count{a:3,}","datapoints":[[3,"-"]]},{"refId":"B","target":"sum-{b:a}","datapoints":[[5,"-"]]},{"refId":"B","target":"sum-{b:b}","datapoints":[[8,"-"]]},{"refId":"B","target":"sum-{b:c}","datapoints":[[9,"-"]]}]@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"count","sql":"select count(i) from d1.mt interval(1m) group by a "}]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"count","sql":"select count(i) from d1.mt interval(1m) group by a "}]' 127.0.0.1:7111/grafana/query print 18-> $system_content if $system_content != @[{"refId":"A","target":"count{a:1,}","datapoints":[[1,1514208300000],[1,1514208360000],[1,1514208420000],[1,1514208480000],[1,1514208540000]]},{"refId":"A","target":"count{a:2,}","datapoints":[[1,1514208300000],[1,1514208360000],[1,1514208420000],[1,1514208480000]]},{"refId":"A","target":"count{a:3,}","datapoints":[[1,1514208300000],[1,1514208360000],[1,1514208420000]]}]@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select sum(v2), count(v1) from d1.m1"},{"refId":"B","alias":"","sql":"select count(v2), sum(v2) from d1.m1"}]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select sum(v2), count(v1) from d1.m1"},{"refId":"B","alias":"","sql":"select count(v2), sum(v2) from d1.m1"}]' 127.0.0.1:7111/grafana/query print 19-> $system_content if $system_content != @[{"refId":"A","target":"{count(v1):3}","datapoints":[[15.299999714,"-"]]},{"refId":"B","target":"{sum(v2):15.299999714}","datapoints":[[3,"-"]]}]@ then return -1 diff --git a/tests/script/general/http/grafana_bug.sim b/tests/script/general/http/grafana_bug.sim index 9c6a8a7000..43c52ba75f 100644 --- a/tests/script/general/http/grafana_bug.sim +++ b/tests/script/general/http/grafana_bug.sim @@ -33,7 +33,7 @@ sql insert into tb2 values('2020-01-04 00:00:00.000', 22, 214, 224) print =============== step1 - one query, 1 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"A","target":"A","datapoints":[[null,1577980800000],[null,1578067200000]]}]@ then return -1 @@ -41,7 +41,7 @@ endi print =============== step2 - one query, 2 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step2-> $system_content if $system_content != 
@[{"refId":"A","target":"A","datapoints":[[2,1577980800000],[2,1578067200000]]}]@ then return -1 @@ -49,80 +49,80 @@ endi print =============== step3 - one query, 3 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val,val1 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val,val1 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step3.1-> $system_content if $system_content != @[{"refId":"A","target":"{val1:13,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"{val1:14,}","datapoints":[[2,1578067200000]]}]@ then return -1 endi -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val,val1 from db.tb "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val,val1 from db.tb "} ]' 127.0.0.1:7111/grafana/query print step3.2-> $system_content if $system_content != @[{"refId":"A","target":"{val1:11,}","datapoints":[[1,1577808000000]]},{"refId":"A","target":"{val1:12,}","datapoints":[[1,1577894400000]]},{"refId":"A","target":"{val1:13,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"{val1:14,}","datapoints":[[2,1578067200000]]}]@ then return -1 endi -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val1,val from db.tb "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val1,val from db.tb "} ]' 127.0.0.1:7111/grafana/query print step3.3-> $system_content if $system_content != @[{"refId":"A","target":"{val:1,}","datapoints":[[11,1577808000000],[12,1577894400000]]},{"refId":"A","target":"{val:2,}","datapoints":[[13,1577980800000],[14,1578067200000]]}]@ then return -1 endi print =============== step4 - one query, 4 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val2,val1,val from db.tb "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val2,val1,val from db.tb "} ]' 127.0.0.1:7111/grafana/query print step4.1-> $system_content if $system_content != @[{"refId":"A","target":"{val1:11,, val:1,}","datapoints":[[21,1577808000000]]},{"refId":"A","target":"{val1:12,, val:1,}","datapoints":[[22,1577894400000]]},{"refId":"A","target":"{val1:13,, val:2,}","datapoints":[[23,1577980800000]]},{"refId":"A","target":"{val1:14,, val:2,}","datapoints":[[24,1578067200000]]}]@ then return -1 endi -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val,val1,val2 from db.tb "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val,val1,val2 from db.tb "} ]' 127.0.0.1:7111/grafana/query print step4.2-> $system_content if $system_content != @[{"refId":"A","target":"{val1:11,, val2:21,}","datapoints":[[1,1577808000000]]},{"refId":"A","target":"{val1:12,, val2:22,}","datapoints":[[1,1577894400000]]},{"refId":"A","target":"{val1:13,, 
val2:23,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"{val1:14,, val2:24,}","datapoints":[[2,1578067200000]]}]@ then return -1 endi print =============== step5 - one query, 1 column, no timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"A","target":"A","datapoints":[[2,"-"],[2,"-"]]}]@ then return -1 endi print =============== step6 - one query, 2 column, no timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"A","target":"{val2:23,}","datapoints":[[13,"-"]]},{"refId":"A","target":"{val2:24,}","datapoints":[[14,"-"]]}]@ then return -1 endi print =============== step7 - one query, 3 column, no timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"A","target":"{val2:23,, val:2,}","datapoints":[[13,"-"]]},{"refId":"A","target":"{val2:24,, val:2,}","datapoints":[[14,"-"]]}]@ then return -1 endi -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2,val from db.tb"} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2,val from db.tb"} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"A","target":"{val2:21,, val:1,}","datapoints":[[11,"-"]]},{"refId":"A","target":"{val2:22,, val:1,}","datapoints":[[12,"-"]]},{"refId":"A","target":"{val2:23,, val:2,}","datapoints":[[13,"-"]]},{"refId":"A","target":"{val2:24,, val:2,}","datapoints":[[14,"-"]]}]@ then return -1 endi print =============== step8 - one query, no return -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2,val from db.tb where ts >= 1677980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2,val from db.tb where ts >= 1677980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[]@ then return -1 endi print =============== step9 - one query, insert sql -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"insert into db.t2 values(now, 1) "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ 
{"refId":"A","alias":"","sql":"insert into db.t2 values(now, 1) "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[]@ then return -1 endi print =============== step10 - one query, error sql -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select * from db.tt "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select * from db.tt "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[]@ then return -1 @@ -130,7 +130,7 @@ endi print =============== step11 - two query, 1 column, with timestamp, 1 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"","sql":"select ts from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"","sql":"select ts from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB","datapoints":[[null,1577980800000],[null,1578067200000]]},{"refId":"A","target":"A","datapoints":[[null,1577980800000],[null,1578067200000]]}]@ then return -1 @@ -138,98 +138,98 @@ endi print =============== step12 - two query, 1 column, with timestamp, 2 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB","datapoints":[[null,1577980800000],[null,1578067200000]]},{"refId":"A","target":"A","datapoints":[[2,1577980800000],[2,1578067200000]]}]@ then return -1 endi print =============== step13 - two query, 1 column, with timestamp, 3 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"","sql":"select ts,val,val1 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"","sql":"select ts,val,val1 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB","datapoints":[[null,1577980800000],[null,1578067200000]]},{"refId":"A","target":"{val1:13,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"{val1:14,}","datapoints":[[2,1578067200000]]}]@ then return -1 endi print =============== step14 - two query, 2 column, with timestamp, 2 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ 
{"refId":"B","alias":"BB","sql":"select ts, val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB","datapoints":[[223,1577980800000],[224,1578067200000]]},{"refId":"A","target":"AA","datapoints":[[2,1577980800000],[2,1578067200000]]}]@ then return -1 endi print =============== step15 - two query, 2 column, with timestamp, 3 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val2, val1 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val2, val1 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB{val1:213,}","datapoints":[[223,1577980800000]]},{"refId":"B","target":"BB{val1:214,}","datapoints":[[224,1578067200000]]},{"refId":"A","target":"AA","datapoints":[[2,1577980800000],[2,1578067200000]]}]@ then return -1 endi print =============== step16 - two query, 3 column, with timestamp, 4 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val, val1, val2, val1 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val,val1 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val, val1, val2, val1 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val,val1 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB{val1:213,, val2:223,, val1:213,}","datapoints":[[22,1577980800000]]},{"refId":"B","target":"BB{val1:214,, val2:224,, val1:214,}","datapoints":[[22,1578067200000]]},{"refId":"A","target":"AA{val1:13,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"AA{val1:14,}","datapoints":[[2,1578067200000]]}]@ then return -1 endi print =============== step17 - two query, 2 column, with timestamp, no return -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb where ts >= 1677980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb where ts >= 1677980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != 
@[{"refId":"B","target":"BB","datapoints":[[22,1577980800000],[22,1578067200000]]}]@ then return -1 endi print =============== step18 - two query, 2 column, with timestamp, invalid sql -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb222 where ts >= 1677980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb222 where ts >= 1677980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,1577980800000],[22,1578067200000]]}]@ then return -1 endi print =============== step19 - two query, 2 column, with timestamp, insert sql -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"insert into db.t2 values(now, 1)"} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"insert into db.t2 values(now, 1)"} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,1577980800000],[22,1578067200000]]}]@ then return -1 endi print =============== step20 - two query, 1 column, no timestamp, 1 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,"-"],[22,"-"]]},{"refId":"A","target":"AA","datapoints":[[null,1577980800000],[null,1578067200000]]}]@ then return -1 endi print =============== step21 - two query, 1 column, no timestamp, 2 column, with timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val2 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val2 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,"-"],[22,"-"]]},{"refId":"A","target":"AA","datapoints":[[223,1577980800000],[224,1578067200000]]}]@ then return -1 endi print =============== step22 - two query, 1 column, no timestamp, 3 column, with timestamp -system_content curl -H 'Authorization: Basic 
cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val1, val2 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val1, val2 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,"-"],[22,"-"]]},{"refId":"A","target":"AA{val2:223,}","datapoints":[[213,1577980800000]]},{"refId":"A","target":"AA{val2:224,}","datapoints":[[214,1578067200000]]}]@ then return -1 endi print =============== step23 - two query, 2 column, no timestamp, 1 column, no timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val1,val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select val1 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val1,val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select val1 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB{val2:223,}","datapoints":[[213,"-"]]},{"refId":"B","target":"BB{val2:224,}","datapoints":[[214,"-"]]},{"refId":"A","target":"AA","datapoints":[[213,"-"],[214,"-"]]}]@ then return -1 endi print =============== step24 - two query, 2 column, no timestamp, 2 column, no timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val1,val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select val,val1 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val1,val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select val,val1 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != @[{"refId":"B","target":"BB{val2:223,}","datapoints":[[213,"-"]]},{"refId":"B","target":"BB{val2:224,}","datapoints":[[214,"-"]]},{"refId":"A","target":"AA{val1:213,}","datapoints":[[22,"-"]]},{"refId":"A","target":"AA{val1:214,}","datapoints":[[22,"-"]]}]@ then return -1 endi print =============== step25 - two query, 2 column, no timestamp, 3 column, no timestamp -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val1,val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select val,val1,val2 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:6041/grafana/query +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val1,val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select val,val1,val2 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query print step1-> $system_content if $system_content != 
@[{"refId":"B","target":"BB{val2:223,}","datapoints":[[213,"-"]]},{"refId":"B","target":"BB{val2:224,}","datapoints":[[214,"-"]]},{"refId":"A","target":"AA{val1:213,, val2:223,}","datapoints":[[22,"-"]]},{"refId":"A","target":"AA{val1:214,, val2:224,}","datapoints":[[22,"-"]]}]@ then return -1 diff --git a/tests/script/general/http/prepare.sim b/tests/script/general/http/prepare.sim index 1645c1a7e9..0bcb42ad41 100644 --- a/tests/script/general/http/prepare.sim +++ b/tests/script/general/http/prepare.sim @@ -34,8 +34,8 @@ sleep 4000 print =============== step2 - login -system_content curl 127.0.0.1:6041/rest/login/root/taosdata -print curl 127.0.0.1:6041/rest/login/root/taosdata -----> $system_content +system_content curl 127.0.0.1:7111/rest/login/root/taosdata +print curl 127.0.0.1:7111/rest/login/root/taosdata -----> $system_content if $system_content != @{"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"}@ then return -1 @@ -45,8 +45,8 @@ return print =============== step3 - query data -system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwia2V5IjoiYkVsOExjdkxVZDdhOHFkdE5abXRPTnJ5cDIwMW1hMDQiLCJzdWIiOiJyb290In0.k7CkgmpOJImIkLqZqzASlPmkdeEw7Wfk4XUrqGZX-LQ' -d 'select * from t1' 127.0.0.1:6041/rest/sql/d1 -print curl 127.0.0.1:6041/rest/sql/d1 -----> $system_content +system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwia2V5IjoiYkVsOExjdkxVZDdhOHFkdE5abXRPTnJ5cDIwMW1hMDQiLCJzdWIiOiJyb290In0.k7CkgmpOJImIkLqZqzASlPmkdeEw7Wfk4XUrqGZX-LQ' -d 'select * from t1' 127.0.0.1:7111/rest/sql/d1 +print curl 127.0.0.1:7111/rest/sql/d1 -----> $system_content if $system_content != @{"status":"succ","head":["ts","i","b"],"data":[["2017-12-25 21:28:54.022",14,"44\\\\\"\"44"],["2017-12-25 21:28:53.022",13,"33\\\\\"33"],["2017-12-25 21:28:52.022",12,"22\\\\11"],["2017-12-25 21:28:51.022",11,"11\\\\11"],["2017-12-25 21:28:49.022",9,"99\\99"],["2017-12-25 21:28:48.022",8,"88\"\"88"],["2017-12-25 21:28:47.022",7,"77\"7\""],["2017-12-25 21:28:46.022",6,"66'6'"],["2017-12-25 21:28:45.022",5,"55'"],["2017-12-25 21:28:44.022",4,"44\""],["2017-12-25 21:28:43.022",3,"33"],["2017-12-25 21:28:42.022",2,"22"],["2017-12-25 21:28:41.022",1,"11"]],"rows":13}@ then return -1 endi diff --git a/tests/script/general/http/restful.sim b/tests/script/general/http/restful.sim index ff0e823c6d..7d1169ca27 100644 --- a/tests/script/general/http/restful.sim +++ b/tests/script/general/http/restful.sim @@ -30,8 +30,8 @@ sql insert into table_rest values('2017-12-25 21:28:50.022', 10) print =============== step2 - login -system_content curl 127.0.0.1:6041/rest/login/root/taosdata -print curl 127.0.0.1:6041/rest/login/root/taosdata -----> $system_content +system_content curl 127.0.0.1:7111/rest/login/root/taosdata +print curl 127.0.0.1:7111/rest/login/root/taosdata -----> $system_content if $system_content != {"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"} then return -1 @@ -39,44 +39,44 @@ endi print =============== step3 - query data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest' 127.0.0.1:6041/rest/sql -print curl 127.0.0.1:6041/rest/sql -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest' 
127.0.0.1:7111/rest/sql +print curl 127.0.0.1:7111/rest/sql -----> $system_content if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then return -1 endi print =============== step4 - insert data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.table_rest values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:6041/rest/sql -print curl 127.0.0.1:6041/rest/sql -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.table_rest values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:7111/rest/sql +print curl 127.0.0.1:7111/rest/sql -----> $system_content if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[1]],"rows":1}@ then return -1 endi print =============== step5 - query data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest' 127.0.0.1:6041/rest/sql -print curl 127.0.0.1:6041/rest/sql -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest' 127.0.0.1:7111/rest/sql +print curl 127.0.0.1:7111/rest/sql -----> $system_content if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then return -1 endi print =============== step6 - query no db data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show dnodes' 127.0.0.1:6041/rest/sql -print curl 127.0.0.1:6041/rest/sql -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show dnodes' 127.0.0.1:7111/rest/sql +print curl 127.0.0.1:7111/rest/sql -----> $system_content print =============== step7 - change password sql create user u1 PASS 'abcd@1234' sql create user u2 PASS 'abcd_1234' -system_content curl 127.0.0.1:6041/rest/login/u1/abcd@1234 -print curl 127.0.0.1:6041/rest/login/u1/abcd@1234 -----> $system_content +system_content curl 127.0.0.1:7111/rest/login/u1/abcd@1234 +print curl 127.0.0.1:7111/rest/login/u1/abcd@1234 -----> $system_content if $system_content != @{"status":"succ","code":0,"desc":"jIlItaLFFIPa8qdtNZmtONryp201ma04SXX8PEJowKAB/46k1gwnPNryp201ma04"}@ then return -1 endi -system_content curl 127.0.0.1:6041/rest/login/u2/aabcd_1234 -print curl 127.0.0.1:6041/rest/login/u2/abcd_1234 -----> $system_content +system_content curl 127.0.0.1:7111/rest/login/u2/aabcd_1234 +print curl 127.0.0.1:7111/rest/login/u2/abcd_1234 -----> $system_content if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then return -1 endi diff --git a/tests/script/general/http/restful_full.sim 
b/tests/script/general/http/restful_full.sim index 0b3cfaa160..88e7dece4c 100644 --- a/tests/script/general/http/restful_full.sim +++ b/tests/script/general/http/restful_full.sim @@ -12,57 +12,57 @@ print ============================ dnode1 start print =============== step1 - login -system_content curl 127.0.0.1:6041/rest/ +system_content curl 127.0.0.1:7111/rest/ print 1-> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then return -1 endi -system_content curl 127.0.0.1:6041/rest/xx +system_content curl 127.0.0.1:7111/rest/xx print 2-> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then return -1 endi -system_content curl 127.0.0.1:6041/rest/login +system_content curl 127.0.0.1:7111/rest/login print 3-> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then return -1 endi #4 -system_content curl 127.0.0.1:6041/rest/login/root +system_content curl 127.0.0.1:7111/rest/login/root print 4-> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then return -1 endi -system_content curl 127.0.0.1:6041/rest/login/root/123 +system_content curl 127.0.0.1:7111/rest/login/root/123 print 5-> $system_content if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then return -1 endi -system_content curl 127.0.0.1:6041/rest/login/root/123/1/1/3 +system_content curl 127.0.0.1:7111/rest/login/root/123/1/1/3 print 6-> $system_content if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:6041/rest/login/root/1 +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/rest/login/root/1 print 7-> $system_content if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then return -1 endi #8 -system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:6041/rest/login/root/1 +system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:7111/rest/login/root/1 print 8-> $system_content if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then return -1 endi -system_content curl -H 'Authorization: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:6041/rest/login/root/1 +system_content curl -H 'Authorization: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:7111/rest/login/root/1 print 9-> $system_content if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then return -1 @@ -70,7 +70,7 @@ endi sleep 3000 -system_content curl 127.0.0.1:6041/rest/login/root/taosdata/ +system_content curl 
127.0.0.1:7111/rest/login/root/taosdata/ print 10-> $system_content if $system_content != @{"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"}@ then return -1 @@ -79,52 +79,52 @@ endi print =============== step2 - no db #11 -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/rest/sql print 11-> $system_content if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],"data":[],"rows":0}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d1' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d1' 127.0.0.1:7111/rest/sql print 12-> $system_content if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":0}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d1' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d1' 127.0.0.1:7111/rest/sql print 13-> $system_content if $system_content != @{"status":"error","code":897,"desc":"Database already exists"}@ then return -1 endi #14 -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '' 127.0.0.1:7111/rest/sql print 14-> $system_content if $system_content != @{"status":"error","code":5012,"desc":"no sql input"}@ then return -1 endi -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'use d1' 127.0.0.1:6041/rest/sql +#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'use d1' 127.0.0.1:7111/rest/sql #print 15-> $system_content #if $system_content != @{"status":"error","code":5017,"desc":"no need to execute use db cmd"}@ then #if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[1]],"rows":1}@ then # return -1 #endi -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' use d1' 127.0.0.1:6041/rest/sql +#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' use d1' 127.0.0.1:7111/rest/sql #print 16-> $system_content #if $system_content != @{"status":"error","code":5017,"desc":"no need to execute use db cmd"}@ then # return -1 #endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' used1' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' used1' 127.0.0.1:7111/rest/sql print 17-> $system_content if $system_content != 
@{"status":"error","code":512,"desc":"invalid SQL: invalid SQL: syntax error near 'used1'"}@ then return -1 endi #18 -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' show tables;' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' show tables;' 127.0.0.1:7111/rest/sql print 18-> $system_content if $system_content != @{"status":"error","code":896,"desc":"Database not specified or available"}@ then return -1 @@ -133,44 +133,44 @@ endi print =============== step3 - db #19 -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' show tables;' 127.0.0.1:6041/rest/sql/d4 +#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' show tables;' 127.0.0.1:7111/rest/sql/d4 #print 19-> $system_content #if $system_content != @{"status":"error","code":1000,"desc":"invalid DB"}@ then # return -1 #endi -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' show tables;' 127.0.0.1:6041/rest/sql/d1 +#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' show tables;' 127.0.0.1:7111/rest/sql/d1 #print 20-> $system_content #if $system_content != @{"status":"succ","head":["name","created time","columns","metric"],"data":[],"rows":0}@ then # return -1 #endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1;' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1;' 127.0.0.1:7111/rest/sql print 21-> $system_content if $system_content != @{"status":"error","code":866,"desc":"Table does not exist"}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' create table d1.t1 (ts timestamp, speed int)' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' create table d1.t1 (ts timestamp, speed int)' 127.0.0.1:7111/rest/sql print 22-> $system_content if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":0}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1 ' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1 ' 127.0.0.1:7111/rest/sql print 23-> $system_content if $system_content != @{"status":"succ","head":["ts","speed"],"data":[],"rows":0}@ then return -1 endi #24 -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:41.022', 1)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:41.022', 1)" 127.0.0.1:7111/rest/sql print 24-> $system_content if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[1]],"rows":1}@ then return -1 endi -system_content curl -H 'Authorization: 
Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1 ' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1 ' 127.0.0.1:7111/rest/sql print 25-> $system_content if $system_content != @{"status":"succ","head":["ts","speed"],"data":[["2017-12-25 21:28:41.022",1]],"rows":1}@ then return -1 @@ -178,53 +178,53 @@ endi #26 print 25-> no print -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:42.022', 2)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:42.022', 2)" 127.0.0.1:7111/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:43.022', 3)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:43.022', 3)" 127.0.0.1:7111/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:44.022', 4)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:44.022', 4)" 127.0.0.1:7111/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:45.022', 5)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:45.022', 5)" 127.0.0.1:7111/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:46.022', 6)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:46.022', 6)" 127.0.0.1:7111/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:47.022', 7)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:47.022', 7)" 127.0.0.1:7111/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:48.022', 8)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:48.022', 8)" 127.0.0.1:7111/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:49.022', 9)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert 
into d1.t1 values('2017-12-25 21:28:49.022', 9)" 127.0.0.1:7111/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:50.022', 10)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:50.022', 10)" 127.0.0.1:7111/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:7111/rest/sql #27 -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1 ' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1 ' 127.0.0.1:7111/rest/sql print 27-> $system_content if $system_content != @{"status":"succ","head":["ts","speed"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d2' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d2' 127.0.0.1:7111/rest/sql print 28-> $system_content if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":0}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' create table d2.t1 (ts timestamp, speed int)' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' create table d2.t1 (ts timestamp, speed int)' 127.0.0.1:7111/rest/sql print 29-> $system_content if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":0}@ then return -1 endi #30 -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d2.t1 values('2017-12-25 21:28:41.022', 1)" 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d2.t1 values('2017-12-25 21:28:41.022', 1)" 127.0.0.1:7111/rest/sql print 30-> $system_content if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[1]],"rows":1}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d2.t1 ' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d2.t1 ' 127.0.0.1:7111/rest/sql print 31-> $system_content if $system_content != 
@{"status":"succ","head":["ts","speed"],"data":[["2017-12-25 21:28:41.022",1]],"rows":1}@ then return -1 diff --git a/tests/script/general/http/restful_insert.sim b/tests/script/general/http/restful_insert.sim index ecdaef987e..f230f98723 100644 --- a/tests/script/general/http/restful_insert.sim +++ b/tests/script/general/http/restful_insert.sim @@ -28,8 +28,8 @@ sql create table d1.table_rest9 (ts timestamp, i int) print =============== step2 - login -system_content curl 127.0.0.1:6041/rest/login/root/taosdata -print curl 127.0.0.1:6041/rest/login/root/taosdata -----> $system_content +system_content curl 127.0.0.1:7111/rest/login/root/taosdata +print curl 127.0.0.1:7111/rest/login/root/taosdata -----> $system_content if $system_content != {"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"} then return -1 @@ -37,17 +37,17 @@ endi print =============== step3 - query data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6041/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6041/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6041/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6041/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6041/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6041/rest/sql -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:6041/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 
127.0.0.1:7111/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql print =============== step5 - query data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest1' 127.0.0.1:6041/rest/sql -print curl 127.0.0.1:6041/rest/sql -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest1' 127.0.0.1:7111/rest/sql +print curl 127.0.0.1:7111/rest/sql -----> $system_content system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/http/restful_limit.sim b/tests/script/general/http/restful_limit.sim index a88bdec912..7d2b6e9a02 100644 --- a/tests/script/general/http/restful_limit.sim +++ b/tests/script/general/http/restful_limit.sim @@ -37,10 +37,10 @@ while $i < 2 $i = $i + 1 endw -system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from db0.st0 limit 100' 127.0.0.1:6041/rest/sql -print curl 127.0.0.1:6041/rest/sql -----> $system_content +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from db0.st0 limit 100' 127.0.0.1:7111/rest/sql +print curl 127.0.0.1:7111/rest/sql -----> $system_content -#system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d "select * from db0.st0 where tbname in ('tb0', 'tb1') limit 1000" 127.0.0.1:6041/rest/sql -#print curl 127.0.0.1:6041/rest/sql -----> $system_content +#system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d "select * from db0.st0 where tbname in ('tb0', 'tb1') limit 1000" 127.0.0.1:7111/rest/sql +#print curl 127.0.0.1:7111/rest/sql -----> $system_content system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/http/telegraf.sim b/tests/script/general/http/telegraf.sim index a845c2b12f..e54af99ad7 100644 --- a/tests/script/general/http/telegraf.sim +++ b/tests/script/general/http/telegraf.sim @@ -13,231 +13,231 @@ sql connect print ============================ dnode1 start 
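Every case in the step1 block that follows uses the same short assertion pattern; a minimal skeleton of it is sketched here for readers new to the sim test DSL. The angle-bracket placeholders are illustrative only; the credentials and port 7111 are the ones this script itself uses.

# skeleton (illustrative): send the request, capture the response body in $system_content,
# then fail the case unless the body matches the expected JSON verbatim
system_content curl -u root:taosdata -d '<payload-json>' 127.0.0.1:7111/telegraf/<db-path>
print $system_content
if $system_content != @<expected-json>@ then
  return -1
endi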
print =============== step1 - parse -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/ +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/ print $system_content if $system_content != @{"status":"error","code":5022,"desc":"database name can not be null"}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/ +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/ print $system_content if $system_content != @{"status":"error","code":5022,"desc":"database name can not be null"}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/d123456789012345678901234567890123456 +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/d123456789012345678901234567890123456 print $system_content if $system_content != @{"status":"error","code":5023,"desc":"database name too long"}@ then return -1 endi -system_content curl -u root:taosdata -d '[]' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '[]' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then return -1 endi -system_content curl -u root:taosdata -d '{}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if 
$system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{}]' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '[{}]' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"metrics": []}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"metrics": []}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5025,"desc":"metrics size is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"metrics": [{}]}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"metrics": [{}]}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"metrics": 12}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"metrics": 12}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5025,"desc":"metrics size is 0"}@ then return -1 endi -#system_content curl -u root:taosdata -d '{"metrics": [{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +#system_content curl -u root:taosdata -d '{"metrics": [{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]}' 127.0.0.1:7111/telegraf/db/root/taosdata1 #print $system_content #if $system_content != @{"status":"error","code":5026,"desc":"metrics size can not more than 50"}@ then # return -1 #endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then return -1 endi -system_content curl -u root:taosdata -d 
'{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":111,"tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":111,"tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5028,"desc":"metric name type should be string"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5029,"desc":"metric name length is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5030,"desc":"metric name length too long"}@ then return -1 endi -system_content curl -u root:taosdata -d 
'{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"}}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"}}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5031,"desc":"timestamp not find"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":""}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":""}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5032,"desc":"timestamp type should be integer"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":-1}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":-1}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5033,"desc":"timestamp value smaller than 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5034,"desc":"tags not find"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata 
-d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5035,"desc":"tags size is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":"","timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":"","timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5035,"desc":"tags size is 0"}@ then return -1 endi -#system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor","host":"windows","instance":"1","objectname":"Processor","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata +#system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor","host":"windows","instance":"1","objectname":"Processor","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata #print $system_content #if $system_content != @{"status":"error","code":5036,"desc":"tags size too long"}@ then # return -1 #endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5035,"desc":"tags size is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"":"windows"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"":"windows"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 
print $system_content if $system_content != @{"status":"error","code":5038,"desc":"tag name is null"}@ then return -1 endi -#system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host111111111111222222222222222222222":""},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +#system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host111111111111222222222222222222222":""},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 #print $system_content #if $system_content != @{"status":"error","code":5039,"desc":"tag name length too long"}@ then # return -1 #endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":true},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":true},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5040,"desc":"tag value type should be number or string"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":""},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":""},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5041,"desc":"tag value is null"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"5022":"111"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"5022":"111"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5042,"desc":"table is null"}@ then return -1 endi -system_content curl -u root:taosdata -d 
'{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5043,"desc":"table name length too long"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5045,"desc":"fields size is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5048,"desc":"field name is null"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":"","Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":"","Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5051,"desc":"field value is null"}@ then return -1 endi 
-system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":true,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6041/telegraf/db/root/taosdata1 +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":true,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:7111/telegraf/db/root/taosdata1 print $system_content if $system_content != @{"status":"error","code":5050,"desc":"field value type should be number or string"}@ then return -1 endi -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1564641722000}' 127.0.0.1:6041/telegraf/db +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1564641722000}' 127.0.0.1:7111/telegraf/db print $system_content @@ -248,7 +248,7 @@ endi sleep 3000 print =============== step2 - insert single data -system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1564641722000}' 127.0.0.1:6041/telegraf/db/ +system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1564641722000}' 127.0.0.1:7111/telegraf/db/ print $system_content @@ -256,7 +256,7 @@ print $system_content # return -1 #endi -system_content curl -u root:taosdata -d 'select * from db.win_cpu_windows_1_Processor' 127.0.0.1:6041/rest/sql/ +system_content curl -u root:taosdata -d 'select * from db.win_cpu_windows_1_Processor' 127.0.0.1:7111/rest/sql/ print $system_content @@ -265,7 +265,7 @@ print $system_content #endi print =============== step3 - multi-query data -system_content curl -u root:taosdata -d '{"metrics": [{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"window1","instance":"1","objectname":"Processor"},"timestamp":1564641723000},{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"window2","instance":"1","objectname":"Processor"},"timestamp":1564641723000}]}' 127.0.0.1:6041/telegraf/db/ +system_content curl -u root:taosdata -d '{"metrics": 
[{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"window1","instance":"1","objectname":"Processor"},"timestamp":1564641723000},{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"window2","instance":"1","objectname":"Processor"},"timestamp":1564641723000}]}' 127.0.0.1:7111/telegraf/db/ print $system_content @@ -273,7 +273,7 @@ if $system_content != @{"metrics":[{"metric":"win_cpu","stable":"win_cpu","table return -1 endi -system_content curl -u root:taosdata -d 'select * from db.win_cpu_window1_1_Processor' 127.0.0.1:6041/rest/sql/ +system_content curl -u root:taosdata -d 'select * from db.win_cpu_window1_1_Processor' 127.0.0.1:7111/rest/sql/ print $system_content @@ -281,7 +281,7 @@ print $system_content # return -1 #endi -system_content curl -u root:taosdata -d 'select count(*) from db.win_cpu' 127.0.0.1:6041/rest/sql/ +system_content curl -u root:taosdata -d 'select count(*) from db.win_cpu' 127.0.0.1:7111/rest/sql/ print $system_content diff --git a/tests/script/general/parser/bug.sim b/tests/script/general/parser/bug.sim index f97905d76c..2a46ad1fd6 100644 --- a/tests/script/general/parser/bug.sim +++ b/tests/script/general/parser/bug.sim @@ -38,6 +38,6 @@ sql insert into t2 values(1575880055000, 2); sql select st1.ts, st1.f1, st2.f2 from db.st1, db.st2 where st1.t1=st2.t2 and st1.ts=st2.ts -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select st1.ts, st1.f1, st2.f2 from db.st1, db.st2 where st1.t1=st2.t2 and st1.ts=st2.ts' 127.0.0.1:6041/restful/sql +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select st1.ts, st1.f1, st2.f2 from db.st1, db.st2 where st1.t1=st2.t2 and st1.ts=st2.ts' 127.0.0.1:7111/restful/sql system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/join.sim b/tests/script/general/parser/join.sim index 07f2cd3f77..882f561ae1 100644 --- a/tests/script/general/parser/join.sim +++ b/tests/script/general/parser/join.sim @@ -257,6 +257,21 @@ if $data01 != $val then return -1 endi +sql select count(join_tb1.*) + count(join_tb0.*) from join_tb1 , join_tb0 where join_tb1.ts = join_tb0.ts and join_tb1.ts >= 100000 and join_tb0.c7 = false;; +if $rows != 1 then + return -1 +endi + +if $data00 != 20.000000000 then + print expect 20.000000000 actual $data00 + return -1 +endi + +sql select count(join_tb1.*)/10 from join_tb1 , join_tb0 where join_tb1.ts = join_tb0.ts and join_tb1.ts >= 100000 and join_tb0.c7 = false;; +if $data00 != 1.000000000 then + return -1 +endi + print 3 #agg + where condition sql select count(join_tb1.c3), count(join_tb0.ts) from $tb1 , $tb2 where $ts1 = $ts2 and join_tb1.ts <= 100002 and join_tb0.c7 = true; @@ -381,6 +396,15 @@ if $data00 != $val then return -1 endi +sql select sum(join_mt0.c1)+sum(join_mt0.c1) from join_mt0, join_mt1 where join_mt0.ts = join_mt1.ts and join_mt0.t1=join_mt1.t1 and join_mt0.c2=99 and join_mt1.ts=100999; +if $rows != 1 then + return -1 +endi + +if $data00 != 396.000000000 then + return -1 +endi + # first/last sql select count(join_mt0.c1), sum(join_mt1.c2), first(join_mt0.c5), last(join_mt1.c7) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and 
join_mt0.ts=join_mt1.ts and join_mt0.t1=1 interval(10a) order by join_mt0.ts asc; diff --git a/tests/script/general/parser/lastrow_query.sim b/tests/script/general/parser/lastrow_query.sim index e43cc15173..98eb5a8d6d 100644 --- a/tests/script/general/parser/lastrow_query.sim +++ b/tests/script/general/parser/lastrow_query.sim @@ -51,3 +51,80 @@ if $data09 != NCHAR then return -1 endi +# regression test case 1 +sql select count(*) from lr_tb1 where ts>'2018-09-18 08:45:00.1' and ts<'2018-09-18 08:45:00.2' +if $row != 0 then + return -1 +endi + +# regression test case 2 +sql select count(*) from lr_db0.lr_stb0 where ts>'2018-9-18 8:00:00' and ts<'2018-9-18 14:00:00' interval(1s) fill(NULL); +if $row != 21600 then + return -1 +endi + +#regression test case 3 +sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 limit 1 +if $row != 2 then + return -1 +endi + +if $data01 != 7 then + return -1 +endi + +if $data02 != 7 then + return -1 +endi + +if $data03 != 59 then + print expect 59, actual: $data03 + return -1 +endi + +if $data04 != 7 then + return -1 +endi + +if $data11 != 8 then + return -1 +endi + +if $data12 != 8 then + return -1 +endi + +if $data13 != NULL then + return -1 +endi + +sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 limit 9 +if $rows != 18 then + return -1 +endi + +sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 limit 12 +if $rows != 24 then + return -1 +endi + +sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 limit 25 +if $rows != 48 then + return -1 +endi + +sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 limit 25 offset 1 +if $rows != 46 then + return -1 +endi + +sql select t1,t1,count(*),tbname,t1,t1,tbname from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1s) fill(NULL) group by tbname, t1 slimit 2 soffset 0 limit 250000 offset 1 +if $rows != 172798 then + return -1 +endi + +sql select t1,t1,count(*),tbname,t1,t1,tbname from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1s) fill(NULL) group by tbname, t1 slimit 1 soffset 1 limit 250000 offset 1 +if $rows != 86399 then + return -1 +endi + diff --git a/tests/script/general/parser/sliding.sim b/tests/script/general/parser/sliding.sim index 177c95651f..f85211beb8 100644 --- a/tests/script/general/parser/sliding.sim +++ b/tests/script/general/parser/sliding.sim @@ -1,7 +1,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/cfg.sh -n dnode1 -c debugFlag -v 135 system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135 system sh/exec.sh -n dnode1 -s start @@ -28,7 +28,7 @@ $mt = $mtPrefix . 
$i sql drop database if exits $db -x step1 step1: -sql create database if not exists $db tables 4 keep 36500 +sql create database if not exists $db maxtables 4 keep 36500 sql use $db sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12)) diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim index 1e73893793..aafba2d328 100644 --- a/tests/script/general/parser/testSuite.sim +++ b/tests/script/general/parser/testSuite.sim @@ -97,6 +97,8 @@ run general/parser/topbot.sim sleep 2000 run general/parser/union.sim sleep 2000 +run general/parser/bug.sim +sleep 2000 run general/parser/sliding.sim #sleep 2000 diff --git a/tests/script/general/parser/topbot.sim b/tests/script/general/parser/topbot.sim index a0c46dbc65..fdda79451d 100644 --- a/tests/script/general/parser/topbot.sim +++ b/tests/script/general/parser/topbot.sim @@ -66,9 +66,56 @@ if $row != 100 then return -1 endi -sql select last(c2) from tb_tb9 +sql select bottom(c1, 100) from tb_stb0 +if $row != 100 then + return -1 +endi + +sql select last(*) from tb_tb9 if $row != 1 then return -1 endi +sql select last(c2) from tb_tb9 +if $row != 0 then + return -1 +endi + +sql select first(c2), last(c2) from tb_tb9 +if $row != 0 then + return -1 +endi + +sql create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20)); +sql create table test1 using test tags('beijing'); +sql insert into test1 values(1537146000000, 1, 1, 1, 1, 0.100000, 0.100000, 0, 'taosdata1', '涛思数据1'); +sql insert into test1 values(1537146000001, 2, 2, 2, 2, 1.100000, 1.100000, 1, 'taosdata2', '涛思数据2'); +sql insert into test1 values(1537146000002, 3, 3, 3, 3, 2.100000, 2.100000, 0, 'taosdata3', '涛思数据3'); +sql insert into test1 values(1537146000003, 4, 4, 4, 4, 3.100000, 3.100000, 1, 'taosdata4', '涛思数据4'); +sql insert into test1 values(1537146000004, 5, 5, 5, 5, 4.100000, 4.100000, 0, 'taosdata5', '涛思数据5'); +sql insert into test1 values(1537146000005, 6, 6, 6, 6, 5.100000, 5.100000, 1, 'taosdata6', '涛思数据6'); +sql insert into test1 values(1537146000006, 7, 7, 7, 7, 6.100000, 6.100000, 0, 'taosdata7', '涛思数据7'); +sql insert into test1 values(1537146000007, 8, 8, 8, 8, 7.100000, 7.100000, 1, 'taosdata8', '涛思数据8'); +sql insert into test1 values(1537146000008, 9, 9, 9, 9, 8.100000, 8.100000, 0, 'taosdata9', '涛思数据9'); +sql insert into test1 values(1537146000009, 10, 10, 10, 10, 9.100000, 9.100000, 1, 'taosdata10', '涛思数据10'); +sql select bottom(col5, 10) from test +if $rows != 10 then + return -1 +endi + +if $data01 != 0.10000 then + print expect 0.10000 actual: $data01 + return -1 +endi + +if $data11 != 1.10000 then + print expect 1.10000 actual: $data11 + return -1 +endi + +if $data21 != 2.10000 then + print expect 2.10000 actual: $data21 + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/union.sim b/tests/script/general/parser/union.sim index 358bcb8a40..fbd1c211b9 100644 --- a/tests/script/general/parser/union.sim +++ b/tests/script/general/parser/union.sim @@ -1,10 +1,10 @@ -#system sh/stop_dnodes.sh -# -#system sh/deploy.sh -n dnode1 -i 1 -#system sh/cfg.sh -n dnode1 -c walLevel -v 0 -#system sh/cfg.sh -n dnode1 -c debugFlag -v 135 -#system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135 -#system sh/exec.sh -n dnode1 -s start +system 
sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c debugFlag -v 135 +system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135 +system sh/exec.sh -n dnode1 -s start sleep 1000 sql connect @@ -24,77 +24,77 @@ $mt = $mtPrefix . $i $j = 1 $mt1 = $mtPrefix . $j -# -#sql drop database if exits $db -x step1 -#step1: -#sql create database if not exists $db maxtables 4 + +sql drop database if exits $db -x step1 +step1: +sql create database if not exists $db maxtables 4 sql use $db -#sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int) -# -#$i = 0 -#$t = 1578203484000 -# -#while $i < $tbNum -# $tb = $tbPrefix . $i -# sql create table $tb using $mt tags( $i ) -# -# $x = 0 -# while $x < $rowNum -# $ms = $x * 1000 -# $ms = $ms * 60 -# -# $c = $x / 100 -# $c = $c * 100 -# $c = $x - $c -# $binary = 'binary . $c -# $binary = $binary . ' -# $nchar = 'nchar . $c -# $nchar = $nchar . ' -# -# $t1 = $t + $ms -# sql insert into $tb values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) -# $x = $x + 1 -# endw -# -# $i = $i + 1 -#endw -# -#sql create table $mt1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int) -# -#$j = 0 -#$t = 1578203484000 -#$rowNum = 1000 -#$tbNum = 5 -#$i = 0 -# -#while $i < $tbNum -# $tb1 = $tbPrefix1 . $j -# sql create table $tb1 using $mt1 tags( $i ) -# -# $x = 0 -# while $x < $rowNum -# $ms = $x * 1000 -# $ms = $ms * 60 -# -# $c = $x / 100 -# $c = $c * 100 -# $c = $x - $c -# $binary = 'binary . $c -# $binary = $binary . ' -# $nchar = 'nchar . $c -# $nchar = $nchar . ' -# -# $t1 = $t + $ms -# sql insert into $tb1 values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) -# $x = $x + 1 -# endw -# -# $i = $i + 1 -# $j = $j + 1 -#endw -# -#print sleep 1sec. -#sleep 1000 +sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int) + +$i = 0 +$t = 1578203484000 + +while $i < $tbNum + $tb = $tbPrefix . $i + sql create table $tb using $mt tags( $i ) + + $x = 0 + while $x < $rowNum + $ms = $x * 1000 + $ms = $ms * 60 + + $c = $x / 100 + $c = $c * 100 + $c = $x - $c + $binary = 'binary . $c + $binary = $binary . ' + $nchar = 'nchar . $c + $nchar = $nchar . ' + + $t1 = $t + $ms + sql insert into $tb values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) + $x = $x + 1 + endw + + $i = $i + 1 +endw + +sql create table $mt1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int) + +$j = 0 +$t = 1578203484000 +$rowNum = 1000 +$tbNum = 5 +$i = 0 + +while $i < $tbNum + $tb1 = $tbPrefix1 . $j + sql create table $tb1 using $mt1 tags( $i ) + + $x = 0 + while $x < $rowNum + $ms = $x * 1000 + $ms = $ms * 60 + + $c = $x / 100 + $c = $c * 100 + $c = $x - $c + $binary = 'binary . $c + $binary = $binary . ' + $nchar = 'nchar . $c + $nchar = $nchar . ' + + $t1 = $t + $ms + sql insert into $tb1 values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar ) + $x = $x + 1 + endw + + $i = $i + 1 + $j = $j + 1 +endw + +print sleep 1sec. +sleep 1000 $i = 1 $tb = $tbPrefix . 
$i @@ -412,4 +412,8 @@ if $data10 != @union_db0@ then return -1 endi +sql_error show tables union all show tables +sql_error show stables union all show stables +sql_error show databases union all show databases + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index e0f6b7b197..a48584b0ed 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -135,7 +135,6 @@ cd ../../../debug; make ./test.sh -f general/parser/limit2.sim ./test.sh -f general/parser/fill.sim ./test.sh -f general/parser/fill_stb.sim -#./test.sh -f general/parser/fill_us.sim ./test.sh -f general/parser/where.sim ./test.sh -f general/parser/slimit.sim ./test.sh -f general/parser/select_with_tags.sim @@ -143,7 +142,6 @@ cd ../../../debug; make ./test.sh -f general/parser/tags_dynamically_specifiy.sim ./test.sh -f general/parser/groupby.sim ./test.sh -f general/parser/set_tag_vals.sim -#./test.sh -f general/parser/sliding.sim ./test.sh -f general/parser/tags_filter.sim ./test.sh -f general/parser/slimit_alter_tags.sim ./test.sh -f general/parser/join.sim @@ -151,6 +149,8 @@ cd ../../../debug; make ./test.sh -f general/parser/binary_escapeCharacter.sim ./test.sh -f general/parser/bug.sim ./test.sh -f general/parser/repeatAlter.sim +./test.sh -f general/parser/union.sim +./test.sh -f general/parser/topbot.sim ./test.sh -f general/stable/disk.sim ./test.sh -f general/stable/dnode3.sim diff --git a/tests/script/tmp/prepare.sim b/tests/script/tmp/prepare.sim index 0a0c512b26..f59eebede0 100644 --- a/tests/script/tmp/prepare.sim +++ b/tests/script/tmp/prepare.sim @@ -30,4 +30,13 @@ system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 100000 system sh/cfg.sh -n dnode1 -c http -v 1 system sh/cfg.sh -n dnode2 -c http -v 1 system sh/cfg.sh -n dnode3 -c http -v 1 -system sh/cfg.sh -n dnode4 -c http -v 1 \ No newline at end of file +system sh/cfg.sh -n dnode4 -c http -v 1 + + +system sh/cfg.sh -n dnode1 -c walLevel -v 2 + + +system sh/cfg.sh -n dnode1 -c firstEp -v 152.136.17.116:6030 +system sh/cfg.sh -n dnode1 -c secondEp -v 152.136.17.116:6030 +system sh/cfg.sh -n dnode1 -c serverPort -v 6030 +system sh/cfg.sh -n dnode1 -c fqdn -v 152.136.17.116 diff --git a/tests/script/unique/big/balance.sim b/tests/script/unique/big/balance.sim index a9092ac790..91a7c538d2 100644 --- a/tests/script/unique/big/balance.sim +++ b/tests/script/unique/big/balance.sim @@ -321,6 +321,63 @@ if $data00 != $totalNum then goto show5 endi + +print ========== step6 +sleep 3000 +sql alter database db replica 1 + +$x = 0 +show6: + $x = $x + 1 + sleep 3000 + if $x == 10 then + return -1 + endi + +sql show dnodes +print dnode1 freeVnodes $data2_1 +print dnode4 freeVnodes $data2_4 +if $data2_1 != 2 then + goto show6 +endi +if $data2_4 != 2 then + goto show6 +endi + +sql reset query cache +sleep 1000 + +sql select count(*) from t10 +print select count(*) from t10 $data00 expect $rowNum +if $data00 != $rowNum then + goto show5 +endi + +sql select count(*) from t1010 +print select count(*) from t1010 $data00 expect $rowNum +if $data00 != $rowNum then + goto show5 +endi + +sql select count(*) from t2010 +print select count(*) from t2010 $data00 expect $rowNum +if $data00 != $rowNum then + goto show5 +endi + +sql select count(*) from t3010 +print select count(*) from t3010 $data00 expect $rowNum +if $data00 != $rowNum then + goto show5 +endi + +sql select count(*) from mt +print select count(*) from mt $data00 expect $rowNum +if $data00 != 
$totalNum then + goto show5 +endi + + system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode2 -s stop -x SIGINT system sh/exec.sh -n dnode3 -s stop -x SIGINT diff --git a/tests/script/unique/http/admin.sim b/tests/script/unique/http/admin.sim index cca2d096f6..dc17520d02 100644 --- a/tests/script/unique/http/admin.sim +++ b/tests/script/unique/http/admin.sim @@ -31,63 +31,63 @@ sql insert into table_admin values('2017-12-25 21:28:50.022', 10) print =============== step1 - login -system_content curl 127.0.0.1:6041/admin/ +system_content curl 127.0.0.1:7111/admin/ print 1-> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then return -1 endi -system_content curl 127.0.0.1:6041/admin/xx +system_content curl 127.0.0.1:7111/admin/xx print 2-> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then return -1 endi -system_content curl 127.0.0.1:6041/admin/login +system_content curl 127.0.0.1:7111/admin/login print 3-> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then return -1 endi -system_content curl 127.0.0.1:6041/admin/login/root +system_content curl 127.0.0.1:7111/admin/login/root print 4-> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then return -1 endi -system_content curl 127.0.0.1:6041/admin/login/root/123 +system_content curl 127.0.0.1:7111/admin/login/root/123 print 5-> $system_content if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then return -1 endi -system_content curl 127.0.0.1:6041/admin/login/root/123/1/1/3 +system_content curl 127.0.0.1:7111/admin/login/root/123/1/1/3 print 6-> $system_content if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then return -1 endi -system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.' -d 'show databases' 127.0.0.1:6041/admin/login/root/1 +system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.' 
-d 'show databases' 127.0.0.1:7111/admin/login/root/1 print 7-> $system_content if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then return -1 endi -system_content curl -H 'Authorization: Taosd eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' 127.0.0.1:6041/admin/login/root/1 +system_content curl -H 'Authorization: Taosd eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' 127.0.0.1:7111/admin/login/root/1 print 8-> $system_content if $system_content != @{"status":"error","code":5053,"desc":"parse http auth token error"}@ then return -1 endi sleep 3000 -system_content curl 127.0.0.1:6041/admin/login/root/taosdata +system_content curl 127.0.0.1:7111/admin/login/root/taosdata print 9 -----> $system_content if $system_content != {"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"} then return -1 endi -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:6041/admin/login/root/1 +#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/admin/login/root/1 #print 10-> $system_content #if $system_content != @{"status":"error","code":29,"desc":"failed to connect to server"}@ then # return -1 @@ -95,14 +95,14 @@ endi print =============== step2 - logout -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:6041/admin/logout +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/logout print 10 -----> $system_content if $system_content != @{"status":"succ","code":0,"desc":"logout success"}@ then return -1 endi -system_content curl 127.0.0.1:6041/admin/logout +system_content curl 127.0.0.1:7111/admin/logout print 11 -----> $system_content if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then @@ -111,69 +111,69 @@ endi print =============== step3 - info -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:6041/admin/info -print curl 127.0.0.1:6041/admin/info -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/info +print curl 127.0.0.1:7111/admin/info -----> $system_content if $system_content != {"status":"succ","data":[{"dbs":1,"tables":1,"users":3,"mnodes":1,"dnodes":1}]} then return -1 endi print =============== step4 - meta -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show mnodes' 127.0.0.1:6041/admin/meta -print curl 127.0.0.1:6041/admin/meta -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show mnodes' 127.0.0.1:7111/admin/meta +print curl 127.0.0.1:7111/admin/meta -----> $system_content #if $system_content != @{"status":"succ","head":["column type","column name","column bytes"],"data":[["binary","IP",16],["timestamp","created time",8],["binary","status",10],["binary","role",10],["binary","public ip",16]],"rows":5}@ 
then # return -1 #endi print =============== step5 - query data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:6041/admin/sql -print curl 127.0.0.1:6041/admin/all -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql +print curl 127.0.0.1:7111/admin/all -----> $system_content if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:6041/admin/sql -print curl 127.0.0.1:6041/admin/sql -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql +print curl 127.0.0.1:7111/admin/sql -----> $system_content if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then return -1 endi print =============== step6 - insert data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.table_admin values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:6041/admin/sql -print curl 127.0.0.1:6041/admin/sql -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.table_admin values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:7111/admin/sql +print curl 127.0.0.1:7111/admin/sql -----> $system_content if $system_content != @{"status":"succ","head":["affect_rows"],"data":[[1]],"rows":1}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:6041/admin/all -print curl 127.0.0.1:6041/admin/all -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/all +print curl 127.0.0.1:7111/admin/all -----> $system_content if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then return -1 endi -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:6041/admin/sql -#print curl 
127.0.0.1:6041/admin/sql -----> $system_content +#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql +#print curl 127.0.0.1:7111/admin/sql -----> $system_content #if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:51.022",11],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:42.022",2]],"rows":10}@ then # return -1 #endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:6041/admin/info -print curl 127.0.0.1:6041/admin/info -----> $system_content +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/info +print curl 127.0.0.1:7111/admin/info -----> $system_content if $system_content != {"status":"succ","data":[{"dbs":1,"tables":1,"users":3,"mnodes":1,"dnodes":1}]} then return -1 endi print =============== step7 - use dbs -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'use d1;' 127.0.0.1:6041/admin/all +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'use d1;' 127.0.0.1:7111/admin/all print 23-> $system_content if $system_content != @{"status":"error","code":5017,"desc":"no need to execute use db cmd"}@ then return -1 endi print =============== step8 - monitor dbs -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show dnodes;show mnodes;' 127.0.0.1:6041/admin/sqls +#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show dnodes;show mnodes;' 127.0.0.1:7111/admin/sqls #print 24-> $system_content #if $system_content != @[{"status":"succ","head":["IP","created time","open vnodes","free vnodes","status","balance state"],"data":[["127.0.0.1","2018-09-04 #11:16:13.985",1,3,"ready","balanced"]],"rows":1},{"status":"succ","head":["IP","created time","status","role"],"data":[["127.0.0.1","2018-09-04 11:16:13.371","serving","master"]],"rows":1}]@ then # return -1 diff --git a/tests/script/unique/http/opentsdb.sim b/tests/script/unique/http/opentsdb.sim index e14d0de8f2..4901c5b3fd 100644 --- a/tests/script/unique/http/opentsdb.sim +++ b/tests/script/unique/http/opentsdb.sim @@ -11,92 +11,92 @@ sql connect print ============================ dnode1 start print =============== step1 - parse -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/ +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/ print $system_content if $system_content != @{"status":"error","code":5057,"desc":"database name can not be null"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db123456789012345678901234567890db +system_content curl -u root:taosdata -d '[{"metric": 
"sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db123456789012345678901234567890db print $system_content if $system_content != @{"status":"error","code":5058,"desc":"database name too long"}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/ +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/ print $system_content if $system_content != @{"status":"error","code":5057,"desc":"database name can not be null"}@ then return -1 endi -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put2 +system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put2 print $system_content if $system_content != @{"status":"error","code":5009,"desc":"http url parse error"}@ then return -1 endi -system_content curl -u root:taosdata -d '[]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5060,"desc":"metrics size is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '[' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5059,"desc":"invalid opentsdb json fromat"}@ then return -1 endi -system_content curl -u root:taosdata -d '{}' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '{}' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5060,"desc":"metrics size is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5062,"desc":"metric name not find"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": 1,"timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": 1,"timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5063,"desc":"metric name type should be string"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 
127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5064,"desc":"metric name length is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "ab1234567890123456789012345678ab1234567890123456789012345678","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "ab1234567890123456789012345678ab1234567890123456789012345678","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"errors":[{"datapoint":{"metric":"ab1234567890123456789012345678ab1234567890123456789012345678","stable":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb","table":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb_lga_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"status":"error","code":1547,"desc":"Timestamp data out of range"}}],"failed":1,"success":0,"affected_rows":0}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5066,"desc":"timestamp not find"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": "2","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": "2","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5067,"desc":"timestamp type should be integer"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": -1,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": -1,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5068,"desc":"timestamp value smaller than 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5078,"desc":"value not find"}@ then return -1 @@ -104,49 +104,49 @@ endi ####### -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5069,"desc":"tags not find"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": 
"sys_cpu","timestamp": 1346846400,"value": 18,"tags": {}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5070,"desc":"tags size is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": 0}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": 0}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5070,"desc":"tags size is 0"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","group1": "1","group1": "1","group1": "1","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","group1": "1","group1": "1","group1": "1","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbbbbbb","table":"sys_cpu_d_bbbbbbb_lga_1_1_1_1_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","group1":"1","group1":"1","group1":"1","group1":"1","host":"web01"},"status":"error","code":866,"desc":"failed to create table"}}],"failed":1,"success":0,"affected_rows":0}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"": "web01"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"": "web01"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5073,"desc":"tag name is null"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host01123456789001123456789001123456789001123456789001123456789001123456789": "01"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host01123456789001123456789001123456789001123456789001123456789001123456789": "01"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5074,"desc":"tag name length too long"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web011234567890011234567890011234567890011234567890011234567890011234567890011234567890011234567890"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web011234567890011234567890011234567890011234567890011234567890011234567890011234567890011234567890"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5077,"desc":"tag value can not more than 64"}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": ""}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d 
'[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": ""}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"status":"error","code":5076,"desc":"tag value is null"}@ then return -1 @@ -155,26 +155,26 @@ endi sleep 3000 print =============== step2 - insert single data -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846400000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":1,"affected_rows":1}@ then return -1 endi -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846400000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":1,"affected_rows":1}@ then return -1 endi -system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:6041/rest/sql/ +system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/ print $system_content if $system_content != @{"status":"succ","head":["ts","value"],"data":[["2012-09-05 20:00:00.000",18.000000000]],"rows":1}@ then return -1 endi print =============== step3 - multi-query data -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846405000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402000,"value": 18,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846405000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402000,"value": 18,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put print $system_content @@ -182,7 +182,7 @@ if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys return -1 endi -system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:6041/rest/sql/ +system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/ print $system_content @@ -190,7 +190,7 @@ if $system_content != @{"status":"succ","head":["ts","value"],"data":[["2012-09- return -1 endi -system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:6041/rest/sql/ +system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:7111/rest/sql/ print $system_content @@ -199,7 +199,7 @@ if 
$system_content != @{"status":"succ","head":["count(*)"],"data":[[3]],"rows": endi print =============== step4 - summary-put data -system_content curl -u root:taosdata -d '[{"metric": "sys_mem","timestamp": 1346846400000,"value": 8,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_mem","timestamp": 1346846405000,"value": 9,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put?details=false +system_content curl -u root:taosdata -d '[{"metric": "sys_mem","timestamp": 1346846400000,"value": 8,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_mem","timestamp": 1346846405000,"value": 9,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put?details=false print $system_content @@ -207,7 +207,7 @@ if $system_content != @{"failed":0,"success":2}@ then return -1 endi -system_content curl -u root:taosdata -d 'select * from db.sys_mem_d_bbb_lga_1_web01' 127.0.0.1:6041/rest/sql/ +system_content curl -u root:taosdata -d 'select * from db.sys_mem_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/ print $system_content @@ -215,7 +215,7 @@ if $system_content != @{"status":"succ","head":["ts","value"],"data":[["2012-09- return -1 endi -system_content curl -u root:taosdata -d 'select count(*) from db.sys_mem_d_bbb' 127.0.0.1:6041/rest/sql/ +system_content curl -u root:taosdata -d 'select count(*) from db.sys_mem_d_bbb' 127.0.0.1:7111/rest/sql/ print $system_content @@ -225,13 +225,13 @@ endi print =============== step5 - prepare data -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846402000,"value": 19,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402,"value": 19,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846403000,"value": 20,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846403,"value": 20,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846404000,"value": 21,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846404,"value": 21,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846405000,"value": 22,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846405,"value": 22,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846406000,"value": 23,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846406,"value": 23,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:6041/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846402000,"value": 19,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402,"value": 19,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846403000,"value": 20,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846403,"value": 20,"tags": 
{"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846404000,"value": 21,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846404,"value": 21,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846405000,"value": 22,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846405,"value": 22,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put +system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846406000,"value": 23,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846406,"value": 23,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:6041/rest/sql/ +system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:7111/rest/sql/ print $system_content if $system_content != @{"status":"succ","head":["count(*)"],"data":[[7]],"rows":1}@ then return -1 diff --git a/tests/script/windows/alter/metrics.sim b/tests/script/windows/alter/metrics.sim index 3717d8c1ed..7d5dc77949 100644 --- a/tests/script/windows/alter/metrics.sim +++ b/tests/script/windows/alter/metrics.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + print ======== step1 sql create database d2 sql use d2 diff --git a/tests/script/windows/alter/table.sim b/tests/script/windows/alter/table.sim index 3b811a065e..03182e613d 100644 --- a/tests/script/windows/alter/table.sim +++ b/tests/script/windows/alter/table.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + print ======== step1 sql create database d1 sql use d1 diff --git a/tests/script/windows/compute/avg.sim b/tests/script/windows/compute/avg.sim index 1374ca5a25..b655abf163 100644 --- a/tests/script/windows/compute/avg.sim +++ b/tests/script/windows/compute/avg.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_av_db $tbPrefix = m_av_tb $mtPrefix = m_av_mt diff --git a/tests/script/windows/compute/bottom.sim b/tests/script/windows/compute/bottom.sim index e908c774e4..dc104a8ebd 100644 --- a/tests/script/windows/compute/bottom.sim +++ b/tests/script/windows/compute/bottom.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_bo_db $tbPrefix = m_bo_tb $mtPrefix = m_bo_mt diff --git a/tests/script/windows/compute/count.sim b/tests/script/windows/compute/count.sim index 54544f0354..9c9d8821b0 100644 --- a/tests/script/windows/compute/count.sim +++ b/tests/script/windows/compute/count.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_co_db $tbPrefix = m_co_tb $mtPrefix = m_co_mt diff --git a/tests/script/windows/compute/diff.sim 
b/tests/script/windows/compute/diff.sim index 6c2829872a..667fcdbcff 100644 --- a/tests/script/windows/compute/diff.sim +++ b/tests/script/windows/compute/diff.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_di_db $tbPrefix = m_di_tb $mtPrefix = m_di_mt diff --git a/tests/script/windows/compute/first.sim b/tests/script/windows/compute/first.sim index 9a0c02fe4b..d6e1b1deea 100644 --- a/tests/script/windows/compute/first.sim +++ b/tests/script/windows/compute/first.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_fi_db $tbPrefix = m_fi_tb $mtPrefix = m_fi_mt diff --git a/tests/script/windows/compute/interval.sim b/tests/script/windows/compute/interval.sim index 365c6d9d31..4bf548ccf2 100644 --- a/tests/script/windows/compute/interval.sim +++ b/tests/script/windows/compute/interval.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_in_db $tbPrefix = m_in_tb $mtPrefix = m_in_mt diff --git a/tests/script/windows/compute/last.sim b/tests/script/windows/compute/last.sim index aa9699778f..63d4d3ecbd 100644 --- a/tests/script/windows/compute/last.sim +++ b/tests/script/windows/compute/last.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_la_db $tbPrefix = m_la_tb $mtPrefix = m_la_mt diff --git a/tests/script/windows/compute/leastsquare.sim b/tests/script/windows/compute/leastsquare.sim index bb7404edd0..69c8fb377b 100644 --- a/tests/script/windows/compute/leastsquare.sim +++ b/tests/script/windows/compute/leastsquare.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_le_db $tbPrefix = m_le_tb $mtPrefix = m_le_mt @@ -62,12 +69,14 @@ if $data00 != @{slop:1.000000, intercept:1.000000}@ then endi print =============== step5 +print select leastsquares(tbcol, 1, 1) as b from $tb interval(1m) sql select leastsquares(tbcol, 1, 1) as b from $tb interval(1m) print ===> $data01 if $data01 != @{slop:1.000000, intercept:1.000000}@ then return -1 endi +print select leastsquares(tbcol, 1, 1) as b from $tb interval(1d) sql select leastsquares(tbcol, 1, 1) as b from $tb interval(1d) print ===> $data01 if $data01 != @{slop:1.000000, intercept:1.000000}@ then diff --git a/tests/script/windows/compute/max.sim b/tests/script/windows/compute/max.sim index a19d122ecd..e480736550 100644 --- a/tests/script/windows/compute/max.sim +++ b/tests/script/windows/compute/max.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_ma_db $tbPrefix = m_ma_tb $mtPrefix = m_ma_mt diff --git a/tests/script/windows/compute/min.sim b/tests/script/windows/compute/min.sim index 216f2061d7..1ff637cecd 100644 --- a/tests/script/windows/compute/min.sim +++ b/tests/script/windows/compute/min.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_mi_db $tbPrefix = m_mi_tb $mtPrefix = 
m_mi_mt diff --git a/tests/script/windows/compute/percentile.sim b/tests/script/windows/compute/percentile.sim index 20b2740d6e..5e327055a8 100644 --- a/tests/script/windows/compute/percentile.sim +++ b/tests/script/windows/compute/percentile.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_pe_db $tbPrefix = m_pe_tb $mtPrefix = m_pe_mt diff --git a/tests/script/windows/compute/stddev.sim b/tests/script/windows/compute/stddev.sim index c02b3e4ab3..2aa481248a 100644 --- a/tests/script/windows/compute/stddev.sim +++ b/tests/script/windows/compute/stddev.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_st_db $tbPrefix = m_st_tb $mtPrefix = m_st_mt diff --git a/tests/script/windows/compute/sum.sim b/tests/script/windows/compute/sum.sim index 04af1d457a..30e98a5b25 100644 --- a/tests/script/windows/compute/sum.sim +++ b/tests/script/windows/compute/sum.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_su_db $tbPrefix = m_su_tb $mtPrefix = m_su_mt diff --git a/tests/script/windows/compute/top.sim b/tests/script/windows/compute/top.sim index b3c698c064..9590997ef7 100644 --- a/tests/script/windows/compute/top.sim +++ b/tests/script/windows/compute/top.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_to_db $tbPrefix = m_to_tb $mtPrefix = m_to_mt diff --git a/tests/script/windows/db/basic.sim b/tests/script/windows/db/basic.sim index f1e18d15a5..fffde94d66 100644 --- a/tests/script/windows/db/basic.sim +++ b/tests/script/windows/db/basic.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + print ============================ dnode1 start $i = 0 diff --git a/tests/script/windows/db/len.sim b/tests/script/windows/db/len.sim index f922e7e05a..5afa2496dd 100644 --- a/tests/script/windows/db/len.sim +++ b/tests/script/windows/db/len.sim @@ -1,6 +1,13 @@ sleep 3000 sql connect +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + print =============== step1 sql_error drop database dd diff --git a/tests/script/windows/field/2.sim b/tests/script/windows/field/2.sim index 3d4492083e..8ac6fa1a1b 100644 --- a/tests/script/windows/field/2.sim +++ b/tests/script/windows/field/2.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_bt_db $tbPrefix = fi_bt_tb $mtPrefix = fi_bt_mt diff --git a/tests/script/windows/field/3.sim b/tests/script/windows/field/3.sim index fb7d60d12a..331e930b31 100644 --- a/tests/script/windows/field/3.sim +++ b/tests/script/windows/field/3.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_3_db $tbPrefix = fi_3_tb $mtPrefix = fi_3_mt diff --git a/tests/script/windows/field/4.sim 
b/tests/script/windows/field/4.sim index f7ffa9807c..c6224c46ee 100644 --- a/tests/script/windows/field/4.sim +++ b/tests/script/windows/field/4.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_4_db $tbPrefix = fi_4_tb $mtPrefix = fi_4_mt diff --git a/tests/script/windows/field/5.sim b/tests/script/windows/field/5.sim index e408871693..d1f40059d0 100644 --- a/tests/script/windows/field/5.sim +++ b/tests/script/windows/field/5.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_5_db $tbPrefix = fi_5_tb $mtPrefix = fi_5_mt diff --git a/tests/script/windows/field/6.sim b/tests/script/windows/field/6.sim index d1551d63b5..98071f87df 100644 --- a/tests/script/windows/field/6.sim +++ b/tests/script/windows/field/6.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_6_db $tbPrefix = fi_6_tb $mtPrefix = fi_6_mt diff --git a/tests/script/windows/field/bigint.sim b/tests/script/windows/field/bigint.sim index 9ccaeb3723..bef571f445 100644 --- a/tests/script/windows/field/bigint.sim +++ b/tests/script/windows/field/bigint.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_bi_db $tbPrefix = fi_bi_tb $mtPrefix = fi_bi_mt diff --git a/tests/script/windows/field/binary.sim b/tests/script/windows/field/binary.sim index 8b86c4dbea..72a356e684 100644 --- a/tests/script/windows/field/binary.sim +++ b/tests/script/windows/field/binary.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_by_db $tbPrefix = fi_by_tb $mtPrefix = fi_by_mt diff --git a/tests/script/windows/field/bool.sim b/tests/script/windows/field/bool.sim index 5f2c61475c..abc970264d 100644 --- a/tests/script/windows/field/bool.sim +++ b/tests/script/windows/field/bool.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_bo_db $tbPrefix = fi_bo_tb $mtPrefix = fi_bo_mt diff --git a/tests/script/windows/field/double.sim b/tests/script/windows/field/double.sim index ea7e075208..e805e0373b 100644 --- a/tests/script/windows/field/double.sim +++ b/tests/script/windows/field/double.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_do_db $tbPrefix = fi_do_tb $mtPrefix = fi_do_mt diff --git a/tests/script/windows/field/float.sim b/tests/script/windows/field/float.sim index 5be59bae3b..4178ab4e1e 100644 --- a/tests/script/windows/field/float.sim +++ b/tests/script/windows/field/float.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases 
+sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_fl_db $tbPrefix = fi_fl_tb $mtPrefix = fi_fl_mt diff --git a/tests/script/windows/field/int.sim b/tests/script/windows/field/int.sim index d7d26b7341..05dc19094d 100644 --- a/tests/script/windows/field/int.sim +++ b/tests/script/windows/field/int.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_in_db $tbPrefix = fi_in_tb $mtPrefix = fi_in_mt diff --git a/tests/script/windows/field/single.sim b/tests/script/windows/field/single.sim index 0199133ecd..6422b7f697 100644 --- a/tests/script/windows/field/single.sim +++ b/tests/script/windows/field/single.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_si_db $tbPrefix = fi_si_tb $mtPrefix = fi_si_mt diff --git a/tests/script/windows/field/smallint.sim b/tests/script/windows/field/smallint.sim index 8bee463292..8bf41f45a5 100644 --- a/tests/script/windows/field/smallint.sim +++ b/tests/script/windows/field/smallint.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_sm_db $tbPrefix = fi_sm_tb $mtPrefix = fi_sm_mt diff --git a/tests/script/windows/field/tinyint.sim b/tests/script/windows/field/tinyint.sim index 65bffca095..16c19ba38d 100644 --- a/tests/script/windows/field/tinyint.sim +++ b/tests/script/windows/field/tinyint.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = fi_ti_db $tbPrefix = fi_ti_tb $mtPrefix = fi_ti_mt diff --git a/tests/script/windows/import/basic.sim b/tests/script/windows/import/basic.sim index c20378ee88..491b4f8b34 100644 --- a/tests/script/windows/import/basic.sim +++ b/tests/script/windows/import/basic.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + sql create database ibadb sql use ibadb sql create table tb(ts timestamp, i int) diff --git a/tests/script/windows/insert/basic.sim b/tests/script/windows/insert/basic.sim index be0980a2d4..54cbd3f4d9 100644 --- a/tests/script/windows/insert/basic.sim +++ b/tests/script/windows/insert/basic.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = tb_in_db $tbPrefix = tb_in_tb diff --git a/tests/script/windows/insert/query_block1_file.sim b/tests/script/windows/insert/query_block1_file.sim index 3eb1d402e8..388ed061e5 100644 --- a/tests/script/windows/insert/query_block1_file.sim +++ b/tests/script/windows/insert/query_block1_file.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = tb_1f_db $tbPrefix = tb_1f_tb @@ -24,7 +31,7 @@ $y = $N / 2 while $x > $y $ms = $x . m $xt = - . 
$x - sql insert into $tb values (now - $ms , - $x ) + sql insert into $tb values (now - $ms , $x ) $x = $x - 1 endw diff --git a/tests/script/windows/insert/query_block1_memory.sim b/tests/script/windows/insert/query_block1_memory.sim index 60d31e52d6..9e4fc68d09 100644 --- a/tests/script/windows/insert/query_block1_memory.sim +++ b/tests/script/windows/insert/query_block1_memory.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = tb_1m_db $tbPrefix = tb_1m_tb diff --git a/tests/script/windows/insert/query_block2_file.sim b/tests/script/windows/insert/query_block2_file.sim index e9f562a538..9fd4434476 100644 --- a/tests/script/windows/insert/query_block2_file.sim +++ b/tests/script/windows/insert/query_block2_file.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = tb_2f_db $tbPrefix = tb_2f_tb diff --git a/tests/script/windows/insert/query_block2_memory.sim b/tests/script/windows/insert/query_block2_memory.sim index fd173f1356..ede7f3efc6 100644 --- a/tests/script/windows/insert/query_block2_memory.sim +++ b/tests/script/windows/insert/query_block2_memory.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = tb_2m_db $tbPrefix = tb_2m_tb diff --git a/tests/script/windows/insert/query_file_memory.sim b/tests/script/windows/insert/query_file_memory.sim index e9b0c69ea5..083beb4ac5 100644 --- a/tests/script/windows/insert/query_file_memory.sim +++ b/tests/script/windows/insert/query_file_memory.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = tb_fm_db $tbPrefix = tb_fm_tb diff --git a/tests/script/windows/insert/query_multi_file.sim b/tests/script/windows/insert/query_multi_file.sim index 84c091fb21..465970f942 100644 --- a/tests/script/windows/insert/query_multi_file.sim +++ b/tests/script/windows/insert/query_multi_file.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = tb_mf_db $tbPrefix = tb_mf_tb diff --git a/tests/script/windows/table/binary.sim b/tests/script/windows/table/binary.sim index 69354ed5c8..64a081c72f 100644 --- a/tests/script/windows/table/binary.sim +++ b/tests/script/windows/table/binary.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = lm_bn_db $tbPrefix = lm_bn_tb diff --git a/tests/script/windows/table/bool.sim b/tests/script/windows/table/bool.sim index 9e434d801a..9486c42221 100644 --- a/tests/script/windows/table/bool.sim +++ b/tests/script/windows/table/bool.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = lm_bo_db $tbPrefix = lm_bo_tb diff --git a/tests/script/windows/table/column_name.sim b/tests/script/windows/table/column_name.sim index bbac293fed..fffb1334e5 100644 --- a/tests/script/windows/table/column_name.sim +++ b/tests/script/windows/table/column_name.sim @@ 
-1,6 +1,14 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + + $i = 0 $dbPrefix = lm_cm_db $tbPrefix = lm_cm_tb diff --git a/tests/script/windows/table/column_num.sim b/tests/script/windows/table/column_num.sim index f7ead41437..d182696ce0 100644 --- a/tests/script/windows/table/column_num.sim +++ b/tests/script/windows/table/column_num.sim @@ -1,6 +1,14 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + + $i = 0 $dbPrefix = lm_cn_db $tbPrefix = lm_cn_tb diff --git a/tests/script/windows/table/column_value.sim b/tests/script/windows/table/column_value.sim index 9dbaf7ceab..c59e7af8ba 100644 --- a/tests/script/windows/table/column_value.sim +++ b/tests/script/windows/table/column_value.sim @@ -1,6 +1,14 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + + $i = 0 $dbPrefix = lm_cv_db $tbPrefix = lm_cv_tb diff --git a/tests/script/windows/table/db.table.sim b/tests/script/windows/table/db.table.sim index 8d244e011f..97a9e6fbe9 100644 --- a/tests/script/windows/table/db.table.sim +++ b/tests/script/windows/table/db.table.sim @@ -1,6 +1,14 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + + $i = 0 $dbPrefix = lm_dt_db $tbPrefix = lm_dt_tb diff --git a/tests/script/windows/table/double.sim b/tests/script/windows/table/double.sim index 1402982c98..93bf3bb149 100644 --- a/tests/script/windows/table/double.sim +++ b/tests/script/windows/table/double.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = lm_do_db $tbPrefix = lm_do_tb diff --git a/tests/script/windows/table/float.sim b/tests/script/windows/table/float.sim index 57b626f865..684f78a386 100644 --- a/tests/script/windows/table/float.sim +++ b/tests/script/windows/table/float.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = lm_fl_db $tbPrefix = lm_fl_tb diff --git a/tests/script/windows/table/table.sim b/tests/script/windows/table/table.sim index 55be8af851..985620152a 100644 --- a/tests/script/windows/table/table.sim +++ b/tests/script/windows/table/table.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ============================ dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = ob_tb_db $tbPrefix = ob_tb_tb diff --git a/tests/script/windows/table/table_len.sim b/tests/script/windows/table/table_len.sim index cdd1f31731..367f1c6895 100644 --- a/tests/script/windows/table/table_len.sim +++ b/tests/script/windows/table/table_len.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $i = 0 $dbPrefix = lm_tb_db $tbPrefix = lm_tb_tb diff --git a/tests/script/windows/tag/3.sim b/tests/script/windows/tag/3.sim index 9ffa11b03f..63a8766727 100644 --- a/tests/script/windows/tag/3.sim +++ b/tests/script/windows/tag/3.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show 
databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_3_db $tbPrefix = ta_3_tb $mtPrefix = ta_3_mt diff --git a/tests/script/windows/tag/4.sim b/tests/script/windows/tag/4.sim index beabe1fd8f..7e9af7ece7 100644 --- a/tests/script/windows/tag/4.sim +++ b/tests/script/windows/tag/4.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_4_db $tbPrefix = ta_4_tb $mtPrefix = ta_4_mt diff --git a/tests/script/windows/tag/5.sim b/tests/script/windows/tag/5.sim index 161d98756c..5dc128a0e0 100644 --- a/tests/script/windows/tag/5.sim +++ b/tests/script/windows/tag/5.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_5_db $tbPrefix = ta_5_tb $mtPrefix = ta_5_mt diff --git a/tests/script/windows/tag/6.sim b/tests/script/windows/tag/6.sim index b8666305bd..12e9c597f0 100644 --- a/tests/script/windows/tag/6.sim +++ b/tests/script/windows/tag/6.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_6_db $tbPrefix = ta_6_tb $mtPrefix = ta_6_mt diff --git a/tests/script/windows/tag/add.sim b/tests/script/windows/tag/add.sim index 2c72d01955..0a1416b68c 100644 --- a/tests/script/windows/tag/add.sim +++ b/tests/script/windows/tag/add.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_ad_db $tbPrefix = ta_ad_tb $mtPrefix = ta_ad_mt diff --git a/tests/script/windows/tag/bigint.sim b/tests/script/windows/tag/bigint.sim index 4406c7386d..d988ad1fdc 100644 --- a/tests/script/windows/tag/bigint.sim +++ b/tests/script/windows/tag/bigint.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_bi_db $tbPrefix = ta_bi_tb $mtPrefix = ta_bi_mt diff --git a/tests/script/windows/tag/binary.sim b/tests/script/windows/tag/binary.sim index deeae81117..9dc18cfa94 100644 --- a/tests/script/windows/tag/binary.sim +++ b/tests/script/windows/tag/binary.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_by_db $tbPrefix = ta_by_tb $mtPrefix = ta_by_mt diff --git a/tests/script/windows/tag/binary_binary.sim b/tests/script/windows/tag/binary_binary.sim index c1f93bc656..ba688aa80e 100644 --- a/tests/script/windows/tag/binary_binary.sim +++ b/tests/script/windows/tag/binary_binary.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_bib_db $tbPrefix = ta_bib_tb $mtPrefix = ta_bib_mt diff --git a/tests/script/windows/tag/bool.sim b/tests/script/windows/tag/bool.sim index 81ea20064b..a7e5d909c5 100644 --- 
a/tests/script/windows/tag/bool.sim +++ b/tests/script/windows/tag/bool.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_bo_db $tbPrefix = ta_bo_tb $mtPrefix = ta_bo_mt diff --git a/tests/script/windows/tag/bool_binary.sim b/tests/script/windows/tag/bool_binary.sim index c3daf2c242..639f6c5f2f 100644 --- a/tests/script/windows/tag/bool_binary.sim +++ b/tests/script/windows/tag/bool_binary.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_bob_db $tbPrefix = ta_bob_tb $mtPrefix = ta_bob_mt diff --git a/tests/script/windows/tag/bool_int.sim b/tests/script/windows/tag/bool_int.sim index 79e4b67bfa..900cc9e8a1 100644 --- a/tests/script/windows/tag/bool_int.sim +++ b/tests/script/windows/tag/bool_int.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_boi_db $tbPrefix = ta_boi_tb $mtPrefix = ta_boi_mt diff --git a/tests/script/windows/tag/change.sim b/tests/script/windows/tag/change.sim index 2901842190..75a976bbb1 100644 --- a/tests/script/windows/tag/change.sim +++ b/tests/script/windows/tag/change.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_ch_db $tbPrefix = ta_ch_tb $mtPrefix = ta_ch_mt diff --git a/tests/script/windows/tag/column.sim b/tests/script/windows/tag/column.sim index 131f3e06ea..9f5bfce07e 100644 --- a/tests/script/windows/tag/column.sim +++ b/tests/script/windows/tag/column.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_co_db $tbPrefix = ta_co_tb $mtPrefix = ta_co_mt diff --git a/tests/script/windows/tag/create.sim b/tests/script/windows/tag/create.sim index 5beba21727..6a76c93d83 100644 --- a/tests/script/windows/tag/create.sim +++ b/tests/script/windows/tag/create.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_cr_db $tbPrefix = ta_cr_tb $mtPrefix = ta_cr_mt diff --git a/tests/script/windows/tag/delete.sim b/tests/script/windows/tag/delete.sim index e2395c8f97..9e8ea9aba0 100644 --- a/tests/script/windows/tag/delete.sim +++ b/tests/script/windows/tag/delete.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_de_db $tbPrefix = ta_de_tb $mtPrefix = ta_de_mt diff --git a/tests/script/windows/tag/double.sim b/tests/script/windows/tag/double.sim index c08351a41b..5445b1dbea 100644 --- a/tests/script/windows/tag/double.sim +++ b/tests/script/windows/tag/double.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 
+e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_do_db $tbPrefix = ta_do_tb $mtPrefix = ta_do_mt diff --git a/tests/script/windows/tag/filter.sim b/tests/script/windows/tag/filter.sim index b70e56fdb6..f704f32cd2 100644 --- a/tests/script/windows/tag/filter.sim +++ b/tests/script/windows/tag/filter.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_fi_db $tbPrefix = ta_fi_tb $mtPrefix = ta_fi_mt diff --git a/tests/script/windows/tag/float.sim b/tests/script/windows/tag/float.sim index 79eabb2cb4..64424c1e20 100644 --- a/tests/script/windows/tag/float.sim +++ b/tests/script/windows/tag/float.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_fl_db $tbPrefix = ta_fl_tb $mtPrefix = ta_fl_mt diff --git a/tests/script/windows/tag/int.sim b/tests/script/windows/tag/int.sim index d3921218fd..7d7b5271d1 100644 --- a/tests/script/windows/tag/int.sim +++ b/tests/script/windows/tag/int.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_in_db $tbPrefix = ta_in_tb $mtPrefix = ta_in_mt diff --git a/tests/script/windows/tag/int_binary.sim b/tests/script/windows/tag/int_binary.sim index 96f4f18966..1dd4771605 100644 --- a/tests/script/windows/tag/int_binary.sim +++ b/tests/script/windows/tag/int_binary.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_inb_db $tbPrefix = ta_inb_tb $mtPrefix = ta_inb_mt diff --git a/tests/script/windows/tag/int_float.sim b/tests/script/windows/tag/int_float.sim index 768e86b96d..cdb9032d8c 100644 --- a/tests/script/windows/tag/int_float.sim +++ b/tests/script/windows/tag/int_float.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_inf_db $tbPrefix = ta_inf_tb $mtPrefix = ta_inf_mt diff --git a/tests/script/windows/tag/set.sim b/tests/script/windows/tag/set.sim index 580f91cb49..16103c6ce8 100644 --- a/tests/script/windows/tag/set.sim +++ b/tests/script/windows/tag/set.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_se_db $tbPrefix = ta_se_tb $mtPrefix = ta_se_mt diff --git a/tests/script/windows/tag/smallint.sim b/tests/script/windows/tag/smallint.sim index 1b7ff0860b..dbab4f2d43 100644 --- a/tests/script/windows/tag/smallint.sim +++ b/tests/script/windows/tag/smallint.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_sm_db $tbPrefix = ta_sm_tb $mtPrefix = ta_sm_mt diff --git a/tests/script/windows/tag/tinyint.sim b/tests/script/windows/tag/tinyint.sim index 
8fe957664c..7a0237c0d9 100644 --- a/tests/script/windows/tag/tinyint.sim +++ b/tests/script/windows/tag/tinyint.sim @@ -2,6 +2,13 @@ sql connect sleep 3000 print ======================== dnode1 start +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = ta_ti_db $tbPrefix = ta_ti_tb $mtPrefix = ta_ti_mt diff --git a/tests/script/windows/vector/metrics_field.sim b/tests/script/windows/vector/metrics_field.sim index 8e9a87239e..e7c926e141 100644 --- a/tests/script/windows/vector/metrics_field.sim +++ b/tests/script/windows/vector/metrics_field.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_mf_db $tbPrefix = m_mf_tb $mtPrefix = m_mf_mt diff --git a/tests/script/windows/vector/metrics_mix.sim b/tests/script/windows/vector/metrics_mix.sim index 0f960deb4d..3d94a96385 100644 --- a/tests/script/windows/vector/metrics_mix.sim +++ b/tests/script/windows/vector/metrics_mix.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_mx_db $tbPrefix = m_mx_tb $mtPrefix = m_mx_mt diff --git a/tests/script/windows/vector/metrics_query.sim b/tests/script/windows/vector/metrics_query.sim index a0df4c9b04..c292c6b306 100644 --- a/tests/script/windows/vector/metrics_query.sim +++ b/tests/script/windows/vector/metrics_query.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_mq_db $tbPrefix = m_mq_tb $mtPrefix = m_mq_mt diff --git a/tests/script/windows/vector/metrics_tag.sim b/tests/script/windows/vector/metrics_tag.sim index 22fd19bc89..f51a449d71 100644 --- a/tests/script/windows/vector/metrics_tag.sim +++ b/tests/script/windows/vector/metrics_tag.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_mtg_db $tbPrefix = m_mtg_tb $mtPrefix = m_mtg_mt diff --git a/tests/script/windows/vector/metrics_time.sim b/tests/script/windows/vector/metrics_time.sim index da102b64a3..716f49d1e5 100644 --- a/tests/script/windows/vector/metrics_time.sim +++ b/tests/script/windows/vector/metrics_time.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_mt_db $tbPrefix = m_mt_tb $mtPrefix = m_mt_mt diff --git a/tests/script/windows/vector/multi.sim b/tests/script/windows/vector/multi.sim index adcc94db3b..415384d243 100644 --- a/tests/script/windows/vector/multi.sim +++ b/tests/script/windows/vector/multi.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_mu_db $tbPrefix = m_mu_tb $mtPrefix = m_mu_mt diff --git a/tests/script/windows/vector/single.sim b/tests/script/windows/vector/single.sim index 61135fc6b5..f3f3862e54 100644 --- a/tests/script/windows/vector/single.sim +++ b/tests/script/windows/vector/single.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_si_db $tbPrefix = m_si_tb $mtPrefix = 
m_si_mt diff --git a/tests/script/windows/vector/table_field.sim b/tests/script/windows/vector/table_field.sim index ec50bc7a2a..0c5df695fb 100644 --- a/tests/script/windows/vector/table_field.sim +++ b/tests/script/windows/vector/table_field.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_tf_db $tbPrefix = m_tf_tb $mtPrefix = m_tf_mt diff --git a/tests/script/windows/vector/table_mix.sim b/tests/script/windows/vector/table_mix.sim index 653171b302..3d660b5611 100644 --- a/tests/script/windows/vector/table_mix.sim +++ b/tests/script/windows/vector/table_mix.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_tm_db $tbPrefix = m_tm_tb $mtPrefix = m_tm_mt diff --git a/tests/script/windows/vector/table_query.sim b/tests/script/windows/vector/table_query.sim index cdbd96f4d0..27fcd0f654 100644 --- a/tests/script/windows/vector/table_query.sim +++ b/tests/script/windows/vector/table_query.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_tq_db $tbPrefix = m_tq_tb $mtPrefix = m_tq_mt diff --git a/tests/script/windows/vector/table_time.sim b/tests/script/windows/vector/table_time.sim index 48bcf86dca..8a3804c619 100644 --- a/tests/script/windows/vector/table_time.sim +++ b/tests/script/windows/vector/table_time.sim @@ -1,6 +1,13 @@ sql connect sleep 3000 +sql show databases +sql drop database $data00 -x e1 +e1: +sql show databases +sql drop database $data00 -x e2 +e2: + $dbPrefix = m_tt_db $tbPrefix = m_tt_tb $mtPrefix = m_tt_mt diff --git a/tests/script/wtest.bat b/tests/script/wtest.bat new file mode 100644 index 0000000000..1574b5013e --- /dev/null +++ b/tests/script/wtest.bat @@ -0,0 +1,60 @@ +@echo off + +echo TDengine in windows +echo Start TDengine Testing Case ... 
+ +set "SCRIPT_DIR=%~dp0" +echo SCRIPT_DIR: %SCRIPT_DIR% + +set "BUILD_DIR=%~dp0..\..\debug\build\bin" +set "TSIM=%~dp0..\..\debug\build\bin\tsim" +echo BUILD_DIR: %BUILD_DIR% + +set "SIM_DIR=%~dp0..\..\sim" +echo SIM_DIR: %SIM_DIR% + +set "TSIM_DIR=%~dp0..\..\sim\tsim" +echo TSIM_DIR: %TSIM_DIR% + +set "CFG_DIR=%~dp0..\..\sim\tsim\cfg" +echo CFG_DIR: %CFG_DIR% + +set "LOG_DIR=%~dp0..\..\sim\tsim\log" +echo LOG_DIR: %LOG_DIR% + +set "TAOS_CFG=%~dp0..\..\sim\tsim\cfg\taos.cfg" +echo TAOS_CFG: %TAOS_CFG% + +if not exist %SIM_DIR% mkdir %SIM_DIR% +if not exist %TSIM_DIR% mkdir %TSIM_DIR% +if exist %CFG_DIR% rmdir /s/q %CFG_DIR% +if exist %LOG_DIR% rmdir /s/q %LOG_DIR% +if not exist %CFG_DIR% mkdir %CFG_DIR% +if not exist %LOG_DIR% mkdir %LOG_DIR% + +echo firstEp %FIRSTEP% > %TAOS_CFG% +echo serverPort 6030 >> %TAOS_CFG% +echo wal 2 >> %TAOS_CFG% +echo asyncLog 0 >> %TAOS_CFG% +echo locale en_US.UTF-8 >> %TAOS_CFG% +echo logDir %LOG_DIR% >> %TAOS_CFG% +echo scriptDir %SCRIPT_DIR% >> %TAOS_CFG% +echo numOfLogLines 100000000 >> %TAOS_CFG% +echo tmrDebugFlag 131 >> %TAOS_CFG% +echo rpcDebugFlag 143 >> %TAOS_CFG% +echo cDebugFlag 143 >> %TAOS_CFG% +echo qdebugFlag 143 >> %TAOS_CFG% +echo udebugFlag 143 >> %TAOS_CFG% + +set "FILE_NAME=windows\testSuite.sim" +set "FIRSTEP=localhost" +if "%1" == "-f" set "FILE_NAME=%2" +if "%1" == "-h" set "FIRSTEP=%2" +if "%3" == "-f" set "FILE_NAME=%4" +if "%3" == "-h" set "FIRSTEP=%4" + +echo FILE_NAME: %FILE_NAME% +echo FIRSTEP: %FIRSTEP% +echo ExcuteCmd: %tsim% -c %CFG_DIR% -f %FILE_NAME% + +%tsim% -c %CFG_DIR% -f %FILE_NAME% \ No newline at end of file diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c index 8bc9a76545..0529808b6b 100644 --- a/tests/tsim/src/simExe.c +++ b/tests/tsim/src/simExe.c @@ -748,11 +748,7 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { sprintf(value, "%d", *((int *)row[i])); break; case TSDB_DATA_TYPE_BIGINT: -#ifdef _TD_ARM_32_ - sprintf(value, "%lld", *((int64_t *)row[i])); -#else - sprintf(value, "%ld", *((int64_t *)row[i])); -#endif + sprintf(value, "%" PRId64, *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_FLOAT:{ #ifdef _TD_ARM_32_ @@ -783,10 +779,23 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { break; case TSDB_DATA_TYPE_TIMESTAMP: tt = *(int64_t *)row[i] / 1000; + /* comment out as it make testcases like select_with_tags.sim fail. + but in windows, this may cause the call to localtime crash if tt < 0, + need to find a better solution. + if (tt < 0) { + tt = 0; + } + */ + +#ifdef WINDOWS + if (tt < 0) tt = 0; +#endif + tp = localtime(&tt); strftime(timeStr, 64, "%y-%m-%d %H:%M:%S", tp); sprintf(value, "%s.%03d", timeStr, - (int)(*((int64_t *)row[i]) % 1000)); + (int)(*((int64_t *)row[i]) % 1000)); + break; default: break;