diff --git a/README.md b/README.md
index 8f29adf89b..3fbd166f49 100644
--- a/README.md
+++ b/README.md
@@ -29,24 +29,69 @@ For user manual, system design and architecture, engineering blogs, refer to [TD
# Building
At the moment, TDengine only supports building and running on Linux systems. You can choose to [install from packages](https://www.taosdata.com/en/getting-started/#Install-from-Package) or from the source code. This quick guide is for installation from the source only.
-To build TDengine, use [CMake](https://cmake.org/) 2.8 or higher versions in the project directory. Install CMake for example on Ubuntu:
-```
-sudo apt-get install -y cmake build-essential
+To build TDengine, use [CMake](https://cmake.org/) 3.5 or higher in the project directory.
+
+## Install tools
+
+### Ubuntu & Debian:
+```bash
+sudo apt-get install -y gcc cmake build-essential git
```
To compile and package the JDBC driver source code, you should have a Java jdk-8 or higher and Apache Maven 2.7 or higher installed.
-To install openjdk-8 on Ubuntu:
-```
-sudo apt-get install openjdk-8-jdk
-```
-To install Apache Maven on Ubuntu:
-```
-sudo apt-get install maven
+To install openjdk-8:
+```bash
+sudo apt-get install -y openjdk-8-jdk
```
-Build TDengine:
-
+To install Apache Maven:
+```bash
+sudo apt-get install -y maven
```
+
+### CentOS 7:
+```bash
+sudo yum install -y gcc gcc-c++ make cmake3 epel-release git
+sudo yum remove -y cmake
+sudo ln -s /usr/bin/cmake3 /usr/bin/cmake
+```
+
+To install openjdk-8:
+```bash
+sudo yum install -y java-1.8.0-openjdk
+```
+
+To install Apache Maven:
+```bash
+sudo yum install -y maven
+```
+
+### CentOS 8 & Fedora:
+```bash
+sudo dnf install -y gcc gcc-c++ make cmake epel-release git
+```
+
+To install openjdk-8:
+```bash
+sudo dnf install -y java-1.8.0-openjdk
+```
+
+To install Apache Maven:
+```bash
+sudo dnf install -y maven
+```
+
+## Get the source code
+
+- GitHub:
+```bash
+git clone https://github.com/taosdata/TDengine.git
+cd TDengine
+```
+
+## Build TDengine
+
+```bash
mkdir debug && cd debug
cmake .. && cmake --build .
```
@@ -54,12 +99,12 @@ cmake .. && cmake --build .
To compile on an ARM processor (aarch64 or aarch32), please add option CPUTYPE as below:
aarch64:
-```cmd
+```bash
cmake .. -DCPUTYPE=aarch64 && cmake --build .
```
aarch32:
-```cmd
+```bash
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```
@@ -124,6 +169,7 @@ The TDengine community has also kindly built some of their own connectors! Follo
- [Rust Connector](https://github.com/taosdata/TDengine/tree/master/tests/examples/rust)
- [.Net Core Connector](https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos)
+- [Lua Connector](https://github.com/taosdata/TDengine/tree/develop/tests/examples/lua)
# How to run the test cases and how to add a new test case?
TDengine's test framework and all test cases are fully open source.
diff --git a/cmake/version.inc b/cmake/version.inc
index c620d753a6..52d62fca65 100644
--- a/cmake/version.inc
+++ b/cmake/version.inc
@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "2.0.2.0")
+ SET(TD_VER_NUMBER "2.0.3.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/documentation20/webdocs/markdowndocs/Documentation-ch.md b/documentation20/webdocs/markdowndocs/Documentation-ch.md
index 12b1b1bd3a..4d593cec90 100644
--- a/documentation20/webdocs/markdowndocs/Documentation-ch.md
+++ b/documentation20/webdocs/markdowndocs/Documentation-ch.md
@@ -52,7 +52,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
## [Connectors](https://www.taosdata.com/cn/documentation20/connector)
- [C/C++ Connector](https://www.taosdata.com/cn/documentation20/connector/#C/C++-Connector): the primary way to connect to a TDengine server, through the libtaos client library
-- [Java Connector(JDBC)](https://www.taosdata.com/cn/documentation20/connector/#Java-Connector): provides Java applications with a connection to TDengine through the standard JDBC API
+- [Java Connector(JDBC)](https://www.taosdata.com/cn/documentation20/connector-java): provides Java applications with a connection to TDengine through the standard JDBC API
- [Python Connector](https://www.taosdata.com/cn/documentation20/connector/#Python-Connector): a driver for Python applications to connect to a TDengine server
- [RESTful Connector](https://www.taosdata.com/cn/documentation20/connector/#RESTful-Connector): the simplest way to connect to a TDengine server
- [Go Connector](https://www.taosdata.com/cn/documentation20/connector/#Go-Connector): a driver for Go applications to connect to a TDengine server
diff --git a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md
index 293aac8d23..b0f8ed276d 100644
--- a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md
+++ b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md
@@ -42,7 +42,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
| | Type | Bytes | Description |
| ---- | :-------: | ------ | ------------------------------------------------------------ |
-| 1 | TIMESTAMP | 8 | Timestamp. Minimum precision is milliseconds. Time starts from 1970-01-01 00:00:00.000 (UTC/GMT); timestamps earlier than this are not allowed. |
+| 1 | TIMESTAMP | 8 | Timestamp. Default precision is milliseconds; microsecond precision is also supported. Time starts from 1970-01-01 00:00:00.000 (UTC/GMT); timestamps earlier than this are not allowed. |
| 2 | INT | 4 | Integer. Range [-2^31+1, 2^31-1]; -2^31 is used for NULL |
| 3 | BIGINT | 8 | Long integer. Range [-2^63+1, 2^63-1]; -2^63 is used for NULL |
| 4 | FLOAT | 4 | Float. 6-7 significant digits. Range [-3.4E38, 3.4E38] |
@@ -416,7 +416,7 @@ taos> SELECT database();
power |
Query OK, 1 row(s) in set (0.000079s)
```
-If no default database was specified at login and the ```use`` command has not been used to switch databases, NULL is returned.
+If no default database was specified at login and the `use` command has not been used to switch databases, NULL is returned.
```
taos> SELECT database();
database() |
@@ -503,10 +503,10 @@ Query OK, 1 row(s) in set (0.001091s)
| % | match with any char sequences | **`binary`** **`nchar`** |
| _ | match with a single char | **`binary`** **`nchar`** |
-1. To filter on ranges of multiple columns at the same time, the conditions must be joined with the keyword AND; query conditions joined with OR are not yet supported.
-2. Filtering on a single column supports only a single range: for example, value>20 AND value<30 is a valid filter condition, while value<20 AND value<>5 is not.
+1. To filter on ranges of multiple columns at the same time, the conditions must be joined with the keyword AND; filter conditions joined with OR across different columns are not yet supported.
+2. Filtering on the timestamp column supports only a single time range. For other (regular) columns and tag columns, `OR` can be used to combine filter conditions, e.g. ((value > 20 AND value < 30) OR (value < 12)).
-### Some Examples
+### SQL Examples
- For the examples below, table tb1 is created with the following statement:
@@ -538,7 +538,7 @@ Query OK, 1 row(s) in set (0.001091s)
SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutpu.csv;
```
-## SQL Functions
+## SQL Functions
### Aggregate Functions
diff --git a/documentation20/webdocs/markdowndocs/architecture-ch.md b/documentation20/webdocs/markdowndocs/architecture-ch.md
index 7ab4b5d096..a279875649 100644
--- a/documentation20/webdocs/markdowndocs/architecture-ch.md
+++ b/documentation20/webdocs/markdowndocs/architecture-ch.md
@@ -162,7 +162,7 @@ Master Vnode遵循下面的写入流程:
Figure 3: TDengine master write flow
1. The master vnode receives a data insert request from the application, validates it, and proceeds to the next step;
-2. If the system configuration parameter walLevel is enabled (set to 2), the vnode writes the original data packet of the request to the database log file (WAL), so that TDengine can recover the data from the log file when the service restarts after a power outage or similar event, avoiding data loss;
+2. If the system configuration parameter walLevel is greater than 0, the vnode writes the original data packet of the request to the database log file (WAL). If walLevel is set to 2 and fsync is set to 0, TDengine also flushes the WAL data to disk immediately, so that data can be recovered from the log file even after a crash, avoiding data loss;
3. If there are multiple replicas, the vnode forwards the data packet to the slave vnodes in the same virtual node group, with the data version number (version) attached;
4. The data is written to memory and the record is added to the skip list;
5. The master vnode returns a confirmation to the application, indicating a successful write.
@@ -174,7 +174,7 @@ Master Vnode遵循下面的写入流程:
Figure 4: TDengine slave write flow
1. The slave vnode receives the data insert request forwarded by the master vnode.
-2. If the system configuration parameter walLevl is set to 2, the vnode writes the original data packet of the request to the log (WAL);
+2. If the system configuration parameter walLevel is greater than 0, the vnode writes the original data packet of the request to the database log file (WAL). If walLevel is set to 2 and fsync is set to 0, TDengine also flushes the WAL data to disk immediately, so that data can be recovered from the log file even after a crash, avoiding data loss (see the configuration sketch below);
3. The data is written to memory and the in-memory skip list is updated.
Compared with the master vnode, the slave vnode skips the forwarding step and the confirmation reply, two steps fewer; but writing to memory and the WAL is exactly the same.
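+
+A minimal taos.cfg sketch of the settings discussed above (walLevel and fsync are the parameter names referenced in this section; the values shown are illustrative, not defaults):
+
+```properties
+# per this section: walLevel > 0 writes each request to the WAL;
+# with walLevel 2 and fsync 0 the WAL is also flushed to disk immediately
+walLevel 2
+fsync 0
+```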
diff --git a/documentation20/webdocs/markdowndocs/connector-ch.md b/documentation20/webdocs/markdowndocs/connector-ch.md
index 6b22004c43..7b6afb75a7 100644
--- a/documentation20/webdocs/markdowndocs/connector-ch.md
+++ b/documentation20/webdocs/markdowndocs/connector-ch.md
@@ -280,365 +280,10 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
Cancels a subscription. If the parameter `keepProgress` is non-zero, the API keeps the subscription's progress, and a later call to `taos_subscribe` can continue from it; otherwise the progress is deleted and subsequent reads must start from the beginning.
-## Java Connector
-
-To make Java applications easy to develop, TDengine provides `taos-jdbcdriver`, an implementation that follows the JDBC standard (3.0) API specification. It can currently be searched for and downloaded from the [Sonatype Repository][1].
-
-Since TDengine is developed in C, the taos-jdbcdriver driver package depends on the native library of the corresponding system.
-
-* libtaos.so
-  After TDengine is successfully installed on a Linux system, the required native library file libtaos.so is automatically copied to /usr/lib/libtaos.so, which is on the default Linux library search path, so no extra configuration is needed.
-
-* taos.dll
-  After the client is installed on a Windows system, the taos.dll file that the driver depends on is automatically copied to the default system search path C:/Windows/System32, so again no extra configuration is needed.
-
-> Note: when developing on Windows you need to install the matching TDengine [Windows client][14]. On a Linux server the client is installed by default along with TDengine; you can also install the [Linux client][15] separately to connect to a remote TDengine server.
-
-TDengine's JDBC driver stays as close as possible to relational database drivers, but the differences between the objects and technical characteristics of a time-series database and those of a relational object database mean that taos-jdbcdriver does not fully implement the JDBC standard. Note the following when using it:
-
-* TDengine does not support deleting or updating individual records, so the driver provides no such methods.
-* Since delete and update are not supported, transactions are not supported either.
-* union operations between tables are not currently supported.
-* Nested queries are not currently supported. For each Connection instance, at most one ResultSet can be open at a time; if a new query is executed while a ResultSet is still open, TSDBJDBCDriver automatically closes the previous ResultSet.
-
-
-## TAOS-JDBCDriver versions and the supported TDengine and JDK versions
-
-| taos-jdbcdriver version | TDengine version | JDK version |
-| --- | --- | --- |
-| 2.0.2 | 2.0.0.x and above | 1.8.x |
-| 1.0.3 | 1.6.1.x and above | 1.8.x |
-| 1.0.2 | 1.6.1.x and above | 1.8.x |
-| 1.0.1 | 1.6.1.x and above | 1.8.x |
-
-## TDengine DataType and Java DataType
-
-TDengine currently supports timestamp, numeric, string, and boolean types, which map to Java types as follows:
-
-| TDengine DataType | Java DataType |
-| --- | --- |
-| TIMESTAMP | java.sql.Timestamp |
-| INT | java.lang.Integer |
-| BIGINT | java.lang.Long |
-| FLOAT | java.lang.Float |
-| DOUBLE | java.lang.Double |
-| SMALLINT, TINYINT |java.lang.Short |
-| BOOL | java.lang.Boolean |
-| BINARY, NCHAR | java.lang.String |
-
-## How to obtain TAOS-JDBCDriver
-
-### Maven repository
-
-taos-jdbcdriver has been published to the [Sonatype Repository][1] and synced to the major mirror repositories.
-* [sonatype][8]
-* [mvnrepository][9]
-* [maven.aliyun][10]
-
-In a Maven project, add the following pom.xml configuration:
-
-```xml
-<dependency>
-  <groupId>com.taosdata.jdbc</groupId>
-  <artifactId>taos-jdbcdriver</artifactId>
-  <version>2.0.2</version>
-</dependency>
-```
-
-### Building from source
-
-After downloading the [TDengine][3] source code, enter the taos-jdbcdriver source directory `src/connector/jdbc` and run `mvn clean package` to produce the jar.
-
-
-## Usage
-
-### Obtaining a connection
-
-Configure as follows to obtain a TDengine connection:
-```java
-Class.forName("com.taosdata.jdbc.TSDBDriver");
-String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
-Connection conn = DriverManager.getConnection(jdbcUrl);
-```
-> Port 6030 is the default connection port; `log` in the JDBC URL is the system's own monitoring database.
-
-The JDBC URL for TDengine has the following format:
-`jdbc:TAOS://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
-
-Here, the parts in `{}` are required and the parts in `[]` are optional. The parameters are described below:
-
-* user: the TDengine login user name; default value root.
-* password: the login password; default value taosdata.
-* charset: the character set used by the client; defaults to the system character set.
-* cfgdir: the directory of the client configuration file; default /etc/taos on Linux and C:/TDengine/cfg on Windows.
-* locale: the client locale; defaults to the current system locale.
-* timezone: the time zone used by the client; defaults to the current system time zone.
-
-These parameters can be configured in three places, with `priority from high to low` as follows:
-1. JDBC URL parameters
-   As shown above, the parameters can be specified in the JDBC URL.
-2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps)
-```java
-public Connection getConn() throws Exception{
- Class.forName("com.taosdata.jdbc.TSDBDriver");
- String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata";
- Properties connProps = new Properties();
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
- return conn;
-}
-```
-
-3. The client configuration file taos.cfg
-
-   On Linux the default configuration file is /var/lib/taos/taos.cfg; on Windows the default path is C:\TDengine\cfg\taos.cfg.
-```properties
-# client default username
-# defaultUser root
-
-# client default password
-# defaultPass taosdata
-
-# default system charset
-# charset UTF-8
-
-# system locale
-# locale en_US.UTF-8
-```
-> For more configuration details, see [Client Configuration][13].
-
-### Creating databases and tables
-
-```java
-Statement stmt = conn.createStatement();
-
-// create database
-stmt.executeUpdate("create database if not exists db");
-
-// use database
-stmt.executeUpdate("use db");
-
-// create table
-stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
-```
-> Note: if you do not select a database with `use db`, all subsequent operations on a table must prefix its name with the database name, e.g. db.tb.
-
-### Inserting data
-
-```java
-// insert data
-int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
-
-System.out.println("insert " + affectedRows + " rows.");
-```
-> now is a built-in function; it defaults to the current time of the server.
-> `now + 1s` means the current server time plus one second; the letter after the number is the time unit: a (millisecond), s (second), m (minute), h (hour), d (day), w (week), n (month), y (year).
-
-### Querying data
-
-```java
-// query data
-ResultSet resultSet = stmt.executeQuery("select * from tb");
-
-Timestamp ts = null;
-int temperature = 0;
-float humidity = 0;
-while(resultSet.next()){
-
- ts = resultSet.getTimestamp(1);
- temperature = resultSet.getInt(2);
- humidity = resultSet.getFloat("humidity");
-
- System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
-}
-```
-> Querying works the same as with a relational database; when fetching returned columns by index, indexes start from 1, but fetching by column name is recommended.
-
-
-### Subscriptions
-
-#### Creating a subscription
-
-```java
-TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
-```
-
-The three parameters of the `subscribe` method are:
-
-* topic: the subscription topic (i.e. its name); this parameter uniquely identifies the subscription
-* sql: the subscription query; it must be a `select` statement, should query only raw data, and may only read data in ascending time order
-* restart: whether to start over or to continue a previous subscription if one with the same name already exists
-
-The example above creates a subscription named `topic` with the SQL statement `select * from meters`; if this subscription already exists, it continues from the previous query progress instead of consuming all the data from the beginning.
-
-#### Consuming data
-
-```java
-int total = 0;
-while(true) {
- TSDBResultSet rs = sub.consume();
- int count = 0;
- while(rs.next()) {
- count++;
- }
- total += count;
- System.out.printf("%d rows consumed, total %d\n", count, total);
- Thread.sleep(1000);
-}
-```
-
-The `consume` method returns a result set containing all new data accumulated since the last call to `consume`. Be sure to call `consume` at a reasonable frequency (such as the `Thread.sleep(1000)` in the example); otherwise it puts unnecessary pressure on the server.
-
-#### Closing a subscription
-
-```java
-sub.close(true);
-```
-
-The `close` method closes a subscription. If its parameter is `true`, the subscription progress is kept, and a subscription created later with the same name can continue consuming data from there; if `false`, the progress is not kept.
-
-
-### Releasing resources
-
-```java
-resultSet.close();
-stmt.close();
-conn.close();
-```
-> `Be sure to close the connection`; otherwise connections will leak.
-## Using with connection pools
-
-**HikariCP**
-
-* Add the HikariCP Maven dependency:
-```xml
-<dependency>
-  <groupId>com.zaxxer</groupId>
-  <artifactId>HikariCP</artifactId>
-  <version>3.4.1</version>
-</dependency>
-```
-
-* Usage example:
-```java
- public static void main(String[] args) throws SQLException {
- HikariConfig config = new HikariConfig();
- config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
- config.setUsername("root");
- config.setPassword("taosdata");
-
- config.setMinimumIdle(3); //minimum number of idle connection
- config.setMaximumPoolSize(10); //maximum number of connection in the pool
- config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
- config.setIdleTimeout(60000); // max idle time for recycle idle connection
- config.setConnectionTestQuery("describe log.dn"); //validation query
- config.setValidationTimeout(3000); //validation query timeout
-
- HikariDataSource ds = new HikariDataSource(config); //create datasource
-
- Connection connection = ds.getConnection(); // get connection
- Statement statement = connection.createStatement(); // get statement
-
- //query or insert
- // ...
-
- connection.close(); // put back to connection pool
-}
-```
-> After obtaining a connection from HikariDataSource.getConnection(), call close() when done; it does not actually close the connection, it just returns it to the pool.
-> For more on using HikariCP, see the [official documentation][5].
-
-**Druid**
-
-* Add the Druid Maven dependency:
-
-```xml
-<dependency>
-  <groupId>com.alibaba</groupId>
-  <artifactId>druid</artifactId>
-  <version>1.1.20</version>
-</dependency>
-```
-
-* Usage example:
-```java
-public static void main(String[] args) throws Exception {
- Properties properties = new Properties();
- properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
- properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
- properties.put("username","root");
- properties.put("password","taosdata");
-
- properties.put("maxActive","10"); //maximum number of connection in the pool
- properties.put("initialSize","3");//initial number of connection
- properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
- properties.put("minIdle","3");//minimum number of connection in the pool
-
- properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
-
- properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
- properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
-
- properties.put("validationQuery","describe log.dn"); //validation query
- properties.put("testWhileIdle","true"); // test connection while idle
- properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
- properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
-
- //create druid datasource
- DataSource ds = DruidDataSourceFactory.createDataSource(properties);
- Connection connection = ds.getConnection(); // get connection
- Statement statement = connection.createStatement(); // get statement
-
- //query or insert
- // ...
-
- connection.close(); // put back to connection pool
-}
-```
-> For more on using Druid, see the [official documentation][6].
-
-**Notes**
-* Starting with TDengine `v1.6.4.1`, a function dedicated to heartbeat checks, `select server_status()`, is available, so `select server_status()` is the recommended validation query when using a connection pool.
-
-As shown below, `select server_status()` returns `1` when it succeeds.
-```shell
-taos> select server_status();
-server_status()|
-================
-1 |
-Query OK, 1 row(s) in set (0.000141s)
-```
-
-## Using with frameworks
-
-* For using taos-jdbcdriver with Spring JdbcTemplate, see [SpringJdbcTemplate][11]
-* For using it with Spring Boot + MyBatis, see [springbootdemo][12]
-
-## FAQ
-
-* java.lang.UnsatisfiedLinkError: no taos in java.library.path
-
-  **Cause**: the program could not find the required native library taos.
-
-  **Solution**: on Windows, copy C:\TDengine\driver\taos.dll to C:\Windows\System32\; on Linux, create the following soft link: `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.
-
-* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
-
-  **Cause**: TDengine currently supports only 64-bit JDKs.
-
-  **Solution**: reinstall a 64-bit JDK.
-
-* For other issues, see [Issues][7]
-
## Python Connector
### Installation prerequisites
-* TDengine is installed; if the client is on Windows, the Windows version of the TDengine client must also be installed [(Windows TDengine client installation)](https://www.taosdata.com/cn/documentation/connector/#Windows客户端及程序接口)
+* TDengine is installed; if the client is on Windows, the Windows version of the TDengine client must also be installed [(Windows TDengine client installation)][4]
* Python 2.7 or >= 3.4 is installed
* pip is installed
@@ -1137,18 +782,5 @@ promise2.then(function(result) {
[Here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js) is likewise a code example that uses the Node.js connector to create a table, insert weather data, and query the inserted data; unlike the one above, this example only uses `cursor`.
-[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
-[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
-[3]: https://github.com/taosdata/TDengine
-[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/
-[5]: https://github.com/brettwooldridge/HikariCP
-[6]: https://github.com/alibaba/druid
-[7]: https://github.com/taosdata/TDengine/issues
-[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
-[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
-[10]: https://maven.aliyun.com/mvn/search
-[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
-[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
-[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
-[14]: https://www.taosdata.com/cn/documentation20/connector/#Windows
-[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B
+[4]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client
+
diff --git a/documentation20/webdocs/markdowndocs/connector-java-ch.md b/documentation20/webdocs/markdowndocs/connector-java-ch.md
new file mode 100644
index 0000000000..da5ea52966
--- /dev/null
+++ b/documentation20/webdocs/markdowndocs/connector-java-ch.md
@@ -0,0 +1,370 @@
+# Java Connector
+
+To make Java applications easy to develop, TDengine provides `taos-jdbcdriver`, an implementation that follows the JDBC standard (3.0) API specification. It can currently be searched for and downloaded from the [Sonatype Repository][1].
+
+Since TDengine is developed in C, the taos-jdbcdriver driver package depends on the native library of the corresponding system.
+
+* libtaos.so
+  After TDengine is successfully installed on a Linux system, the required native library file libtaos.so is automatically copied to /usr/lib/libtaos.so, which is on the default Linux library search path, so no extra configuration is needed.
+
+* taos.dll
+  After the client is installed on a Windows system, the taos.dll file that the driver depends on is automatically copied to the default system search path C:/Windows/System32, so again no extra configuration is needed.
+
+> Note: when developing on Windows you need to install the matching TDengine [Windows client][14]. On a Linux server the client is installed by default along with TDengine; you can also install the [Linux client][15] separately to connect to a remote TDengine server.
+
+TDengine's JDBC driver stays as close as possible to relational database drivers, but the differences between the objects and technical characteristics of a time-series database and those of a relational object database mean that taos-jdbcdriver does not fully implement the JDBC standard. Note the following when using it:
+
+* TDengine does not support deleting or updating individual records, so the driver provides no such methods.
+* Since delete and update are not supported, transactions are not supported either.
+* union operations between tables are not currently supported.
+* Nested queries are not currently supported. For each Connection instance, at most one ResultSet can be open at a time; if a new query is executed while a ResultSet is still open, TSDBJDBCDriver automatically closes the previous ResultSet.
+
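+A minimal sketch of the last point above (illustrative only; the table names are assumptions):
+
+```java
+Statement stmt = conn.createStatement();
+ResultSet rs1 = stmt.executeQuery("select * from db.tb1");
+// running another query on the same connection while rs1 is still open:
+// the driver closes rs1 automatically, so rs1 must not be used afterwards
+ResultSet rs2 = stmt.executeQuery("select * from db.tb2");
+```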
+
+## TAOS-JDBCDriver versions and the supported TDengine and JDK versions
+
+| taos-jdbcdriver version | TDengine version | JDK version |
+| --- | --- | --- |
+| 2.0.4 | 2.0.0.x and above | 1.8.x |
+| 1.0.3 | 1.6.1.x and above | 1.8.x |
+| 1.0.2 | 1.6.1.x and above | 1.8.x |
+| 1.0.1 | 1.6.1.x and above | 1.8.x |
+
+## TDengine DataType and Java DataType
+
+TDengine currently supports timestamp, numeric, string, and boolean types, which map to Java types as follows:
+
+| TDengine DataType | Java DataType |
+| --- | --- |
+| TIMESTAMP | java.sql.Timestamp |
+| INT | java.lang.Integer |
+| BIGINT | java.lang.Long |
+| FLOAT | java.lang.Float |
+| DOUBLE | java.lang.Double |
+| SMALLINT, TINYINT |java.lang.Short |
+| BOOL | java.lang.Boolean |
+| BINARY, NCHAR | java.lang.String |
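+
+As an illustration, a hedged sketch of how these mappings surface through the standard JDBC getters (the column names are assumptions):
+
+```java
+ResultSet rs = stmt.executeQuery("select ts, cnt, ok, name from db.tb");
+while (rs.next()) {
+    java.sql.Timestamp ts = rs.getTimestamp("ts"); // TIMESTAMP -> java.sql.Timestamp
+    long cnt = rs.getLong("cnt");                  // BIGINT    -> java.lang.Long
+    boolean ok = rs.getBoolean("ok");              // BOOL      -> java.lang.Boolean
+    String name = rs.getString("name");            // NCHAR     -> java.lang.String
+}
+```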
+
+## How to obtain TAOS-JDBCDriver
+
+### Maven repository
+
+taos-jdbcdriver has been published to the [Sonatype Repository][1] and synced to the major mirror repositories.
+* [sonatype][8]
+* [mvnrepository][9]
+* [maven.aliyun][10]
+
+In a Maven project, add the following pom.xml configuration:
+
+```xml
+<dependency>
+  <groupId>com.taosdata.jdbc</groupId>
+  <artifactId>taos-jdbcdriver</artifactId>
+  <version>2.0.4</version>
+</dependency>
+```
+
+### Building from source
+
+After downloading the [TDengine][3] source code, enter the taos-jdbcdriver source directory `src/connector/jdbc` and run `mvn clean package` to produce the jar.
+
+
+## Usage
+
+### Obtaining a connection
+
+Configure as follows to obtain a TDengine connection:
+```java
+Class.forName("com.taosdata.jdbc.TSDBDriver");
+String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
+Connection conn = DriverManager.getConnection(jdbcUrl);
+```
+> Port 6030 is the default connection port; `log` in the JDBC URL is the system's own monitoring database.
+
+The JDBC URL for TDengine has the following format:
+`jdbc:TAOS://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
+
+Here, the parts in `{}` are required and the parts in `[]` are optional. The parameters are described below:
+
+* user: the TDengine login user name; default value root.
+* password: the login password; default value taosdata.
+* charset: the character set used by the client; defaults to the system character set.
+* cfgdir: the directory of the client configuration file; default /etc/taos on Linux and C:/TDengine/cfg on Windows.
+* locale: the client locale; defaults to the current system locale.
+* timezone: the time zone used by the client; defaults to the current system time zone.
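+
+For instance, a URL combining several of the parameters above could look like this (the host and database are assumptions):
+
+```java
+String url = "jdbc:TAOS://192.168.1.10:6030/db"
+    + "?user=root&password=taosdata&charset=UTF-8&timezone=UTC-8";
+Connection conn = DriverManager.getConnection(url);
+```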
+
+These parameters can be configured in three places, with `priority from high to low` as follows:
+1. JDBC URL parameters
+   As shown above, the parameters can be specified in the JDBC URL.
+2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps)
+```java
+public Connection getConn() throws Exception{
+ Class.forName("com.taosdata.jdbc.TSDBDriver");
+ String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata";
+ Properties connProps = new Properties();
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+ Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
+ return conn;
+}
+```
+
+3. The client configuration file taos.cfg
+
+   On Linux the default configuration file is /var/lib/taos/taos.cfg; on Windows the default path is C:\TDengine\cfg\taos.cfg.
+```properties
+# client default username
+# defaultUser root
+
+# client default password
+# defaultPass taosdata
+
+# default system charset
+# charset UTF-8
+
+# system locale
+# locale en_US.UTF-8
+```
+> For more configuration details, see [Client Configuration][13].
+
+### Creating databases and tables
+
+```java
+Statement stmt = conn.createStatement();
+
+// create database
+stmt.executeUpdate("create database if not exists db");
+
+// use database
+stmt.executeUpdate("use db");
+
+// create table
+stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
+```
+> Note: if you do not select a database with `use db`, all subsequent operations on a table must prefix its name with the database name, e.g. db.tb (as shown below).
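+
+That is, without `use db` the same table operations would be written with a fully qualified name (a sketch):
+
+```java
+// fully qualified name: <database>.<table>
+stmt.executeUpdate("create table if not exists db.tb (ts timestamp, temperature int, humidity float)");
+```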
+
+### Inserting data
+
+```java
+// insert data
+int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
+
+System.out.println("insert " + affectedRows + " rows.");
+```
+> now is a built-in function; it defaults to the current time of the server.
+> `now + 1s` means the current server time plus one second; the letter after the number is the time unit: a (millisecond), s (second), m (minute), h (hour), d (day), w (week), n (month), y (year).
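+
+For example, the other time units listed above can be used the same way (the values are illustrative):
+
+```java
+// now + 10a = server time plus 10 milliseconds; now + 2h = plus 2 hours
+stmt.executeUpdate("insert into tb values(now + 10a, 21, 9.8) (now + 2h, 22, 9.9)");
+```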
+
+### Querying data
+
+```java
+// query data
+ResultSet resultSet = stmt.executeQuery("select * from tb");
+
+Timestamp ts = null;
+int temperature = 0;
+float humidity = 0;
+while(resultSet.next()){
+
+ ts = resultSet.getTimestamp(1);
+ temperature = resultSet.getInt(2);
+ humidity = resultSet.getFloat("humidity");
+
+ System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
+}
+```
+> Querying works the same as with a relational database; when fetching returned columns by index, indexes start from 1, but fetching by column name is recommended.
+
+
+### Subscriptions
+
+#### Creating a subscription
+
+```java
+TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
+```
+
+The three parameters of the `subscribe` method are:
+
+* topic: the subscription topic (i.e. its name); this parameter uniquely identifies the subscription
+* sql: the subscription query; it must be a `select` statement, should query only raw data, and may only read data in ascending time order
+* restart: whether to start over or to continue a previous subscription if one with the same name already exists
+
+The example above creates a subscription named `topic` with the SQL statement `select * from meters`; if this subscription already exists, it continues from the previous query progress instead of consuming all the data from the beginning.
+
+#### Consuming data
+
+```java
+int total = 0;
+while(true) {
+ TSDBResultSet rs = sub.consume();
+ int count = 0;
+ while(rs.next()) {
+ count++;
+ }
+ total += count;
+ System.out.printf("%d rows consumed, total %d\n", count, total);
+ Thread.sleep(1000);
+}
+```
+
+The `consume` method returns a result set containing all new data accumulated since the last call to `consume`. Be sure to call `consume` at a reasonable frequency (such as the `Thread.sleep(1000)` in the example); otherwise it puts unnecessary pressure on the server.
+
+#### Closing a subscription
+
+```java
+sub.close(true);
+```
+
+The `close` method closes a subscription. If its parameter is `true`, the subscription progress is kept, and a subscription created later with the same name can continue consuming data from there; if `false`, the progress is not kept.
+
+
+### Releasing resources
+
+```java
+resultSet.close();
+stmt.close();
+conn.close();
+```
+> `Be sure to close the connection`; otherwise connections will leak.
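+
+One way to guarantee this, sketched with try-with-resources (standard JDK behavior, not TDengine-specific):
+
+```java
+try (Connection conn = DriverManager.getConnection(jdbcUrl);
+     Statement stmt = conn.createStatement();
+     ResultSet rs = stmt.executeQuery("select * from tb")) {
+    while (rs.next()) {
+        // process each row ...
+    }
+} // rs, stmt and conn are closed automatically, even on exceptions
+```
+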
+## Using with connection pools
+
+**HikariCP**
+
+* Add the HikariCP Maven dependency:
+```xml
+<dependency>
+  <groupId>com.zaxxer</groupId>
+  <artifactId>HikariCP</artifactId>
+  <version>3.4.1</version>
+</dependency>
+```
+
+* Usage example:
+```java
+ public static void main(String[] args) throws SQLException {
+ HikariConfig config = new HikariConfig();
+ config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
+ config.setUsername("root");
+ config.setPassword("taosdata");
+
+ config.setMinimumIdle(3); //minimum number of idle connection
+ config.setMaximumPoolSize(10); //maximum number of connection in the pool
+ config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
+ config.setIdleTimeout(60000); // max idle time for recycle idle connection
+ config.setConnectionTestQuery("describe log.dn"); //validation query
+ config.setValidationTimeout(3000); //validation query timeout
+
+ HikariDataSource ds = new HikariDataSource(config); //create datasource
+
+ Connection connection = ds.getConnection(); // get connection
+ Statement statement = connection.createStatement(); // get statement
+
+ //query or insert
+ // ...
+
+ connection.close(); // put back to connection pool
+}
+```
+> After obtaining a connection from HikariDataSource.getConnection(), call close() when done; it does not actually close the connection, it just returns it to the pool.
+> For more on using HikariCP, see the [official documentation][5].
+
+**Druid**
+
+* Add the Druid Maven dependency:
+
+```xml
+<dependency>
+  <groupId>com.alibaba</groupId>
+  <artifactId>druid</artifactId>
+  <version>1.1.20</version>
+</dependency>
+```
+
+* Usage example:
+```java
+public static void main(String[] args) throws Exception {
+ Properties properties = new Properties();
+ properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
+ properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
+ properties.put("username","root");
+ properties.put("password","taosdata");
+
+ properties.put("maxActive","10"); //maximum number of connection in the pool
+ properties.put("initialSize","3");//initial number of connection
+ properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
+ properties.put("minIdle","3");//minimum number of connection in the pool
+
+ properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
+
+ properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
+ properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
+
+ properties.put("validationQuery","describe log.dn"); //validation query
+ properties.put("testWhileIdle","true"); // test connection while idle
+ properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
+ properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
+
+ //create druid datasource
+ DataSource ds = DruidDataSourceFactory.createDataSource(properties);
+ Connection connection = ds.getConnection(); // get connection
+ Statement statement = connection.createStatement(); // get statement
+
+ //query or insert
+ // ...
+
+ connection.close(); // put back to connection pool
+}
+```
+> For more on using Druid, see the [official documentation][6].
+
+**Notes**
+* Starting with TDengine `v1.6.4.1`, a function dedicated to heartbeat checks, `select server_status()`, is available, so `select server_status()` is the recommended validation query when using a connection pool.
+
+As shown below, `select server_status()` returns `1` when it succeeds.
+```shell
+taos> select server_status();
+server_status()|
+================
+1 |
+Query OK, 1 row(s) in set (0.000141s)
+```
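+
+Applied to the HikariCP example above, that recommendation is a one-line change (a sketch):
+
+```java
+config.setConnectionTestQuery("select server_status()"); // instead of "describe log.dn"
+```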
+
+## Using with frameworks
+
+* For using taos-jdbcdriver with Spring JdbcTemplate, see [SpringJdbcTemplate][11]
+* For using it with Spring Boot + MyBatis, see [springbootdemo][12]
+
+## FAQ
+
+* java.lang.UnsatisfiedLinkError: no taos in java.library.path
+
+  **Cause**: the program could not find the required native library taos.
+
+  **Solution**: on Windows, copy C:\TDengine\driver\taos.dll to C:\Windows\System32\; on Linux, create the following soft link: `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.
+
+* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
+
+  **Cause**: TDengine currently supports only 64-bit JDKs.
+
+  **Solution**: reinstall a 64-bit JDK.
+
+* For other issues, see [Issues][7]
+
+[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
+[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
+[3]: https://github.com/taosdata/TDengine
+[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/
+[5]: https://github.com/brettwooldridge/HikariCP
+[6]: https://github.com/alibaba/druid
+[7]: https://github.com/taosdata/TDengine/issues
+[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
+[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
+[10]: https://maven.aliyun.com/mvn/search
+[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
+[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
+[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
+[14]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client
+[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B
diff --git a/documentation20/webdocs/markdowndocs/faq-ch.md b/documentation20/webdocs/markdowndocs/faq-ch.md
index b760fe161a..27c1054dc8 100644
--- a/documentation20/webdocs/markdowndocs/faq-ch.md
+++ b/documentation20/webdocs/markdowndocs/faq-ch.md
@@ -23,17 +23,87 @@
If the client encounters a connection failure, check the following:
-1. Make sure the client and server versions match exactly; the open-source community edition and the enterprise edition must not be mixed either
-2. On the server, run `systemctl status taosd` to check whether *taosd* is running; if not, start *taosd*
-3. Make sure the client specifies the correct server FQDN (Fully Qualified Domain Name, obtainable by running the Linux command hostname -f on the server) when connecting
-4. ping the server FQDN; if there is no response, check your network, DNS settings, or the system hosts file on the client machine
-5. Check the firewall settings and make sure TCP/UDP ports 6030-6039 are open
-6. For JDBC (and similarly ODBC, Python, Go, etc.) connections on Linux, make sure *libtaos.so* is in */usr/local/lib/taos*, and that */usr/local/lib/taos* is on the system library search path *LD_LIBRARY_PATH*
-7. For JDBC, ODBC, Python, Go, etc. connections on Windows, make sure *driver/c/taos.dll* is in your system search path (placing *taos.dll* in *C:\Windows\System32* is recommended)
-8. If the connection failure still cannot be resolved, use the command-line tool nc to check whether the TCP and UDP connections on the specified port work
+1. Check the network environment
+ * Cloud server: check whether the cloud server's security group opens access to TCP/UDP ports 6030-6042
+ * Local virtual machine: check whether the host can be pinged; avoid using `localhost` as the hostname
+ * Corporate server: in a NAT network environment, make sure the server can return messages to the client
+
+2. Make sure the client and server versions match exactly; the open-source community edition and the enterprise edition must not be mixed either
+
+3. On the server, run `systemctl status taosd` to check whether *taosd* is running; if not, start *taosd*
+
+4. Make sure the client specifies the correct server FQDN (Fully Qualified Domain Name, obtainable by running the Linux command hostname -f on the server) when connecting
+
+5. ping the server FQDN; if there is no response, check your network, DNS settings, or the system hosts file on the client machine
+
+6. Check the firewall settings and make sure TCP/UDP ports 6030-6042 are open
+
+7. For JDBC (and similarly ODBC, Python, Go, etc.) connections on Linux, make sure *libtaos.so* is in */usr/local/lib/taos*, and that */usr/local/lib/taos* is on the system library search path *LD_LIBRARY_PATH*
+
+8. For JDBC, ODBC, Python, Go, etc. connections on Windows, make sure *driver/c/taos.dll* is in your system search path (placing *taos.dll* in *C:\Windows\System32* is recommended)
+
+9. If the connection failure still cannot be resolved, use the command-line tool nc to check whether the TCP and UDP connections on the specified port work
Check whether the UDP port connection works: `nc -vuz {hostIP} {port}`
Check whether the server-side TCP port connection works: `nc -l {port}`
Check whether the client-side TCP port connection works: `nc {hostIP} {port}`
+
+10. You can use the network connectivity test built into the taos program to check whether the specified ports between the server and the client are reachable (both TCP and UDP).
+
+ taos uses the -n parameter to choose between running the server-side and the client-side test. -n server: run the server-side test; -n client: run the client-side test.
+
+ 1) First, stop the taosd service on the server;
+
+ 2) On the server, run the built-in server-side connectivity test: taos -n server -P 6030 -e 6042 -l 1000;
+
+ 3) On the client, run the built-in client-side connectivity test: taos -n client -h host -P 6030 -e 6042 -l 1000;
+
+ -n: selects the server-side or the client-side connectivity test; empty by default, meaning no connectivity test is run;
+
+ -h: the server name, as an IP address or an FQDN, e.g. 192.168.1.160, 192.168.1.160:6030, hostname1, or hostname1:6030. The default is 127.0.0.1.
+
+ -P: the starting port number to test; the default is 6030;
+
+ -e: the ending port number to test; it must be greater than or equal to the starting port; the default is 6042;
+
+ -l: the packet length used for the connectivity test, up to 64000 bytes; the default is 1000 bytes; the server and the client must use the same value;
+
+ The port range set on the server must contain the port range set on the client;
+
+ The starting port can be set in three ways: the default value, via -h, or via -P; the priority is: -P > -h > the default.
+
+ Sample client output:
+
+ `sum@sum-virtualBox /home/sum $ taos -n client -h ubuntu-vbox6`
+
+ `host: ubuntu-vbox6 start port: 6030 end port: 6042 packet len: 1000`
+
+ `tcp port:6030 test ok. udp port:6030 test ok.`
+
+ `tcp port:6031 test ok. udp port:6031 test ok.`
+
+ `tcp port:6032 test ok. udp port:6032 test ok.`
+
+ `tcp port:6033 test ok. udp port:6033 test ok.`
+
+ `tcp port:6034 test ok. udp port:6034 test ok.`
+
+ `tcp port:6035 test ok. udp port:6035 test ok.`
+
+ `tcp port:6036 test ok. udp port:6036 test ok.`
+
+ `tcp port:6037 test ok. udp port:6037 test ok.`
+
+ `tcp port:6038 test ok. udp port:6038 test ok.`
+
+ `tcp port:6039 test ok. udp port:6039 test ok.`
+
+ `tcp port:6040 test ok. udp port:6040 test ok.`
+
+ `tcp port:6041 test ok. udp port:6041 test ok.`
+
+ `tcp port:6042 test ok. udp port:6042 test ok.`
+
+ If a port is not reachable, a `port:xxxx test fail` message is printed.
## 6. What should I do about the error "Unexpected generic error in RPC"?
diff --git a/snap/hooks/install b/snap/hooks/install
index e58918d2c3..542be0b834 100755
--- a/snap/hooks/install
+++ b/snap/hooks/install
@@ -5,7 +5,9 @@ if [ ! -d /var/lib/taos ]; then
fi
if [ ! -d /var/log/taos ]; then
- mkdir -p -m777 /var/log/taos
+ mkdir -p --mode=777 /var/log/taos
+else
+ chmod 777 /var/log/taos
fi
if [ ! -d /etc/taos ]; then
@@ -13,5 +15,8 @@ if [ ! -d /etc/taos ]; then
fi
if [ ! -f /etc/taos/taos.cfg ]; then
+ if [ ! -d /etc/taos ]; then
+ mkdir -p /etc/taos
+ fi
cp $SNAP/etc/taos/taos.cfg /etc/taos/taos.cfg
fi
diff --git a/snap/local/launcher.sh b/snap/local/launcher.sh
index 52b3e4ce5c..29a7a63779 100755
--- a/snap/local/launcher.sh
+++ b/snap/local/launcher.sh
@@ -15,11 +15,12 @@ case "$SNAP_USER_COMMON" in
*) COMMON=$SNAP_USER_COMMON ;;
esac
-if [ -d /etc/taos ]; then
- CONFIG_FILE="/etc/taos"
-else
- CONFIG_FILE="$SNAP/etc/taos"
+if [ ! -f $SNAP_DATA/etc/taos/taos.cfg ]; then
+ if [ ! -d $SNAP_DATA/etc/taos ]; then
+ mkdir -p $SNAP_DATA/etc/taos
+ fi
+ cp $SNAP/etc/taos/taos.cfg $SNAP_DATA/etc/taos
fi
# Launch the snap
-$SNAP/usr/bin/taosd -c $CONFIG_FILE $@
+$SNAP/usr/bin/taosd -c /etc/taos $@
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index cf4f993394..7a0e1c3b80 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -1,13 +1,13 @@
name: tdengine
base: core18 # the base snap is the execution environment for this snap
-version: '2.0.0.6' # just for humans, typically '1.2+git' or '1.3.2'
+version: '2.0.2.0' # just for humans, typically '1.2+git' or '1.3.2'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
TDengine is an open-source big data platform designed and optimized for Internet of Things (IoT), Connected Vehicles, and Industrial IoT. Besides the 10x faster time-series database, it provides caching, stream computing, message queuing and other functionalities to reduce the complexity and costs of development and operations.
grade: stable
-confinement: classic
+confinement: strict
apps:
tdengine:
@@ -24,7 +24,9 @@ apps:
command: taoswrapper.sh
plugs:
- network
+ - system-observe
- systemfiles
+ - historyfile
taosdemo:
command: usr/bin/taosdemo
@@ -32,11 +34,19 @@ apps:
- network
plugs:
+ historyfile:
+ interface: personal-files
+ read:
+ - $HOME/.taos_history
+ write:
+ - $HOME/.taos_history
+
systemfiles:
interface: system-files
read:
- /etc/taos
- /var/lib/taos
+ - /var/log/taos
- /tmp
write:
- /var/log/taos
@@ -77,7 +87,7 @@ parts:
mkdir -p $SNAPCRAFT_STAGE/var/lib/taos
fi
if [ ! -d $SNAPCRAFT_STAGE/var/log/taos ]; then
- mkdir -p $SNAPCRAFT_STAGE/var/log/taos
+ mkdir -p --mode=777 $SNAPCRAFT_STAGE/var/log/taos
fi
prime:
@@ -85,16 +95,16 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- - usr/lib/libtaos.so.2.0.0.6
+ - usr/lib/libtaos.so.2.0.2.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so
override-prime: |
snapcraftctl prime
- if [ ! -d $SNAPCRAFT_STAGE/var/lib/taos ]; then
+ if [ ! -d $SNAPCRAFT_PRIME/var/lib/taos ]; then
cp -rf $SNAPCRAFT_STAGE/var/lib/taos $SNAPCRAFT_PRIME
fi
- if [ ! -d $SNAPCRAFT_STAGE/var/log/taos ]; then
+ if [ ! -d $SNAPCRAFT_PRIME/var/log/taos ]; then
cp -rf $SNAPCRAFT_STAGE/var/log/taos $SNAPCRAFT_PRIME
fi
@@ -103,11 +113,10 @@ layout:
bind: $SNAP_DATA/var/lib/taos
/var/log/taos:
bind: $SNAP_DATA/var/log/taos
- /etc/taos/taos.cfg:
- bind-file: $SNAP_DATA/etc/taos/taos.cfg
+ /etc/taos:
+ bind: $SNAP_DATA/etc/taos
hooks:
install:
- plugs: [systemfiles]
-
+ plugs: [systemfiles, historyfile]
diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h
index d5833675aa..07e0580397 100644
--- a/src/client/inc/tscSubquery.h
+++ b/src/client/inc/tscSubquery.h
@@ -30,7 +30,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code);
SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, int32_t index);
-int32_t tscHandleMasterJoinQuery(SSqlObj* pSql);
+void tscHandleMasterJoinQuery(SSqlObj* pSql);
int32_t tscHandleMasterSTableQuery(SSqlObj *pSql);
diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h
index f77897a74b..9b31b8fc6a 100644
--- a/src/client/inc/tscUtil.h
+++ b/src/client/inc/tscUtil.h
@@ -39,7 +39,6 @@ extern "C" {
#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo)\
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo)))
-#define TSDB_COL_IS_TAG(f) (((f)&TSDB_COL_TAG) != 0)
typedef struct SParsedColElem {
int16_t colIndex;
@@ -70,6 +69,8 @@ typedef struct SJoinSupporter {
SSubqueryState* pState;
SSqlObj* pObj; // parent SqlObj
int32_t subqueryIndex; // index of sub query
+ char intervalTimeUnit;
+ char slidingTimeUnit;
int64_t intervalTime; // interval time
int64_t slidingTime; // sliding time
SLimitVal limit; // limit info
@@ -186,7 +187,7 @@ SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functi
size_t tscSqlExprNumOfExprs(SQueryInfo* pQueryInfo);
SSqlExpr* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index);
-void tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy);
+int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy);
void tscSqlExprInfoDestroy(SArray* pExprInfo);
SColumn* tscColumnClone(const SColumn* src);
@@ -204,7 +205,7 @@ bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId, int32_t
SCond* tsGetSTableQueryCond(STagCond* pCond, uint64_t uid);
void tsSetSTableQueryCond(STagCond* pTagCond, uint64_t uid, SBufferWriter* bw);
-void tscTagCondCopy(STagCond* dest, const STagCond* src);
+int32_t tscTagCondCopy(STagCond* dest, const STagCond* src);
void tscTagCondRelease(STagCond* pCond);
void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo);
diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h
index d2c52e972a..5f4a46ddad 100644
--- a/src/client/inc/tsclient.h
+++ b/src/client/inc/tsclient.h
@@ -221,19 +221,18 @@ typedef struct STableDataBlocks {
SParamInfo *params;
} STableDataBlocks;
-//typedef struct SDataBlockList { // todo remove
-// uint32_t nSize;
-// uint32_t nAlloc;
-// STableDataBlocks **pData;
-//} SDataBlockList;
-
typedef struct SQueryInfo {
int16_t command; // the command may be different for each subclause, so keep it separately.
uint32_t type; // query/insert type
+ // TODO refactor
+ char intervalTimeUnit;
char slidingTimeUnit;
STimeWindow window; // query time window
- int64_t intervalTime; // aggregation time interval
+ int64_t intervalTime; // aggregation time window range
int64_t slidingTime; // sliding window in mseconds
+ int64_t intervalOffset;// start offset of each time window
+ int32_t tz; // query client timezone
+
SSqlGroupbyExpr groupbyExpr; // group by tags info
SArray * colList; // SArray
SFieldInfo fieldsInfo;
@@ -349,6 +348,7 @@ typedef struct SSqlObj {
void * pStream;
void * pSubscription;
char * sqlstr;
+ char parseRetry;
char retry;
char maxRetry;
SRpcEpSet epSet;
@@ -366,6 +366,8 @@ typedef struct SSqlStream {
uint32_t streamId;
char listed;
bool isProject;
+ char intervalTimeUnit;
+ char slidingTimeUnit;
int16_t precision;
int64_t num; // number of computing count
@@ -379,7 +381,7 @@ typedef struct SSqlStream {
int64_t ctime; // stream created time
int64_t stime; // stream next executed time
int64_t etime; // stream end query time, when time is larger then etime, the stream will be closed
- int64_t interval;
+ int64_t intervalTime;
int64_t slidingTime;
void * pTimer;
@@ -398,7 +400,7 @@ int tsParseSql(SSqlObj *pSql, bool initial);
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet);
int tscProcessSql(SSqlObj *pSql);
-int tscRenewTableMeta(SSqlObj *pSql, char *tableId);
+int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex);
void tscQueueAsyncRes(SSqlObj *pSql);
void tscQueueAsyncError(void(*fp), void *param, int32_t code);
@@ -413,7 +415,7 @@ void tscRestoreSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
void tscDestroyResPointerInfo(SSqlRes *pRes);
-void tscResetSqlCmdObj(SSqlCmd *pCmd);
+void tscResetSqlCmdObj(SSqlCmd *pCmd, bool removeFromCache);
/**
* free query result of the sql object
@@ -455,6 +457,7 @@ bool tscResultsetFetchCompleted(TAOS_RES *result);
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql);
+int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* sql);
int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo);
@@ -468,7 +471,7 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row;
// user defined constant value output columns
- if (pInfo->pSqlExpr->colInfo.flag == TSDB_COL_UDC) {
+ if (TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
pData = pInfo->pSqlExpr->param[1].pz;
pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen;
diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c
index 4643d255dc..d07089539a 100644
--- a/src/client/src/tscAsync.c
+++ b/src/client/src/tscAsync.c
@@ -43,6 +43,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
pSql->signature = pSql;
pSql->param = param;
pSql->pTscObj = pObj;
+ pSql->parseRetry = 0;
pSql->maxRetry = TSDB_MAX_REPLICA;
pSql->fp = fp;
pSql->fetchFp = fp;
@@ -50,7 +51,8 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
pSql->sqlstr = calloc(1, sqlLen + 1);
if (pSql->sqlstr == NULL) {
tscError("%p failed to malloc sql string buffer", pSql);
- tscQueueAsyncError(pSql->fp, pSql->param, TSDB_CODE_TSC_OUT_OF_MEMORY);
+ pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ tscQueueAsyncRes(pSql);
return;
}
@@ -94,7 +96,6 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa
SSqlObj *pSql = (SSqlObj *)calloc(1, sizeof(SSqlObj));
if (pSql == NULL) {
tscError("failed to malloc sqlObj");
- terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscQueueAsyncError(fp, param, TSDB_CODE_TSC_OUT_OF_MEMORY);
return;
}
@@ -191,7 +192,7 @@ void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRo
tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchSingleRowProxy);
}
-void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), void *param) {
+void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
SSqlObj *pSql = (SSqlObj *)taosa;
if (pSql == NULL || pSql->signature != pSql) {
tscError("sql object is NULL");
@@ -209,6 +210,8 @@ void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), voi
if (pRes->qhandle == 0) {
tscError("qhandle is NULL");
pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE;
+ pSql->param = param;
+
tscQueueAsyncRes(pSql);
return;
}
@@ -269,7 +272,10 @@ void taos_fetch_row_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, TAOS_ROW),
if (pRes->qhandle == 0) {
tscError("qhandle is NULL");
- tscQueueAsyncError(fp, param, TSDB_CODE_TSC_INVALID_QHANDLE);
+ pSql->param = param;
+ pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE;
+
+ tscQueueAsyncRes(pSql);
return;
}
@@ -352,36 +358,17 @@ void tscProcessFetchRow(SSchedMsg *pMsg) {
void tscProcessAsyncRes(SSchedMsg *pMsg) {
SSqlObj *pSql = (SSqlObj *)pMsg->ahandle;
-// SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
-
-// void *taosres = pSql;
-
- // pCmd may be released, so cache pCmd->command
-// int cmd = pCmd->command;
-// int code = pRes->code;
-
- // in case of async insert, restore the user specified callback function
-// bool shouldFree = tscShouldBeFreed(pSql);
-
-// if (pCmd->command == TSDB_SQL_INSERT) {
-// assert(pSql->fp != NULL);
assert(pSql->fp != NULL && pSql->fetchFp != NULL);
-// }
-// if (pSql->fp) {
pSql->fp = pSql->fetchFp;
(*pSql->fp)(pSql->param, pSql, pRes->code);
-// }
-
-// if (shouldFree) {
-// tscDebug("%p sqlObj is automatically freed in async res", pSql);
-// tscFreeSqlObj(pSql);
-// }
}
+// this function will be executed by queue task threads, so the terrno is not valid
static void tscProcessAsyncError(SSchedMsg *pMsg) {
void (*fp)() = pMsg->ahandle;
+ terrno = *(int32_t*) pMsg->msg;
(*fp)(pMsg->thandle, NULL, *(int32_t*)pMsg->msg);
}
@@ -482,7 +469,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
if (pCmd->command == TSDB_SQL_INSERT || pCmd->command == TSDB_SQL_SELECT) {
tscDebug("%p redo parse sql string and proceed", pSql);
pCmd->parseFinished = false;
- tscResetSqlCmdObj(pCmd);
+ tscResetSqlCmdObj(pCmd, false);
code = tsParseSql(pSql, true);
diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c
index e74fcba246..4b31a8001f 100644
--- a/src/client/src/tscFunctionImpl.c
+++ b/src/client/src/tscFunctionImpl.c
@@ -1648,9 +1648,10 @@ static void last_function(SQLFunctionCtx *pCtx) {
for (int32_t i = pCtx->size - 1; i >= 0; --i) {
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
- continue;
+ if (!pCtx->requireNull) {
+ continue;
+ }
}
-
memcpy(pCtx->aOutputBuf, data, pCtx->inputBytes);
TSKEY ts = pCtx->ptsList[i];
@@ -1721,7 +1722,9 @@ static void last_dist_function(SQLFunctionCtx *pCtx) {
for (int32_t i = pCtx->size - 1; i >= 0; --i) {
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
- continue;
+ if (!pCtx->requireNull) {
+ continue;
+ }
}
last_data_assign_impl(pCtx, data, i);
@@ -2422,24 +2425,14 @@ static void top_bottom_func_finalizer(SQLFunctionCtx *pCtx) {
///////////////////////////////////////////////////////////////////////////////////////////////
static bool percentile_function_setup(SQLFunctionCtx *pCtx) {
- const int32_t MAX_AVAILABLE_BUFFER_SIZE = 1 << 20; // 1MB
- const int32_t NUMOFCOLS = 1;
-
if (!function_setup(pCtx)) {
return false;
}
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
- SSchema field[1] = { { (uint8_t)pCtx->inputType, "dummyCol", 0, pCtx->inputBytes } };
-
- SColumnModel *pModel = createColumnModel(field, 1, 1000);
- int32_t orderIdx = 0;
-
- // tOrderDesc object
- tOrderDescriptor *pDesc = tOrderDesCreate(&orderIdx, NUMOFCOLS, pModel, TSDB_ORDER_DESC);
-
+
((SPercentileInfo *)(pResInfo->interResultBuf))->pMemBucket =
- tMemBucketCreate(1024, MAX_AVAILABLE_BUFFER_SIZE, pCtx->inputBytes, pCtx->inputType, pDesc);
+ tMemBucketCreate(pCtx->inputBytes, pCtx->inputType);
return true;
}
@@ -2485,15 +2478,13 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) {
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
tMemBucket * pMemBucket = ((SPercentileInfo *)pResInfo->interResultBuf)->pMemBucket;
- if (pMemBucket->numOfElems > 0) { // check for null
+ if (pMemBucket->total > 0) { // check for null
*(double *)pCtx->aOutputBuf = getPercentile(pMemBucket, v);
} else {
setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes);
}
- tOrderDescDestroy(pMemBucket->pOrderDesc);
tMemBucketDestroy(pMemBucket);
-
doFinalizer(pCtx);
}
diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c
index caaaa5bc18..b240d357a8 100644
--- a/src/client/src/tscLocal.c
+++ b/src/client/src/tscLocal.c
@@ -274,7 +274,7 @@ static int32_t tscProcessDescribeTable(SSqlObj *pSql) {
return tscSetValueToResObj(pSql, rowLen);
}
-static void tscProcessCurrentUser(SSqlObj *pSql) {
+static int32_t tscProcessCurrentUser(SSqlObj *pSql) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
@@ -282,14 +282,20 @@ static void tscProcessCurrentUser(SSqlObj *pSql) {
pExpr->resType = TSDB_DATA_TYPE_BINARY;
char* vx = calloc(1, pExpr->resBytes);
+ if (vx == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
size_t size = sizeof(pSql->pTscObj->user);
STR_WITH_MAXSIZE_TO_VARSTR(vx, pSql->pTscObj->user, size);
tscSetLocalQueryResult(pSql, vx, pExpr->aliasName, pExpr->resType, pExpr->resBytes);
free(vx);
+
+ return TSDB_CODE_SUCCESS;
}
-static void tscProcessCurrentDB(SSqlObj *pSql) {
+static int32_t tscProcessCurrentDB(SSqlObj *pSql) {
char db[TSDB_DB_NAME_LEN] = {0};
extractDBName(pSql->pTscObj->db, db);
@@ -302,6 +308,10 @@ static void tscProcessCurrentDB(SSqlObj *pSql) {
pExpr->resBytes = TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE;
char* vx = calloc(1, pExpr->resBytes);
+ if (vx == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
if (t == 0) {
setVardataNull(vx, TSDB_DATA_TYPE_BINARY);
} else {
@@ -310,9 +320,11 @@ static void tscProcessCurrentDB(SSqlObj *pSql) {
tscSetLocalQueryResult(pSql, vx, pExpr->aliasName, pExpr->resType, pExpr->resBytes);
free(vx);
+
+ return TSDB_CODE_SUCCESS;
}
-static void tscProcessServerVer(SSqlObj *pSql) {
+static int32_t tscProcessServerVer(SSqlObj *pSql) {
const char* v = pSql->pTscObj->sversion;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
@@ -323,13 +335,18 @@ static void tscProcessServerVer(SSqlObj *pSql) {
pExpr->resBytes = (int16_t)(t + VARSTR_HEADER_SIZE);
char* vx = calloc(1, pExpr->resBytes);
+ if (vx == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
STR_WITH_SIZE_TO_VARSTR(vx, v, (VarDataLenT)t);
tscSetLocalQueryResult(pSql, vx, pExpr->aliasName, pExpr->resType, pExpr->resBytes);
- taosTFree(vx);
+ free(vx);
+ return TSDB_CODE_SUCCESS;
}
-static void tscProcessClientVer(SSqlObj *pSql) {
+static int32_t tscProcessClientVer(SSqlObj *pSql) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
@@ -339,23 +356,28 @@ static void tscProcessClientVer(SSqlObj *pSql) {
pExpr->resBytes = (int16_t)(t + VARSTR_HEADER_SIZE);
char* v = calloc(1, pExpr->resBytes);
+ if (v == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
STR_WITH_SIZE_TO_VARSTR(v, version, (VarDataLenT)t);
tscSetLocalQueryResult(pSql, v, pExpr->aliasName, pExpr->resType, pExpr->resBytes);
- taosTFree(v);
+ free(v);
+ return TSDB_CODE_SUCCESS;
}
-static void tscProcessServStatus(SSqlObj *pSql) {
+static int32_t tscProcessServStatus(SSqlObj *pSql) {
STscObj* pObj = pSql->pTscObj;
if (pObj->pHb != NULL) {
if (pObj->pHb->res.code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
pSql->res.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
- return;
+ return pSql->res.code;
}
} else {
if (pSql->res.code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
- return;
+ return pSql->res.code;
}
}
@@ -364,6 +386,7 @@ static void tscProcessServStatus(SSqlObj *pSql) {
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
int32_t val = 1;
tscSetLocalQueryResult(pSql, (char*) &val, pExpr->aliasName, TSDB_DATA_TYPE_INT, sizeof(int32_t));
+ return TSDB_CODE_SUCCESS;
}
void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnName, int16_t type, size_t valueLength) {
@@ -393,37 +416,39 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa
int tscProcessLocalCmd(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
+ SSqlRes *pRes = &pSql->res;
if (pCmd->command == TSDB_SQL_CFG_LOCAL) {
- pSql->res.code = (uint8_t)taosCfgDynamicOptions(pCmd->payload);
+ pRes->code = (uint8_t)taosCfgDynamicOptions(pCmd->payload);
} else if (pCmd->command == TSDB_SQL_DESCRIBE_TABLE) {
- pSql->res.code = (uint8_t)tscProcessDescribeTable(pSql);
+ pRes->code = (uint8_t)tscProcessDescribeTable(pSql);
} else if (pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
/*
* set the qhandle to be 1 in order to pass the qhandle check, and to call partial release function to
* free allocated resources and remove the SqlObj from sql query linked list
*/
- pSql->res.qhandle = 0x1;
- pSql->res.numOfRows = 0;
+ pRes->qhandle = 0x1;
+ pRes->numOfRows = 0;
} else if (pCmd->command == TSDB_SQL_RESET_CACHE) {
taosCacheEmpty(tscCacheHandle);
+ pRes->code = TSDB_CODE_SUCCESS;
} else if (pCmd->command == TSDB_SQL_SERV_VERSION) {
- tscProcessServerVer(pSql);
+ pRes->code = tscProcessServerVer(pSql);
} else if (pCmd->command == TSDB_SQL_CLI_VERSION) {
- tscProcessClientVer(pSql);
+ pRes->code = tscProcessClientVer(pSql);
} else if (pCmd->command == TSDB_SQL_CURRENT_USER) {
- tscProcessCurrentUser(pSql);
+ pRes->code = tscProcessCurrentUser(pSql);
} else if (pCmd->command == TSDB_SQL_CURRENT_DB) {
- tscProcessCurrentDB(pSql);
+ pRes->code = tscProcessCurrentDB(pSql);
} else if (pCmd->command == TSDB_SQL_SERV_STATUS) {
- tscProcessServStatus(pSql);
+ pRes->code = tscProcessServStatus(pSql);
} else {
- pSql->res.code = TSDB_CODE_TSC_INVALID_SQL;
+ pRes->code = TSDB_CODE_TSC_INVALID_SQL;
tscError("%p not support command:%d", pSql, pCmd->command);
}
// keep the code in local variable in order to avoid invalid read in case of async query
- int32_t code = pSql->res.code;
+ int32_t code = pRes->code;
if (code == TSDB_CODE_SUCCESS) {
(*pSql->fp)(pSql->param, pSql, code);
} else {
diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c
index 563c9fa84e..39a757795e 100644
--- a/src/client/src/tscLocalMerge.c
+++ b/src/client/src/tscLocalMerge.c
@@ -67,8 +67,7 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc
SQLFunctionCtx *pCtx = &pReducer->pCtx[i];
SSqlExpr * pExpr = tscSqlExprGet(pQueryInfo, i);
- pCtx->aOutputBuf =
- pReducer->pResultBuf->data + pExpr->offset * pReducer->resColModel->capacity;
+ pCtx->aOutputBuf = pReducer->pResultBuf->data + pExpr->offset * pReducer->resColModel->capacity;
pCtx->order = pQueryInfo->order.order;
pCtx->functionId = pExpr->functionId;
@@ -160,7 +159,6 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (pMemBuffer == NULL) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
-
tscError("%p pMemBuffer is NULL", pMemBuffer);
pRes->code = TSDB_CODE_TSC_APP_ERROR;
return;
@@ -168,7 +166,6 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (pDesc->pColumnModel == NULL) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
-
tscError("%p no local buffer or intermediate result format model", pSql);
pRes->code = TSDB_CODE_TSC_APP_ERROR;
return;
@@ -188,7 +185,6 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (numOfFlush == 0 || numOfBuffer == 0) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
tscDebug("%p retrieved no data", pSql);
-
return;
}
@@ -279,6 +275,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
taosTFree(pReducer);
return;
}
+
param->pLocalData = pReducer->pLocalDataSrc;
param->pDesc = pReducer->pDesc;
param->num = pReducer->pLocalDataSrc[0]->pMemBuffer->numOfElemsPerPage;
diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c
index 47bfe0fcdc..09eb8f167e 100644
--- a/src/client/src/tscParseInsert.c
+++ b/src/client/src/tscParseInsert.c
@@ -180,7 +180,7 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SStrToken *pToken, char *payload,
} else if (strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0) {
*(uint8_t *)payload = TSDB_DATA_BOOL_NULL;
} else {
- return tscInvalidSQLErrMsg(msg, "invalid bool data", pToken->z);
+ return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z);
}
} else if (pToken->type == TK_INTEGER) {
iv = strtoll(pToken->z, NULL, 10);
@@ -439,8 +439,8 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
int16_t type = sToken.type;
if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL &&
type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || (sToken.n == 0) || (type == TK_RP)) {
- tscInvalidSQLErrMsg(error, "invalid data or symbol", sToken.z);
- *code = TSDB_CODE_TSC_INVALID_SQL;
+ tscSQLSyntaxErrMsg(error, "invalid data or symbol", sToken.z);
+ *code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
return -1;
}
@@ -472,7 +472,7 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
int32_t ret = tsParseOneColumnData(pSchema, &sToken, start, error, str, isPrimaryKey, timePrec);
if (ret != TSDB_CODE_SUCCESS) {
- *code = TSDB_CODE_TSC_INVALID_SQL;
+ *code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
return -1; // NOTE: here 0 mean error!
}
@@ -568,8 +568,8 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMe
sToken = tStrGetToken(*str, &index, false, 0, NULL);
*str += index;
if (sToken.n == 0 || sToken.type != TK_RP) {
- tscInvalidSQLErrMsg(error, ") expected", *str);
- *code = TSDB_CODE_TSC_INVALID_SQL;
+ tscSQLSyntaxErrMsg(error, ") expected", *str);
+ *code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
return -1;
}
@@ -578,7 +578,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMe
if (numOfRows <= 0) {
strcpy(error, "no any data points");
- *code = TSDB_CODE_TSC_INVALID_SQL;
+ *code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
return -1;
} else {
return numOfRows;
@@ -943,7 +943,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
if (sToken.n == 0 || sToken.type != TK_RP) {
- return tscInvalidSQLErrMsg(pCmd->payload, ") expected", sToken.z);
+ return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
}
pCmd->payloadLen = sizeof(pTag->name) + sizeof(pTag->dataLen) + pTag->dataLen;
@@ -1327,15 +1327,33 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
pSql->fetchFp = pSql->fp;
pSql->fp = (void(*)())tscHandleMultivnodeInsert;
}
-
+
if (initial && ((ret = tsInsertInitialCheck(pSql)) != TSDB_CODE_SUCCESS)) {
return ret;
}
-
+
+ // make a backup as tsParseInsertSql may modify the string
+ char* sqlstr = strdup(pSql->sqlstr);
ret = tsParseInsertSql(pSql);
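+  // retry once on TSDB_CODE_TSC_INVALID_SQL using the untouched copy: the first pass may
+  // have consumed part of the buffer, and tscResetSqlCmdObj(pCmd, true) evicts cached
+  // table meta, so the second parse likely runs against fresh schema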
+ if (sqlstr == NULL || pSql->parseRetry >= 1 || ret != TSDB_CODE_TSC_INVALID_SQL) {
+ free(sqlstr);
+ } else {
+ tscResetSqlCmdObj(pCmd, true);
+ free(pSql->sqlstr);
+ pSql->sqlstr = sqlstr;
+ pSql->parseRetry++;
+ if ((ret = tsInsertInitialCheck(pSql)) == TSDB_CODE_SUCCESS) {
+ ret = tsParseInsertSql(pSql);
+ }
+ }
} else {
SSqlInfo SQLInfo = qSQLParse(pSql->sqlstr);
ret = tscToSQLCmd(pSql, &SQLInfo);
+ if (ret == TSDB_CODE_TSC_INVALID_SQL && pSql->parseRetry == 0 && SQLInfo.type == TSDB_SQL_NULL) {
+ tscResetSqlCmdObj(pCmd, true);
+ pSql->parseRetry++;
+ ret = tscToSQLCmd(pSql, &SQLInfo);
+ }
SQLInfoDestroy(&SQLInfo);
}
diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c
index 6ff97e9d00..b8c3830204 100644
--- a/src/client/src/tscProfile.c
+++ b/src/client/src/tscProfile.c
@@ -259,11 +259,11 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
pSdesc->num = htobe64(pStream->num);
pSdesc->useconds = htobe64(pStream->useconds);
- pSdesc->stime = htobe64(pStream->stime - pStream->interval);
+ pSdesc->stime = htobe64(pStream->stime - pStream->intervalTime);
pSdesc->ctime = htobe64(pStream->ctime);
pSdesc->slidingTime = htobe64(pStream->slidingTime);
- pSdesc->interval = htobe64(pStream->interval);
+ pSdesc->interval = htobe64(pStream->intervalTime);
pHeartbeat->numOfStreams++;
pSdesc++;
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 5ce4c7125f..de608961c2 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -33,6 +33,8 @@
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
+#define TSWINDOW_IS_EQUAL(t1, t2) (((t1).skey == (t2).skey) && ((t1).ekey == (t2).ekey))
+
// -1 is tbname column index, so here use the -3 as the initial value
#define COLUMN_INDEX_INITIAL_VAL (-3)
#define COLUMN_INDEX_INITIALIZER \
@@ -45,6 +47,10 @@ typedef struct SColumnList { // todo refactor
SColumnIndex ids[TSDB_MAX_COLUMNS];
} SColumnList;
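+// pairs the function id written in the SQL text (originFuncId) with the one actually
+// executed (execFuncId); used to run last_row as a plain last query while keeping the
+// original function name for result column naming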
+typedef struct SConvertFunc {
+ int32_t originFuncId;
+ int32_t execFuncId;
+} SConvertFunc;
static SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t colIndex, int32_t tableIndex);
static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo);
@@ -184,7 +190,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
int32_t code = TSDB_CODE_SUCCESS;
if (!pInfo->valid) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), pInfo->pzErrMsg);
+ return tscSQLSyntaxErrMsg(tscGetErrorMsgPayload(pCmd), NULL, pInfo->pzErrMsg);
}
SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex);
@@ -587,21 +593,20 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
// interval is not null
SStrToken* t = &pQuerySql->interval;
- if (getTimestampInUsFromStr(t->z, t->n, &pQueryInfo->intervalTime) != TSDB_CODE_SUCCESS) {
+ if (parseDuration(t->z, t->n, &pQueryInfo->intervalTime, &pQueryInfo->intervalTimeUnit) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
- // if the unit of time window value is millisecond, change the value from microsecond
- if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
- pQueryInfo->intervalTime = pQueryInfo->intervalTime / 1000;
- }
+ if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y') {
+ // if the unit of time window value is millisecond, change the value from microsecond
+ if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
+ pQueryInfo->intervalTime = pQueryInfo->intervalTime / 1000;
+ }
- /* parser has filter the illegal type, no need to check here */
- pQueryInfo->slidingTimeUnit = pQuerySql->interval.z[pQuerySql->interval.n - 1];
-
- // interval cannot be less than 10 milliseconds
- if (pQueryInfo->intervalTime < tsMinIntervalTime) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ // interval cannot be less than 10 milliseconds
+ if (pQueryInfo->intervalTime < tsMinIntervalTime) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
}
// for top/bottom + interval query, we do not add additional timestamp column in the front
@@ -666,6 +671,7 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
const char* msg0 = "sliding value too small";
const char* msg1 = "sliding value no larger than the interval value";
const char* msg2 = "sliding value can not less than 1% of interval value";
+  const char* msg3 = "does not support sliding when interval is natural month/year";
const static int32_t INTERVAL_SLIDING_FACTOR = 100;
@@ -673,21 +679,27 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
SStrToken* pSliding = &pQuerySql->sliding;
- if (pSliding->n != 0) {
- getTimestampInUsFromStr(pSliding->z, pSliding->n, &pQueryInfo->slidingTime);
- if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
- pQueryInfo->slidingTime /= 1000;
- }
-
- if (pQueryInfo->slidingTime < tsMinSlidingTime) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
- }
-
- if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
- }
- } else {
+ if (pSliding->n == 0) {
+ pQueryInfo->slidingTimeUnit = pQueryInfo->intervalTimeUnit;
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (pQueryInfo->intervalTimeUnit == 'n' || pQueryInfo->intervalTimeUnit == 'y') {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ }
+
+ getTimestampInUsFromStr(pSliding->z, pSliding->n, &pQueryInfo->slidingTime);
+ if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
+ pQueryInfo->slidingTime /= 1000;
+ }
+
+ if (pQueryInfo->slidingTime < tsMinSlidingTime) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
+ }
+
+ if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if ((pQueryInfo->intervalTime != 0) && (pQueryInfo->intervalTime/pQueryInfo->slidingTime > INTERVAL_SLIDING_FACTOR)) {
@@ -1501,13 +1513,13 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
return TSDB_CODE_SUCCESS;
}
-static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSchema* pSchema, int32_t functionID, char* aliasName,
+static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSchema* pSchema, SConvertFunc cvtFunc, char* aliasName,
int32_t resColIdx, SColumnIndex* pColIndex) {
int16_t type = 0;
int16_t bytes = 0;
-
char columnName[TSDB_COL_NAME_LEN] = {0};
const char* msg1 = "not support column types";
+ int32_t functionID = cvtFunc.execFuncId;
if (functionID == TSDB_FUNC_SPREAD) {
if (pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_BINARY ||
@@ -1523,16 +1535,21 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS
type = pSchema[pColIndex->columnIndex].type;
bytes = pSchema[pColIndex->columnIndex].bytes;
}
-
+
if (aliasName != NULL) {
tstrncpy(columnName, aliasName, sizeof(columnName));
} else {
- getRevisedName(columnName, functionID, sizeof(columnName) - 1, pSchema[pColIndex->columnIndex].name);
+ getRevisedName(columnName, cvtFunc.originFuncId, sizeof(columnName) - 1, pSchema[pColIndex->columnIndex].name);
}
+
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, bytes, false);
tstrncpy(pExpr->aliasName, columnName, sizeof(pExpr->aliasName));
+ if (cvtFunc.originFuncId == TSDB_FUNC_LAST_ROW && cvtFunc.originFuncId != functionID) {
+ pExpr->colInfo.flag |= TSDB_COL_NULL;
+ }
+
// set reverse order scan data blocks for last query
if (functionID == TSDB_FUNC_LAST) {
pExpr->numOfParams = 1;
@@ -1766,7 +1783,10 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
if (changeFunctionID(optr, &functionID) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
}
-
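+  // a WHERE clause resets the window to TSWINDOW_INITIALIZER (see doCheckForQuery), in
+  // which case last_row is executed as a regular last query, presumably because the
+  // cached last row cannot serve an arbitrary time range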
+ SConvertFunc cvtFunc = {.originFuncId = functionID, .execFuncId = functionID};
+  if (functionID == TSDB_FUNC_LAST_ROW && TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER)) {
+ cvtFunc.execFuncId = TSDB_FUNC_LAST;
+ }
if (!requireAllFields) {
if (pItem->pNode->pParam->nExpr < 1) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
@@ -1798,7 +1818,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
for (int32_t j = 0; j < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++j) {
index.columnIndex = j;
- if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex++, &index) != 0) {
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, pItem->aliasName, colIndex++, &index) != 0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
@@ -1815,8 +1835,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
if ((index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) || (index.columnIndex < 0)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
-
- if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex + i, &index) != 0) {
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, pItem->aliasName, colIndex + i, &index) != 0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -1853,7 +1872,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
for (int32_t i = 0; i < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++i) {
SColumnIndex index = {.tableIndex = j, .columnIndex = i};
- if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex, &index) != 0) {
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, pItem->aliasName, colIndex, &index) != 0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -4675,7 +4694,9 @@ int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg0 = "sample interval can not be less than 10ms.";
const char* msg1 = "functions not allowed in select clause";
- if (pQueryInfo->intervalTime != 0 && pQueryInfo->intervalTime < 10) {
+ if (pQueryInfo->intervalTime != 0 && pQueryInfo->intervalTime < 10 &&
+ pQueryInfo->intervalTimeUnit != 'n' &&
+ pQueryInfo->intervalTimeUnit != 'y') {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
@@ -5238,7 +5259,7 @@ static bool tagColumnInGroupby(SSqlGroupbyExpr* pGroupbyExpr, int16_t columnId)
for (int32_t j = 0; j < pGroupbyExpr->numOfGroupCols; ++j) {
SColIndex* pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, j);
- if (columnId == pColIndex->colId && pColIndex->flag == TSDB_COL_TAG) {
+    if (columnId == pColIndex->colId && TSDB_COL_IS_TAG(pColIndex->flag)) {
return true;
}
}
@@ -5537,7 +5558,6 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
return checkUpdateTagPrjFunctions(pQueryInfo, pCmd);
}
}
-
int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
const char* msg1 = "only one expression allowed";
const char* msg2 = "invalid expression in select clause";
@@ -5807,22 +5827,34 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
int32_t ret = TSDB_CODE_SUCCESS;
for (int32_t i = 0; i < pList->nExpr; ++i) {
- SSchema* pSchema = pTagSchema + i;
+ SSchema* pSchema = &pTagSchema[i];
+
+ char tagVal[TSDB_MAX_TAGS_LEN];
if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) {
- // validate the length of binary
- if (pList->a[i].pVar.nLen + VARSTR_HEADER_SIZE > pSchema->bytes) {
+ if (pList->a[i].pVar.nLen > pSchema->bytes) {
tdDestroyKVRowBuilder(&kvRowBuilder);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
-
- char tagVal[TSDB_MAX_TAGS_LEN];
+
ret = tVariantDump(&(pList->a[i].pVar), tagVal, pSchema->type, true);
+
+    // check the length again after the conversion, since the value may have been converted from binary to nchar.
+ if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) {
+ int16_t len = varDataTLen(tagVal);
+ if (len > pSchema->bytes) {
+ tdDestroyKVRowBuilder(&kvRowBuilder);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ }
+ }
+
if (ret != TSDB_CODE_SUCCESS) {
tdDestroyKVRowBuilder(&kvRowBuilder);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
+
tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal);
}
@@ -6078,6 +6110,10 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
}
int32_t joinQuery = (pQuerySql->from != NULL && pQuerySql->from->nExpr > 2);
+
+ if (pQuerySql->pWhere) {
+ pQueryInfo->window = TSWINDOW_INITIALIZER;
+ }
if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable, joinQuery) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -6161,7 +6197,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
- if (pQueryInfo->intervalTime > 0) {
+ if (pQueryInfo->intervalTime > 0 && pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y') {
int64_t timeRange = ABS(pQueryInfo->window.skey - pQueryInfo->window.ekey);
// number of result is not greater than 10,000,000
if ((timeRange == 0) || (timeRange / pQueryInfo->intervalTime) > MAX_INTERVAL_TIME_WINDOW) {
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index ecb85472fc..16e3458e13 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -226,17 +226,13 @@ int tscSendMsgToServer(SSqlObj *pSql) {
.handle = &pSql->pRpcCtx,
.code = 0
};
+
// NOTE: the rpc context should be acquired before sending data to server.
// Otherwise, the pSql object may have been released already during the response function, which is
// processMsgFromServer function. In the meanwhile, the assignment of the rpc context to sql object will absolutely
// cause crash.
- if (pObj != NULL && pObj->signature == pObj) {
- rpcSendRequest(pObj->pDnodeConn, &pSql->epSet, &rpcMsg);
- return TSDB_CODE_SUCCESS;
- } else {
- //pObj->signature has been reset by other thread, ignore concurrency problem
- return TSDB_CODE_TSC_CONN_KILLED;
- }
+ rpcSendRequest(pObj->pDnodeConn, &pSql->epSet, &rpcMsg);
+ return TSDB_CODE_SUCCESS;
}
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
@@ -280,8 +276,6 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
}
}
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
-
int32_t cmd = pCmd->command;
if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_FETCH || cmd == TSDB_SQL_INSERT || cmd == TSDB_SQL_UPDATE_TAGS_VAL) &&
(rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID ||
@@ -306,7 +300,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
taosMsleep(duration);
}
- rpcMsg->code = tscRenewTableMeta(pSql, pTableMetaInfo->name);
+ rpcMsg->code = tscRenewTableMeta(pSql, 0);
// if there is an error occurring, proceed to the following error handling procedure.
if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
@@ -673,6 +667,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->numOfCols = htons((int16_t)taosArrayGetSize(pQueryInfo->colList));
pQueryMsg->intervalTime = htobe64(pQueryInfo->intervalTime);
pQueryMsg->slidingTime = htobe64(pQueryInfo->slidingTime);
+ pQueryMsg->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pQueryMsg->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols);
pQueryMsg->numOfTags = htonl(numOfTags);
@@ -1495,8 +1490,7 @@ int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
char *tmpData = NULL;
uint32_t len = pSql->cmd.payloadLen;
if (len > 0) {
- tmpData = calloc(1, len);
- if (NULL == tmpData) {
+ if ((tmpData = calloc(1, len)) == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -1541,8 +1535,7 @@ int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
// copy payload content to temp buff
char *tmpData = 0;
if (pCmd->payloadLen > 0) {
- tmpData = calloc(1, pCmd->payloadLen + 1);
- if (NULL == tmpData) return -1;
+ if ((tmpData = calloc(1, pCmd->payloadLen + 1)) == NULL) return -1;
memcpy(tmpData, pCmd->payload, pCmd->payloadLen);
}
@@ -2207,14 +2200,14 @@ int tscGetMeterMetaEx(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, bool create
/**
* retrieve table meta from mnode, and update the local table meta cache.
* @param pSql sql object
- * @param tableId table full name
+ * @param tableIndex table index
* @return status code
*/
-int tscRenewTableMeta(SSqlObj *pSql, char *tableId) {
+int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
- STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (pTableMetaInfo->pTableMeta) {
diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c
index f63923e046..9fa4db999f 100644
--- a/src/client/src/tscSql.c
+++ b/src/client/src/tscSql.c
@@ -597,11 +597,12 @@ int taos_errno(TAOS_RES *tres) {
}
/*
- * In case of invalid sql error, additional information is attached to explain
+ * In case of invalid sql/sql syntax error, additional information is attached to explain
* why the sql is invalid
*/
static bool hasAdditionalErrorInfo(int32_t code, SSqlCmd *pCmd) {
- if (code != TSDB_CODE_TSC_INVALID_SQL) {
+ if (code != TSDB_CODE_TSC_INVALID_SQL
+ && code != TSDB_CODE_TSC_SQL_SYNTAX_ERROR) {
return false;
}
@@ -609,9 +610,11 @@ static bool hasAdditionalErrorInfo(int32_t code, SSqlCmd *pCmd) {
char *z = NULL;
if (len > 0) {
- z = strstr(pCmd->payload, "invalid SQL");
+ z = strstr(pCmd->payload, "invalid SQL");
+ if (z == NULL) {
+ z = strstr(pCmd->payload, "syntax error");
+ }
}
-
return z != NULL;
}
@@ -817,7 +820,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t tblListLen) {
// must before clean the sqlcmd object
- tscResetSqlCmdObj(&pSql->cmd);
+ tscResetSqlCmdObj(&pSql->cmd, false);
SSqlCmd *pCmd = &pSql->cmd;
diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c
index 9dd47888d2..79e0011093 100644
--- a/src/client/src/tscStream.c
+++ b/src/client/src/tscStream.c
@@ -46,22 +46,23 @@ static bool isProjectStream(SQueryInfo* pQueryInfo) {
return true;
}
-static int64_t tscGetRetryDelayTime(int64_t slidingTime, int16_t prec) {
+static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, int16_t prec) {
float retryRangeFactor = 0.3f;
-
- // change to ms
- if (prec == TSDB_TIME_PRECISION_MICRO) {
- slidingTime = slidingTime / 1000;
- }
-
int64_t retryDelta = (int64_t)(tsStreamCompRetryDelay * retryRangeFactor);
retryDelta = ((rand() % retryDelta) + tsStreamCompRetryDelay) * 1000L;
- if (slidingTime < retryDelta) {
- return slidingTime;
- } else {
- return retryDelta;
+ if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
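+    // the cap at one sliding period below only applies to fixed-length windows; a
+    // natural month/year interval has no constant length in ms, so retryDelta is used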
+ // change to ms
+ if (prec == TSDB_TIME_PRECISION_MICRO) {
+ slidingTime = slidingTime / 1000;
+ }
+
+ if (slidingTime < retryDelta) {
+ return slidingTime;
+ }
}
+
+ return retryDelta;
}
static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
@@ -86,7 +87,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
// failed to get meter/metric meta, retry in 10sec.
if (code != TSDB_CODE_SUCCESS) {
- int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
+ int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
tscDebug("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime);
tscSetRetryTimer(pStream, pSql, retryDelayTime);
@@ -131,13 +132,17 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
}
if (etime > pStream->etime) {
etime = pStream->etime;
+ } else if (pStream->intervalTimeUnit != 'y' && pStream->intervalTimeUnit != 'n') {
+ etime = pStream->stime + (etime - pStream->stime) / pStream->intervalTime * pStream->intervalTime;
} else {
- etime = pStream->stime + (etime - pStream->stime) / pStream->interval * pStream->interval;
+ etime = taosGetIntervalStartTimestamp(etime, pStream->slidingTime, pStream->intervalTime, pStream->slidingTimeUnit, pStream->precision);
}
pQueryInfo->window.ekey = etime;
if (pQueryInfo->window.skey >= pQueryInfo->window.ekey) {
int64_t timer = pStream->slidingTime;
- if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
+ if (pStream->intervalTimeUnit == 'y' || pStream->intervalTimeUnit == 'n') {
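+      // natural month/year windows have no fixed length in ms; retry after one day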
+ timer = 86400 * 1000l;
+ } else if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
timer /= 1000l;
}
tscSetRetryTimer(pStream, pSql, timer);
@@ -157,7 +162,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOfRows) {
SSqlStream *pStream = (SSqlStream *)param;
if (tres == NULL || numOfRows < 0) {
- int64_t retryDelay = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
+ int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
tscError("%p stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows,
retryDelay);
@@ -218,7 +223,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
SSqlObj * pSql = (SSqlObj *)res;
if (pSql == NULL || numOfRows < 0) {
- int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
+ int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
tscError("%p stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime);
tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
@@ -241,7 +246,11 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
}
if (!pStream->isProject) {
- pStream->stime += pStream->slidingTime;
+ if (pStream->intervalTimeUnit == 'y' || pStream->intervalTimeUnit == 'n') {
+ pStream->stime = taosAddNatualInterval(pStream->stime, pStream->slidingTime, pStream->slidingTimeUnit, pStream->precision);
+ } else {
+ pStream->stime += pStream->slidingTime;
+ }
}
// actually only one row is returned. this following is not necessary
taos_fetch_rows_a(res, tscProcessStreamRetrieveResult, pStream);
@@ -301,7 +310,7 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
now + timer, timer, delay, pStream->stime, etime);
} else {
tscDebug("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql, pStream,
- pStream->stime, timer, delay, pStream->stime - pStream->interval, pStream->stime - 1);
+ pStream->stime, timer, delay, pStream->stime - pStream->intervalTime, pStream->stime - 1);
}
pSql->cmd.command = TSDB_SQL_SELECT;
@@ -311,23 +320,26 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
}
static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
- int64_t delayDelta = (int64_t)(pStream->slidingTime * tsStreamComputDelayRatio);
-
int64_t maxDelay =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;
- if (delayDelta > maxDelay) {
- delayDelta = maxDelay;
- }
-
- int64_t remainTimeWindow = pStream->slidingTime - delayDelta;
- if (maxDelay > remainTimeWindow) {
- maxDelay = (int64_t)(remainTimeWindow / 1.5f);
+ int64_t delayDelta = maxDelay;
+ if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
+ delayDelta = (int64_t)(pStream->slidingTime * tsStreamComputDelayRatio);
+ if (delayDelta > maxDelay) {
+ delayDelta = maxDelay;
+ }
+ int64_t remainTimeWindow = pStream->slidingTime - delayDelta;
+ if (maxDelay > remainTimeWindow) {
+ maxDelay = (int64_t)(remainTimeWindow / 1.5f);
+ }
}
int64_t currentDelay = (rand() % maxDelay); // a random number
currentDelay += delayDelta;
- assert(currentDelay < pStream->slidingTime);
+ if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
+ assert(currentDelay < pStream->slidingTime);
+ }
return currentDelay;
}
@@ -354,7 +366,8 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
return;
}
} else {
- if ((pStream->stime - pStream->interval) >= pStream->etime) {
+ int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
+ if (stime >= pStream->etime) {
tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
pStream->stime, pStream->etime);
// TODO : How to terminate stream here
@@ -387,24 +400,24 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
- if (pQueryInfo->intervalTime < minIntervalTime) {
+ if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y' && pQueryInfo->intervalTime < minIntervalTime) {
tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64, pSql, pStream,
pQueryInfo->intervalTime, minIntervalTime);
pQueryInfo->intervalTime = minIntervalTime;
}
- pStream->interval = pQueryInfo->intervalTime; // it shall be derived from sql string
+ pStream->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
+ pStream->intervalTime = pQueryInfo->intervalTime; // it shall be derived from sql string
- if (pQueryInfo->slidingTime == 0) {
+ if (pQueryInfo->slidingTime <= 0) {
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
+ pQueryInfo->slidingTimeUnit = pQueryInfo->intervalTimeUnit;
}
int64_t minSlidingTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
- if (pQueryInfo->slidingTime == -1) {
- pQueryInfo->slidingTime = pQueryInfo->intervalTime;
- } else if (pQueryInfo->slidingTime < minSlidingTime) {
+ if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y' && pQueryInfo->slidingTime < minSlidingTime) {
tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
pQueryInfo->slidingTime, minSlidingTime);
@@ -418,6 +431,7 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
}
+ pStream->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pStream->slidingTime = pQueryInfo->slidingTime;
if (pStream->isProject) {
@@ -431,7 +445,7 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
if (pStream->isProject) {
// no data in table, flush all data till now to destination meter, 10sec delay
- pStream->interval = tsProjectExecInterval;
+ pStream->intervalTime = tsProjectExecInterval;
pStream->slidingTime = tsProjectExecInterval;
if (stime != 0) { // first projection start from the latest event timestamp
@@ -442,11 +456,15 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
}
} else { // timewindow based aggregation stream
if (stime == 0) { // no data in meter till now
- stime = ((int64_t)taosGetTimestamp(pStream->precision) / pStream->interval) * pStream->interval;
- stime -= pStream->interval;
- tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64, pSql, pStream, stime);
+ stime = pQueryInfo->window.skey;
+ if (stime == INT64_MIN) {
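+        // no start time in the query either: align the current time to a window
+        // boundary, then step back one more window to begin on a complete interval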
+ stime = (int64_t)taosGetTimestamp(pStream->precision);
+ stime = taosGetIntervalStartTimestamp(stime, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
+ stime = taosGetIntervalStartTimestamp(stime - 1, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
+ tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64, pSql, pStream, stime);
+ }
} else {
- int64_t newStime = (stime / pStream->interval) * pStream->interval;
+ int64_t newStime = taosGetIntervalStartTimestamp(stime, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
if (newStime != stime) {
tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql, pStream, stime, newStime);
stime = newStime;
@@ -516,7 +534,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
- pStream, pTableMetaInfo->name, pStream->interval, pStream->slidingTime, starttime, pSql->sqlstr);
+ pStream, pTableMetaInfo->name, pStream->intervalTime, pStream->slidingTime, starttime, pSql->sqlstr);
}
TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 2fb264c756..e264fa9b33 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -178,6 +178,8 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, in
pSupporter->subqueryIndex = index;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
+ pSupporter->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
+  pSupporter->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pSupporter->intervalTime = pQueryInfo->intervalTime;
pSupporter->slidingTime = pQueryInfo->slidingTime;
pSupporter->limit = pQueryInfo->limit;
@@ -309,6 +311,8 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
// set the second stage sub query for join process
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE);
+ pQueryInfo->intervalTimeUnit = pSupporter->intervalTimeUnit;
+ pQueryInfo->slidingTimeUnit = pSupporter->slidingTimeUnit;
pQueryInfo->intervalTime = pSupporter->intervalTime;
pQueryInfo->slidingTime = pSupporter->slidingTime;
pQueryInfo->groupbyExpr = pSupporter->groupbyExpr;
@@ -570,8 +574,9 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar
SSchema* pColSchema = tscGetTableColumnSchemaById(pTableMetaInfo->pTableMeta, tagColId);
- *s1 = taosArrayInit(p1->num, p1->tagSize);
- *s2 = taosArrayInit(p2->num, p2->tagSize);
+ // int16_t for padding
+ *s1 = taosArrayInit(p1->num, p1->tagSize - sizeof(int16_t));
+ *s2 = taosArrayInit(p2->num, p2->tagSize - sizeof(int16_t));
if (!(checkForDuplicateTagVal(pQueryInfo, p1, pParentSql) && checkForDuplicateTagVal(pQueryInfo, p2, pParentSql))) {
return TSDB_CODE_QRY_DUP_JOIN_KEY;
@@ -1039,6 +1044,10 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) {
int32_t numOfExprs = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * numOfExprs);
+ if (pRes->pColumnIndex == NULL) {
+ pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ return;
+ }
for (int32_t i = 0; i < numOfExprs; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
@@ -1153,7 +1162,8 @@ static void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code);
static SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSqlObj *prevSqlObj);
-int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter *pSupporter) {
+// TODO
+int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter *pSupporter) {
SSqlCmd * pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
@@ -1199,7 +1209,9 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
// this data needs to be transfer to support struct
memset(&pNewQueryInfo->fieldsInfo, 0, sizeof(SFieldInfo));
- tscTagCondCopy(&pSupporter->tagCond, &pNewQueryInfo->tagCond);//pNewQueryInfo->tagCond;
+ if (tscTagCondCopy(&pSupporter->tagCond, &pNewQueryInfo->tagCond) != 0) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
pNew->cmd.numOfCols = 0;
pNewQueryInfo->intervalTime = 0;
@@ -1296,52 +1308,75 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
pNewQueryInfo->type |= TSDB_QUERY_TYPE_SUBQUERY;
}
- return tscProcessSql(pNew);
+ return TSDB_CODE_SUCCESS;
}
-int32_t tscHandleMasterJoinQuery(SSqlObj* pSql) {
+void tscHandleMasterJoinQuery(SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
+ SSqlRes* pRes = &pSql->res;
+
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
assert((pQueryInfo->type & TSDB_QUERY_TYPE_SUBQUERY) == 0);
+ int32_t code = TSDB_CODE_SUCCESS;
+
// todo add test
SSubqueryState *pState = calloc(1, sizeof(SSubqueryState));
if (pState == NULL) {
- pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- return pSql->res.code;
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
}
pState->numOfTotal = pQueryInfo->numOfTables;
pState->numOfRemain = pState->numOfTotal;
+ bool hasEmptySub = false;
+
tscDebug("%p start subquery, total:%d", pSql, pQueryInfo->numOfTables);
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
SJoinSupporter *pSupporter = tscCreateJoinSupporter(pSql, pState, i);
if (pSupporter == NULL) { // failed to create support struct, abort current query
tscError("%p tableIndex:%d, failed to allocate join support object, abort further query", pSql, i);
- pState->numOfRemain = i;
- pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- if (0 == i) {
- taosTFree(pState);
- }
- return pSql->res.code;
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
}
- int32_t code = tscLaunchJoinSubquery(pSql, i, pSupporter);
+ code = tscCreateJoinSubquery(pSql, i, pSupporter);
if (code != TSDB_CODE_SUCCESS) { // failed to create subquery object, quit query
tscDestroyJoinSupporter(pSupporter);
- pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- if (0 == i) {
- taosTFree(pState);
- }
+ goto _error;
+ }
+
+ SSqlObj* pSub = pSql->pSubs[i];
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSub->cmd, 0, 0);
+ if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) && (pTableMetaInfo->vgroupList->numOfVgroups == 0)) {
+ hasEmptySub = true;
break;
}
}
- pSql->cmd.command = (pSql->numOfSubs <= 0)? TSDB_SQL_RETRIEVE_EMPTY_RESULT:TSDB_SQL_TABLE_JOIN_RETRIEVE;
-
- return TSDB_CODE_SUCCESS;
+ if (hasEmptySub) { // at least one subquery is empty, do nothing and return
+ freeJoinSubqueryObj(pSql);
+ pSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
+ (*pSql->fp)(pSql->param, pSql, 0);
+ } else {
+ for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
+ SSqlObj* pSub = pSql->pSubs[i];
+ if ((code = tscProcessSql(pSub)) != TSDB_CODE_SUCCESS) {
+        pState->numOfRemain = i - 1; // the requests already sent will continue and do not go through the error handling path
+ break;
+ }
+ }
+
+ pSql->cmd.command = TSDB_SQL_TABLE_JOIN_RETRIEVE;
+ }
+
+ return;
+
+ _error:
+ pRes->code = code;
+ tscQueueAsyncRes(pSql);
}
static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs, SSubqueryState* pState) {
@@ -1380,7 +1415,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
const uint32_t nBufferSize = (1u << 16); // 64KB
- SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
pSql->numOfSubs = pTableMetaInfo->vgroupList->numOfVgroups;
@@ -1395,9 +1430,20 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
}
pSql->pSubs = calloc(pSql->numOfSubs, POINTER_BYTES);
-
+
tscDebug("%p retrieved query data from %d vnode(s)", pSql, pSql->numOfSubs);
SSubqueryState *pState = calloc(1, sizeof(SSubqueryState));
+
+ if (pSql->pSubs == NULL || pState == NULL) {
+ taosTFree(pState);
+ taosTFree(pSql->pSubs);
+ pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pSql->numOfSubs);
+
+ tscQueueAsyncRes(pSql);
+ return ret;
+ }
+
pState->numOfTotal = pSql->numOfSubs;
pState->numOfRemain = pSql->numOfSubs;
@@ -2029,8 +2075,21 @@ static void doBuildResFromSubqueries(SSqlObj* pSql) {
numOfRes = (int32_t)(MIN(numOfRes, pSql->pSubs[i]->res.numOfRows));
}
+ if (numOfRes == 0) {
+ return;
+ }
+
int32_t totalSize = tscGetResRowLength(pQueryInfo->exprList);
- pRes->pRsp = realloc(pRes->pRsp, numOfRes * totalSize);
+
+ assert(numOfRes * totalSize > 0);
+ char* tmp = realloc(pRes->pRsp, numOfRes * totalSize);
+ if (tmp == NULL) {
+ pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ return;
+ } else {
+ pRes->pRsp = tmp;
+ }
+
pRes->data = pRes->pRsp;
char* data = pRes->data;
@@ -2069,6 +2128,12 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) {
pRes->buffer = calloc(numOfExprs, POINTER_BYTES);
pRes->length = calloc(numOfExprs, sizeof(int32_t));
+ if (pRes->tsrow == NULL || pRes->buffer == NULL || pRes->length == NULL) {
+ pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ tscQueueAsyncRes(pSql);
+ return;
+ }
+
tscRestoreSQLFuncForSTableQuery(pQueryInfo);
}
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index b61fd7e8c9..b45d40f49c 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -33,7 +33,7 @@
static void freeQueryInfoImpl(SQueryInfo* pQueryInfo);
static void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool removeFromCache);
- SCond* tsGetSTableQueryCond(STagCond* pTagCond, uint64_t uid) {
+SCond* tsGetSTableQueryCond(STagCond* pTagCond, uint64_t uid) {
if (pTagCond->pCond == NULL) {
return NULL;
}
@@ -254,15 +254,12 @@ int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
pRes->numOfCols = numOfOutput;
pRes->tsrow = calloc(numOfOutput, POINTER_BYTES);
- pRes->length = calloc(numOfOutput, sizeof(int32_t)); // todo refactor
+ pRes->length = calloc(numOfOutput, sizeof(int32_t));
pRes->buffer = calloc(numOfOutput, POINTER_BYTES);
// not enough memory
if (pRes->tsrow == NULL || (pRes->buffer == NULL && pRes->numOfCols > 0)) {
taosTFree(pRes->tsrow);
- taosTFree(pRes->buffer);
- taosTFree(pRes->length);
-
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
return pRes->code;
}
@@ -281,13 +278,14 @@ void tscDestroyResPointerInfo(SSqlRes* pRes) {
}
taosTFree(pRes->pRsp);
+
taosTFree(pRes->tsrow);
taosTFree(pRes->length);
-
+ taosTFree(pRes->buffer);
+
taosTFree(pRes->pGroupRec);
taosTFree(pRes->pColumnIndex);
- taosTFree(pRes->buffer);
-
+
if (pRes->pArithSup != NULL) {
taosTFree(pRes->pArithSup->data);
taosTFree(pRes->pArithSup);
@@ -296,7 +294,7 @@ void tscDestroyResPointerInfo(SSqlRes* pRes) {
pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free
}
-static void tscFreeQueryInfo(SSqlCmd* pCmd) {
+static void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeFromCache) {
if (pCmd == NULL || pCmd->numOfClause == 0) {
return;
}
@@ -306,7 +304,7 @@ static void tscFreeQueryInfo(SSqlCmd* pCmd) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, i);
freeQueryInfoImpl(pQueryInfo);
- clearAllTableMetaInfo(pQueryInfo, (const char*)addr, false);
+ clearAllTableMetaInfo(pQueryInfo, (const char*)addr, removeFromCache);
taosTFree(pQueryInfo);
}
@@ -314,7 +312,7 @@ static void tscFreeQueryInfo(SSqlCmd* pCmd) {
taosTFree(pCmd->pQueryInfo);
}
-void tscResetSqlCmdObj(SSqlCmd* pCmd) {
+void tscResetSqlCmdObj(SSqlCmd* pCmd, bool removeFromCache) {
pCmd->command = 0;
pCmd->numOfCols = 0;
pCmd->count = 0;
@@ -328,7 +326,7 @@ void tscResetSqlCmdObj(SSqlCmd* pCmd) {
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
- tscFreeQueryInfo(pCmd);
+ tscFreeQueryInfo(pCmd, removeFromCache);
}
void tscFreeSqlResult(SSqlObj* pSql) {
@@ -366,7 +364,7 @@ void tscPartiallyFreeSqlObj(SSqlObj* pSql) {
taosTFree(pSql->pSubs);
pSql->numOfSubs = 0;
- tscResetSqlCmdObj(pCmd);
+ tscResetSqlCmdObj(pCmd, false);
}
void tscFreeSqlObj(SSqlObj* pSql) {
@@ -1052,7 +1050,7 @@ void tscSqlExprInfoDestroy(SArray* pExprInfo) {
taosArrayDestroy(pExprInfo);
}
-void tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy) {
+int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy) {
assert(src != NULL && dst != NULL);
size_t size = taosArrayGetSize(src);
@@ -1064,7 +1062,7 @@ void tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy)
if (deepcopy) {
SSqlExpr* p1 = calloc(1, sizeof(SSqlExpr));
if (p1 == NULL) {
- assert(0);
+ return -1;
}
*p1 = *pExpr;
@@ -1078,6 +1076,8 @@ void tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy)
}
}
}
+
+ return 0;
}
SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) {
@@ -1324,11 +1324,14 @@ bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId, int32_t
return false;
}
-void tscTagCondCopy(STagCond* dest, const STagCond* src) {
+int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) {
memset(dest, 0, sizeof(STagCond));
if (src->tbnameCond.cond != NULL) {
dest->tbnameCond.cond = strdup(src->tbnameCond.cond);
+ if (dest->tbnameCond.cond == NULL) {
+ return -1;
+ }
}
dest->tbnameCond.uid = src->tbnameCond.uid;
@@ -1337,7 +1340,7 @@ void tscTagCondCopy(STagCond* dest, const STagCond* src) {
dest->relType = src->relType;
if (src->pCond == NULL) {
- return;
+ return 0;
}
size_t s = taosArrayGetSize(src->pCond);
@@ -1354,7 +1357,7 @@ void tscTagCondCopy(STagCond* dest, const STagCond* src) {
assert(pCond->cond != NULL);
c.cond = malloc(c.len);
if (c.cond == NULL) {
- assert(0);
+ return -1;
}
memcpy(c.cond, pCond->cond, c.len);
@@ -1362,6 +1365,8 @@ void tscTagCondCopy(STagCond* dest, const STagCond* src) {
taosArrayPush(dest->pCond, &c);
}
+
+ return 0;
}
void tscTagCondRelease(STagCond* pTagCond) {
@@ -1830,6 +1835,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pNewQueryInfo->command = pQueryInfo->command;
+ pNewQueryInfo->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pNewQueryInfo->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pNewQueryInfo->intervalTime = pQueryInfo->intervalTime;
pNewQueryInfo->slidingTime = pQueryInfo->slidingTime;
@@ -1854,7 +1860,10 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
}
}
- tscTagCondCopy(&pNewQueryInfo->tagCond, &pQueryInfo->tagCond);
+ if (tscTagCondCopy(&pNewQueryInfo->tagCond, &pQueryInfo->tagCond) != 0) {
+ terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
+ }
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
pNewQueryInfo->fillVal = malloc(pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
@@ -1883,7 +1892,10 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
}
uint64_t uid = pTableMetaInfo->pTableMeta->id.uid;
- tscSqlExprCopy(pNewQueryInfo->exprList, pQueryInfo->exprList, uid, true);
+ if (tscSqlExprCopy(pNewQueryInfo->exprList, pQueryInfo->exprList, uid, true) != 0) {
+ terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
+ }
doSetSqlExprAndResultFieldInfo(pQueryInfo, pNewQueryInfo, uid);
@@ -2028,10 +2040,37 @@ bool tscIsUpdateQuery(SSqlObj* pSql) {
return ((pCmd->command >= TSDB_SQL_INSERT && pCmd->command <= TSDB_SQL_DROP_DNODE) || TSDB_SQL_USE_DB == pCmd->command);
}
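+// builds a "syntax error near ..." message in msg: with sql == NULL the additionalInfo
+// text is quoted instead of the statement; otherwise up to 63 characters of the offending
+// sql are quoted (passed through as-is if they already start with "syntax error"), with
+// additionalInfo appended in parentheses when given; returns TSDB_CODE_TSC_SQL_SYNTAX_ERROR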
+int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* sql) {
+ const char* msgFormat1 = "syntax error near \'%s\'";
+ const char* msgFormat2 = "syntax error near \'%s\' (%s)";
+ const char* msgFormat3 = "%s";
+
+ const char* prefix = "syntax error";
+ const int32_t BACKWARD_CHAR_STEP = 0;
+
+ if (sql == NULL) {
+ assert(additionalInfo != NULL);
+ sprintf(msg, msgFormat1, additionalInfo);
+ return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
+ }
+
+ char buf[64] = {0}; // only extract part of sql string
+ strncpy(buf, (sql - BACKWARD_CHAR_STEP), tListLen(buf) - 1);
+
+ if (additionalInfo != NULL) {
+ sprintf(msg, msgFormat2, buf, additionalInfo);
+ } else {
+ const char* msgFormat = (0 == strncmp(sql, prefix, strlen(prefix))) ? msgFormat3 : msgFormat1;
+ sprintf(msg, msgFormat, buf);
+ }
+
+  return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
+}
int32_t tscInvalidSQLErrMsg(char* msg, const char* additionalInfo, const char* sql) {
const char* msgFormat1 = "invalid SQL: %s";
- const char* msgFormat2 = "invalid SQL: syntax error near \"%s\" (%s)";
- const char* msgFormat3 = "invalid SQL: syntax error near \"%s\"";
+ const char* msgFormat2 = "invalid SQL: \'%s\' (%s)";
+ const char* msgFormat3 = "invalid SQL: \'%s\'";
const int32_t BACKWARD_CHAR_STEP = 0;
@@ -2257,4 +2296,4 @@ bool tscSetSqlOwner(SSqlObj* pSql) {
void tscClearSqlOwner(SSqlObj* pSql) {
assert(taosCheckPthreadValid(pSql->owner));
atomic_store_64(&pSql->owner, 0);
-}
\ No newline at end of file
+}
diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h
index 2a4ac3fc40..beef9ff375 100644
--- a/src/common/inc/tname.h
+++ b/src/common/inc/tname.h
@@ -35,6 +35,8 @@ bool tscValidateTableNameLength(size_t len);
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
+int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision);
+int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision);
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
#endif // TDENGINE_NAME_H
diff --git a/src/common/src/tname.c b/src/common/src/tname.c
index 01945dbb00..49c9e6b726 100644
--- a/src/common/src/tname.c
+++ b/src/common/src/tname.c
@@ -100,33 +100,123 @@ SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numO
return pFilter;
}
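+// advances key by intervalTime natural months ('n') or years ('y'): the timestamp is
+// broken into a calendar date with localtime_r, shifted by whole months, and rebuilt
+// with mktime, so boundaries follow the calendar and local time zone, not a fixed span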
+int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision) {
+ key /= 1000;
+ if (precision == TSDB_TIME_PRECISION_MICRO) {
+ key /= 1000;
+ }
+
+ struct tm tm;
+ time_t t = (time_t)key;
+ localtime_r(&t, &tm);
+
+ if (timeUnit == 'y') {
+ intervalTime *= 12;
+ }
+
+ int mon = (int)(tm.tm_year * 12 + tm.tm_mon + intervalTime);
+ tm.tm_year = mon / 12;
+ tm.tm_mon = mon % 12;
+
+ key = mktime(&tm) * 1000L;
+
+ if (precision == TSDB_TIME_PRECISION_MICRO) {
+ key *= 1000L;
+ }
+
+ return key;
+}
+
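+// counts the whole natural month/year intervals between skey and ekey, computed on
+// calendar months in the local time zone; the keys are swapped first if out of order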
+int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision) {
+ skey /= 1000;
+ ekey /= 1000;
+ if (precision == TSDB_TIME_PRECISION_MICRO) {
+ skey /= 1000;
+ ekey /= 1000;
+ }
+ if (ekey < skey) {
+ int64_t tmp = ekey;
+ ekey = skey;
+ skey = tmp;
+ }
+
+ struct tm tm;
+ time_t t = (time_t)skey;
+ localtime_r(&t, &tm);
+ int smon = tm.tm_year * 12 + tm.tm_mon;
+
+ t = (time_t)ekey;
+ localtime_r(&t, &tm);
+ int emon = tm.tm_year * 12 + tm.tm_mon;
+
+ if (timeUnit == 'y') {
+ intervalTime *= 12;
+ }
+
+ return (emon - smon) / (int32_t)intervalTime;
+}
+
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision) {
if (slidingTime == 0) {
return startTime;
}
+ int64_t start = startTime;
+ if (timeUnit == 'n' || timeUnit == 'y') {
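+    // natural month/year windows: truncate to midnight on the first of the month in
+    // local time, then snap the month (or year) down to a multiple of the sliding step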
+ start /= 1000;
+ if (precision == TSDB_TIME_PRECISION_MICRO) {
+ start /= 1000;
+ }
+ struct tm tm;
+ time_t t = (time_t)start;
+ localtime_r(&t, &tm);
+ tm.tm_sec = 0;
+ tm.tm_min = 0;
+ tm.tm_hour = 0;
+ tm.tm_mday = 1;
- int64_t start = ((startTime - intervalTime) / slidingTime + 1) * slidingTime;
- if (!(timeUnit == 'u' || timeUnit == 'a' || timeUnit == 'm' || timeUnit == 's' || timeUnit == 'h')) {
- /*
- * here we revised the start time of day according to the local time zone,
- * but in case of DST, the start time of one day need to be dynamically decided.
- */
- // todo refactor to extract function that is available for Linux/Windows/Mac platform
-#if defined(WINDOWS) && _MSC_VER >= 1900
- // see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
- int64_t timezone = _timezone;
- int32_t daylight = _daylight;
- char** tzname = _tzname;
-#endif
+ if (timeUnit == 'y') {
+ tm.tm_mon = 0;
+ tm.tm_year = (int)(tm.tm_year / slidingTime * slidingTime);
+ } else {
+ int mon = tm.tm_year * 12 + tm.tm_mon;
+ mon = (int)(mon / slidingTime * slidingTime);
+ tm.tm_year = mon / 12;
+ tm.tm_mon = mon % 12;
+ }
- int64_t t = (precision == TSDB_TIME_PRECISION_MILLI) ? MILLISECOND_PER_SECOND : MILLISECOND_PER_SECOND * 1000L;
- start += timezone * t;
+ start = mktime(&tm) * 1000L;
+ if (precision == TSDB_TIME_PRECISION_MICRO) {
+ start *= 1000L;
+ }
+ } else {
+ int64_t delta = startTime - intervalTime;
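+    // factor corrects for integer division truncating toward zero, so the alignment
+    // stays right when delta is negative (timestamps before the Unix epoch)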
+    int32_t factor = delta > 0 ? 1 : -1;
+
+ start = (delta / slidingTime + factor) * slidingTime;
+
+ if (timeUnit == 'd' || timeUnit == 'w') {
+ /*
+ * here we revised the start time of day according to the local time zone,
+ * but in case of DST, the start time of one day need to be dynamically decided.
+ */
+ // todo refactor to extract function that is available for Linux/Windows/Mac platform
+ #if defined(WINDOWS) && _MSC_VER >= 1900
+ // see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
+ int64_t timezone = _timezone;
+ int32_t daylight = _daylight;
+ char** tzname = _tzname;
+ #endif
+
+ int64_t t = (precision == TSDB_TIME_PRECISION_MILLI) ? MILLISECOND_PER_SECOND : MILLISECOND_PER_SECOND * 1000L;
+ start += timezone * t;
+ }
+
+ int64_t end = start + intervalTime - 1;
+ if (end < startTime) {
+ start += slidingTime;
+ }
}
- int64_t end = start + intervalTime - 1;
- if (end < startTime) {
- start += slidingTime;
- }
return start;
}
diff --git a/src/connector/jdbc/src/test/java/TestPreparedStatement.java b/src/connector/jdbc/src/test/java/TestPreparedStatement.java
index 16dad40a3f..1edb957493 100644
--- a/src/connector/jdbc/src/test/java/TestPreparedStatement.java
+++ b/src/connector/jdbc/src/test/java/TestPreparedStatement.java
@@ -12,7 +12,6 @@ public class TestPreparedStatement {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, "localhost");
-
connection = DriverManager.getConnection("jdbc:TAOS://localhost:0/", properties);
String rawSql = "select * from test.log0601";
// String[] params = new String[]{"ts", "c1"};
diff --git a/src/connector/jdbc/src/test/java/TestTSDBDatabaseMetaData.java b/src/connector/jdbc/src/test/java/TestTSDBDatabaseMetaData.java
index 7e65bd1149..39a08f0fe9 100644
--- a/src/connector/jdbc/src/test/java/TestTSDBDatabaseMetaData.java
+++ b/src/connector/jdbc/src/test/java/TestTSDBDatabaseMetaData.java
@@ -13,7 +13,6 @@ public class TestTSDBDatabaseMetaData {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, "localhost");
-
connection = DriverManager.getConnection("jdbc:TAOS://localhost:0/", properties);
dbMetaData = connection.getMetaData();
resSet = dbMetaData.getCatalogs();
diff --git a/src/connector/jdbc/src/test/java/TestTSDBSubscribe.java b/src/connector/jdbc/src/test/java/TestTSDBSubscribe.java
index faa073b5e9..47acb20064 100644
--- a/src/connector/jdbc/src/test/java/TestTSDBSubscribe.java
+++ b/src/connector/jdbc/src/test/java/TestTSDBSubscribe.java
@@ -3,7 +3,6 @@ import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.TSDBResultSet;
import com.taosdata.jdbc.TSDBSubscribe;
-import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BatchInsertTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BatchInsertTest.java
index 47082c1ef1..c49293c96b 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BatchInsertTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BatchInsertTest.java
@@ -40,7 +40,6 @@ public class BatchInsertTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
-
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ConnectionTest.java
index 2196048cc3..a54ece4ead 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ConnectionTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ConnectionTest.java
@@ -29,7 +29,6 @@ public class ConnectionTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
-
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
assertTrue(null != connection);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/DatabaseMetaDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/DatabaseMetaDataTest.java
index 565e409834..284af3dfe7 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/DatabaseMetaDataTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/DatabaseMetaDataTest.java
@@ -26,8 +26,7 @@ public class DatabaseMetaDataTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
-
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" , properties);
+ connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
String sql = "drop database if exists " + dbName;
statement = (TSDBPreparedStatement) connection.prepareStatement(sql);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ImportTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ImportTest.java
index 52ddc5d0eb..dbe16d9fea 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ImportTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ImportTest.java
@@ -28,7 +28,6 @@ public class ImportTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
-
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/PreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/PreparedStatementTest.java
index dff7b05e6d..0535214ac1 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/PreparedStatementTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/PreparedStatementTest.java
@@ -33,7 +33,6 @@ public class PreparedStatementTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
-
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
String sql = "drop database if exists " + dbName;
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SelectTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SelectTest.java
index 22b9d6a59d..1844a92b47 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SelectTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SelectTest.java
@@ -28,8 +28,7 @@ public class SelectTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
-
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" , properties);
+ connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StableTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StableTest.java
index 29a27c7980..6e01fb7c34 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StableTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StableTest.java
@@ -31,8 +31,7 @@ public class StableTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
-
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" , properties);
+ connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("create database if not exists " + dbName);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java
index 30c346ab62..db7b8c8cb1 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java
@@ -30,7 +30,6 @@ public class StatementTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
-
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
index c6ca030ce8..07b43d1227 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
@@ -32,7 +32,6 @@ public class SubscribeTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
-
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java
index 9ea5a431a5..a0981063a5 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java
@@ -12,7 +12,7 @@ public class TSDBDriverTest {
@Test
public void urlParserTest() throws SQLException {
TSDBDriver driver = new TSDBDriver();
- String url = "jdbc:TSDB://127.0.0.1:0/db?user=root&password=your_password";
+ String url = "jdbc:TSDB://127.0.0.1:0/db";
Properties properties = new Properties();
driver.parseURL(url, properties);
diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h
index 951c511022..d8e5c8f1d7 100644
--- a/src/inc/taoserror.h
+++ b/src/inc/taoserror.h
@@ -98,6 +98,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_ACTION_IN_PROGRESS, 0, 0x0212, "Action in
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DISCONNECTED, 0, 0x0213, "Disconnected from service")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_WRITE_AUTH, 0, 0x0214, "No write permission")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_CONN_KILLED, 0, 0x0215, "Connection killed")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_SQL_SYNTAX_ERROR, 0, 0x0216, "Syntax error in SQL")
// mnode
TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED, 0, 0x0300, "Message not processed")
diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h
index 761a267ce5..e2df886320 100644
--- a/src/inc/taosmsg.h
+++ b/src/inc/taosmsg.h
@@ -170,6 +170,13 @@ enum _mgmt_table {
#define TSDB_COL_NORMAL 0x0u // the normal column of the table
#define TSDB_COL_TAG 0x1u // the tag column type
#define TSDB_COL_UDC 0x2u // the user specified normal string column, it is a dummy column
+#define TSDB_COL_NULL 0x4u // set when the column filters on NULL values
+
+#define TSDB_COL_IS_TAG(f) (((f&(~(TSDB_COL_NULL)))&TSDB_COL_TAG) != 0)
+#define TSDB_COL_IS_NORMAL_COL(f) ((f&(~(TSDB_COL_NULL))) == TSDB_COL_NORMAL)
+#define TSDB_COL_IS_UD_COL(f) ((f&(~(TSDB_COL_NULL))) == TSDB_COL_UDC)
+#define TSDB_COL_REQ_NULL(f) (((f)&TSDB_COL_NULL) != 0)
+
extern char *taosMsg[];
@@ -456,6 +463,7 @@ typedef struct {
int64_t intervalTime; // time interval for aggregation, in milliseconds
int64_t intervalOffset; // start offset for interval query
int64_t slidingTime; // value for sliding window
+ char intervalTimeUnit; // time unit of the aggregation interval
char slidingTimeUnit; // time unit of the interval, used to adjust interval(1d)-style windows
uint16_t tagCondLen; // tag length in current query
int16_t numOfGroupCols; // num of group by columns
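
The new TSDB_COL_NULL bit lets a NULL filter ride along in the same flag byte as the column kind, which is why the kind-check macros above mask it out first. A minimal standalone sketch of how the flag is read (macro definitions copied from the hunk above; the test values are illustrative):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Flag bits as defined in taosmsg.h above. */
#define TSDB_COL_NORMAL 0x0u
#define TSDB_COL_TAG    0x1u
#define TSDB_COL_UDC    0x2u
#define TSDB_COL_NULL   0x4u  /* set when the column filters on NULL */

/* The kind checks mask out TSDB_COL_NULL first, so a NULL-filtered
 * tag column is still recognized as a tag column. */
#define TSDB_COL_IS_TAG(f)        (((f&(~(TSDB_COL_NULL)))&TSDB_COL_TAG) != 0)
#define TSDB_COL_IS_NORMAL_COL(f) ((f&(~(TSDB_COL_NULL))) == TSDB_COL_NORMAL)
#define TSDB_COL_REQ_NULL(f)      (((f)&TSDB_COL_NULL) != 0)

int main(void) {
  uint16_t flag = TSDB_COL_TAG | TSDB_COL_NULL;  /* tag column with a NULL filter */
  assert(TSDB_COL_IS_TAG(flag));
  assert(!TSDB_COL_IS_NORMAL_COL(flag));
  assert(TSDB_COL_REQ_NULL(flag));
  printf("tag=%d null=%d\n", TSDB_COL_IS_TAG(flag), TSDB_COL_REQ_NULL(flag));
  return 0;
}
```

Because the kind checks must now ignore the extra bit, every direct `flag == TSDB_COL_*` comparison in the query engine has to go through these macros, which is what the qExecutor.c and qFill.c hunks further below do.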
diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c
index e553853658..f66ef6b7a3 100644
--- a/src/mnode/src/mnodeShow.c
+++ b/src/mnode/src/mnodeShow.c
@@ -313,6 +313,7 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
code = TSDB_CODE_MND_DB_IN_DROPPING;
+ mnodeDecDbRef(pDb);
goto connect_over;
}
mnodeDecDbRef(pDb);
diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c
index 794958f7f0..03b1399ea7 100644
--- a/src/mnode/src/mnodeTable.c
+++ b/src/mnode/src/mnodeTable.c
@@ -294,6 +294,7 @@ static int32_t mnodeChildTableActionRestored() {
SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb};
sdbDeleteRow(&desc);
mnodeDecTableRef(pTable);
+ mnodeDecDbRef(pDb);
continue;
}
mnodeDecDbRef(pDb);
@@ -1259,6 +1260,7 @@ static int32_t mnodeGetShowSuperTableMeta(STableMetaMsg *pMeta, SShowObj *pShow,
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
+ mnodeDecDbRef(pDb);
return TSDB_CODE_MND_DB_IN_DROPPING;
}
@@ -1323,6 +1325,7 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows,
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
+ mnodeDecDbRef(pDb);
return 0;
}
@@ -2495,6 +2498,7 @@ static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
+ mnodeDecDbRef(pDb);
return TSDB_CODE_MND_DB_IN_DROPPING;
}
@@ -2548,6 +2552,7 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
+ mnodeDecDbRef(pDb);
return 0;
}
@@ -2716,6 +2721,7 @@ static int32_t mnodeGetStreamTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, vo
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
+ mnodeDecDbRef(pDb);
return TSDB_CODE_MND_DB_IN_DROPPING;
}
@@ -2768,6 +2774,7 @@ static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t ro
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
+ mnodeDecDbRef(pDb);
return 0;
}
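
Each of the mnodeShow.c/mnodeTable.c hunks above fixes the same leak: the db object's reference count is taken when the object is looked up, but the early-return error path skipped the matching mnodeDecDbRef. A generic sketch of the bug and the fix (Db, acquireDb, releaseDb are illustrative names, not TDengine APIs):

```c
#include <stdio.h>

typedef struct { int refCount; int ready; } Db;

static Db  *acquireDb(Db *db) { db->refCount++; return db; }
static void releaseDb(Db *db) { db->refCount--; }

/* Before the fix: the early return leaks one reference. */
static int processLeaky(Db *db) {
  acquireDb(db);
  if (!db->ready) {
    return -1;            /* BUG: refCount never decremented */
  }
  releaseDb(db);
  return 0;
}

/* After the fix: every exit path pairs the acquire with a release,
 * mirroring the mnodeDecDbRef(pDb) calls added above. */
static int processFixed(Db *db) {
  acquireDb(db);
  if (!db->ready) {
    releaseDb(db);
    return -1;
  }
  releaseDb(db);
  return 0;
}

int main(void) {
  Db db = { .refCount = 0, .ready = 0 };
  processLeaky(&db);
  printf("leaky refCount=%d\n", db.refCount);   /* 1: leaked */
  db.refCount = 0;
  processFixed(&db);
  printf("fixed refCount=%d\n", db.refCount);   /* 0: balanced */
  return 0;
}
```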
diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c
index ff253c1935..aa6631ff83 100644
--- a/src/mnode/src/mnodeVgroup.c
+++ b/src/mnode/src/mnodeVgroup.c
@@ -89,6 +89,7 @@ static int32_t mnodeVgroupActionInsert(SSdbOper *pOper) {
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("vgId:%d, db:%s status:%d, in dropping", pVgroup->vgId, pDb->name, pDb->status);
+ mnodeDecDbRef(pDb);
return TSDB_CODE_MND_DB_IN_DROPPING;
}
@@ -617,6 +618,7 @@ static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
+ mnodeDecDbRef(pDb);
return TSDB_CODE_MND_DB_IN_DROPPING;
}
@@ -708,6 +710,7 @@ static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, v
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
+ mnodeDecDbRef(pDb);
return 0;
}
@@ -784,7 +787,10 @@ void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable) {
if (pTable->sid >= 1) {
taosIdPoolMarkStatus(pVgroup->idPool, pTable->sid);
pVgroup->numOfTables++;
- mnodeIncVgroupRef(pVgroup);
+ // The create-vgroup message may arrive after the create-table message, so the
+ // sdb write order is uncertain; incrementing here would leave the vgroup
+ // reference count wrong after a restart.
+ // mnodeIncVgroupRef(pVgroup);
}
}
@@ -792,7 +798,10 @@ void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable) {
if (pTable->sid >= 1) {
taosFreeId(pVgroup->idPool, pTable->sid);
pVgroup->numOfTables--;
- mnodeDecVgroupRef(pVgroup);
+ // Same ordering issue as in mnodeAddTableIntoVgroup: the create-vgroup message
+ // may arrive after the create-table message, so the sdb write order is uncertain
+ // and decrementing here would leave the vgroup reference count wrong after a restart.
+ // mnodeDecVgroupRef(pVgroup);
}
}
diff --git a/src/os/inc/osTime.h b/src/os/inc/osTime.h
index cd2553f753..97432ca241 100644
--- a/src/os/inc/osTime.h
+++ b/src/os/inc/osTime.h
@@ -64,6 +64,7 @@ static FORCE_INLINE int64_t taosGetTimestamp(int32_t precision) {
}
int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts);
+int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit);
int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth);
void deltaToUtcInitOnce();
diff --git a/src/os/src/detail/osTime.c b/src/os/src/detail/osTime.c
index 57634e468a..9d8328a71b 100644
--- a/src/os/src/detail/osTime.c
+++ b/src/os/src/detail/osTime.c
@@ -319,6 +319,8 @@ int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec) {
*time = factor * seconds + fraction;
return 0;
}
+
+
static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* result) {
*result = val;
@@ -384,6 +386,23 @@ int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts) {
return getTimestampInUsFromStrImpl(timestamp, token[tokenlen - 1], ts);
}
+int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit) {
+ errno = 0;
+
+ /* get the basic numeric value */
+ *duration = strtoll(token, NULL, 10);
+ if (errno != 0) {
+ return -1;
+ }
+
+ *unit = token[tokenLen - 1];
+ if (*unit == 'n' || *unit == 'y') {
+ return 0;
+ }
+
+ return getTimestampInUsFromStrImpl(*duration, *unit, duration);
+}
+
// internal function, when program is paused in debugger,
// one can call this function from debugger to print a
// timestamp as human readable string, for example (gdb):
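
parseDuration keeps 'n' (months) and 'y' (years) as raw counts, because those units have no fixed length, and converts everything else to microseconds via getTimestampInUsFromStrImpl. A self-contained sketch of the same contract, with the unit conversion reduced to a small switch for illustration (toMicroseconds is a stand-in, not the real helper):

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for getTimestampInUsFromStrImpl: convert a value
 * with a fixed-length unit to microseconds. 'n' and 'y' never reach it. */
static int32_t toMicroseconds(int64_t val, char unit, int64_t *result) {
  switch (unit) {
    case 's': *result = val * 1000000LL;         return 0;
    case 'm': *result = val * 60 * 1000000LL;    return 0;
    case 'h': *result = val * 3600 * 1000000LL;  return 0;
    case 'd': *result = val * 86400 * 1000000LL; return 0;
    default:  return -1;
  }
}

static int32_t parseDurationSketch(const char *token, int32_t tokenLen,
                                   int64_t *duration, char *unit) {
  errno = 0;
  *duration = strtoll(token, NULL, 10);  /* basic numeric value */
  if (errno != 0) return -1;

  *unit = token[tokenLen - 1];
  if (*unit == 'n' || *unit == 'y') {
    return 0;  /* variable-length units stay as raw counts */
  }
  return toMicroseconds(*duration, *unit, duration);
}

int main(void) {
  int64_t d; char u;
  parseDurationSketch("10m", 3, &d, &u);
  printf("10m -> %lld us, unit=%c\n", (long long)d, u);
  parseDurationSketch("3n", 2, &d, &u);
  printf("3n  -> %lld months, unit=%c\n", (long long)d, u);
  return 0;
}
```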
diff --git a/src/plugins/http/inc/httpQueue.h b/src/plugins/http/inc/httpQueue.h
new file mode 100644
index 0000000000..a4590719ff
--- /dev/null
+++ b/src/plugins/http/inc/httpQueue.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TDENGINE_HTTP_QUEUE_H
+#define TDENGINE_HTTP_QUEUE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdbool.h>
+
+bool httpInitResultQueue();
+void httpCleanupResultQueue();
+void httpDispatchToResultQueue(void *param, void *result, int numOfRows, void (*fp)(void *param, void *result, int numOfRows));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/plugins/http/src/httpQueue.c b/src/plugins/http/src/httpQueue.c
new file mode 100644
index 0000000000..9625102f74
--- /dev/null
+++ b/src/plugins/http/src/httpQueue.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define _DEFAULT_SOURCE
+#include "os.h"
+#include "tqueue.h"
+#include "tnote.h"
+#include "taos.h"
+#include "tsclient.h"
+#include "httpInt.h"
+#include "httpContext.h"
+#include "httpSql.h"
+#include "httpResp.h"
+#include "httpAuth.h"
+#include "httpSession.h"
+
+typedef struct {
+ pthread_t thread;
+ int32_t workerId;
+} SHttpWorker;
+
+typedef struct {
+ int32_t num;
+ SHttpWorker *httpWorker;
+} SHttpWorkerPool;
+
+typedef struct {
+ void *param;
+ void *result;
+ int numOfRows;
+ void (*fp)(void *param, void *result, int numOfRows);
+} SHttpResult;
+
+static SHttpWorkerPool tsHttpPool;
+static taos_qset tsHttpQset;
+static taos_queue tsHttpQueue;
+
+void httpDispatchToResultQueue(void *param, TAOS_RES *result, int numOfRows, void (*fp)(void *param, void *result, int numOfRows)) {
+ if (tsHttpQueue != NULL) {
+ SHttpResult *pMsg = (SHttpResult *)taosAllocateQitem(sizeof(SHttpResult));
+ pMsg->param = param;
+ pMsg->result = result;
+ pMsg->numOfRows = numOfRows;
+ pMsg->fp = fp;
+ taosWriteQitem(tsHttpQueue, TAOS_QTYPE_RPC, pMsg);
+ } else {
+ (*fp)(param, result, numOfRows);
+ }
+}
+
+static void *httpProcessResultQueue(void *param) {
+ SHttpResult *pMsg;
+ int32_t type;
+ void *unUsed;
+
+ while (1) {
+ if (taosReadQitemFromQset(tsHttpQset, &type, (void **)&pMsg, &unUsed) == 0) {
+ httpDebug("httpResultQueue: got no message from qset, exiting...");
+ break;
+ }
+
+ httpDebug("context:%p, res:%p will be processed in result queue", pMsg->param, pMsg->result);
+ (*pMsg->fp)(pMsg->param, pMsg->result, pMsg->numOfRows);
+ taosFreeQitem(pMsg);
+ }
+
+ return NULL;
+}
+
+static bool httpAllocateResultQueue() {
+ tsHttpQueue = taosOpenQueue();
+ if (tsHttpQueue == NULL) return false;
+
+ taosAddIntoQset(tsHttpQset, tsHttpQueue, NULL);
+
+ for (int32_t i = 0; i < tsHttpPool.num; ++i) {
+ SHttpWorker *pWorker = tsHttpPool.httpWorker + i;
+ pWorker->workerId = i;
+
+ pthread_attr_t thAttr;
+ pthread_attr_init(&thAttr);
+ pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE);
+
+ if (pthread_create(&pWorker->thread, &thAttr, httpProcessResultQueue, pWorker) != 0) {
+ httpError("failed to create thread to process http result queue, reason:%s", strerror(errno));
+ }
+
+ pthread_attr_destroy(&thAttr);
+ httpDebug("http result worker:%d is launched, total:%d", pWorker->workerId, tsHttpPool.num);
+ }
+
+ httpInfo("http result queue is opened");
+ return true;
+}
+
+static void httpFreeResultQueue() {
+ taosCloseQueue(tsHttpQueue);
+ tsHttpQueue = NULL;
+}
+
+bool httpInitResultQueue() {
+ tsHttpQset = taosOpenQset();
+
+ tsHttpPool.num = tsHttpMaxThreads;
+ tsHttpPool.httpWorker = (SHttpWorker *)calloc(tsHttpPool.num, sizeof(SHttpWorker));
+
+ if (tsHttpPool.httpWorker == NULL) return false;
+ for (int32_t i = 0; i < tsHttpPool.num; ++i) {
+ SHttpWorker *pWorker = tsHttpPool.httpWorker + i;
+ pWorker->workerId = i;
+ }
+
+ return httpAllocateResultQueue();
+}
+
+void httpCleanupResultQueue() {
+ httpFreeResultQueue();
+
+ for (int32_t i = 0; i < tsHttpPool.num; ++i) {
+ SHttpWorker *pWorker = tsHttpPool.httpWorker + i;
+ if (pWorker->thread) {
+ taosQsetThreadResume(tsHttpQset);
+ }
+ }
+
+ for (int32_t i = 0; i < tsHttpPool.num; ++i) {
+ SHttpWorker *pWorker = tsHttpPool.httpWorker + i;
+ if (pWorker->thread) {
+ pthread_join(pWorker->thread, NULL);
+ }
+ }
+
+ taosCloseQset(tsHttpQset);
+ free(tsHttpPool.httpWorker);
+
+ httpInfo("http result queue is closed");
+}
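
httpDispatchToResultQueue either enqueues the result for a worker thread or, when the queue is absent, invokes the callback inline, so result processing no longer blocks the thread that received the result. A minimal pthread sketch of that dispatch-or-call-direct pattern (the single-slot mailbox is a toy stand-in for the taos_queue/taos_qset API):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef void (*result_fp)(void *param, void *result, int numOfRows);

/* Toy single-slot mailbox standing in for taos_queue/taos_qset. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static struct {
  void *param; void *result; int rows; result_fp fp; bool full;
} slot;

/* Enqueue if the queue exists, otherwise run the callback inline --
 * the same two paths httpDispatchToResultQueue takes above. */
static void dispatch(bool queueEnabled, void *param, void *result,
                     int rows, result_fp fp) {
  if (!queueEnabled) { fp(param, result, rows); return; }
  pthread_mutex_lock(&lock);
  slot.param = param; slot.result = result; slot.rows = rows; slot.fp = fp;
  slot.full = true;
  pthread_cond_signal(&cond);
  pthread_mutex_unlock(&lock);
}

/* Worker thread: processes one queued result, as httpProcessResultQueue
 * does in its loop. */
static void *worker(void *arg) {
  (void)arg;
  pthread_mutex_lock(&lock);
  while (!slot.full) pthread_cond_wait(&cond, &lock);
  slot.full = false;
  slot.fp(slot.param, slot.result, slot.rows);  /* off the I/O thread */
  pthread_mutex_unlock(&lock);
  return NULL;
}

static void printRows(void *param, void *result, int rows) {
  (void)param; (void)result;
  printf("processed %d rows\n", rows);
}

int main(void) {
  pthread_t t;
  pthread_create(&t, NULL, worker, NULL);
  dispatch(true, NULL, NULL, 42, printRows);   /* queued path */
  pthread_join(t, NULL);
  dispatch(false, NULL, NULL, 7, printRows);   /* inline fallback path */
  return 0;
}
```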
diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c
index 041fbdb92a..07cdea1380 100644
--- a/src/plugins/http/src/httpSql.c
+++ b/src/plugins/http/src/httpSql.c
@@ -24,12 +24,15 @@
#include "httpResp.h"
#include "httpAuth.h"
#include "httpSession.h"
+#include "httpQueue.h"
void *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int),
void *param, void **taos);
void httpProcessMultiSql(HttpContext *pContext);
-void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows) {
+void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows);
+
+void httpProcessMultiSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int numOfRows) {
HttpContext *pContext = (HttpContext *)param;
if (pContext == NULL) return;
@@ -75,7 +78,11 @@ void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numO
}
}
-void httpProcessMultiSqlCallBack(void *param, TAOS_RES *result, int code) {
+void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows) {
+ httpDispatchToResultQueue(param, result, numOfRows, httpProcessMultiSqlRetrieveCallBackImp);
+}
+
+void httpProcessMultiSqlCallBackImp(void *param, TAOS_RES *result, int code) {
HttpContext *pContext = (HttpContext *)param;
if (pContext == NULL) return;
@@ -154,6 +161,10 @@ void httpProcessMultiSqlCallBack(void *param, TAOS_RES *result, int code) {
}
}
+void httpProcessMultiSqlCallBack(void *param, TAOS_RES *result, int unUsedCode) {
+ httpDispatchToResultQueue(param, result, unUsedCode, httpProcessMultiSqlCallBackImp);
+}
+
void httpProcessMultiSql(HttpContext *pContext) {
HttpSqlCmds * multiCmds = pContext->multiCmds;
HttpEncodeMethod *encode = pContext->encodeMethod;
@@ -196,7 +207,9 @@ void httpProcessMultiSqlCmd(HttpContext *pContext) {
httpProcessMultiSql(pContext);
}
-void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows) {
+void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows);
+
+void httpProcessSingleSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int numOfRows) {
HttpContext *pContext = (HttpContext *)param;
if (pContext == NULL) return;
@@ -243,7 +256,11 @@ void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int num
}
}
-void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int unUsedCode) {
+void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows) {
+ httpDispatchToResultQueue(param, result, numOfRows, httpProcessSingleSqlRetrieveCallBackImp);
+}
+
+void httpProcessSingleSqlCallBackImp(void *param, TAOS_RES *result, int unUsedCode) {
HttpContext *pContext = (HttpContext *)param;
if (pContext == NULL) return;
@@ -306,6 +323,10 @@ void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int unUsedCode)
}
}
+void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int unUsedCode) {
+ httpDispatchToResultQueue(param, result, unUsedCode, httpProcessSingleSqlCallBackImp);
+}
+
void httpProcessSingleSqlCmd(HttpContext *pContext) {
HttpSqlCmd * cmd = &pContext->singleCmd;
char * sql = cmd->nativSql;
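
The pattern in this file is mechanical: each original callback keeps its name and signature but becomes a thin wrapper, while its old body moves into a *Imp function that the result-queue worker invokes. A schematic of the rename-and-wrap step (fooCallBack is an illustrative name; the inline dispatcher stands in for httpDispatchToResultQueue):

```c
#include <stdio.h>

typedef void (*callback_fp)(void *param, void *result, int code);

/* Stand-in for httpDispatchToResultQueue; here it just calls inline. */
static void dispatchToResultQueue(void *param, void *result, int code,
                                  callback_fp fp) {
  fp(param, result, code);
}

/* Step 1: the original callback body becomes fooCallBackImp. */
static void fooCallBackImp(void *param, void *result, int code) {
  (void)param; (void)result;
  printf("handling result, code=%d\n", code);
}

/* Step 2: the original name becomes a thin wrapper that enqueues the
 * Imp function, so existing taos_query_a callers need no change. */
static void fooCallBack(void *param, void *result, int code) {
  dispatchToResultQueue(param, result, code, fooCallBackImp);
}

int main(void) {
  fooCallBack(NULL, NULL, 0);
  return 0;
}
```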
diff --git a/src/plugins/http/src/httpSystem.c b/src/plugins/http/src/httpSystem.c
index 38bd8624b2..e51c8dd4f7 100644
--- a/src/plugins/http/src/httpSystem.c
+++ b/src/plugins/http/src/httpSystem.c
@@ -26,6 +26,7 @@
#include "httpServer.h"
#include "httpResp.h"
#include "httpHandle.h"
+#include "httpQueue.h"
#include "gcHandle.h"
#include "restHandle.h"
#include "tgHandle.h"
@@ -67,6 +68,11 @@ int httpStartSystem() {
return -1;
}
+ if (!httpInitResultQueue()) {
+ httpError("http init result queue failed");
+ return -1;
+ }
+
if (!httpInitContexts()) {
httpError("http init contexts failed");
return -1;
@@ -98,6 +104,8 @@ void httpCleanUpSystem() {
httpCleanUpConnect();
httpCleanupContexts();
httpCleanUpSessions();
+ httpCleanupResultQueue();
+
pthread_mutex_destroy(&tsHttpServer.serverMutex);
taosTFree(tsHttpServer.pThreads);
tsHttpServer.pThreads = NULL;
diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h
index 7093495763..25fb04fb9a 100644
--- a/src/query/inc/qExecutor.h
+++ b/src/query/inc/qExecutor.h
@@ -132,11 +132,12 @@ typedef struct SQueryCostInfo {
typedef struct SQuery {
int16_t numOfCols;
int16_t numOfTags;
+ char intervalTimeUnit; // time unit of the aggregation interval
+ char slidingTimeUnit; // time unit of the sliding window, used to adjust day-based windows
SOrderVal order;
STimeWindow window;
int64_t intervalTime;
int64_t slidingTime; // sliding time for sliding window query
- char slidingTimeUnit; // interval data type, used for daytime revise
int16_t precision;
int16_t numOfOutput;
int16_t fillType;
diff --git a/src/query/inc/qPercentile.h b/src/query/inc/qPercentile.h
index 52f666c338..0a52d4f205 100644
--- a/src/query/inc/qPercentile.h
+++ b/src/query/inc/qPercentile.h
@@ -17,6 +17,8 @@
#define TDENGINE_QPERCENTILE_H
#include "qExtbuffer.h"
+#include "qResultbuf.h"
+#include "qTsbuf.h"
typedef struct MinMaxEntry {
union {
@@ -31,47 +33,43 @@ typedef struct MinMaxEntry {
};
} MinMaxEntry;
-typedef struct tMemBucketSegment {
- int32_t numOfSlots;
- MinMaxEntry * pBoundingEntries;
- tExtMemBuffer **pBuffer;
-} tMemBucketSegment;
+typedef struct {
+ int32_t size;
+ int32_t pageId;
+ tFilePage *data;
+} SSlotInfo;
+
+typedef struct tMemBucketSlot {
+ SSlotInfo info;
+ MinMaxEntry range;
+} tMemBucketSlot;
+
+struct tMemBucket;
+typedef int32_t (*__perc_hash_func_t)(struct tMemBucket *pBucket, const void *value);
typedef struct tMemBucket {
- int16_t numOfSegs;
- int16_t nTotalSlots;
- int16_t nSlotsOfSeg;
- int16_t dataType;
-
- int16_t nElemSize;
- int32_t numOfElems;
-
- int32_t nTotalBufferSize;
- int32_t maxElemsCapacity;
-
- int32_t pageSize;
- int16_t numOfTotalPages;
- int16_t numOfAvailPages; /* remain available buffer pages */
-
- tMemBucketSegment *pSegs;
- tOrderDescriptor * pOrderDesc;
-
- MinMaxEntry nRange;
-
- void (*HashFunc)(struct tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx);
+ int16_t numOfSlots;
+ int16_t type;
+ int16_t bytes;
+ int32_t total;
+ int32_t elemPerPage; // number of elements that fit in one buffer page
+ int32_t maxCapacity; // maximum number of elements that can be sorted in memory to produce the result
+ int32_t bufPageSize; // disk page size
+ MinMaxEntry range; // value range
+ int32_t times; // number of range-narrowing passes taken to locate the correct value bucket
+ __compar_fn_t comparFn;
+
+ tMemBucketSlot *pSlots;
+ SDiskbasedResultBuf *pBuffer;
+ __perc_hash_func_t hashFunc;
} tMemBucket;
-tMemBucket *tMemBucketCreate(int32_t totalSlots, int32_t nBufferSize, int16_t nElemSize, int16_t dataType,
- tOrderDescriptor *pDesc);
+tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType);
void tMemBucketDestroy(tMemBucket *pBucket);
-void tMemBucketPut(tMemBucket *pBucket, void *data, int32_t numOfRows);
+void tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size);
double getPercentile(tMemBucket *pMemBucket, double percent);
-void tBucketIntHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx);
-
-void tBucketDoubleHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx);
-
#endif // TDENGINE_QPERCENTILE_H
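
The rewritten tMemBucket drops the segment/slot tree for a flat slot array backed by SDiskbasedResultBuf, with `times` counting how many range-narrowing passes have run. The underlying idea, sketched in memory without the disk buffer: partition the current value range into equal-width slots, count values per slot, and descend into the slot containing the k-th element with a refined range (a minimal sketch, not the TDengine API):

```c
#include <stdio.h>

#define NUM_SLOTS 16

/* One narrowing pass: count how many values fall into each equal-width
 * slot of [lo, hi), then locate the slot holding the k-th smallest value. */
static int locateSlot(const double *data, int n, double lo, double hi,
                      int k, int *countBefore) {
  int counts[NUM_SLOTS] = {0};
  double width = (hi - lo) / NUM_SLOTS;
  for (int i = 0; i < n; ++i) {
    int s = (int)((data[i] - lo) / width);
    if (s >= NUM_SLOTS) s = NUM_SLOTS - 1;
    counts[s]++;
  }
  int acc = 0;
  for (int s = 0; s < NUM_SLOTS; ++s) {
    if (acc + counts[s] > k) { *countBefore = acc; return s; }
    acc += counts[s];
  }
  return NUM_SLOTS - 1;
}

int main(void) {
  double data[] = {5, 1, 9, 3, 7, 2, 8, 4, 6, 0};
  int n = 10, k = 5;                  /* the 6th-smallest value */
  int before = 0;
  int slot = locateSlot(data, n, 0.0, 10.0, k, &before);
  printf("k=%d falls in slot %d (range [%.2f, %.2f)), %d values below it\n",
         k, slot, slot * (10.0 / NUM_SLOTS), (slot + 1) * (10.0 / NUM_SLOTS), before);
  /* A real implementation would now re-bucket only that slot's values
   * with the refined range (incrementing 'times') until the slot's
   * contents fit in memory and can be sorted directly. */
  return 0;
}
```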
diff --git a/src/query/inc/tsqlfunction.h b/src/query/inc/tsqlfunction.h
index 65ab82883b..c314087179 100644
--- a/src/query/inc/tsqlfunction.h
+++ b/src/query/inc/tsqlfunction.h
@@ -69,6 +69,15 @@ extern "C" {
#define TSDB_FUNC_AVG_IRATE 33
#define TSDB_FUNC_TID_TAG 34
+#define TSDB_FUNC_HISTOGRAM 35
+#define TSDB_FUNC_HLL 36
+#define TSDB_FUNC_MODE 37
+#define TSDB_FUNC_SAMPLE 38
+#define TSDB_FUNC_CEIL 39
+#define TSDB_FUNC_FLOOR 40
+#define TSDB_FUNC_ROUND 41
+#define TSDB_FUNC_MAVG 42
+#define TSDB_FUNC_CSUM 43
#define TSDB_FUNCSTATE_SO 0x1u // single output
#define TSDB_FUNCSTATE_MO 0x2u // dynamic number of output, not multinumber of output e.g., TOP/BOTTOM
@@ -168,6 +177,7 @@ typedef struct SQLFunctionCtx {
int16_t outputType;
int16_t outputBytes; // size of results, determined by function and input column data type
bool hasNull; // null value exist in current block
+ bool requireNull; // require null in some function
int16_t functionId; // function id
void * aInputElemBuf;
char * aOutputBuf; // final result output buffer, point to sdata->data
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 4e2e31d269..d48d7d5ea1 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -35,10 +35,6 @@
* forced to load primary column explicitly.
*/
#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0)
-#define TSDB_COL_IS_TAG(f) (((f)&TSDB_COL_TAG) != 0)
-#define TSDB_COL_IS_NORMAL_COL(f) ((f) == TSDB_COL_NORMAL)
-#define TSDB_COL_IS_UD_COL(f) ((f) == TSDB_COL_UDC)
-
#define QUERY_IS_ASC_QUERY(q) (GET_FORWARD_DIRECTION_FACTOR((q)->order.order) == QUERY_ASC_FORWARD_STEP)
#define IS_MASTER_SCAN(runtime) ((runtime)->scanFlag == MASTER_SCAN)
@@ -137,13 +133,44 @@ static void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv);
#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->intervalTime > 0)
-// previous time window may not be of the same size of pQuery->intervalTime
-#define GET_NEXT_TIMEWINDOW(_q, tw) \
- do { \
- int32_t factor = GET_FORWARD_DIRECTION_FACTOR((_q)->order.order); \
- (tw)->skey += ((_q)->slidingTime * factor); \
- (tw)->ekey = (tw)->skey + ((_q)->intervalTime - 1); \
- } while (0)
+static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) {
+ int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
+ if (pQuery->intervalTimeUnit != 'n' && pQuery->intervalTimeUnit != 'y') {
+ tw->skey += pQuery->slidingTime * factor;
+ tw->ekey = tw->skey + pQuery->intervalTime - 1;
+ return;
+ }
+
+ int64_t key = tw->skey / 1000, interval = pQuery->intervalTime;
+ if (pQuery->precision == TSDB_TIME_PRECISION_MICRO) {
+ key /= 1000;
+ }
+ if (pQuery->intervalTimeUnit == 'y') {
+ interval *= 12;
+ }
+
+ struct tm tm;
+ time_t t = (time_t)key;
+ localtime_r(&t, &tm);
+
+ int mon = (int)(tm.tm_year * 12 + tm.tm_mon + interval * factor);
+ tm.tm_year = mon / 12;
+ tm.tm_mon = mon % 12;
+ tw->skey = mktime(&tm) * 1000L;
+
+ mon = (int)(mon + interval);
+ tm.tm_year = mon / 12;
+ tm.tm_mon = mon % 12;
+ tw->ekey = mktime(&tm) * 1000L;
+
+ if (pQuery->precision == TSDB_TIME_PRECISION_MICRO) {
+ tw->skey *= 1000L;
+ tw->ekey *= 1000L;
+ }
+ tw->ekey -= 1;
+}
+
+#define GET_NEXT_TIMEWINDOW(_q, tw) getNextTimeWindow((_q), (tw))
#define SET_STABLE_QUERY_OVER(_q) ((_q)->tableIndex = (int32_t)((_q)->tableqinfoGroupInfo.numOfTables))
#define IS_STASBLE_QUERY_OVER(_q) ((_q)->tableIndex >= (int32_t)((_q)->tableqinfoGroupInfo.numOfTables))
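
getNextTimeWindow needs the calendar branch because a month or year interval has no fixed millisecond width. A standalone sketch of the same localtime_r/mktime month stepping used in the hunk above (millisecond precision, local time):

```c
#include <stdio.h>
#include <time.h>

/* Advance a millisecond timestamp by a whole number of calendar months,
 * mirroring the localtime_r/mktime round trip in getNextTimeWindow. */
static long long addMonths(long long msec, int months) {
  time_t t = (time_t)(msec / 1000);
  struct tm tmv;
  localtime_r(&t, &tmv);

  int mon = tmv.tm_year * 12 + tmv.tm_mon + months;
  tmv.tm_year = mon / 12;
  tmv.tm_mon  = mon % 12;
  return (long long)mktime(&tmv) * 1000LL;
}

int main(void) {
  struct tm tmv = { .tm_year = 120, .tm_mon = 0, .tm_mday = 31, .tm_isdst = -1 };
  long long start = (long long)mktime(&tmv) * 1000LL;  /* 2020-01-31 local */

  /* mktime normalizes overflow: Jan 31 + 1 month becomes Feb 31, i.e.
   * Mar 2 in the leap year 2020 -- one reason fixed sliding arithmetic
   * cannot be reused for month/year intervals. */
  long long next = addMonths(start, 1);
  time_t nt = (time_t)(next / 1000);
  char buf[16];
  strftime(buf, sizeof(buf), "%Y-%m-%d", localtime(&nt));
  printf("one month after 2020-01-31 -> %s\n", buf);
  return 0;
}
```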
@@ -254,7 +281,7 @@ bool isGroupbyNormalCol(SSqlGroupbyExpr *pGroupbyExpr) {
for (int32_t i = 0; i < pGroupbyExpr->numOfGroupCols; ++i) {
SColIndex *pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, i);
- if (pColIndex->flag == TSDB_COL_NORMAL) {
+ if (TSDB_COL_IS_NORMAL_COL(pColIndex->flag)) {
//make sure the normal column locates at the second position if tbname exists in group by clause
if (pGroupbyExpr->numOfGroupCols > 1) {
assert(pColIndex->colIndex > 0);
@@ -275,7 +302,7 @@ int16_t getGroupbyColumnType(SQuery *pQuery, SSqlGroupbyExpr *pGroupbyExpr) {
for (int32_t i = 0; i < pGroupbyExpr->numOfGroupCols; ++i) {
SColIndex *pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, i);
- if (pColIndex->flag == TSDB_COL_NORMAL) {
+ if (TSDB_COL_IS_NORMAL_COL(pColIndex->flag)) {
colId = pColIndex->colId;
break;
}
@@ -467,9 +494,13 @@ static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWin
static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t ts, SQuery *pQuery) {
STimeWindow w = {0};
- if (pWindowResInfo->curIndex == -1) { // the first window, from the previous stored value
+ if (pWindowResInfo->curIndex == -1) { // the first window, from the previous stored value
w.skey = pWindowResInfo->prevSKey;
- w.ekey = w.skey + pQuery->intervalTime - 1;
+ if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
+ w.ekey = taosAddNatualInterval(w.skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
+ } else {
+ w.ekey = w.skey + pQuery->intervalTime - 1;
+ }
} else {
int32_t slot = curTimeWindowIndex(pWindowResInfo);
SWindowResult* pWindowRes = getWindowResult(pWindowResInfo, slot);
@@ -477,19 +508,24 @@ static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t t
}
if (w.skey > ts || w.ekey < ts) {
- int64_t st = w.skey;
+ if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
+ w.skey = taosGetIntervalStartTimestamp(ts, pQuery->slidingTime, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision);
+ w.ekey = taosAddNatualInterval(w.skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
+ } else {
+ int64_t st = w.skey;
- if (st > ts) {
- st -= ((st - ts + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
+ if (st > ts) {
+ st -= ((st - ts + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
+ }
+
+ int64_t et = st + pQuery->intervalTime - 1;
+ if (et < ts) {
+ st += ((ts - et + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
+ }
+
+ w.skey = st;
+ w.ekey = w.skey + pQuery->intervalTime - 1;
}
-
- int64_t et = st + pQuery->intervalTime - 1;
- if (et < ts) {
- st += ((ts - et + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
- }
-
- w.skey = st;
- w.ekey = w.skey + pQuery->intervalTime - 1;
}
/*
@@ -814,14 +850,22 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow
*/
if (QUERY_IS_ASC_QUERY(pQuery) && primaryKeys[startPos] > pNext->ekey) {
TSKEY next = primaryKeys[startPos];
-
- pNext->ekey += ((next - pNext->ekey + pQuery->slidingTime - 1)/pQuery->slidingTime) * pQuery->slidingTime;
- pNext->skey = pNext->ekey - pQuery->intervalTime + 1;
+ if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
+ pNext->skey = taosGetIntervalStartTimestamp(next, pQuery->slidingTime, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision);
+ pNext->ekey = taosAddNatualInterval(pNext->skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
+ } else {
+ pNext->ekey += ((next - pNext->ekey + pQuery->slidingTime - 1)/pQuery->slidingTime) * pQuery->slidingTime;
+ pNext->skey = pNext->ekey - pQuery->intervalTime + 1;
+ }
} else if ((!QUERY_IS_ASC_QUERY(pQuery)) && primaryKeys[startPos] < pNext->skey) {
TSKEY next = primaryKeys[startPos];
-
- pNext->skey -= ((pNext->skey - next + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
- pNext->ekey = pNext->skey + pQuery->intervalTime - 1;
+ if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
+ pNext->skey = taosGetIntervalStartTimestamp(next, pQuery->slidingTime, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision);
+ pNext->ekey = taosAddNatualInterval(pNext->skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
+ } else {
+ pNext->skey -= ((pNext->skey - next + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
+ pNext->ekey = pNext->skey + pQuery->intervalTime - 1;
+ }
}
return startPos;
@@ -1085,7 +1129,7 @@ static char *getGroupbyColumnData(SQuery *pQuery, int16_t *type, int16_t *bytes,
for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) {
SColIndex* pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, k);
- if (pColIndex->flag == TSDB_COL_TAG) {
+ if (TSDB_COL_IS_TAG(pColIndex->flag)) {
continue;
}
@@ -1555,6 +1599,13 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order
SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i];
SColIndex* pIndex = &pSqlFuncMsg->colInfo;
+ if (TSDB_COL_REQ_NULL(pIndex->flag)) {
+ pCtx->requireNull = true;
+ pIndex->flag &= ~(TSDB_COL_NULL);
+ } else {
+ pCtx->requireNull = false;
+ }
+
int32_t index = pSqlFuncMsg->colInfo.colIndex;
if (TSDB_COL_IS_TAG(pIndex->flag)) {
if (pIndex->colId == TSDB_TBNAME_COLUMN_INDEX) { // todo refactor
@@ -1574,6 +1625,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order
pCtx->inputType = pQuery->colList[index].type;
}
+
assert(isValidDataType(pCtx->inputType));
pCtx->ptsOutputBuf = NULL;
@@ -1783,7 +1835,7 @@ static bool onlyQueryTags(SQuery* pQuery) {
if (functionId != TSDB_FUNC_TAGPRJ &&
functionId != TSDB_FUNC_TID_TAG &&
(!(functionId == TSDB_FUNC_COUNT && pExprInfo->base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX)) &&
- (!(functionId == TSDB_FUNC_PRJ && pExprInfo->base.colInfo.flag == TSDB_COL_UDC))) {
+ (!(functionId == TSDB_FUNC_PRJ && TSDB_COL_IS_UD_COL(pExprInfo->base.colInfo.flag)))) {
return false;
}
}
@@ -1804,7 +1856,8 @@ void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int6
if (keyFirst > (INT64_MAX - pQuery->intervalTime)) {
assert(keyLast - keyFirst < pQuery->intervalTime);
win->ekey = INT64_MAX;
- return;
+ } else if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
+ win->ekey = taosAddNatualInterval(win->skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
} else {
win->ekey = win->skey + pQuery->intervalTime - 1;
}
@@ -1872,24 +1925,24 @@ static bool onlyFirstQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSD
static bool onlyLastQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST); }
// todo refactor, add iterator
-static void doExchangeTimeWindow(SQInfo* pQInfo) {
- size_t t = GET_NUM_OF_TABLEGROUP(pQInfo);
+static void doExchangeTimeWindow(SQInfo* pQInfo, STimeWindow* win) {
+ size_t t = taosArrayGetSize(pQInfo->tableGroupInfo.pGroupList);
for(int32_t i = 0; i < t; ++i) {
- SArray* p1 = GET_TABLEGROUP(pQInfo, i);
+ SArray* p1 = taosArrayGetP(pQInfo->tableGroupInfo.pGroupList, i);
- SArray* tableKeyGroup = taosArrayGetP(pQInfo->tableGroupInfo.pGroupList, i);
size_t len = taosArrayGetSize(p1);
for(int32_t j = 0; j < len; ++j) {
- STableQueryInfo* pTableQueryInfo = (STableQueryInfo*) taosArrayGetP(p1, j);
- SWAP(pTableQueryInfo->win.skey, pTableQueryInfo->win.ekey, TSKEY);
+ STableKeyInfo* pInfo = taosArrayGet(p1, j);
- STableKeyInfo* pInfo = taosArrayGet(tableKeyGroup, j);
- pInfo->lastKey = pTableQueryInfo->win.skey;
+ // update lastKey if it equals the old skey (stored in win->ekey after the swap)
+ if (pInfo->lastKey == win->ekey) {
+ pInfo->lastKey = win->skey;
+ }
}
}
}
-static void changeExecuteScanOrder(SQInfo *pQInfo, bool stableQuery) {
+static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bool stableQuery) {
SQuery* pQuery = pQInfo->runtimeEnv.pQuery;
// in case of point-interpolation query, use asc order scan
@@ -1898,25 +1951,36 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, bool stableQuery) {
// todo handle the case where order-irrelevant query types are mixed with order-critical ones
// descending order query for last_row query
- if (isFirstLastRowQuery(pQuery)) {
+ if (isFirstLastRowQuery(pQuery) && !QUERY_IS_ASC_QUERY(pQuery)) {
qDebug("QInfo:%p scan order changed for last_row query, old:%d, new:%d", GET_QINFO_ADDR(pQuery),
pQuery->order.order, TSDB_ORDER_ASC);
+ SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
pQuery->order.order = TSDB_ORDER_ASC;
- if (pQuery->window.skey > pQuery->window.ekey) {
- SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
- }
+ assert (pQuery->window.skey <= pQuery->window.ekey);
+
+ doExchangeTimeWindow(pQInfo, &pQuery->window);
return;
}
- if (isPointInterpoQuery(pQuery) && pQuery->intervalTime == 0) {
- if (!QUERY_IS_ASC_QUERY(pQuery)) {
- qDebug(msg, GET_QINFO_ADDR(pQuery), "interp", pQuery->order.order, TSDB_ORDER_ASC, pQuery->window.skey,
- pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
- SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
- }
+ if (isGroupbyNormalCol(pQuery->pGroupbyExpr) && !QUERY_IS_ASC_QUERY(pQuery)) {
+ pQuery->order.order = TSDB_ORDER_ASC;
+ SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
+ assert (pQuery->window.skey <= pQuery->window.ekey);
+
+ doExchangeTimeWindow(pQInfo, &pQuery->window);
+ return;
+ }
+
+ if (isPointInterpoQuery(pQuery) && (pQuery->intervalTime == 0) && !QUERY_IS_ASC_QUERY(pQuery)) {
+ qDebug(msg, GET_QINFO_ADDR(pQuery), "interp", pQuery->order.order, TSDB_ORDER_ASC, pQuery->window.skey,
+ pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
+ SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
pQuery->order.order = TSDB_ORDER_ASC;
+
+ assert (pQuery->window.skey <= pQuery->window.ekey);
+ doExchangeTimeWindow(pQInfo, &pQuery->window);
return;
}
@@ -1927,7 +1991,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, bool stableQuery) {
pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
- doExchangeTimeWindow(pQInfo);
+ doExchangeTimeWindow(pQInfo, &pQuery->window);
}
pQuery->order.order = TSDB_ORDER_ASC;
@@ -1937,7 +2001,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, bool stableQuery) {
pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
- doExchangeTimeWindow(pQInfo);
+ doExchangeTimeWindow(pQInfo, &pQuery->window);
}
pQuery->order.order = TSDB_ORDER_DESC;
@@ -1951,6 +2015,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, bool stableQuery) {
pQuery->window.skey, pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
+ doExchangeTimeWindow(pQInfo, &pQuery->window);
}
pQuery->order.order = TSDB_ORDER_ASC;
@@ -1960,6 +2025,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, bool stableQuery) {
pQuery->window.skey, pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
+ doExchangeTimeWindow(pQInfo, &pQuery->window);
}
pQuery->order.order = TSDB_ORDER_DESC;
@@ -2070,35 +2136,36 @@ static bool needToLoadDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SDataStatis *pDat
return false;
}
-#define PT_IN_WINDOW(_p, _w) ((_p) > (_w).skey && (_p) < (_w).ekey)
-
static bool overlapWithTimeWindow(SQuery* pQuery, SDataBlockInfo* pBlockInfo) {
STimeWindow w = {0};
TSKEY sk = MIN(pQuery->window.skey, pQuery->window.ekey);
TSKEY ek = MAX(pQuery->window.skey, pQuery->window.ekey);
-
if (QUERY_IS_ASC_QUERY(pQuery)) {
getAlignQueryTimeWindow(pQuery, pBlockInfo->window.skey, sk, ek, &w);
+ assert(w.ekey >= pBlockInfo->window.skey);
- if (PT_IN_WINDOW(w.ekey, pBlockInfo->window)) {
+ if (w.ekey < pBlockInfo->window.ekey) {
return true;
}
while(1) {
GET_NEXT_TIMEWINDOW(pQuery, &w);
- if (w.skey > pBlockInfo->window.skey) {
+ if (w.skey > pBlockInfo->window.ekey) {
break;
}
- if (PT_IN_WINDOW(w.skey, pBlockInfo->window) || PT_IN_WINDOW(w.ekey, pBlockInfo->window)) {
+ assert(w.ekey > pBlockInfo->window.ekey);
+ if (w.skey <= pBlockInfo->window.ekey && w.skey > pBlockInfo->window.skey) {
return true;
}
}
} else {
getAlignQueryTimeWindow(pQuery, pBlockInfo->window.ekey, sk, ek, &w);
- if (PT_IN_WINDOW(w.skey, pBlockInfo->window)) {
+ assert(w.skey <= pBlockInfo->window.ekey);
+
+ if (w.skey > pBlockInfo->window.skey) {
return true;
}
@@ -2108,7 +2175,8 @@ static bool overlapWithTimeWindow(SQuery* pQuery, SDataBlockInfo* pBlockInfo) {
break;
}
- if (PT_IN_WINDOW(w.skey, pBlockInfo->window) || PT_IN_WINDOW(w.ekey, pBlockInfo->window)) {
+ assert(w.skey < pBlockInfo->window.skey);
+ if (w.ekey < pBlockInfo->window.ekey && w.ekey >= pBlockInfo->window.skey) {
return true;
}
}
@@ -2852,11 +2920,11 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
STableQueryInfo *item = taosArrayGetP(pGroup, i);
SIDList list = getDataBufPagesIdList(pRuntimeEnv->pResultBuf, TSDB_TABLEID(item->pTable)->tid);
- pageList = list;
- tid = TSDB_TABLEID(item->pTable)->tid;
if (taosArrayGetSize(list) > 0 && item->windowResInfo.size > 0) {
pTableList[numOfTables++] = item;
+ tid = TSDB_TABLEID(item->pTable)->tid;
+ pageList = list;
}
}
@@ -4286,6 +4354,32 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) {
return true;
}
+static void freeTableQueryInfo(STableGroupInfo* pTableGroupInfo) {
+ if (pTableGroupInfo->pGroupList == NULL) {
+ assert(pTableGroupInfo->numOfTables == 0);
+ } else {
+ size_t numOfGroups = taosArrayGetSize(pTableGroupInfo->pGroupList);
+ for (int32_t i = 0; i < numOfGroups; ++i) {
+ SArray *p = taosArrayGetP(pTableGroupInfo->pGroupList, i);
+
+ size_t num = taosArrayGetSize(p);
+ for(int32_t j = 0; j < num; ++j) {
+ STableQueryInfo* item = taosArrayGetP(p, j);
+ destroyTableQueryInfo(item);
+ }
+
+ taosArrayDestroy(p);
+ }
+
+ taosArrayDestroy(pTableGroupInfo->pGroupList);
+ pTableGroupInfo->pGroupList = NULL;
+ pTableGroupInfo->numOfTables = 0;
+ }
+
+ taosHashCleanup(pTableGroupInfo->map);
+ pTableGroupInfo->map = NULL;
+}
+
static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
@@ -4321,20 +4415,22 @@ static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery)
terrno = TSDB_CODE_SUCCESS;
if (isFirstLastRowQuery(pQuery)) {
pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo);
+ if (pRuntimeEnv->pQueryHandle == NULL) { // no data in current stable, clear all
+ freeTableQueryInfo(&pQInfo->tableqinfoGroupInfo);
+ } else { // update the query time window
+ pQuery->window = cond.twindow;
- // update the query time window
- pQuery->window = cond.twindow;
+ size_t numOfGroups = GET_NUM_OF_TABLEGROUP(pQInfo);
+ for (int32_t i = 0; i < numOfGroups; ++i) {
+ SArray *group = GET_TABLEGROUP(pQInfo, i);
- size_t numOfGroups = GET_NUM_OF_TABLEGROUP(pQInfo);
- for(int32_t i = 0; i < numOfGroups; ++i) {
- SArray *group = GET_TABLEGROUP(pQInfo, i);
+ size_t t = taosArrayGetSize(group);
+ for (int32_t j = 0; j < t; ++j) {
+ STableQueryInfo *pCheckInfo = taosArrayGetP(group, j);
- size_t t = taosArrayGetSize(group);
- for (int32_t j = 0; j < t; ++j) {
- STableQueryInfo *pCheckInfo = taosArrayGetP(group, j);
-
- pCheckInfo->win = pQuery->window;
- pCheckInfo->lastKey = pCheckInfo->win.skey;
+ pCheckInfo->win = pQuery->window;
+ pCheckInfo->lastKey = pCheckInfo->win.skey;
+ }
}
}
} else if (isPointInterpoQuery(pQuery)) {
@@ -4383,15 +4479,17 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
setScanLimitationByResultBuffer(pQuery);
- // NOTE: pTableCheckInfo need to update the query time range and the lastKey info
- // TODO fixme
- changeExecuteScanOrder(pQInfo, false);
-
code = setupQueryHandle(tsdb, pQInfo, isSTableQuery);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
+ if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) {
+ qDebug("QInfo:%p no table qualified for tag filter, abort query", pQInfo);
+ setQueryStatus(pQuery, QUERY_COMPLETED);
+ return TSDB_CODE_SUCCESS;
+ }
+
pQInfo->tsdb = tsdb;
pQInfo->vgId = vgId;
@@ -5353,7 +5451,7 @@ static int32_t getColumnIndexInSource(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pE
j += 1;
}
- } else if (pExprMsg->colInfo.flag == TSDB_COL_UDC) { // user specified column data
+ } else if (TSDB_COL_IS_UD_COL(pExprMsg->colInfo.flag)) { // user specified column data
return TSDB_UD_COLUMN_INDEX;
} else {
while (j < pQueryMsg->numOfCols) {
@@ -5561,7 +5659,7 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
int16_t functionId = pExprMsg->functionId;
if (functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_TAG_DUMMY) {
- if (pExprMsg->colInfo.flag != TSDB_COL_TAG) { // ignore the column index check for arithmetic expression.
+ if (!TSDB_COL_IS_TAG(pExprMsg->colInfo.flag)) { // ignore the column index check for arithmetic expression.
code = TSDB_CODE_QRY_INVALID_MSG;
goto _cleanup;
}
@@ -5956,14 +6054,6 @@ static void doUpdateExprColumnIndex(SQuery *pQuery) {
}
}
-static int compareTableIdInfo(const void* a, const void* b) {
- const STableIdInfo* x = (const STableIdInfo*)a;
- const STableIdInfo* y = (const STableIdInfo*)b;
- if (x->uid > y->uid) return 1;
- if (x->uid < y->uid) return -1;
- return 0;
-}
-
static void freeQInfo(SQInfo *pQInfo);
static void calResultBufSize(SQuery* pQuery) {
@@ -5985,8 +6075,8 @@ static void calResultBufSize(SQuery* pQuery) {
}
}
-static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs,
- STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols) {
+static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs,
+ STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, bool stableQuery) {
int16_t numOfCols = pQueryMsg->numOfCols;
int16_t numOfOutput = pQueryMsg->numOfOutput;
@@ -6016,6 +6106,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
pQuery->pGroupbyExpr = pGroupbyExpr;
pQuery->intervalTime = pQueryMsg->intervalTime;
pQuery->slidingTime = pQueryMsg->slidingTime;
+ pQuery->intervalTimeUnit = pQueryMsg->intervalTimeUnit;
pQuery->slidingTimeUnit = pQueryMsg->slidingTimeUnit;
pQuery->fillType = pQueryMsg->fillType;
pQuery->numOfTags = pQueryMsg->numOfTags;
@@ -6084,8 +6175,6 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
}
int tableIndex = 0;
- STimeWindow window = pQueryMsg->window;
- taosArraySort(pTableIdList, compareTableIdInfo);
pQInfo->runtimeEnv.interBufSize = getOutputInterResultBufSize(pQuery);
pQInfo->pBuf = calloc(pTableGroupInfo->numOfTables, sizeof(STableQueryInfo));
@@ -6093,10 +6182,21 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
goto _cleanup;
}
+ // NOTE: pTableCheckInfo needs to update the query time range and the lastKey info
+ pQInfo->arrTableIdInfo = taosArrayInit(tableIndex, sizeof(STableIdInfo));
+ pQInfo->dataReady = QUERY_RESULT_NOT_READY;
+ pthread_mutex_init(&pQInfo->lock, NULL);
+
+ pQuery->pos = -1;
+ pQuery->window = pQueryMsg->window;
+ changeExecuteScanOrder(pQInfo, pQueryMsg, stableQuery);
+
+ STimeWindow window = pQuery->window;
+
int32_t index = 0;
for(int32_t i = 0; i < numOfGroups; ++i) {
- SArray* pa = taosArrayGetP(pTableGroupInfo->pGroupList, i);
+ SArray* pa = taosArrayGetP(pQInfo->tableGroupInfo.pGroupList, i);
size_t s = taosArrayGetSize(pa);
SArray* p1 = taosArrayInit(s, POINTER_BYTES);
@@ -6109,12 +6209,9 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
for(int32_t j = 0; j < s; ++j) {
STableKeyInfo* info = taosArrayGet(pa, j);
- STableId* id = TSDB_TABLEID(info->pTable);
- STableIdInfo* pTableId = taosArraySearch(pTableIdList, id, compareTableIdInfo);
-
- window.skey = (pTableId != NULL)? pTableId->key:pQueryMsg->window.skey;
void* buf = (char*)pQInfo->pBuf + index * sizeof(STableQueryInfo);
+ window.skey = info->lastKey;
STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, info->pTable, window, buf);
if (item == NULL) {
goto _cleanup;
@@ -6122,17 +6219,13 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
item->groupIndex = i;
taosArrayPush(p1, &item);
+
+ STableId* id = TSDB_TABLEID(info->pTable);
taosHashPut(pQInfo->tableqinfoGroupInfo.map, &id->tid, sizeof(id->tid), &item, POINTER_BYTES);
index += 1;
}
}
- pQInfo->arrTableIdInfo = taosArrayInit(tableIndex, sizeof(STableIdInfo));
- pQInfo->dataReady = QUERY_RESULT_NOT_READY;
- pthread_mutex_init(&pQInfo->lock, NULL);
-
- pQuery->pos = -1;
- pQuery->window = pQueryMsg->window;
colIdCheck(pQuery);
qDebug("qmsg:%p QInfo:%p created", pQueryMsg, pQInfo);
@@ -6290,29 +6383,13 @@ static void freeQInfo(SQInfo *pQInfo) {
taosTFree(pQuery);
}
- // todo refactor, extract method to destroytableDataInfo
- if (pQInfo->tableqinfoGroupInfo.pGroupList != NULL) {
- int32_t numOfGroups = (int32_t)(GET_NUM_OF_TABLEGROUP(pQInfo));
- for (int32_t i = 0; i < numOfGroups; ++i) {
- SArray *p = GET_TABLEGROUP(pQInfo, i);
-
- size_t num = taosArrayGetSize(p);
- for(int32_t j = 0; j < num; ++j) {
- STableQueryInfo* item = taosArrayGetP(p, j);
- destroyTableQueryInfo(item);
- }
-
- taosArrayDestroy(p);
- }
- }
+ freeTableQueryInfo(&pQInfo->tableqinfoGroupInfo);
taosTFree(pQInfo->pBuf);
- taosArrayDestroy(pQInfo->tableqinfoGroupInfo.pGroupList);
- taosHashCleanup(pQInfo->tableqinfoGroupInfo.map);
+
tsdbDestroyTableGroup(&pQInfo->tableGroupInfo);
taosArrayDestroy(pQInfo->arrTableIdInfo);
-
pQInfo->signature = 0;
qDebug("QInfo:%p QInfo is freed", pQInfo);
@@ -6488,7 +6565,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi
assert(0);
}
- (*pQInfo) = createQInfoImpl(pQueryMsg, pTableIdList, pGroupbyExpr, pExprs, &tableGroupInfo, pTagColumnInfo);
+ (*pQInfo) = createQInfoImpl(pQueryMsg, pGroupbyExpr, pExprs, &tableGroupInfo, pTagColumnInfo, isSTableQuery);
pExprs = NULL;
pGroupbyExpr = NULL;
pTagColumnInfo = NULL;
@@ -6843,7 +6920,7 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
int16_t type = 0, bytes = 0;
for(int32_t j = 0; j < pQuery->numOfOutput; ++j) {
// not assign value in case of user defined constant output column
- if (pExprInfo[j].base.colInfo.flag == TSDB_COL_UDC) {
+ if (TSDB_COL_IS_UD_COL(pExprInfo[j].base.colInfo.flag)) {
continue;
}
diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c
index c1cfab3ea2..ddb63c5012 100644
--- a/src/query/src/qFill.c
+++ b/src/query/src/qFill.c
@@ -55,7 +55,7 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_
SFillColInfo* pColInfo = &pFillInfo->pFillCol[i];
pFillInfo->pData[i] = calloc(1, pColInfo->col.bytes * capacity);
- if (pColInfo->flag == TSDB_COL_TAG) {
+ if (TSDB_COL_IS_TAG(pColInfo->flag)) {
bool exists = false;
for(int32_t j = 0; j < k; ++j) {
if (pFillInfo->pTags[j].col.colId == pColInfo->col.colId) {
@@ -155,7 +155,7 @@ void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, tFilePage* pInpu
char* data = pInput->data + pCol->col.offset * pInput->num;
memcpy(pFillInfo->pData[i], data, (size_t)(pInput->num * pCol->col.bytes));
- if (pCol->flag == TSDB_COL_TAG) { // copy the tag value to tag value buffer
+ if (TSDB_COL_IS_TAG(pCol->flag)) { // copy the tag value to tag value buffer
for (int32_t j = 0; j < pFillInfo->numOfTags; ++j) {
SFillTagColInfo* pTag = &pFillInfo->pTags[j];
if (pTag->col.colId == pCol->col.colId) {
@@ -179,14 +179,22 @@ int64_t getFilledNumOfRes(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows
if (numOfRows > 0) { // still fill gap within current data block, not generating data after the result set.
TSKEY lastKey = tsList[pFillInfo->numOfRows - 1];
- numOfRes = (int64_t)(ABS(lastKey - pFillInfo->start) / pFillInfo->slidingTime) + 1;
+ if (pFillInfo->slidingUnit != 'y' && pFillInfo->slidingUnit != 'n') {
+ numOfRes = (int64_t)(ABS(lastKey - pFillInfo->start) / pFillInfo->slidingTime) + 1;
+ } else {
+ numOfRes = taosCountNatualInterval(lastKey, pFillInfo->start, pFillInfo->slidingTime, pFillInfo->slidingUnit, pFillInfo->precision) + 1;
+ }
assert(numOfRes >= numOfRows);
} else { // reach the end of data
if ((ekey1 < pFillInfo->start && FILL_IS_ASC_FILL(pFillInfo)) ||
(ekey1 > pFillInfo->start && !FILL_IS_ASC_FILL(pFillInfo))) {
return 0;
- } else { // the numOfRes rows are all filled with specified policy
+ }
+ // the numOfRes rows are all filled with specified policy
+ if (pFillInfo->slidingUnit != 'y' && pFillInfo->slidingUnit != 'n') {
numOfRes = (ABS(ekey1 - pFillInfo->start) / pFillInfo->slidingTime) + 1;
+ } else {
+ numOfRes = taosCountNatualInterval(ekey1, pFillInfo->start, pFillInfo->slidingTime, pFillInfo->slidingUnit, pFillInfo->precision) + 1;
}
}
@@ -251,7 +259,7 @@ int taosDoLinearInterpolation(int32_t type, SPoint* point1, SPoint* point2, SPoi
static void setTagsValue(SFillInfo* pFillInfo, tFilePage** data, int32_t num) {
for(int32_t j = 0; j < pFillInfo->numOfCols; ++j) {
SFillColInfo* pCol = &pFillInfo->pFillCol[j];
- if (pCol->flag == TSDB_COL_NORMAL) {
+ if (TSDB_COL_IS_NORMAL_COL(pCol->flag)) {
continue;
}
@@ -366,7 +374,12 @@ static void doFillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* nu
setTagsValue(pFillInfo, data, *num);
}
- pFillInfo->start += (pFillInfo->slidingTime * step);
+// TODO: natural sliding time
+ if (pFillInfo->slidingUnit != 'n' && pFillInfo->slidingUnit != 'y') {
+ pFillInfo->start += (pFillInfo->slidingTime * step);
+ } else {
+ pFillInfo->start = taosAddNatualInterval(pFillInfo->start, pFillInfo->slidingTime*step, pFillInfo->slidingUnit, pFillInfo->precision);
+ }
pFillInfo->numOfCurrent++;
(*num) += 1;
@@ -446,7 +459,7 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu
// assign rows to dst buffer
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
- if (pCol->flag == TSDB_COL_TAG) {
+ if (TSDB_COL_IS_TAG(pCol->flag)) {
continue;
}
@@ -473,7 +486,12 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu
// set the tag value for final result
setTagsValue(pFillInfo, data, num);
- pFillInfo->start += (pFillInfo->slidingTime * step);
+ // TODO: natural sliding time
+ if (pFillInfo->slidingUnit != 'n' && pFillInfo->slidingUnit != 'y') {
+ pFillInfo->start += (pFillInfo->slidingTime * step);
+ } else {
+ pFillInfo->start = taosAddNatualInterval(pFillInfo->start, pFillInfo->slidingTime*step, pFillInfo->slidingUnit, pFillInfo->precision);
+ }
pFillInfo->rowIdx += 1;
pFillInfo->numOfCurrent +=1;
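
For fixed-width units the fill row count in getFilledNumOfRes is plain division, numOfRes = |key - start| / slidingTime + 1, while the new 'n'/'y' branch defers to taosCountNatualInterval because calendar intervals vary in length. A quick check of the fixed-width formula:

```c
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  /* Fill from start to ekey in steps of slidingTime (all in ms):
   * every window boundary in [start, ekey] produces one output row. */
  long long start = 1000, ekey = 10500, slidingTime = 2000;
  long long numOfRes = llabs(ekey - start) / slidingTime + 1;
  printf("rows = %lld\n", numOfRes);  /* (9500 / 2000) + 1 = 5 */
  /* boundaries: 1000, 3000, 5000, 7000, 9000 -- five rows */
  return 0;
}
```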
diff --git a/src/query/src/qPercentile.c b/src/query/src/qPercentile.c
index 19775075fc..1ce5861e52 100644
--- a/src/query/src/qPercentile.c
+++ b/src/query/src/qPercentile.c
@@ -14,310 +14,296 @@
*/
#include "qPercentile.h"
+#include "qResultbuf.h"
#include "os.h"
#include "queryLog.h"
#include "taosdef.h"
-#include "taosmsg.h"
#include "tulog.h"
+#include "tcompare.h"
-tExtMemBuffer *releaseBucketsExceptFor(tMemBucket *pMemBucket, int16_t segIdx, int16_t slotIdx) {
- tExtMemBuffer *pBuffer = NULL;
-
- for (int32_t i = 0; i < pMemBucket->numOfSegs; ++i) {
- tMemBucketSegment *pSeg = &pMemBucket->pSegs[i];
-
- for (int32_t j = 0; j < pSeg->numOfSlots; ++j) {
- if (i == segIdx && j == slotIdx) {
- pBuffer = pSeg->pBuffer[j];
- } else {
- if (pSeg->pBuffer && pSeg->pBuffer[j]) {
- pSeg->pBuffer[j] = destoryExtMemBuffer(pSeg->pBuffer[j]);
- }
- }
- }
- }
-
- return pBuffer;
+#define DEFAULT_NUM_OF_SLOT 1024
+
+int32_t getGroupId(int32_t numOfSlots, int32_t slotIndex, int32_t times) {
+ return (times * numOfSlots) + slotIndex;
}
-static tFilePage *loadIntoBucketFromDisk(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx,
- tOrderDescriptor *pDesc) {
- // release all data in other slots
- tExtMemBuffer *pMemBuffer = pMemBucket->pSegs[segIdx].pBuffer[slotIdx];
- tFilePage * buffer = (tFilePage *)calloc(1, pMemBuffer->nElemSize * pMemBuffer->numOfTotalElems + sizeof(tFilePage));
- int32_t oldCapacity = pDesc->pColumnModel->capacity;
- pDesc->pColumnModel->capacity = pMemBuffer->numOfTotalElems;
-
- if (!tExtMemBufferIsAllDataInMem(pMemBuffer)) {
- pMemBuffer = releaseBucketsExceptFor(pMemBucket, segIdx, slotIdx);
- assert(pMemBuffer->numOfTotalElems > 0);
-
- // load data in disk to memory
- tFilePage *pPage = (tFilePage *)calloc(1, pMemBuffer->pageSize);
-
- for (uint32_t i = 0; i < pMemBuffer->fileMeta.flushoutData.nLength; ++i) {
- tFlushoutInfo *pFlushInfo = &pMemBuffer->fileMeta.flushoutData.pFlushoutInfo[i];
-
- int32_t ret = fseek(pMemBuffer->file, pFlushInfo->startPageId * pMemBuffer->pageSize, SEEK_SET);
- UNUSED(ret);
-
- for (uint32_t j = 0; j < pFlushInfo->numOfPages; ++j) {
- ret = (int32_t)fread(pPage, pMemBuffer->pageSize, 1, pMemBuffer->file);
- UNUSED(ret);
- assert(pPage->num > 0);
-
- tColModelAppend(pDesc->pColumnModel, buffer, pPage->data, 0, (int32_t)pPage->num, (int32_t)pPage->num);
- printf("id: %d count: %" PRIu64 "\n", j, buffer->num);
- }
- }
- taosTFree(pPage);
-
- assert(buffer->num == pMemBuffer->fileMeta.numOfElemsInFile);
+static tFilePage *loadDataFromFilePage(tMemBucket *pMemBucket, int32_t slotIdx) {
+ tFilePage *buffer = (tFilePage *)calloc(1, pMemBucket->bytes * pMemBucket->pSlots[slotIdx].info.size + sizeof(tFilePage));
+
+ int32_t groupId = getGroupId(pMemBucket->numOfSlots, slotIdx, pMemBucket->times);
+ SIDList list = getDataBufPagesIdList(pMemBucket->pBuffer, groupId);
+
+ int32_t offset = 0;
+ for(int32_t i = 0; i < list->size; ++i) {
+ SPageInfo* pgInfo = *(SPageInfo**) taosArrayGet(list, i);
+
+ tFilePage* pg = getResBufPage(pMemBucket->pBuffer, pgInfo->pageId);
+ memcpy(buffer->data + offset, pg->data, (size_t)(pg->num * pMemBucket->bytes));
+
+ offset += (int32_t)(pg->num * pMemBucket->bytes);
}
-
- // load data in pMemBuffer to buffer
- tFilePagesItem *pListItem = pMemBuffer->pHead;
- while (pListItem != NULL) {
- tColModelAppend(pDesc->pColumnModel, buffer, pListItem->item.data, 0, (int32_t)pListItem->item.num,
- (int32_t)pListItem->item.num);
- pListItem = pListItem->pNext;
- }
-
- tColDataQSort(pDesc, (int32_t)buffer->num, 0, (int32_t)buffer->num - 1, buffer->data, TSDB_ORDER_ASC);
-
- pDesc->pColumnModel->capacity = oldCapacity; // restore value
+
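+  // sort the gathered values so that the required percentile position maps directly to a byte
+  // offset within the buffer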
+ qsort(buffer->data, pMemBucket->pSlots[slotIdx].info.size, pMemBucket->bytes, pMemBucket->comparFn);
return buffer;
}
-double findOnlyResult(tMemBucket *pMemBucket) {
- assert(pMemBucket->numOfElems == 1);
-
- for (int32_t i = 0; i < pMemBucket->numOfSegs; ++i) {
- tMemBucketSegment *pSeg = &pMemBucket->pSegs[i];
- if (pSeg->pBuffer) {
- for (int32_t j = 0; j < pSeg->numOfSlots; ++j) {
- tExtMemBuffer *pBuffer = pSeg->pBuffer[j];
- if (pBuffer) {
- assert(pBuffer->numOfTotalElems == 1);
- tFilePage *pPage = &pBuffer->pHead->item;
- if (pBuffer->numOfElemsInBuffer == 1) {
- switch (pMemBucket->dataType) {
- case TSDB_DATA_TYPE_INT:
- return *(int32_t *)pPage->data;
- case TSDB_DATA_TYPE_SMALLINT:
- return *(int16_t *)pPage->data;
- case TSDB_DATA_TYPE_TINYINT:
- return *(int8_t *)pPage->data;
- case TSDB_DATA_TYPE_BIGINT:
- return (double)(*(int64_t *)pPage->data);
- case TSDB_DATA_TYPE_DOUBLE: {
- double dv = GET_DOUBLE_VAL(pPage->data);
- //return *(double *)pPage->data;
- return dv;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- float fv = GET_FLOAT_VAL(pPage->data);
- //return *(float *)pPage->data;
- return fv;
- }
- default:
- return 0;
- }
- }
- }
- }
- }
- }
- return 0;
-}
-
-void tBucketBigIntHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx) {
- int64_t v = *(int64_t *)value;
-
- if (pBucket->nRange.i64MaxVal == INT64_MIN) {
- if (v >= 0) {
- *segIdx = ((v >> (64 - 9)) >> 6) + 8;
- *slotIdx = (v >> (64 - 9)) & 0x3F;
- } else { // v<0
- *segIdx = ((-v) >> (64 - 9)) >> 6;
- *slotIdx = ((-v) >> (64 - 9)) & 0x3F;
- *segIdx = 7 - (*segIdx);
- }
- } else {
- // todo hash for bigint and float and double
- int64_t span = pBucket->nRange.i64MaxVal - pBucket->nRange.i64MinVal;
- if (span < pBucket->nTotalSlots) {
- int32_t delta = (int32_t)(v - pBucket->nRange.i64MinVal);
- *segIdx = delta / pBucket->nSlotsOfSeg;
- *slotIdx = delta % pBucket->nSlotsOfSeg;
- } else {
- double x = (double)span / pBucket->nTotalSlots;
- double posx = (v - pBucket->nRange.i64MinVal) / x;
- if (v == pBucket->nRange.i64MaxVal) {
- posx -= 1;
- }
-
- *segIdx = ((int32_t)posx) / pBucket->nSlotsOfSeg;
- *slotIdx = ((int32_t)posx) % pBucket->nSlotsOfSeg;
- }
- }
-}
-
-// todo refactor to more generic
-void tBucketIntHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx) {
- int32_t v = *(int32_t *)value;
-
- if (pBucket->nRange.iMaxVal == INT32_MIN) {
- /*
- * taking negative integer into consideration,
- * there is only half of pBucket->segs available for non-negative integer
- */
- // int32_t numOfSlots = pBucket->nTotalSlots>>1;
- // int32_t bits = bitsOfNumber(numOfSlots)-1;
-
- if (v >= 0) {
- *segIdx = ((v >> (32 - 9)) >> 6) + 8;
- *slotIdx = (v >> (32 - 9)) & 0x3F;
- } else { // v<0
- *segIdx = ((-v) >> (32 - 9)) >> 6;
- *slotIdx = ((-v) >> (32 - 9)) & 0x3F;
- *segIdx = 7 - (*segIdx);
- }
- } else {
- // divide a range of [iMinVal, iMaxVal] into 1024 buckets
- int32_t span = pBucket->nRange.iMaxVal - pBucket->nRange.iMinVal;
- if (span < pBucket->nTotalSlots) {
- int32_t delta = v - pBucket->nRange.iMinVal;
- *segIdx = delta / pBucket->nSlotsOfSeg;
- *slotIdx = delta % pBucket->nSlotsOfSeg;
- } else {
- double x = (double)span / pBucket->nTotalSlots;
- double posx = (v - pBucket->nRange.iMinVal) / x;
- if (v == pBucket->nRange.iMaxVal) {
- posx -= 1;
- }
- *segIdx = ((int32_t)posx) / pBucket->nSlotsOfSeg;
- *slotIdx = ((int32_t)posx) % pBucket->nSlotsOfSeg;
- }
- }
-}
-
-void tBucketDoubleHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx) {
- // double v = *(double *)value;
- double v = GET_DOUBLE_VAL(value);
-
- if (pBucket->nRange.dMinVal == DBL_MAX) {
- /*
- * taking negative integer into consideration,
- * there is only half of pBucket->segs available for non-negative integer
- */
- double x = DBL_MAX / (pBucket->nTotalSlots >> 1);
- double posx = (v + DBL_MAX) / x;
- *segIdx = ((int32_t)posx) / pBucket->nSlotsOfSeg;
- *slotIdx = ((int32_t)posx) % pBucket->nSlotsOfSeg;
- } else {
- // divide a range of [dMinVal, dMaxVal] into 1024 buckets
- double span = pBucket->nRange.dMaxVal - pBucket->nRange.dMinVal;
- if (span < pBucket->nTotalSlots) {
- int32_t delta = (int32_t)(v - pBucket->nRange.dMinVal);
- *segIdx = delta / pBucket->nSlotsOfSeg;
- *slotIdx = delta % pBucket->nSlotsOfSeg;
- } else {
- double x = span / pBucket->nTotalSlots;
- double posx = (v - pBucket->nRange.dMinVal) / x;
- if (v == pBucket->nRange.dMaxVal) {
- posx -= 1;
- }
- *segIdx = ((int32_t)posx) / pBucket->nSlotsOfSeg;
- *slotIdx = ((int32_t)posx) % pBucket->nSlotsOfSeg;
- }
-
- if (*segIdx < 0 || *segIdx > 16 || *slotIdx < 0 || *slotIdx > 64) {
- uError("error in hash process. segment is: %d, slot id is: %d\n", *segIdx, *slotIdx);
- }
- }
-}
-
-tMemBucket *tMemBucketCreate(int32_t totalSlots, int32_t nBufferSize, int16_t nElemSize, int16_t dataType,
- tOrderDescriptor *pDesc) {
- tMemBucket *pBucket = (tMemBucket *)malloc(sizeof(tMemBucket));
- pBucket->nTotalSlots = totalSlots;
- pBucket->nSlotsOfSeg = 1 << 6; // 64 Segments, 16 slots each seg.
- pBucket->dataType = dataType;
- pBucket->nElemSize = nElemSize;
- pBucket->pageSize = DEFAULT_PAGE_SIZE;
-
- pBucket->numOfElems = 0;
- pBucket->numOfSegs = pBucket->nTotalSlots / pBucket->nSlotsOfSeg;
-
- pBucket->nTotalBufferSize = nBufferSize;
-
- pBucket->maxElemsCapacity = pBucket->nTotalBufferSize / pBucket->nElemSize;
-
- pBucket->numOfTotalPages = pBucket->nTotalBufferSize / pBucket->pageSize;
- pBucket->numOfAvailPages = pBucket->numOfTotalPages;
-
- pBucket->pSegs = NULL;
- pBucket->pOrderDesc = pDesc;
-
- switch (pBucket->dataType) {
+static void resetBoundingBox(MinMaxEntry* range, int32_t type) {
+ switch (type) {
+ case TSDB_DATA_TYPE_BIGINT: {
+ range->i64MaxVal = INT64_MIN;
+ range->i64MinVal = INT64_MAX;
+ break;
+ };
case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_TINYINT: {
- pBucket->nRange.iMinVal = INT32_MAX;
- pBucket->nRange.iMaxVal = INT32_MIN;
- pBucket->HashFunc = tBucketIntHash;
+ range->iMaxVal = INT32_MIN;
+ range->iMinVal = INT32_MAX;
break;
};
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_FLOAT: {
- pBucket->nRange.dMinVal = DBL_MAX;
- pBucket->nRange.dMaxVal = -DBL_MAX;
- pBucket->HashFunc = tBucketDoubleHash;
+ range->dMaxVal = -DBL_MAX;
+ range->dMinVal = DBL_MAX;
break;
- };
- case TSDB_DATA_TYPE_BIGINT: {
- pBucket->nRange.i64MinVal = INT64_MAX;
- pBucket->nRange.i64MaxVal = INT64_MIN;
- pBucket->HashFunc = tBucketBigIntHash;
- break;
- };
- default: {
- uError("MemBucket:%p,not support data type %d,failed", pBucket, pBucket->dataType);
- taosTFree(pBucket);
- return NULL;
+ }
+ }
+}
+
+static void resetPosInfo(SSlotInfo* pInfo) {
+ pInfo->size = 0;
+ pInfo->pageId = -1;
+ pInfo->data = NULL;
+}
+
+double findOnlyResult(tMemBucket *pMemBucket) {
+ assert(pMemBucket->total == 1);
+
+ for (int32_t i = 0; i < pMemBucket->numOfSlots; ++i) {
+ tMemBucketSlot *pSlot = &pMemBucket->pSlots[i];
+ if (pSlot->info.size == 0) {
+ continue;
+ }
+
+ int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times);
+ SIDList list = getDataBufPagesIdList(pMemBucket->pBuffer, groupId);
+ assert(list->size == 1);
+
+ SPageInfo* pgInfo = (SPageInfo*) taosArrayGetP(list, 0);
+ tFilePage* pPage = getResBufPage(pMemBucket->pBuffer, pgInfo->pageId);
+ assert(pPage->num == 1);
+
+ switch (pMemBucket->type) {
+ case TSDB_DATA_TYPE_INT:
+ return *(int32_t *)pPage->data;
+ case TSDB_DATA_TYPE_SMALLINT:
+ return *(int16_t *)pPage->data;
+ case TSDB_DATA_TYPE_TINYINT:
+ return *(int8_t *)pPage->data;
+ case TSDB_DATA_TYPE_BIGINT:
+ return (double)(*(int64_t *)pPage->data);
+ case TSDB_DATA_TYPE_DOUBLE: {
+ double dv = GET_DOUBLE_VAL(pPage->data);
+ return dv;
+ }
+ case TSDB_DATA_TYPE_FLOAT: {
+ float fv = GET_FLOAT_VAL(pPage->data);
+ return fv;
+ }
+ default:
+ return 0;
}
}
- int32_t numOfCols = pDesc->pColumnModel->numOfCols;
- if (numOfCols != 1) {
- uError("MemBucket:%p,only consecutive data is allowed,invalid numOfCols:%d", pBucket, numOfCols);
- taosTFree(pBucket);
+ return 0;
+}
+
+int32_t tBucketBigIntHash(tMemBucket *pBucket, const void *value) {
+ int64_t v = *(int64_t *)value;
+ int32_t index = -1;
+
+ int32_t halfSlot = pBucket->numOfSlots >> 1;
+// int32_t bits = 32;//bitsOfNumber(pBucket->numOfSlots) - 1;
+
+ if (pBucket->range.i64MaxVal == INT64_MIN) {
+ if (v >= 0) {
+ index = (v >> (64 - 9)) + halfSlot;
+ } else { // v<0
+ index = ((-v) >> (64 - 9));
+ index = -index + (halfSlot - 1);
+ }
+
+ return index;
+ } else {
+ // todo hash for bigint and float and double
+ int64_t span = pBucket->range.i64MaxVal - pBucket->range.i64MinVal;
+ if (span < pBucket->numOfSlots) {
+ int32_t delta = (int32_t)(v - pBucket->range.i64MinVal);
+ index = delta % pBucket->numOfSlots;
+ } else {
+ double slotSpan = (double)span / pBucket->numOfSlots;
+ index = (int32_t)((v - pBucket->range.i64MinVal) / slotSpan);
+ if (v == pBucket->range.i64MaxVal) {
+ index -= 1;
+ }
+ }
+
+ return index;
+ }
+}
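+// first pass (overall range still unknown): the high-order bits of |v| choose the slot, with
+// non-negative values placed at halfSlot and above and negative values mirrored below it, so
+// the slot order follows the value order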
+
+// todo refactor to more generic
+int32_t tBucketIntHash(tMemBucket *pBucket, const void *value) {
+ int32_t v = 0;
+  switch (pBucket->type) {
+    case TSDB_DATA_TYPE_SMALLINT: v = *(int16_t*) value; break;
+    case TSDB_DATA_TYPE_TINYINT:  v = *(int8_t*) value; break;
+    default: v = *(int32_t*) value; break;
+ }
+
+ int32_t index = -1;
+ if (pBucket->range.iMaxVal == INT32_MIN) {
+ /*
+ * taking negative integer into consideration,
+ * there is only half of pBucket->segs available for non-negative integer
+ */
+ int32_t halfSlot = pBucket->numOfSlots >> 1;
+ int32_t bits = 32;//bitsOfNumber(pBucket->numOfSlots) - 1;
+
+ if (v >= 0) {
+ index = (v >> (bits - 9)) + halfSlot;
+ } else { // v < 0
+ index = ((-v) >> (32 - 9));
+ index = -index + (halfSlot - 1);
+ }
+
+ return index;
+ } else {
+ // divide a range of [iMinVal, iMaxVal] into 1024 buckets
+ int32_t span = pBucket->range.iMaxVal - pBucket->range.iMinVal;
+ if (span < pBucket->numOfSlots) {
+ int32_t delta = v - pBucket->range.iMinVal;
+ index = (delta % pBucket->numOfSlots);
+ } else {
+ double slotSpan = (double)span / pBucket->numOfSlots;
+ index = (int32_t)((v - pBucket->range.iMinVal) / slotSpan);
+ if (v == pBucket->range.iMaxVal) {
+ index -= 1;
+ }
+ }
+
+ return index;
+ }
+}
+
+int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value) {
+ double v = GET_DOUBLE_VAL(value);
+ int32_t index = -1;
+
+ if (pBucket->range.dMinVal == DBL_MAX) {
+ /*
+ * taking negative integer into consideration,
+ * there is only half of pBucket->segs available for non-negative integer
+ */
+ double x = DBL_MAX / (pBucket->numOfSlots >> 1);
+ double posx = (v + DBL_MAX) / x;
+ return ((int32_t)posx) % pBucket->numOfSlots;
+ } else {
+ // divide a range of [dMinVal, dMaxVal] into 1024 buckets
+ double span = pBucket->range.dMaxVal - pBucket->range.dMinVal;
+ if (span < pBucket->numOfSlots) {
+ int32_t delta = (int32_t)(v - pBucket->range.dMinVal);
+ index = (delta % pBucket->numOfSlots);
+ } else {
+ double slotSpan = span / pBucket->numOfSlots;
+ index = (int32_t)((v - pBucket->range.dMinVal) / slotSpan);
+ if (v == pBucket->range.dMaxVal) {
+ index -= 1;
+ }
+ }
+
+ if (index < 0 || index > pBucket->numOfSlots) {
+ uError("error in hash process. slot id: %d", index);
+ }
+
+ return index;
+ }
+}
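+// once the range is known (second and later rounds), all three hash functions split the span
+// [min, max] into numOfSlots equal sub-ranges; the exact maximum is pulled back into the last
+// slot so the index never runs past the end of the table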
+
+static __perc_hash_func_t getHashFunc(int32_t type) {
+ switch (type) {
+ case TSDB_DATA_TYPE_INT:
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_TINYINT: {
+ return tBucketIntHash;
+ };
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ case TSDB_DATA_TYPE_FLOAT: {
+ return tBucketDoubleHash;
+ };
+
+ case TSDB_DATA_TYPE_BIGINT: {
+ return tBucketBigIntHash;
+ };
+
+ default: {
+ return NULL;
+ }
+ }
+}
+
+static void resetSlotInfo(tMemBucket* pBucket) {
+ for (int32_t i = 0; i < pBucket->numOfSlots; ++i) {
+ tMemBucketSlot* pSlot = &pBucket->pSlots[i];
+
+ resetBoundingBox(&pSlot->range, pBucket->type);
+ resetPosInfo(&pSlot->info);
+ }
+}
+
+tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType) {
+ tMemBucket *pBucket = (tMemBucket *)calloc(1, sizeof(tMemBucket));
+ if (pBucket == NULL) {
return NULL;
}
- SSchema* pSchema = getColumnModelSchema(pDesc->pColumnModel, 0);
- if (pSchema->type != dataType) {
- uError("MemBucket:%p,data type is not consistent,%d in schema, %d in param", pBucket, pSchema->type, dataType);
- taosTFree(pBucket);
+ pBucket->numOfSlots = DEFAULT_NUM_OF_SLOT;
+ pBucket->bufPageSize = DEFAULT_PAGE_SIZE * 4; // 4k per page
+
+ pBucket->type = dataType;
+ pBucket->bytes = nElemSize;
+ pBucket->total = 0;
+ pBucket->times = 1;
+
+ pBucket->maxCapacity = 200000;
+
+ pBucket->elemPerPage = (pBucket->bufPageSize - sizeof(tFilePage))/pBucket->bytes;
+ pBucket->comparFn = getKeyComparFunc(pBucket->type);
+ resetBoundingBox(&pBucket->range, pBucket->type);
+
+ pBucket->hashFunc = getHashFunc(pBucket->type);
+ if (pBucket->hashFunc == NULL) {
+ uError("MemBucket:%p, not support data type %d, failed", pBucket, pBucket->type);
+ free(pBucket);
return NULL;
}
- if (pBucket->numOfTotalPages < pBucket->nTotalSlots) {
- uWarn("MemBucket:%p,total buffer pages %d are not enough for all slots", pBucket, pBucket->numOfTotalPages);
+ pBucket->pSlots = (tMemBucketSlot *)calloc(pBucket->numOfSlots, sizeof(tMemBucketSlot));
+ if (pBucket->pSlots == NULL) {
+ free(pBucket);
+ return NULL;
}
- pBucket->pSegs = (tMemBucketSegment *)malloc(pBucket->numOfSegs * sizeof(tMemBucketSegment));
+ resetSlotInfo(pBucket);
- for (int32_t i = 0; i < pBucket->numOfSegs; ++i) {
- pBucket->pSegs[i].numOfSlots = pBucket->nSlotsOfSeg;
- pBucket->pSegs[i].pBuffer = NULL;
- pBucket->pSegs[i].pBoundingEntries = NULL;
+ int32_t ret = createDiskbasedResultBuffer(&pBucket->pBuffer, pBucket->bytes, pBucket->bufPageSize, pBucket->bufPageSize * 512, NULL);
+ if (ret != TSDB_CODE_SUCCESS) {
+ tMemBucketDestroy(pBucket);
+ return NULL;
}
-
- uDebug("MemBucket:%p,created,buffer size:%ld,elem size:%d", pBucket, pBucket->numOfTotalPages * DEFAULT_PAGE_SIZE,
- pBucket->nElemSize);
-
+
+ uDebug("MemBucket:%p, elem size:%d", pBucket, pBucket->bytes);
return pBucket;
}
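+// typical usage (a sketch): create a bucket for the column type, stream values in with
+// tMemBucketPut(), read a percentile via getPercentile(pBucket, 50), then call
+// tMemBucketDestroy()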
@@ -326,81 +312,11 @@ void tMemBucketDestroy(tMemBucket *pBucket) {
return;
}
- if (pBucket->pSegs) {
- for (int32_t i = 0; i < pBucket->numOfSegs; ++i) {
- tMemBucketSegment *pSeg = &(pBucket->pSegs[i]);
- taosTFree(pSeg->pBoundingEntries);
-
- if (pSeg->pBuffer == NULL || pSeg->numOfSlots == 0) {
- continue;
- }
-
- for (int32_t j = 0; j < pSeg->numOfSlots; ++j) {
- if (pSeg->pBuffer[j] != NULL) {
- pSeg->pBuffer[j] = destoryExtMemBuffer(pSeg->pBuffer[j]);
- }
- }
- taosTFree(pSeg->pBuffer);
- }
- }
-
- taosTFree(pBucket->pSegs);
+ destroyResultBuf(pBucket->pBuffer);
+ taosTFree(pBucket->pSlots);
taosTFree(pBucket);
}
-/*
- * find the slots which accounts for largest proportion of total in-memory buffer
- */
-static void tBucketGetMaxMemSlot(tMemBucket *pBucket, int16_t *segIdx, int16_t *slotIdx) {
- *segIdx = -1;
- *slotIdx = -1;
-
- int32_t val = 0;
- for (int32_t k = 0; k < pBucket->numOfSegs; ++k) {
- tMemBucketSegment *pSeg = &pBucket->pSegs[k];
- for (int32_t i = 0; i < pSeg->numOfSlots; ++i) {
- if (pSeg->pBuffer == NULL || pSeg->pBuffer[i] == NULL) {
- continue;
- }
-
- if (val < pSeg->pBuffer[i]->numOfInMemPages) {
- val = pSeg->pBuffer[i]->numOfInMemPages;
- *segIdx = k;
- *slotIdx = i;
- }
- }
- }
-}
-
-static void resetBoundingBox(tMemBucketSegment *pSeg, int32_t type) {
- switch (type) {
- case TSDB_DATA_TYPE_BIGINT: {
- for (int32_t i = 0; i < pSeg->numOfSlots; ++i) {
- pSeg->pBoundingEntries[i].i64MaxVal = INT64_MIN;
- pSeg->pBoundingEntries[i].i64MinVal = INT64_MAX;
- }
- break;
- };
- case TSDB_DATA_TYPE_INT:
- case TSDB_DATA_TYPE_SMALLINT:
- case TSDB_DATA_TYPE_TINYINT: {
- for (int32_t i = 0; i < pSeg->numOfSlots; ++i) {
- pSeg->pBoundingEntries[i].iMaxVal = INT32_MIN;
- pSeg->pBoundingEntries[i].iMinVal = INT32_MAX;
- }
- break;
- };
- case TSDB_DATA_TYPE_DOUBLE:
- case TSDB_DATA_TYPE_FLOAT: {
- for (int32_t i = 0; i < pSeg->numOfSlots; ++i) {
- pSeg->pBoundingEntries[i].dMaxVal = -DBL_MAX;
- pSeg->pBoundingEntries[i].dMinVal = DBL_MAX;
- }
- break;
- }
- }
-}
-
void tMemBucketUpdateBoundingBox(MinMaxEntry *r, char *data, int32_t dataType) {
switch (dataType) {
case TSDB_DATA_TYPE_INT: {
@@ -461,7 +377,6 @@ void tMemBucketUpdateBoundingBox(MinMaxEntry *r, char *data, int32_t dataType) {
break;
};
case TSDB_DATA_TYPE_FLOAT: {
- // double val = *(float *)data;
double val = GET_FLOAT_VAL(data);
if (r->dMinVal > val) {
@@ -478,171 +393,95 @@ void tMemBucketUpdateBoundingBox(MinMaxEntry *r, char *data, int32_t dataType) {
}
/*
- * in memory bucket, we only accept the simple data consecutive put in a row/column
- * no column-model in this case.
+ * in memory bucket, we only accept data array list
*/
-void tMemBucketPut(tMemBucket *pBucket, void *data, int32_t numOfRows) {
- pBucket->numOfElems += numOfRows;
- int16_t segIdx = 0, slotIdx = 0;
+void tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
+ assert(pBucket != NULL && data != NULL && size > 0);
+ pBucket->total += (int32_t)size;
- for (int32_t i = 0; i < numOfRows; ++i) {
- char *d = (char *)data + i * tDataTypeDesc[pBucket->dataType].nSize;
+ int32_t bytes = pBucket->bytes;
- switch (pBucket->dataType) {
- case TSDB_DATA_TYPE_SMALLINT: {
- int32_t val = *(int16_t *)d;
- (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx);
- break;
- }
- case TSDB_DATA_TYPE_TINYINT: {
- int32_t val = *(int8_t *)d;
- (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- int32_t val = *(int32_t *)d;
- (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- int64_t val = *(int64_t *)d;
- (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- // double val = *(double *)d;
- double val = GET_DOUBLE_VAL(d);
- (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- // double val = *(float *)d;
- double val = GET_FLOAT_VAL(d);
- (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx);
- break;
- }
- }
+ for (int32_t i = 0; i < size; ++i) {
+ char *d = (char *) data + i * bytes;
- tMemBucketSegment *pSeg = &pBucket->pSegs[segIdx];
- if (pSeg->pBoundingEntries == NULL) {
- pSeg->pBoundingEntries = (MinMaxEntry *)malloc(sizeof(MinMaxEntry) * pBucket->nSlotsOfSeg);
- resetBoundingBox(pSeg, pBucket->dataType);
- }
+ int32_t slotIdx = (pBucket->hashFunc)(pBucket, d);
+ assert(slotIdx >= 0);
- if (pSeg->pBuffer == NULL) {
- pSeg->pBuffer = (tExtMemBuffer **)calloc(pBucket->nSlotsOfSeg, sizeof(void *));
- }
-
- if (pSeg->pBuffer[slotIdx] == NULL) {
- pSeg->pBuffer[slotIdx] = createExtMemBuffer(pBucket->numOfTotalPages * pBucket->pageSize, pBucket->nElemSize,
- pBucket->pageSize, pBucket->pOrderDesc->pColumnModel);
- pSeg->pBuffer[slotIdx]->flushModel = SINGLE_APPEND_MODEL;
- pBucket->pOrderDesc->pColumnModel->capacity = pSeg->pBuffer[slotIdx]->numOfElemsPerPage;
- }
-
- tMemBucketUpdateBoundingBox(&pSeg->pBoundingEntries[slotIdx], d, pBucket->dataType);
+ tMemBucketSlot *pSlot = &pBucket->pSlots[slotIdx];
+ tMemBucketUpdateBoundingBox(&pSlot->range, d, pBucket->type);
// ensure available memory pages to allocate
- int16_t cseg = 0, cslot = 0;
- if (pBucket->numOfAvailPages == 0) {
- uDebug("MemBucket:%p,max avail size:%d, no avail memory pages,", pBucket, pBucket->numOfTotalPages);
+ int32_t groupId = getGroupId(pBucket->numOfSlots, slotIdx, pBucket->times);
+ int32_t pageId = -1;
- tBucketGetMaxMemSlot(pBucket, &cseg, &cslot);
- if (cseg == -1 || cslot == -1) {
- uError("MemBucket:%p,failed to find appropriated avail buffer", pBucket);
- return;
+ if (pSlot->info.data == NULL || pSlot->info.data->num >= pBucket->elemPerPage) {
+ if (pSlot->info.data != NULL) {
+ assert(pSlot->info.data->num >= pBucket->elemPerPage && pSlot->info.size > 0);
+
+        // the current page is full; release it back to the result buffer (it may be flushed to disk)
+ releaseResBufPage(pBucket->pBuffer, pSlot->info.data);
+ pSlot->info.data = NULL;
}
- if (cseg != segIdx || cslot != slotIdx) {
- pBucket->numOfAvailPages += pBucket->pSegs[cseg].pBuffer[cslot]->numOfInMemPages;
-
- int32_t avail = pBucket->pSegs[cseg].pBuffer[cslot]->numOfInMemPages;
- UNUSED(avail);
- tExtMemBufferFlush(pBucket->pSegs[cseg].pBuffer[cslot]);
-
- uDebug("MemBucket:%p,seg:%d,slot:%d flushed to disk,new avail pages:%d", pBucket, cseg, cslot,
- pBucket->numOfAvailPages);
- } else {
- uDebug("MemBucket:%p,failed to choose slot to flush to disk seg:%d,slot:%d", pBucket, cseg, cslot);
- }
+ pSlot->info.data = getNewDataBuf(pBucket->pBuffer, groupId, &pageId);
+ pSlot->info.pageId = pageId;
}
- int16_t consumedPgs = pSeg->pBuffer[slotIdx]->numOfInMemPages;
- int16_t newPgs = tExtMemBufferPut(pSeg->pBuffer[slotIdx], d, 1);
- /*
- * trigger 1. page re-allocation, to reduce the available pages
- * 2. page flushout, to increase the available pages
- */
- pBucket->numOfAvailPages += (consumedPgs - newPgs);
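+    // append the value to the slot's current page and advance both the page row count and the
+    // slot element count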
+ memcpy(pSlot->info.data->data + pSlot->info.data->num * pBucket->bytes, d, pBucket->bytes);
+
+ pSlot->info.data->num += 1;
+ pSlot->info.size += 1;
}
}
-void releaseBucket(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx) {
- if (segIdx < 0 || segIdx > pMemBucket->numOfSegs || slotIdx < 0) {
- return;
- }
-
- tMemBucketSegment *pSeg = &pMemBucket->pSegs[segIdx];
- if (slotIdx < 0 || slotIdx >= pSeg->numOfSlots || pSeg->pBuffer[slotIdx] == NULL) {
- return;
- }
-
- pSeg->pBuffer[slotIdx] = destoryExtMemBuffer(pSeg->pBuffer[slotIdx]);
-}
-
////////////////////////////////////////////////////////////////////////////////////////////
static void findMaxMinValue(tMemBucket *pMemBucket, double *maxVal, double *minVal) {
*minVal = DBL_MAX;
*maxVal = -DBL_MAX;
- for (int32_t i = 0; i < pMemBucket->numOfSegs; ++i) {
- tMemBucketSegment *pSeg = &pMemBucket->pSegs[i];
- if (pSeg->pBuffer == NULL) {
+ for (int32_t i = 0; i < pMemBucket->numOfSlots; ++i) {
+ tMemBucketSlot *pSlot = &pMemBucket->pSlots[i];
+ if (pSlot->info.size == 0) {
continue;
}
- switch (pMemBucket->dataType) {
+
+ switch (pMemBucket->type) {
case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_TINYINT: {
- for (int32_t j = 0; j < pSeg->numOfSlots; ++j) {
- double minv = pSeg->pBoundingEntries[j].iMinVal;
- double maxv = pSeg->pBoundingEntries[j].iMaxVal;
+ double minv = pSlot->range.iMinVal;
+ double maxv = pSlot->range.iMaxVal;
- if (*minVal > minv) {
- *minVal = minv;
- }
- if (*maxVal < maxv) {
- *maxVal = maxv;
- }
+ if (*minVal > minv) {
+ *minVal = minv;
+ }
+ if (*maxVal < maxv) {
+ *maxVal = maxv;
}
break;
}
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_FLOAT: {
- for (int32_t j = 0; j < pSeg->numOfSlots; ++j) {
- double minv = pSeg->pBoundingEntries[j].dMinVal;
- double maxv = pSeg->pBoundingEntries[j].dMaxVal;
+ double minv = pSlot->range.dMinVal;
+ double maxv = pSlot->range.dMaxVal;
- if (*minVal > minv) {
- *minVal = minv;
- }
- if (*maxVal < maxv) {
- *maxVal = maxv;
- }
+ if (*minVal > minv) {
+ *minVal = minv;
+ }
+ if (*maxVal < maxv) {
+ *maxVal = maxv;
}
break;
}
case TSDB_DATA_TYPE_BIGINT: {
- for (int32_t j = 0; j < pSeg->numOfSlots; ++j) {
- double minv = (double)pSeg->pBoundingEntries[j].i64MinVal;
- double maxv = (double)pSeg->pBoundingEntries[j].i64MaxVal;
+ double minv = (double)pSlot->range.i64MinVal;
+ double maxv = (double)pSlot->range.i64MaxVal;
- if (*minVal > minv) {
- *minVal = minv;
- }
- if (*maxVal < maxv) {
- *maxVal = maxv;
- }
+ if (*minVal > minv) {
+ *minVal = minv;
+ }
+ if (*maxVal < maxv) {
+ *maxVal = maxv;
}
break;
}
@@ -650,20 +489,6 @@ static void findMaxMinValue(tMemBucket *pMemBucket, double *maxVal, double *minV
}
}
-static MinMaxEntry getMinMaxEntryOfNearestSlotInNextSegment(tMemBucket *pMemBucket, int32_t segIdx) {
- int32_t i = segIdx + 1;
- while (i < pMemBucket->numOfSegs && pMemBucket->pSegs[i].numOfSlots == 0) ++i;
-
- tMemBucketSegment *pSeg = &pMemBucket->pSegs[i];
- assert(pMemBucket->numOfSegs > i && pMemBucket->pSegs[i].pBuffer != NULL);
-
- i = 0;
- while (i < pMemBucket->nSlotsOfSeg && pSeg->pBuffer[i] == NULL) ++i;
-
- assert(i < pMemBucket->nSlotsOfSeg);
- return pSeg->pBoundingEntries[i];
-}
-
/*
*
* now, we need to find the minimum value of the next slot for
@@ -671,262 +496,198 @@ static MinMaxEntry getMinMaxEntryOfNearestSlotInNextSegment(tMemBucket *pMemBuck
* j is the last slot of current segment, we need to get the first
* slot of the next segment.
*/
-static MinMaxEntry getMinMaxEntryOfNextSlotWithData(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx) {
- tMemBucketSegment *pSeg = &pMemBucket->pSegs[segIdx];
-
- MinMaxEntry next;
- if (slotIdx == pSeg->numOfSlots - 1) { // find next segment with data
- return getMinMaxEntryOfNearestSlotInNextSegment(pMemBucket, segIdx);
- } else {
+static MinMaxEntry getMinMaxEntryOfNextSlotWithData(tMemBucket *pMemBucket, int32_t slotIdx) {
int32_t j = slotIdx + 1;
- for (; j < pMemBucket->nSlotsOfSeg && pMemBucket->pSegs[segIdx].pBuffer[j] == 0; ++j) {
+ while (j < pMemBucket->numOfSlots && (pMemBucket->pSlots[j].info.size == 0)) {
+ ++j;
+ }
+
+ assert(j < pMemBucket->numOfSlots);
+ return pMemBucket->pSlots[j].range;
+}
+
+static bool isIdenticalData(tMemBucket *pMemBucket, int32_t index);
+char *getFirstElemOfMemBuffer(tMemBucketSlot *pSeg, int32_t slotIdx, tFilePage *pPage);
+
+static double getIdenticalDataVal(tMemBucket* pMemBucket, int32_t slotIndex) {
+ assert(isIdenticalData(pMemBucket, slotIndex));
+
+ tMemBucketSlot *pSlot = &pMemBucket->pSlots[slotIndex];
+
+ double finalResult = 0.0;
+ switch (pMemBucket->type) {
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_INT: {
+ finalResult = pSlot->range.iMinVal;
+ break;
+ }
+
+ case TSDB_DATA_TYPE_FLOAT:
+ case TSDB_DATA_TYPE_DOUBLE: {
+ finalResult = pSlot->range.dMinVal;
+ break;
};
- if (j == pMemBucket->nSlotsOfSeg) { // current slot has no available
- // slot,try next segment
- return getMinMaxEntryOfNearestSlotInNextSegment(pMemBucket, segIdx);
- } else {
- next = pSeg->pBoundingEntries[slotIdx + 1];
- assert(pSeg->pBuffer[slotIdx + 1] != NULL);
+ case TSDB_DATA_TYPE_BIGINT: {
+ finalResult = (double)pSlot->range.i64MinVal;
+ break;
}
}
- return next;
+ return finalResult;
}
-bool isIdenticalData(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx);
-char *getFirstElemOfMemBuffer(tMemBucketSegment *pSeg, int32_t slotIdx, tFilePage *pPage);
-
double getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction) {
int32_t num = 0;
- for (int32_t i = 0; i < pMemBucket->numOfSegs; ++i) {
- tMemBucketSegment *pSeg = &pMemBucket->pSegs[i];
- for (int32_t j = 0; j < pSeg->numOfSlots; ++j) {
- if (pSeg->pBuffer == NULL || pSeg->pBuffer[j] == NULL) {
- continue;
- }
- // required value in current slot
- if (num < (count + 1) && num + pSeg->pBuffer[j]->numOfTotalElems >= (count + 1)) {
- if (pSeg->pBuffer[j]->numOfTotalElems + num == (count + 1)) {
- /*
- * now, we need to find the minimum value of the next slot for interpolating the percentile value
- * j is the last slot of current segment, we need to get the first slot of the next segment.
- *
- */
- MinMaxEntry next = getMinMaxEntryOfNextSlotWithData(pMemBucket, i, j);
+ for (int32_t i = 0; i < pMemBucket->numOfSlots; ++i) {
+ tMemBucketSlot *pSlot = &pMemBucket->pSlots[i];
+ if (pSlot->info.size == 0) {
+ continue;
+ }
- double maxOfThisSlot = 0;
- double minOfNextSlot = 0;
- switch (pMemBucket->dataType) {
- case TSDB_DATA_TYPE_INT:
- case TSDB_DATA_TYPE_SMALLINT:
- case TSDB_DATA_TYPE_TINYINT: {
- maxOfThisSlot = pSeg->pBoundingEntries[j].iMaxVal;
- minOfNextSlot = next.iMinVal;
- break;
- };
- case TSDB_DATA_TYPE_FLOAT:
- case TSDB_DATA_TYPE_DOUBLE: {
- maxOfThisSlot = pSeg->pBoundingEntries[j].dMaxVal;
- minOfNextSlot = next.dMinVal;
- break;
- };
- case TSDB_DATA_TYPE_BIGINT: {
- maxOfThisSlot = (double)pSeg->pBoundingEntries[j].i64MaxVal;
- minOfNextSlot = (double)next.i64MinVal;
- break;
- }
+ // required value in current slot
+ if (num < (count + 1) && num + pSlot->info.size >= (count + 1)) {
+ if (pSlot->info.size + num == (count + 1)) {
+ /*
+ * now, we need to find the minimum value of the next slot for interpolating the percentile value
+         * the search skips empty slots and returns the range of the next slot that holds data.
+ */
+ MinMaxEntry next = getMinMaxEntryOfNextSlotWithData(pMemBucket, i);
+
+ double maxOfThisSlot = 0;
+ double minOfNextSlot = 0;
+ switch (pMemBucket->type) {
+ case TSDB_DATA_TYPE_INT:
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_TINYINT: {
+ maxOfThisSlot = pSlot->range.iMaxVal;
+ minOfNextSlot = next.iMinVal;
+ break;
};
-
- assert(minOfNextSlot > maxOfThisSlot);
-
- double val = (1 - fraction) * maxOfThisSlot + fraction * minOfNextSlot;
- return val;
- }
- if (pSeg->pBuffer[j]->numOfTotalElems <= pMemBucket->maxElemsCapacity) {
- // data in buffer and file are merged together to be processed.
- tFilePage *buffer = loadIntoBucketFromDisk(pMemBucket, i, j, pMemBucket->pOrderDesc);
- int32_t currentIdx = count - num;
-
- char * thisVal = buffer->data + pMemBucket->nElemSize * currentIdx;
- char * nextVal = thisVal + pMemBucket->nElemSize;
- double td = 1.0, nd = 1.0;
- switch (pMemBucket->dataType) {
- case TSDB_DATA_TYPE_SMALLINT: {
- td = *(int16_t *)thisVal;
- nd = *(int16_t *)nextVal;
- break;
- }
- case TSDB_DATA_TYPE_TINYINT: {
- td = *(int8_t *)thisVal;
- nd = *(int8_t *)nextVal;
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- td = *(int32_t *)thisVal;
- nd = *(int32_t *)nextVal;
- break;
- };
- case TSDB_DATA_TYPE_FLOAT: {
- // td = *(float *)thisVal;
- // nd = *(float *)nextVal;
- td = GET_FLOAT_VAL(thisVal);
- nd = GET_FLOAT_VAL(nextVal);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- // td = *(double *)thisVal;
- td = GET_DOUBLE_VAL(thisVal);
- // nd = *(double *)nextVal;
- nd = GET_DOUBLE_VAL(nextVal);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- td = (double)*(int64_t *)thisVal;
- nd = (double)*(int64_t *)nextVal;
- break;
- }
+ case TSDB_DATA_TYPE_FLOAT:
+ case TSDB_DATA_TYPE_DOUBLE: {
+ maxOfThisSlot = pSlot->range.dMaxVal;
+ minOfNextSlot = next.dMinVal;
+ break;
+ };
+ case TSDB_DATA_TYPE_BIGINT: {
+ maxOfThisSlot = (double)pSlot->range.i64MaxVal;
+ minOfNextSlot = (double)next.i64MinVal;
+ break;
}
- double val = (1 - fraction) * td + fraction * nd;
- taosTFree(buffer);
+ };
- return val;
- } else { // incur a second round bucket split
- if (isIdenticalData(pMemBucket, i, j)) {
- tExtMemBuffer *pMemBuffer = pSeg->pBuffer[j];
+ assert(minOfNextSlot > maxOfThisSlot);
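+      // the (count+1)-th smallest value is exactly the first value of the next non-empty slot,
+      // so the percentile sits on the slot boundary and is interpolated between this slot's
+      // max and the next slot's min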
- tFilePage *pPage = (tFilePage *)malloc(pMemBuffer->pageSize);
-
- char *thisVal = getFirstElemOfMemBuffer(pSeg, j, pPage);
-
- double finalResult = 0.0;
-
- switch (pMemBucket->dataType) {
- case TSDB_DATA_TYPE_SMALLINT: {
- finalResult = *(int16_t *)thisVal;
- break;
- }
- case TSDB_DATA_TYPE_TINYINT: {
- finalResult = *(int8_t *)thisVal;
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- finalResult = *(int32_t *)thisVal;
- break;
- };
- case TSDB_DATA_TYPE_FLOAT: {
- // finalResult = *(float *)thisVal;
- finalResult = GET_FLOAT_VAL(thisVal);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- // finalResult = *(double *)thisVal;
- finalResult = GET_DOUBLE_VAL(thisVal);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- finalResult = (double)(*(int64_t *)thisVal);
- break;
- }
- }
-
- free(pPage);
- return finalResult;
- }
-
- uDebug("MemBucket:%p,start second round bucketing", pMemBucket);
-
- if (pSeg->pBuffer[j]->numOfElemsInBuffer != 0) {
- uDebug("MemBucket:%p,flush %d pages to disk, clear status", pMemBucket, pSeg->pBuffer[j]->numOfInMemPages);
-
- pMemBucket->numOfAvailPages += pSeg->pBuffer[j]->numOfInMemPages;
- tExtMemBufferFlush(pSeg->pBuffer[j]);
- }
-
- tExtMemBuffer *pMemBuffer = pSeg->pBuffer[j];
- pSeg->pBuffer[j] = NULL;
-
- // release all
- for (int32_t tt = 0; tt < pMemBucket->numOfSegs; ++tt) {
- tMemBucketSegment *pSeg = &pMemBucket->pSegs[tt];
- for (int32_t ttx = 0; ttx < pSeg->numOfSlots; ++ttx) {
- if (pSeg->pBuffer && pSeg->pBuffer[ttx]) {
- pSeg->pBuffer[ttx] = destoryExtMemBuffer(pSeg->pBuffer[ttx]);
- }
- }
- }
-
- pMemBucket->nRange.i64MaxVal = pSeg->pBoundingEntries->i64MaxVal;
- pMemBucket->nRange.i64MinVal = pSeg->pBoundingEntries->i64MinVal;
- pMemBucket->numOfElems = 0;
-
- for (int32_t tt = 0; tt < pMemBucket->numOfSegs; ++tt) {
- tMemBucketSegment *pSeg = &pMemBucket->pSegs[tt];
- for (int32_t ttx = 0; ttx < pSeg->numOfSlots; ++ttx) {
- if (pSeg->pBoundingEntries) {
- resetBoundingBox(pSeg, pMemBucket->dataType);
- }
- }
- }
-
- tFilePage *pPage = (tFilePage *)malloc(pMemBuffer->pageSize);
-
- tFlushoutInfo *pFlushInfo = &pMemBuffer->fileMeta.flushoutData.pFlushoutInfo[0];
- assert(pFlushInfo->numOfPages == pMemBuffer->fileMeta.nFileSize);
-
- int32_t ret = fseek(pMemBuffer->file, pFlushInfo->startPageId * pMemBuffer->pageSize, SEEK_SET);
- UNUSED(ret);
-
- for (uint32_t jx = 0; jx < pFlushInfo->numOfPages; ++jx) {
- size_t sz = fread(pPage, pMemBuffer->pageSize, 1, pMemBuffer->file);
- if (sz != pMemBuffer->pageSize) {
- uError("MemBucket:%p, read tmp file %s failed", pMemBucket, pMemBuffer->path);
- } else {
- tMemBucketPut(pMemBucket, pPage->data, (int32_t)pPage->num);
- }
- }
-
- fclose(pMemBuffer->file);
- if (unlink(pMemBuffer->path) != 0) {
- uError("MemBucket:%p, remove tmp file %s failed", pMemBucket, pMemBuffer->path);
- }
- taosTFree(pMemBuffer);
- taosTFree(pPage);
-
- return getPercentileImpl(pMemBucket, count - num, fraction);
- }
- } else {
- num += pSeg->pBuffer[j]->numOfTotalElems;
+ double val = (1 - fraction) * maxOfThisSlot + fraction * minOfNextSlot;
+ return val;
}
+
+ if (pSlot->info.size <= pMemBucket->maxCapacity) {
+ // data in buffer and file are merged together to be processed.
+ tFilePage *buffer = loadDataFromFilePage(pMemBucket, i);
+ int32_t currentIdx = count - num;
+
+ char *thisVal = buffer->data + pMemBucket->bytes * currentIdx;
+ char *nextVal = thisVal + pMemBucket->bytes;
+
+ double td = 1.0, nd = 1.0;
+ switch (pMemBucket->type) {
+ case TSDB_DATA_TYPE_SMALLINT: {
+ td = *(int16_t *)thisVal;
+ nd = *(int16_t *)nextVal;
+ break;
+ }
+ case TSDB_DATA_TYPE_TINYINT: {
+ td = *(int8_t *)thisVal;
+ nd = *(int8_t *)nextVal;
+ break;
+ }
+ case TSDB_DATA_TYPE_INT: {
+ td = *(int32_t *)thisVal;
+ nd = *(int32_t *)nextVal;
+ break;
+ };
+ case TSDB_DATA_TYPE_FLOAT: {
+ td = GET_FLOAT_VAL(thisVal);
+ nd = GET_FLOAT_VAL(nextVal);
+ break;
+ }
+ case TSDB_DATA_TYPE_DOUBLE: {
+ td = GET_DOUBLE_VAL(thisVal);
+ nd = GET_DOUBLE_VAL(nextVal);
+ break;
+ }
+ case TSDB_DATA_TYPE_BIGINT: {
+ td = (double)*(int64_t *)thisVal;
+ nd = (double)*(int64_t *)nextVal;
+ break;
+ }
+ }
+
+ double val = (1 - fraction) * td + fraction * nd;
+ taosTFree(buffer);
+
+ return val;
+ } else { // incur a second round bucket split
+ if (isIdenticalData(pMemBucket, i)) {
+ return getIdenticalDataVal(pMemBucket, i);
+ }
+
+ // try next round
+ pMemBucket->times += 1;
+ uDebug("MemBucket:%p, start next round data bucketing, time:%d", pMemBucket, pMemBucket->times);
+
+ pMemBucket->range = pSlot->range;
+ pMemBucket->total = 0;
+
+ resetSlotInfo(pMemBucket);
+
+ int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times - 1);
+ SIDList list = getDataBufPagesIdList(pMemBucket->pBuffer, groupId);
+ assert(list->size > 0);
+
+ for (int32_t f = 0; f < list->size; ++f) {
+ SPageInfo *pgInfo = *(SPageInfo **)taosArrayGet(list, f);
+ tFilePage *pg = getResBufPage(pMemBucket->pBuffer, pgInfo->pageId);
+
+ tMemBucketPut(pMemBucket, pg->data, (int32_t)pg->num);
+ releaseResBufPageInfo(pMemBucket->pBuffer, pgInfo);
+ }
+
+ return getPercentileImpl(pMemBucket, count - num, fraction);
+ }
+ } else {
+ num += pSlot->info.size;
}
}
+
return 0;
}
double getPercentile(tMemBucket *pMemBucket, double percent) {
- if (pMemBucket->numOfElems == 0) {
+ if (pMemBucket->total == 0) {
return 0.0;
}
- if (pMemBucket->numOfElems == 1) { // return the only element
+  // if only one element exists, return it
+ if (pMemBucket->total == 1) {
return findOnlyResult(pMemBucket);
}
percent = fabs(percent);
- // validate the parameters
+ // find the min/max value, no need to scan all data in bucket
if (fabs(percent - 100.0) < DBL_EPSILON || (percent < DBL_EPSILON)) {
double minx = 0, maxx = 0;
- /*
- * find the min/max value, no need to scan all data in bucket
- */
findMaxMinValue(pMemBucket, &maxx, &minx);
return fabs(percent - 100) < DBL_EPSILON ? maxx : minx;
}
- double percentVal = (percent * (pMemBucket->numOfElems - 1)) / ((double)100.0);
+ double percentVal = (percent * (pMemBucket->total - 1)) / ((double)100.0);
int32_t orderIdx = (int32_t)percentVal;
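+  // e.g. 100 values at percent = 90: percentVal = 90 * 99 / 100 = 89.1, so orderIdx = 89 and
+  // the fractional part 0.1 serves as the interpolation weight in getPercentileImpl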
// do put data by using buckets
@@ -934,19 +695,18 @@ double getPercentile(tMemBucket *pMemBucket, double percent) {
}
/*
- * check if data in one slot are all identical
- * only need to compare with the bounding box
+ * check if data in one slot are all identical; only the bounding box needs to be compared
*/
-bool isIdenticalData(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx) {
- tMemBucketSegment *pSeg = &pMemBucket->pSegs[segIdx];
+bool isIdenticalData(tMemBucket *pMemBucket, int32_t index) {
+ tMemBucketSlot *pSeg = &pMemBucket->pSlots[index];
- if (pMemBucket->dataType == TSDB_DATA_TYPE_INT || pMemBucket->dataType == TSDB_DATA_TYPE_BIGINT ||
- pMemBucket->dataType == TSDB_DATA_TYPE_SMALLINT || pMemBucket->dataType == TSDB_DATA_TYPE_TINYINT) {
- return pSeg->pBoundingEntries[slotIdx].i64MinVal == pSeg->pBoundingEntries[slotIdx].i64MaxVal;
+ if (pMemBucket->type == TSDB_DATA_TYPE_INT || pMemBucket->type == TSDB_DATA_TYPE_BIGINT ||
+ pMemBucket->type == TSDB_DATA_TYPE_SMALLINT || pMemBucket->type == TSDB_DATA_TYPE_TINYINT) {
+ return pSeg->range.i64MinVal == pSeg->range.i64MaxVal;
}
- if (pMemBucket->dataType == TSDB_DATA_TYPE_FLOAT || pMemBucket->dataType == TSDB_DATA_TYPE_DOUBLE) {
- return fabs(pSeg->pBoundingEntries[slotIdx].dMaxVal - pSeg->pBoundingEntries[slotIdx].dMinVal) < DBL_EPSILON;
+ if (pMemBucket->type == TSDB_DATA_TYPE_FLOAT || pMemBucket->type == TSDB_DATA_TYPE_DOUBLE) {
+ return fabs(pSeg->range.dMaxVal - pSeg->range.dMinVal) < DBL_EPSILON;
}
return false;
@@ -956,24 +716,24 @@ bool isIdenticalData(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx) {
* get the first element of one slot into memory.
* if no data of current slot in memory, load it from disk
*/
-char *getFirstElemOfMemBuffer(tMemBucketSegment *pSeg, int32_t slotIdx, tFilePage *pPage) {
- tExtMemBuffer *pMemBuffer = pSeg->pBuffer[slotIdx];
- char * thisVal = NULL;
+char *getFirstElemOfMemBuffer(tMemBucketSlot *pSeg, int32_t slotIdx, tFilePage *pPage) {
+// STSBuf *pMemBuffer = pSeg->pBuffer[slotIdx];
+ char *thisVal = NULL;
- if (pSeg->pBuffer[slotIdx]->numOfElemsInBuffer != 0) {
- thisVal = pSeg->pBuffer[slotIdx]->pHead->item.data;
- } else {
- /*
- * no data in memory, load one page into memory
- */
- tFlushoutInfo *pFlushInfo = &pMemBuffer->fileMeta.flushoutData.pFlushoutInfo[0];
- assert(pFlushInfo->numOfPages == pMemBuffer->fileMeta.nFileSize);
- int32_t ret;
- ret = fseek(pMemBuffer->file, pFlushInfo->startPageId * pMemBuffer->pageSize, SEEK_SET);
- UNUSED(ret);
- size_t sz = fread(pPage, pMemBuffer->pageSize, 1, pMemBuffer->file);
- UNUSED(sz);
- thisVal = pPage->data;
- }
+// if (pSeg->pBuffer[slotIdx]->numOfTotal != 0) {
+//// thisVal = pSeg->pBuffer[slotIdx]->pHead->item.data;
+// } else {
+// /*
+// * no data in memory, load one page into memory
+// */
+// tFlushoutInfo *pFlushInfo = &pMemBuffer->fileMeta.flushoutData.pFlushoutInfo[0];
+// assert(pFlushInfo->numOfPages == pMemBuffer->fileMeta.nFileSize);
+// int32_t ret;
+// ret = fseek(pMemBuffer->file, pFlushInfo->startPageId * pMemBuffer->pageSize, SEEK_SET);
+// UNUSED(ret);
+// size_t sz = fread(pPage, pMemBuffer->pageSize, 1, pMemBuffer->file);
+// UNUSED(sz);
+// thisVal = pPage->data;
+// }
return thisVal;
}
diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c
index 4c6d75ec14..84f22918ec 100644
--- a/src/tsdb/src/tsdbRWHelper.c
+++ b/src/tsdb/src/tsdbRWHelper.c
@@ -1130,8 +1130,15 @@ static int tsdbCheckAndDecodeColumnData(SDataCol *pDataCol, char *content, int32
// Decode the data
if (comp) {
// // Need to decompress
- pDataCol->len = (*(tDataTypeDesc[pDataCol->type].decompFunc))(
- content, len - sizeof(TSCKSUM), numOfRows, pDataCol->pData, pDataCol->spaceSize, comp, buffer, bufferSize);
+ int tlen = (*(tDataTypeDesc[pDataCol->type].decompFunc))(content, len - sizeof(TSCKSUM), numOfRows, pDataCol->pData,
+ pDataCol->spaceSize, comp, buffer, bufferSize);
+ if (tlen <= 0) {
+ tsdbError("Failed to decompress column, file corrupted, len:%d comp:%d numOfRows:%d maxPoints:%d bufferSize:%d",
+ len, comp, numOfRows, maxPoints, bufferSize);
+ terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
+ return -1;
+ }
+ pDataCol->len = tlen;
if (pDataCol->type == TSDB_DATA_TYPE_BINARY || pDataCol->type == TSDB_DATA_TYPE_NCHAR) {
dataColSetOffset(pDataCol, numOfRows);
}
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index f8ff25ddab..ac7eba72b2 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -295,9 +295,16 @@ out_of_memory:
}
TsdbQueryHandleT tsdbQueryLastRow(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, void* qinfo) {
- pCond->order = TSDB_ORDER_ASC;
pCond->twindow = changeTableGroupByLastrow(groupList);
+
+ // no qualified table
+ if (groupList->numOfTables == 0) {
+ return NULL;
+ }
+
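+  // the ascending order is no longer forced here; it is treated as a precondition and checked
+  // by the assert below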
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qinfo);
+
+ assert(pCond->order == TSDB_ORDER_ASC && pCond->twindow.skey <= pCond->twindow.ekey);
return pQueryHandle;
}
@@ -1981,8 +1988,9 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
STimeWindow changeTableGroupByLastrow(STableGroupInfo *groupList) {
STimeWindow window = {INT64_MAX, INT64_MIN};
+ int32_t totalNumOfTable = 0;
+
// NOTE: starts from the buffer in case of descending timestamp order check data blocks
- // todo consider the query time window, current last_row does not apply the query time window
size_t numOfGroups = taosArrayGetSize(groupList->pGroupList);
for(int32_t j = 0; j < numOfGroups; ++j) {
SArray* pGroup = taosArrayGetP(groupList->pGroupList, j);
@@ -1993,8 +2001,9 @@ STimeWindow changeTableGroupByLastrow(STableGroupInfo *groupList) {
size_t numOfTables = taosArrayGetSize(pGroup);
for(int32_t i = 0; i < numOfTables; ++i) {
STableKeyInfo* pKeyInfo = (STableKeyInfo*) taosArrayGet(pGroup, i);
- TSKEY lastKey = ((STable*)(pKeyInfo->pTable))->lastKey;
+      // if lastKey equals INT64_MIN, there is no data in this table
+ TSKEY lastKey = ((STable*)(pKeyInfo->pTable))->lastKey;
if (key < lastKey) {
key = lastKey;
@@ -2012,13 +2021,23 @@ STimeWindow changeTableGroupByLastrow(STableGroupInfo *groupList) {
}
}
+ // clear current group
+ taosArrayClear(pGroup);
+
// more than one table in each group, only one table left for each group
- if (numOfTables > 1) {
- taosArrayClear(pGroup);
+ if (keyInfo.pTable != NULL) {
+ totalNumOfTable++;
taosArrayPush(pGroup, &keyInfo);
}
}
+  // the window was never updated, so restore the initializer value
+ if (window.skey == INT64_MAX && window.ekey == INT64_MIN) {
+ window = TSWINDOW_INITIALIZER;
+ assert(totalNumOfTable == 0);
+ }
+
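+  // publish how many tables actually remain after the per-group pruning above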
+ groupList->numOfTables = totalNumOfTable;
return window;
}
diff --git a/src/util/inc/tscompression.h b/src/util/inc/tscompression.h
index bd1ccf3ca5..37d1e7b590 100644
--- a/src/util/inc/tscompression.h
+++ b/src/util/inc/tscompression.h
@@ -65,7 +65,7 @@ static FORCE_INLINE int tsDecompressTinyint(const char *const input, int compres
if (algorithm == ONE_STAGE_COMP) {
return tsDecompressINTImp(input, nelements, output, TSDB_DATA_TYPE_TINYINT);
} else if (algorithm == TWO_STAGE_COMP) {
- tsDecompressStringImp(input, compressedSize, buffer, bufferSize);
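+    // if the first (string) decompression stage fails, propagate -1 instead of decoding a
+    // garbage buffer; the same guard is applied to every two-stage decompressor below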
+ if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1;
return tsDecompressINTImp(buffer, nelements, output, TSDB_DATA_TYPE_TINYINT);
} else {
assert(0);
@@ -91,7 +91,7 @@ static FORCE_INLINE int tsDecompressSmallint(const char *const input, int compre
if (algorithm == ONE_STAGE_COMP) {
return tsDecompressINTImp(input, nelements, output, TSDB_DATA_TYPE_SMALLINT);
} else if (algorithm == TWO_STAGE_COMP) {
- tsDecompressStringImp(input, compressedSize, buffer, bufferSize);
+ if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1;
return tsDecompressINTImp(buffer, nelements, output, TSDB_DATA_TYPE_SMALLINT);
} else {
assert(0);
@@ -117,7 +117,7 @@ static FORCE_INLINE int tsDecompressInt(const char *const input, int compressedS
if (algorithm == ONE_STAGE_COMP) {
return tsDecompressINTImp(input, nelements, output, TSDB_DATA_TYPE_INT);
} else if (algorithm == TWO_STAGE_COMP) {
- tsDecompressStringImp(input, compressedSize, buffer, bufferSize);
+ if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1;
return tsDecompressINTImp(buffer, nelements, output, TSDB_DATA_TYPE_INT);
} else {
assert(0);
@@ -143,7 +143,7 @@ static FORCE_INLINE int tsDecompressBigint(const char *const input, int compress
if (algorithm == ONE_STAGE_COMP) {
return tsDecompressINTImp(input, nelements, output, TSDB_DATA_TYPE_BIGINT);
} else if (algorithm == TWO_STAGE_COMP) {
- tsDecompressStringImp(input, compressedSize, buffer, bufferSize);
+ if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1;
return tsDecompressINTImp(buffer, nelements, output, TSDB_DATA_TYPE_BIGINT);
} else {
assert(0);
@@ -169,7 +169,7 @@ static FORCE_INLINE int tsDecompressBool(const char *const input, int compressed
if (algorithm == ONE_STAGE_COMP) {
return tsDecompressBoolImp(input, nelements, output);
} else if (algorithm == TWO_STAGE_COMP) {
- tsDecompressStringImp(input, compressedSize, buffer, bufferSize);
+ if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1;
return tsDecompressBoolImp(buffer, nelements, output);
} else {
assert(0);
@@ -205,7 +205,7 @@ static FORCE_INLINE int tsDecompressFloat(const char *const input, int compresse
if (algorithm == ONE_STAGE_COMP) {
return tsDecompressFloatImp(input, nelements, output);
} else if (algorithm == TWO_STAGE_COMP) {
- tsDecompressStringImp(input, compressedSize, buffer, bufferSize);
+ if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1;
return tsDecompressFloatImp(buffer, nelements, output);
} else {
assert(0);
@@ -231,7 +231,7 @@ static FORCE_INLINE int tsDecompressDouble(const char *const input, int compress
if (algorithm == ONE_STAGE_COMP) {
return tsDecompressDoubleImp(input, nelements, output);
} else if (algorithm == TWO_STAGE_COMP) {
- tsDecompressStringImp(input, compressedSize, buffer, bufferSize);
+ if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1;
return tsDecompressDoubleImp(buffer, nelements, output);
} else {
assert(0);
@@ -257,7 +257,7 @@ static FORCE_INLINE int tsDecompressTimestamp(const char *const input, int compr
if (algorithm == ONE_STAGE_COMP) {
return tsDecompressTimestampImp(input, nelements, output);
} else if (algorithm == TWO_STAGE_COMP) {
- tsDecompressStringImp(input, compressedSize, buffer, bufferSize);
+ if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1;
return tsDecompressTimestampImp(buffer, nelements, output);
} else {
assert(0);
diff --git a/src/util/src/hash.c b/src/util/src/hash.c
index 7d10545ce7..cc96f83f44 100644
--- a/src/util/src/hash.c
+++ b/src/util/src/hash.c
@@ -327,7 +327,6 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe
// no data, return directly
if (pe->num == 0) {
- assert(pe->next == NULL);
__rd_unlock(&pHashObj->lock, pHashObj->type);
return -1;
}
@@ -377,6 +376,12 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe
}
}
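+  // the entry's element count and its linked list must stay consistent regardless of which
+  // branch above removed the node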
+ if (pe->num == 0) {
+ assert(pe->next == NULL);
+ } else {
+ assert(pe->next != NULL);
+ }
+
if (pHashObj->type == HASH_ENTRY_LOCK) {
taosWUnLockLatch(&pe->latch);
}
@@ -390,22 +395,8 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe
if (pRes != NULL) {
atomic_sub_fetch_64(&pHashObj->size, 1);
FREE_HASH_NODE(pHashObj, pRes);
-
- if (pe->num == 0) {
- assert(pe->next == NULL);
- } else {
- assert(pe->next != NULL);
- }
-
return 0;
} else {
-
- if (pe->num == 0) {
- assert(pe->next == NULL);
- } else {
- assert(pe->next != NULL);
- }
-
return -1;
}
}
diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c
index dfa982b848..e5526647cb 100644
--- a/src/util/src/tcache.c
+++ b/src/util/src/tcache.c
@@ -266,7 +266,12 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen
}
SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGetCB(pCacheObj->pHashTable, key, keyLen, incRefFn);
+ if (ptNode != NULL) {
+ assert ((*ptNode) != NULL && (int64_t) ((*ptNode)->data) != 0x40);
+ }
+
void* pData = (ptNode != NULL)? (*ptNode)->data:NULL;
+ assert((int64_t)pData != 0x40);
if (pData != NULL) {
atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1);
@@ -349,7 +354,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
char* d = pNode->data;
int32_t ref = T_REF_VAL_GET(pNode);
- uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, key, d, ref - 1);
+ uDebug("cache:%s, key:%p, %p is released, refcnt:%d, intrash:%d", pCacheObj->name, key, d, ref - 1, inTrashCan);
/*
* If it is not referenced by other users, remove it immediately. Otherwise move this node to trashcan wait for all users
@@ -373,31 +378,43 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
} else {
// NOTE: remove it from hash in the first place, otherwise, the pNode may have been released by other thread
// when reaches here.
- int32_t ret = taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize);
+ SCacheDataNode *p = NULL;
+ int32_t ret = taosHashRemoveWithData(pCacheObj->pHashTable, pNode->key, pNode->keySize, &p, sizeof(void *));
ref = T_REF_DEC(pNode);
// successfully remove from hash table, if failed, this node must have been move to trash already, do nothing.
// note that the remove operation can be executed only once.
if (ret == 0) {
- if (ref > 0) {
- assert(pNode->pTNodeHeader == NULL);
+ if (p != pNode) {
+ uDebug( "cache:%s, key:%p, successfully removed a new entry:%p, refcnt:%d, prev entry:%p has been removed by "
+ "others already", pCacheObj->name, pNode->key, p->data, T_REF_VAL_GET(p), pNode->data);
- __cache_wr_lock(pCacheObj);
- taosAddToTrash(pCacheObj, pNode);
- __cache_unlock(pCacheObj);
- } else { // ref == 0
- atomic_sub_fetch_64(&pCacheObj->totalSize, pNode->size);
+ assert(p->pTNodeHeader == NULL);
+ taosAddToTrash(pCacheObj, p);
+ } else {
+ uDebug("cache:%s, key:%p, %p successfully removed from hash table, refcnt:%d", pCacheObj->name, pNode->key,
+ pNode->data, ref);
+ if (ref > 0) {
+ assert(pNode->pTNodeHeader == NULL);
- int32_t size = (int32_t)taosHashGetSize(pCacheObj->pHashTable);
- uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, num:%d size:%" PRId64 "bytes",
- pCacheObj->name, pNode->key, pNode->data, pNode->size, size, pCacheObj->totalSize);
+ taosAddToTrash(pCacheObj, pNode);
+ } else { // ref == 0
+ atomic_sub_fetch_64(&pCacheObj->totalSize, pNode->size);
- if (pCacheObj->freeFp) {
- pCacheObj->freeFp(pNode->data);
+ int32_t size = (int32_t)taosHashGetSize(pCacheObj->pHashTable);
+ uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, num:%d size:%" PRId64 "bytes",
+ pCacheObj->name, pNode->key, pNode->data, pNode->size, size, pCacheObj->totalSize);
+
+ if (pCacheObj->freeFp) {
+ pCacheObj->freeFp(pNode->data);
+ }
+
+ free(pNode);
}
-
- free(pNode);
}
+ } else {
+ uDebug("cache:%s, key:%p, %p has been removed from hash table by other thread already, refcnt:%d",
+ pCacheObj->name, pNode->key, pNode->data, ref);
}
}
@@ -485,20 +502,21 @@ void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
STrashElem *pElem = calloc(1, sizeof(STrashElem));
pElem->pData = pNode;
+ pElem->prev = NULL;
+ pNode->inTrashCan = true;
+ pNode->pTNodeHeader = pElem;
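+  // the trash element is fully initialized before the lock is taken, so the critical section
+  // covers only the shared trash-list update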
+ __cache_wr_lock(pCacheObj);
pElem->next = pCacheObj->pTrash;
if (pCacheObj->pTrash) {
pCacheObj->pTrash->prev = pElem;
}
- pElem->prev = NULL;
pCacheObj->pTrash = pElem;
-
- pNode->inTrashCan = true;
- pNode->pTNodeHeader = pElem;
pCacheObj->numOfElemsInTrash++;
+ __cache_unlock(pCacheObj);
- uDebug("%s key:%p, %p move to trash, numOfElem in trash:%d", pCacheObj->name, pNode->key, pNode->data,
+ uDebug("cache:%s key:%p, %p move to trash, numOfElem in trash:%d", pCacheObj->name, pNode->key, pNode->data,
pCacheObj->numOfElemsInTrash);
}
diff --git a/src/util/src/tcompression.c b/src/util/src/tcompression.c
index 8c5828d32d..1a5d28625f 100644
--- a/src/util/src/tcompression.c
+++ b/src/util/src/tcompression.c
@@ -47,10 +47,11 @@
*
*/
-#include "os.h"
#include "lz4.h"
-#include "tscompression.h"
+#include "os.h"
#include "taosdef.h"
+#include "tscompression.h"
+#include "tulog.h"
static const int TEST_NUMBER = 1;
#define is_bigendian() ((*(char *)&TEST_NUMBER) == 0)
@@ -88,7 +89,7 @@ int tsCompressINTImp(const char *const input, const int nelements, char *const o
word_length = CHAR_BYTES;
break;
default:
- perror("Wrong integer types.\n");
+ uError("Invalid compress integer type:%d", type);
return -1;
}
@@ -209,7 +210,7 @@ int tsDecompressINTImp(const char *const input, const int nelements, char *const
word_length = CHAR_BYTES;
break;
default:
- perror("Wrong integer types.\n");
+ uError("Invalid decompress integer type:%d", type);
return -1;
}
@@ -307,7 +308,7 @@ int tsCompressBoolImp(const char *const input, const int nelements, char *const
/* t = (~((( uint8_t)1) << (7-i%BITS_PER_BYTE))); */
output[pos] |= t;
} else {
- perror("Wrong bool value.\n");
+ uError("Invalid compress bool value:%d", output[pos]);
return -1;
}
}
@@ -363,7 +364,7 @@ int tsCompressBoolRLEImp(const char *const input, const int nelements, char *con
} else if (num == 0) {
output[_pos++] = (counter << 1) | INT8MASK(0);
} else {
- perror("Wrong bool value!\n");
+ uError("Invalid compress bool value:%d", output[_pos]);
return -1;
}
}
@@ -413,9 +414,7 @@ int tsDecompressStringImp(const char *const input, int compressedSize, char *con
/* It is compressed by LZ4 algorithm */
const int decompressed_size = LZ4_decompress_safe(input + 1, output, compressedSize - 1, outputSize);
if (decompressed_size < 0) {
- char msg[128] = {0};
- sprintf(msg, "decomp_size:%d, Error decompress in LZ4 algorithm!\n", decompressed_size);
- perror(msg);
+ uError("Failed to decompress string with LZ4 algorithm, decompressed size:%d", decompressed_size);
return -1;
}
@@ -425,7 +424,7 @@ int tsDecompressStringImp(const char *const input, int compressedSize, char *con
memcpy(output, input + 1, compressedSize - 1);
return compressedSize - 1;
} else {
- perror("Wrong compressed string indicator!\n");
+ uError("Invalid decompress string indicator:%d", input[0]);
return -1;
}
}
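
All of the tcompression.c hunks make the same substitution: perror() can only append strerror(errno), which says nothing about a bad type byte or indicator, while uError() (from the newly included tulog.h) takes a printf-style format so the offending value travels with the message. A sketch with a hypothetical stand-in macro, just to show the shape of the call — the real uError also handles log levels, timestamps and file routing:

```c
#include <stdio.h>

/* Hypothetical stand-in for the printf-style uError macro from tulog.h. */
#define uError(fmt, ...) fprintf(stderr, "ERROR: " fmt "\n", __VA_ARGS__)

int main(void) {
  int type = 42;                                   /* some unknown type id */
  uError("Invalid compress integer type:%d", type); /* bad value is logged */
  return 0;
}
```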
diff --git a/src/util/tests/CMakeLists.txt b/src/util/tests/CMakeLists.txt
index 09523cbfb4..8687a8005d 100644
--- a/src/util/tests/CMakeLists.txt
+++ b/src/util/tests/CMakeLists.txt
@@ -10,6 +10,6 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR})
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
- ADD_EXECUTABLE(utilTest ./cacheTest.cpp ./hashTest.cpp)
+ ADD_EXECUTABLE(utilTest ${SOURCE_LIST})
TARGET_LINK_LIBRARIES(utilTest tutil common osdetail gtest pthread gcov)
ENDIF()
diff --git a/src/util/tests/cacheTest.cpp b/src/util/tests/cacheTest.cpp
index e0debd53f4..51221e0b35 100644
--- a/src/util/tests/cacheTest.cpp
+++ b/src/util/tests/cacheTest.cpp
@@ -1,16 +1,9 @@
#include "os.h"
#include <gtest/gtest.h>
#include <iostream>
-#include
#include "taos.h"
-//#include "tsdb.h"
-
-//#include "testCommon.h"
-#include "tstoken.h"
-#include "tutil.h"
#include "tcache.h"
-#include "ttimer.h"
namespace {
int32_t tsMaxMgmtConnections = 10000;
diff --git a/tests/examples/lua/lua_connector.c b/tests/examples/lua/lua_connector.c
index f4065bb274..143f16a799 100644
--- a/tests/examples/lua/lua_connector.c
+++ b/tests/examples/lua/lua_connector.c
@@ -58,8 +58,10 @@ static int l_query(lua_State *L){
int table_index = lua_gettop(L);
// printf("receive command:%s\r\n",s);
- if(taos_query(taos, s)!=0){
- printf("failed, reason:%s\n", taos_errstr(taos));
+ result = taos_query(taos,s);
+ int32_t code = taos_errno(result);
+ if( code != 0){
+ printf("failed, reason:%s\n", taos_errstr(result));
lua_pushnumber(L, -1);
lua_setfield(L, table_index, "code");
lua_pushstring(L, taos_errstr(taos));
@@ -69,24 +71,13 @@ static int l_query(lua_State *L){
}else{
//printf("success to query.\n");
- result = taos_use_result(taos);
-
- if (result == NULL) {
- printf("failed to get result, reason:%s\n", taos_errstr(taos));
- lua_pushnumber(L, -2);
- lua_setfield(L, table_index, "code");
- lua_pushstring(L, taos_errstr(taos));
- lua_setfield(L, table_index, "error");
- return 1;
- }
-
TAOS_ROW row;
int rows = 0;
- int num_fields = taos_field_count(taos);
+ int num_fields = taos_field_count(result);
TAOS_FIELD *fields = taos_fetch_fields(result);
char temp[256];
- int affectRows = taos_affected_rows(taos);
+ int affectRows = taos_affected_rows(result);
// printf(" affect rows:%d\r\n", affectRows);
lua_pushnumber(L, 0);
lua_setfield(L, table_index, "code");
@@ -155,15 +146,13 @@ static int l_query(lua_State *L){
}
void stream_cb(void *param, TAOS_RES *result, TAOS_ROW row){
-
struct cb_param* p = (struct cb_param*) param;
TAOS_FIELD *fields = taos_fetch_fields(result);
int numFields = taos_num_fields(result);
+ printf("\nnumfields:%d\n", numFields);
printf("\n\r-----------------------------------------------------------------------------------\n");
- // printf("r:%d, L:%d\n",p->callback, p->state);
-
lua_State *L = p->state;
lua_rawgeti(L, LUA_REGISTRYINDEX, p->callback);
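
The lua_connector.c rewrite follows the 2.0 client API: taos_query() now returns a TAOS_RES handle, errors are read from that handle with taos_errno()/taos_errstr(), and taos_use_result() is gone; field counts and affected rows also come from the result. (One context line above still passes taos_errstr(taos) to lua_pushstring, a leftover from the old scheme.) A minimal C sketch of the new calling pattern, assuming an already-open TAOS* connection:

```c
#include <stdio.h>
#include "taos.h"

/* 2.0-style query: status, field metadata and affected rows all hang off
 * the TAOS_RES handle rather than the connection. */
int run_query(TAOS *taos, const char *sql) {
  TAOS_RES *result = taos_query(taos, sql);
  if (taos_errno(result) != 0) {
    printf("failed, reason:%s\n", taos_errstr(result));
    taos_free_result(result);
    return -1;
  }

  int num_fields = taos_field_count(result);
  TAOS_FIELD *fields = taos_fetch_fields(result);  /* column metadata */
  (void)fields;
  printf("fields:%d, affected rows:%d\n", num_fields, taos_affected_rows(result));

  TAOS_ROW row;
  while ((row = taos_fetch_row(result)) != NULL) {
    /* format each of the num_fields columns via fields[i].type here */
  }

  taos_free_result(result);
  return 0;
}
```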
diff --git a/tests/examples/lua/test.lua b/tests/examples/lua/test.lua
index 38ae1c82f2..4d5f9fe7d3 100644
--- a/tests/examples/lua/test.lua
+++ b/tests/examples/lua/test.lua
@@ -15,7 +15,7 @@ else
conn = res.conn
end
-local res = driver.query(conn,"drop database demo")
+local res = driver.query(conn,"drop database if exists demo")
res = driver.query(conn,"create database demo")
if res.code ~=0 then
@@ -106,7 +106,7 @@ end
--From now on we begin a continuous query in a definite (infinite if you want) loop.
local loop_index = 0
-while loop_index < 20 do
+while loop_index < 10 do
local t = os.time()*1000
local v = loop_index
res = driver.query(conn,string.format("INSERT INTO therm1 VALUES (%d, %d)",t,v))
diff --git a/tests/pytest/crash_gen.py b/tests/pytest/crash_gen.py
index 7d3eb959c0..c0a8fd1f00 100755
--- a/tests/pytest/crash_gen.py
+++ b/tests/pytest/crash_gen.py
@@ -161,6 +161,21 @@ class WorkerThread:
logger.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...")
break
+ # Before we fetch the task and run it, let's ensure we properly "use" the database
+ try:
+ if (gConfig.per_thread_db_connection): # most likely TRUE
+ if not self._dbConn.isOpen: # might have been closed during server auto-restart
+ self._dbConn.open()
+ self.useDb() # might encounter exceptions. TODO: catch
+ except taos.error.ProgrammingError as err:
+ errno = Helper.convertErrno(err.errno)
+                    if errno in [0x383, 0x386, 0x00B, 0x014]: # invalid database, dropping, Unable to establish connection, Database not ready
+                        pass # ignore
+ else:
+ print("\nCaught programming error. errno=0x{:X}, msg={} ".format(errno, err.msg))
+ raise
+
# Fetch a task from the Thread Coordinator
logger.debug( "[TRD] Worker thread [{}] about to fetch task".format(self._tid))
task = tc.fetchTask()
@@ -324,10 +339,12 @@ class ThreadCoordinator:
logger.debug("[STT] transition ended")
# Due to limitation (or maybe not) of the Python library,
# we cannot share connections across threads
- if sm.hasDatabase():
- for t in self._pool.threadList:
- logger.debug("[DB] use db for all worker threads")
- t.useDb()
+            # Here we are in the main thread; we cannot operate on connections created in worker threads.
+            # The block below has moved into the per-thread task loop.
+ # if sm.hasDatabase():
+ # for t in self._pool.threadList:
+ # logger.debug("[DB] use db for all worker threads")
+ # t.useDb()
# t.execSql("use db") # main thread executing "use
# db" on behalf of every worker thread
except taos.error.ProgrammingError as err:
@@ -387,7 +404,7 @@ class ThreadCoordinator:
transitionFailed = self._doTransition() # To start, we end step -1 first
except taos.error.ProgrammingError as err:
transitionFailed = True
- errno2 = err.errno if (err.errno > 0) else 0x80000000 + err.errno # correct error scheme
+ errno2 = Helper.convertErrno(err.errno) # correct error scheme
errMsg = "Transition failed: errno=0x{:X}, msg: {}".format(errno2, err)
logger.info(errMsg)
self._execStats.registerFailure(errMsg)
@@ -468,6 +485,10 @@ class ThreadCoordinator:
# We define a class to run a number of threads in locking steps.
+class Helper:
+ @classmethod
+ def convertErrno(cls, errno):
+ return errno if (errno > 0) else 0x80000000 + errno
class ThreadPool:
def __init__(self, numThreads, maxSteps):
@@ -613,8 +634,7 @@ class DbConn:
def resetDb(self): # reset the whole database, etc.
if (not self.isOpen):
- raise RuntimeError(
- "Cannot reset database until connection is open")
+ raise RuntimeError("Cannot reset database until connection is open")
# self._tdSql.prepare() # Recreate database, etc.
self.execute('drop database if exists db')
@@ -673,7 +693,7 @@ class DbConnRest(DbConn):
def __init__(self):
super().__init__()
self._type = self.TYPE_REST
- self._url = "http://localhost:6020/rest/sql" # fixed for now
+ self._url = "http://localhost:6041/rest/sql" # fixed for now
self._result = None
def openByType(self): # Open connection
@@ -681,8 +701,7 @@ class DbConnRest(DbConn):
def close(self):
if (not self.isOpen):
- raise RuntimeError(
- "Cannot clean up database until connection is open")
+ raise RuntimeError("Cannot clean up database until connection is open")
# Do nothing for REST
logger.debug("[DB] REST Database connection closed")
self.isOpen = False
@@ -747,27 +766,32 @@ class DbConnRest(DbConn):
class MyTDSql:
- def __init__(self):
+ def __init__(self, hostAddr, cfgPath):
+ # Make the DB connection
+ self._conn = taos.connect(host=hostAddr, config=cfgPath)
+ self._cursor = self._conn.cursor()
+
self.queryRows = 0
self.queryCols = 0
self.affectedRows = 0
- def init(self, cursor, log=True):
- self.cursor = cursor
+ # def init(self, cursor, log=True):
+ # self.cursor = cursor
# if (log):
# caller = inspect.getframeinfo(inspect.stack()[1][0])
# self.cursor.log(caller.filename + ".sql")
def close(self):
- self.cursor.close()
+ self._conn.close() # TODO: very important, cursor close does NOT close DB connection!
+ self._cursor.close()
def query(self, sql):
self.sql = sql
try:
- self.cursor.execute(sql)
- self.queryResult = self.cursor.fetchall()
+ self._cursor.execute(sql)
+ self.queryResult = self._cursor.fetchall()
self.queryRows = len(self.queryResult)
- self.queryCols = len(self.cursor.description)
+ self.queryCols = len(self._cursor.description)
except Exception as e:
# caller = inspect.getframeinfo(inspect.stack()[1][0])
# args = (caller.filename, caller.lineno, sql, repr(e))
@@ -778,7 +802,7 @@ class MyTDSql:
def execute(self, sql):
self.sql = sql
try:
- self.affectedRows = self.cursor.execute(sql)
+ self.affectedRows = self._cursor.execute(sql)
except Exception as e:
# caller = inspect.getframeinfo(inspect.stack()[1][0])
# args = (caller.filename, caller.lineno, sql, repr(e))
@@ -791,13 +815,13 @@ class DbConnNative(DbConn):
# Class variables
_lock = threading.Lock()
_connInfoDisplayed = False
+ totalConnections = 0 # Not private
def __init__(self):
super().__init__()
self._type = self.TYPE_NATIVE
self._conn = None
- self._cursor = None
-
+ # self._cursor = None
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -814,7 +838,8 @@ class DbConnNative(DbConn):
buildPath = root[:len(root) - len("/build/bin")]
break
if buildPath == None:
- raise RuntimeError("Failed to determine buildPath, selfPath={}".format(selfPath))
+ raise RuntimeError("Failed to determine buildPath, selfPath={}, projPath={}"
+ .format(selfPath, projPath))
return buildPath
@@ -822,33 +847,40 @@ class DbConnNative(DbConn):
cfgPath = self.getBuildPath() + "/test/cfg"
hostAddr = "127.0.0.1"
- with self._lock: # force single threading for opening DB connections
- if not self._connInfoDisplayed:
- self.__class__._connInfoDisplayed = True # updating CLASS variable
- logger.info("Initiating TAOS native connection to {}, using config at {}".format(hostAddr, cfgPath))
-
- self._conn = taos.connect(host=hostAddr, config=cfgPath) # TODO: make configurable
- self._cursor = self._conn.cursor()
+ cls = self.__class__ # Get the class, to access class variables
+ with cls._lock: # force single threading for opening DB connections. # TODO: whaaat??!!!
+ if not cls._connInfoDisplayed:
+ cls._connInfoDisplayed = True # updating CLASS variable
+ logger.info("Initiating TAOS native connection to {}, using config at {}".format(hostAddr, cfgPath))
+ # Make the connection
+ # self._conn = taos.connect(host=hostAddr, config=cfgPath) # TODO: make configurable
+ # self._cursor = self._conn.cursor()
+ # Record the count in the class
+ self._tdSql = MyTDSql(hostAddr, cfgPath) # making DB connection
+ cls.totalConnections += 1
- self._cursor.execute('reset query cache')
+ self._tdSql.execute('reset query cache')
# self._cursor.execute('use db') # do this at the beginning of every
# Open connection
- self._tdSql = MyTDSql()
- self._tdSql.init(self._cursor)
-
+ # self._tdSql = MyTDSql()
+ # self._tdSql.init(self._cursor)
+
def close(self):
if (not self.isOpen):
- raise RuntimeError(
- "Cannot clean up database until connection is open")
+ raise RuntimeError("Cannot clean up database until connection is open")
self._tdSql.close()
+ # Decrement the class wide counter
+ cls = self.__class__ # Get the class, to access class variables
+ with cls._lock:
+ cls.totalConnections -= 1
+
logger.debug("[DB] Database connection closed")
self.isOpen = False
def execute(self, sql):
if (not self.isOpen):
- raise RuntimeError(
- "Cannot execute database commands until connection is open")
+ raise RuntimeError("Cannot execute database commands until connection is open")
logger.debug("[SQL] Executing SQL: {}".format(sql))
self._lastSql = sql
nRows = self._tdSql.execute(sql)
@@ -1274,6 +1306,7 @@ class DbManager():
"Cannot establish DB connection, please re-run script without parameter, and follow the instructions.")
sys.exit(2)
else:
+ print("Failed to connect to DB, errno = {}, msg: {}".format(Helper.convertErrno(err.errno), err.msg))
raise
except BaseException:
print("[=] Unexpected exception")
@@ -1528,7 +1561,7 @@ class Task():
try:
self._executeInternal(te, wt) # TODO: no return value?
except taos.error.ProgrammingError as err:
- errno2 = err.errno if (err.errno > 0) else 0x80000000 + err.errno # correct error scheme
+ errno2 = Helper.convertErrno(err.errno)
if (gConfig.continue_on_exception): # user choose to continue
self.logDebug("[=] Continue after TAOS exception: errno=0x{:X}, msg: {}, SQL: {}".format(
errno2, err, wt.getDbConn().getLastSql()))
@@ -1678,9 +1711,8 @@ class ExecutionStats:
logger.info(
"| Total Elapsed Time (from wall clock): {:.3f} seconds".format(
self._elapsedTime))
- logger.info(
- "| Top numbers written: {}".format(
- TaskExecutor.getBoundedList()))
+ logger.info("| Top numbers written: {}".format(TaskExecutor.getBoundedList()))
+ logger.info("| Total Number of Active DB Native Connections: {}".format(DbConnNative.totalConnections))
logger.info(
"----------------------------------------------------------------------")
@@ -1789,7 +1821,7 @@ class TdSuperTable:
try:
dbc.query("select TBNAME from db.{}".format(self._stName)) # TODO: analyze result set later
except taos.error.ProgrammingError as err:
- errno2 = err.errno if (err.errno > 0) else 0x80000000 + err.errno
+ errno2 = Helper.convertErrno(err.errno)
logger.debug("[=] Failed to get tables from super table: errno=0x{:X}, msg: {}".format(errno2, err))
raise
@@ -1879,10 +1911,19 @@ class TaskReadData(StateTransitionTask):
# 'twa(speed)', # TODO: this one REQUIRES a where statement, not reasonable
'sum(speed)',
'stddev(speed)',
+ # SELECTOR functions
'min(speed)',
'max(speed)',
'first(speed)',
- 'last(speed)']) # TODO: add more from 'top'
+ 'last(speed)',
+ # 'top(speed)', # TODO: not supported?
+ # 'bottom(speed)', # TODO: not supported?
+ # 'percentile(speed, 10)', # TODO: TD-1316
+ 'last_row(speed)',
+ # Transformation Functions
+            # 'diff(speed)', # TODO: not supported?!
+ 'spread(speed)'
+ ]) # TODO: add more from 'top'
filterExpr = Dice.choice([ # TODO: add various kind of WHERE conditions
None
])
@@ -1891,7 +1932,7 @@ class TaskReadData(StateTransitionTask):
if aggExpr not in ['stddev(speed)']: #TODO: STDDEV not valid for super tables?!
dbc.execute("select {} from db.{}".format(aggExpr, sTable.getName()))
except taos.error.ProgrammingError as err:
- errno2 = err.errno if (err.errno > 0) else 0x80000000 + err.errno
+ errno2 = Helper.convertErrno(err.errno)
logger.debug("[=] Read Failure: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, dbc.getLastSql()))
raise
@@ -1920,9 +1961,8 @@ class TaskDropSuperTable(StateTransitionTask):
self.execWtSql(wt, "drop table {}".format(
regTableName)) # nRows always 0, like MySQL
except taos.error.ProgrammingError as err:
- # correcting for strange error number scheme
- errno2 = err.errno if (
- err.errno > 0) else 0x80000000 + err.errno
+ # correcting for strange error number scheme
+ errno2 = Helper.convertErrno(err.errno)
if (errno2 in [0x362]): # mnode invalid table name
isSuccess = False
logger.debug(
@@ -2320,7 +2360,7 @@ class ServiceManagerThread:
self._thread2.start()
# wait for service to start
- for i in range(0, 10):
+ for i in range(0, 100):
time.sleep(1.0)
# self.procIpcBatch() # don't pump message during start up
print("_zz_", end="", flush=True)
@@ -2328,7 +2368,7 @@ class ServiceManagerThread:
logger.info("[] TDengine service READY to process requests")
return # now we've started
# TODO: handle this better?
- self.procIpcBatch(20, True) # display output before cronking out, trim to last 20 msgs, force output
+        self.procIpcBatch(100, True) # display output before conking out, trim to last 100 msgs, force output
raise RuntimeError("TDengine service did not start successfully")
def stop(self):
@@ -2429,7 +2469,11 @@ class ServiceManagerThread:
for line in iter(out.readline, b''):
# print("Finished reading a line: {}".format(line))
# print("Adding item to queue...")
- line = line.decode("utf-8").rstrip()
+ try:
+ line = line.decode("utf-8").rstrip()
+ except UnicodeError:
+                    print("\nNon-UTF8 server output: {}\n".format(line))
+                    continue # skip the garbled line rather than queueing raw bytes
+
# This might block, and then causing "out" buffer to block
queue.put(line)
self._printProgress("_i")
@@ -2455,7 +2499,7 @@ class ServiceManagerThread:
def svcErrorReader(self, err: IO, queue):
for line in iter(err.readline, b''):
- print("\nTD Svc STDERR: {}".format(line))
+ print("\nTDengine Service (taosd) ERROR (from stderr): {}".format(line))
class TdeSubProcess:
@@ -2734,7 +2778,7 @@ class MainExec:
try:
ret = self._clientMgr.run(self._svcMgr) # stop TAOS service inside
except requests.exceptions.ConnectionError as err:
- logger.warning("Failed to open REST connection to DB")
+            logger.warning("Failed to open REST connection to DB: {}".format(err))
# don't raise
return ret
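
A note on the Helper.convertErrno() refactor that recurs throughout crash_gen.py: the Python connector surfaces TDengine's 0x80000000-based server codes as negative signed integers, and adding 0x80000000 back strips the sign bit, recovering short codes such as 0x383 (invalid database) that the tests compare against. The same conversion restated in C, with a worked value:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative re-statement of Helper.convertErrno: a server code such as
 * 0x80000383 arrives from the connector as the negative signed value
 * -2147482749; adding 0x80000000 strips the sign bit, recovering 0x383. */
static uint32_t convert_errno(int32_t e) {
  return e > 0 ? (uint32_t)e : (uint32_t)(0x80000000u + (uint32_t)e);
}

int main(void) {
  printf("0x%X\n", convert_errno(-2147482749)); /* prints 0x383 */
  return 0;
}
```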
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index fd5aa4ecf0..d600a003b0 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -12,7 +12,7 @@ python3 ./test.py -f insert/tinyint.py
python3 ./test.py -f insert/date.py
python3 ./test.py -f insert/binary.py
python3 ./test.py -f insert/nchar.py
-python3 ./test.py -f insert/nchar-boundary.py
+#python3 ./test.py -f insert/nchar-boundary.py
python3 ./test.py -f insert/nchar-unicode.py
python3 ./test.py -f insert/multi.py
python3 ./test.py -f insert/randomNullCommit.py
@@ -20,7 +20,7 @@ python3 ./test.py -f insert/randomNullCommit.py
python3 ./test.py -f table/column_name.py
python3 ./test.py -f table/column_num.py
python3 ./test.py -f table/db_table.py
-python3 ./test.py -f table/tablename-boundary.py
+#python3 ./test.py -f table/tablename-boundary.py
# tag
python3 ./test.py -f tag_lite/filter.py
@@ -52,7 +52,7 @@ python3 ./test.py -f tag_lite/set.py
python3 ./test.py -f tag_lite/smallint.py
python3 ./test.py -f tag_lite/tinyint.py
-python3 ./test.py -f dbmgmt/database-name-boundary.py
+#python3 ./test.py -f dbmgmt/database-name-boundary.py
python3 ./test.py -f import_merge/importBlock1HO.py
python3 ./test.py -f import_merge/importBlock1HPO.py
@@ -145,6 +145,8 @@ python3 ./test.py -f query/queryJoin.py
python3 ./test.py -f query/select_last_crash.py
python3 ./test.py -f query/queryNullValueTest.py
python3 ./test.py -f query/queryInsertValue.py
+python3 ./test.py -f query/queryConnection.py
+python3 ./test.py -f query/natualInterval.py
#stream
python3 ./test.py -f stream/metric_1.py
@@ -182,7 +184,7 @@ python3 ./test.py -f functions/function_spread.py
python3 ./test.py -f functions/function_stddev.py
python3 ./test.py -f functions/function_sum.py
python3 ./test.py -f functions/function_top.py
-python3 ./test.py -f functions/function_twa.py
+#python3 ./test.py -f functions/function_twa.py
# tools
python3 test.py -f tools/taosdemo.py
diff --git a/tests/pytest/insert/nchar.py b/tests/pytest/insert/nchar.py
index b8e365f143..3319aa3c56 100644
--- a/tests/pytest/insert/nchar.py
+++ b/tests/pytest/insert/nchar.py
@@ -35,6 +35,8 @@ class TDTestCase:
tdSql.checkRows(2)
tdSql.checkData(1, 1, '涛思数据')
+ tdSql.error("insert into tb values (now, 'taosdata001')")
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/query/filterOtherTypes.py b/tests/pytest/query/filterOtherTypes.py
index 5033ffdb48..85d5a67bef 100644
--- a/tests/pytest/query/filterOtherTypes.py
+++ b/tests/pytest/query/filterOtherTypes.py
@@ -365,6 +365,54 @@ class TDTestCase:
# _ for binary type on tag case 5
tdSql.query("select * from st where tagcol3 like '_据'")
tdSql.checkRows(0)
+
+ # test case for https://jira.taosdata.com:18080/browse/TD-857
+ tdSql.execute("create database test")
+ tdSql.execute("use test")
+ tdSql.execute("create table meters(ts timestamp, voltage int) tags(tag1 binary(20))")
+ tdSql.execute("create table t1 using meters tags('beijing')")
+ tdSql.execute("create table t2 using meters tags('nanjing')")
+
+ tdSql.execute("insert into t1 values(1538548685000, 1) (1538548685001, 2) (1538548685002, 3)")
+ tdSql.execute("insert into t2 values(1538548685000, 4) (1538548685001, 5) (1538548685002, 6)")
+
+ tdSql.query("select * from t1 where tag1 like '%g'")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from t2 where tag1 like '%g'")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from meters where tag1 like '%g'")
+ tdSql.checkRows(6)
+
+ tdSql.execute("create table meters1(ts timestamp, voltage int) tags(tag1 nchar(20))")
+ tdSql.execute("create table t3 using meters1 tags('北京')")
+ tdSql.execute("create table t4 using meters1 tags('南京')")
+ tdSql.execute("create table t5 using meters1 tags('beijing')")
+ tdSql.execute("create table t6 using meters1 tags('nanjing')")
+
+ tdSql.execute("insert into t3 values(1538548685000, 1) (1538548685001, 2) (1538548685002, 3)")
+ tdSql.execute("insert into t4 values(1538548685000, 4) (1538548685001, 5) (1538548685002, 6)")
+ tdSql.execute("insert into t5 values(1538548685000, 1) (1538548685001, 2) (1538548685002, 3)")
+ tdSql.execute("insert into t6 values(1538548685000, 1) (1538548685001, 2) (1538548685002, 3)")
+
+ tdSql.query("select * from t3 where tag1 like '%京'")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from t4 where tag1 like '%京'")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from meters1 where tag1 like '%京'")
+ tdSql.checkRows(6)
+
+ tdSql.query("select * from t5 where tag1 like '%g'")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from t6 where tag1 like '%g'")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from meters1 where tag1 like '%g'")
+ tdSql.checkRows(6)
def stop(self):
tdSql.close()
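
The TD-857 cases above pin down LIKE with a leading '%' on both binary and nchar tags (and '_' for a single character). A '%x' pattern reduces to a suffix test; a tiny sketch of just that special case — TDengine's real matcher implements the full LIKE grammar, and nchar values are stored in their own encoding, so this byte-level version is only illustrative:

```c
#include <stdio.h>
#include <string.h>

/* A '%suffix' LIKE pattern is just a suffix comparison. */
static int like_suffix(const char *s, const char *suffix) {
  size_t ls = strlen(s), lf = strlen(suffix);
  return ls >= lf && strcmp(s + ls - lf, suffix) == 0;
}

int main(void) {
  /* mirrors the '%g' and '%京' patterns from the test above */
  printf("%d %d\n", like_suffix("beijing", "g"), like_suffix("北京", "京"));
  return 0;
}
```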
diff --git a/tests/pytest/query/natualInterval.py b/tests/pytest/query/natualInterval.py
new file mode 100644
index 0000000000..1ed91e1c68
--- /dev/null
+++ b/tests/pytest/query/natualInterval.py
@@ -0,0 +1,170 @@
+###################################################################
+# Copyright (c) 2020 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ def singleTable(self):
+ tdSql.execute("create table car(ts timestamp, s int)")
+ tdSql.execute("insert into car values('2019-01-01 00:00:00', 1)")
+ tdSql.execute("insert into car values('2019-05-13 12:00:00', 1)")
+ tdSql.execute("insert into car values('2019-12-31 23:59:59', 1)")
+ tdSql.execute("insert into car values('2020-01-01 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-01-02 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-01-03 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-01-04 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-01-05 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-01-31 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-02-01 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-02-02 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-02-29 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-03-01 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-03-02 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-03-15 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-03-31 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-05-01 12:00:00', 1)")
+
+ tdSql.query("select count(*) from car interval(1n)")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 1)
+ tdSql.checkData(3, 1, 6)
+ tdSql.checkData(4, 1, 3)
+ tdSql.checkData(5, 1, 4)
+ tdSql.checkData(6, 1, 1)
+
+ tdSql.query("select count(*) from car interval(1n) order by ts desc")
+ tdSql.checkData(6, 1, 1)
+ tdSql.checkData(5, 1, 1)
+ tdSql.checkData(4, 1, 1)
+ tdSql.checkData(3, 1, 6)
+ tdSql.checkData(2, 1, 3)
+ tdSql.checkData(1, 1, 4)
+ tdSql.checkData(0, 1, 1)
+
+ tdSql.query("select count(*) from car interval(2n)")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 1)
+ tdSql.checkData(3, 1, 9)
+ tdSql.checkData(4, 1, 4)
+ tdSql.checkData(5, 1, 1)
+
+ tdSql.query("select count(*) from car interval(2n) order by ts desc")
+ tdSql.checkData(5, 1, 1)
+ tdSql.checkData(4, 1, 1)
+ tdSql.checkData(3, 1, 1)
+ tdSql.checkData(2, 1, 9)
+ tdSql.checkData(1, 1, 4)
+ tdSql.checkData(0, 1, 1)
+
+ tdSql.query("select count(*) from car interval(1y)")
+ tdSql.checkData(0, 1, 3)
+ tdSql.checkData(1, 1, 14)
+
+ tdSql.query("select count(*) from car interval(2y)")
+ tdSql.checkData(0, 1, 3)
+ tdSql.checkData(1, 1, 14)
+
+
+ def superTable(self):
+ tdSql.execute("create table cars(ts timestamp, s int) tags(id int)")
+ tdSql.execute("create table car0 using cars tags(0)")
+ tdSql.execute("create table car1 using cars tags(0)")
+ tdSql.execute("create table car2 using cars tags(0)")
+ tdSql.execute("create table car3 using cars tags(0)")
+ tdSql.execute("create table car4 using cars tags(0)")
+
+ tdSql.execute("insert into car0 values('2019-01-01 00:00:00', 1)")
+ tdSql.execute("insert into car1 values('2019-05-13 12:00:00', 1)")
+ tdSql.execute("insert into car2 values('2019-12-31 23:59:59', 1)")
+ tdSql.execute("insert into car1 values('2020-01-01 12:00:00', 1)")
+ tdSql.execute("insert into car1 values('2020-01-02 12:00:00', 1)")
+ tdSql.execute("insert into car1 values('2020-01-03 12:00:00', 1)")
+ tdSql.execute("insert into car1 values('2020-01-04 12:00:00', 1)")
+ tdSql.execute("insert into car1 values('2020-01-05 12:00:00', 1)")
+ tdSql.execute("insert into car1 values('2020-01-31 12:00:00', 1)")
+ tdSql.execute("insert into car1 values('2020-02-01 12:00:00', 1)")
+ tdSql.execute("insert into car2 values('2020-02-02 12:00:00', 1)")
+ tdSql.execute("insert into car2 values('2020-02-29 12:00:00', 1)")
+ tdSql.execute("insert into car3 values('2020-03-01 12:00:00', 1)")
+ tdSql.execute("insert into car3 values('2020-03-02 12:00:00', 1)")
+ tdSql.execute("insert into car3 values('2020-03-15 12:00:00', 1)")
+ tdSql.execute("insert into car4 values('2020-03-31 12:00:00', 1)")
+ tdSql.execute("insert into car3 values('2020-05-01 12:00:00', 1)")
+
+ tdSql.query("select count(*) from cars interval(1n)")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 1)
+ tdSql.checkData(3, 1, 6)
+ tdSql.checkData(4, 1, 3)
+ tdSql.checkData(5, 1, 4)
+ tdSql.checkData(6, 1, 1)
+
+ tdSql.query("select count(*) from cars interval(1n) order by ts desc")
+ tdSql.checkData(6, 1, 1)
+ tdSql.checkData(5, 1, 1)
+ tdSql.checkData(4, 1, 1)
+ tdSql.checkData(3, 1, 6)
+ tdSql.checkData(2, 1, 3)
+ tdSql.checkData(1, 1, 4)
+ tdSql.checkData(0, 1, 1)
+
+ tdSql.query("select count(*) from cars interval(2n)")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 1)
+ tdSql.checkData(3, 1, 9)
+ tdSql.checkData(4, 1, 4)
+ tdSql.checkData(5, 1, 1)
+
+ tdSql.query("select count(*) from cars interval(2n) order by ts desc")
+ tdSql.checkData(5, 1, 1)
+ tdSql.checkData(4, 1, 1)
+ tdSql.checkData(3, 1, 1)
+ tdSql.checkData(2, 1, 9)
+ tdSql.checkData(1, 1, 4)
+ tdSql.checkData(0, 1, 1)
+
+ tdSql.query("select count(*) from cars interval(1y)")
+ tdSql.checkData(0, 1, 3)
+ tdSql.checkData(1, 1, 14)
+
+ tdSql.query("select count(*) from cars interval(2y)")
+ tdSql.checkData(0, 1, 3)
+ tdSql.checkData(1, 1, 14)
+
+
+ def run(self):
+ tdSql.prepare()
+ self.singleTable()
+ self.superTable()
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
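
For readers checking the expected counts in natualInterval.py: interval(1n) buckets rows by natural calendar month (and 1y by calendar year), so the 17 rows fall into seven month buckets — one row each in 2019-01, 2019-05 and 2019-12, then 6 in 2020-01, 3 in 2020-02, 4 in 2020-03 and 1 in 2020-05 — matching checkData rows 0 through 6. The bucketing rule, sketched:

```c
#include <stdio.h>

/* Bucket a (year, month) pair the way a natural one-month interval would:
 * every calendar month gets its own key, regardless of its length in days. */
static int month_bucket(int year, int month) { return year * 12 + (month - 1); }

int main(void) {
  /* 2020-01-31 and 2020-02-01 are one day apart but land in different buckets */
  printf("%d vs %d\n", month_bucket(2020, 1), month_bucket(2020, 2));
  return 0;
}
```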
diff --git a/tests/pytest/query/queryConnection.py b/tests/pytest/query/queryConnection.py
new file mode 100644
index 0000000000..ed05b5e6bd
--- /dev/null
+++ b/tests/pytest/query/queryConnection.py
@@ -0,0 +1,52 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute(
+ "create table if not exists st (ts timestamp, tagtype int) tags(dev nchar(50))")
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_001 using st tags("dev_01")')
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_002 using st tags("dev_02")')
+
+ tdSql.execute(
+ """INSERT INTO dev_001(ts, tagtype) VALUES('2020-05-13 10:00:00.000', 1),
+ ('2020-05-13 10:00:00.001', 1)
+ dev_002 VALUES('2020-05-13 10:00:00.001', 1)""")
+
+ for i in range(10):
+ for j in range(1000):
+ tdSql.query("select * from db.st")
+ tdLog.sleep(10)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/queryJoin.py b/tests/pytest/query/queryJoin.py
index 6ea240a334..17027cf498 100644
--- a/tests/pytest/query/queryJoin.py
+++ b/tests/pytest/query/queryJoin.py
@@ -114,6 +114,32 @@ class TDTestCase:
tdSql.error("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.pid, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id")
+ # test case for https://jira.taosdata.com:18080/browse/TD-1250
+
+ tdSql.execute("create table meters1(ts timestamp, voltage int) tags(tag1 binary(20), tag2 nchar(20))")
+ tdSql.execute("create table t1 using meters1 tags('beijing', 'chaoyang')")
+ tdSql.execute("create table t2 using meters1 tags('shanghai', 'xuhui')")
+ tdSql.execute("insert into t1 values(1538548685000, 1) (1538548685001, 2) (1538548685002, 3)")
+ tdSql.execute("insert into t1 values(1538548685004, 4) (1538548685004, 5) (1538548685005, 6)")
+
+ tdSql.execute("create table meters2(ts timestamp, voltage int) tags(tag1 binary(20), tag2 nchar(20))")
+ tdSql.execute("create table t3 using meters2 tags('beijing', 'chaoyang')")
+ tdSql.execute("create table t4 using meters2 tags('shenzhen', 'nanshan')")
+ tdSql.execute("insert into t3 values(1538548685000, 7) (1538548685001, 8) (1538548685002, 9)")
+ tdSql.execute("insert into t4 values(1538548685000, 10) (1538548685001, 11) (1538548685002, 12)")
+
+ tdSql.execute("create table meters3(ts timestamp, voltage int) tags(tag1 binary(20), tag2 nchar(20))")
+
+ tdSql.query("select * from meters1, meters2 where meters1.ts = meters2.ts and meters1.tag1 = meters2.tag1")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from meters1, meters2 where meters1.ts = meters2.ts and meters1.tag2 = meters2.tag2")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from meters1, meters3 where meters1.ts = meters3.ts and meters1.tag1 = meters3.tag1")
+ tdSql.checkRows(0)
+
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/tag_lite/bigint.py b/tests/pytest/tag_lite/bigint.py
index ecc6fad44d..f83961aaad 100644
--- a/tests/pytest/tag_lite/bigint.py
+++ b/tests/pytest/tag_lite/bigint.py
@@ -575,6 +575,20 @@ class TDTestCase:
# TSIM: system sh/exec.sh -n dnode1 -s stop -x SIGINT
# convert end
+ tdSql.execute("create database db")
+ tdSql.execute("use db")
+ tdSql.execute(
+ "create table if not exists st (ts timestamp, tagtype int) tags(dev bigint)")
+
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (pow(2, 63) - 1))
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_002 using st tags(%d)' % (-1 * pow(2, 63) + 1))
+
+ print("==============step2")
+ tdSql.query("show tables")
+ tdSql.checkRows(2)
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/tag_lite/binary.py b/tests/pytest/tag_lite/binary.py
index a5757dc5cd..4cbae63bec 100644
--- a/tests/pytest/tag_lite/binary.py
+++ b/tests/pytest/tag_lite/binary.py
@@ -579,6 +579,20 @@ class TDTestCase:
# TSIM: system sh/exec.sh -n dnode1 -s stop -x SIGINT
# convert end
+ tdSql.execute("create database db")
+ tdSql.execute("use db")
+ tdSql.execute(
+ "create table if not exists st (ts timestamp, tagtype int) tags(dev binary(5))")
+ tdSql.error(
+ 'CREATE TABLE if not exists dev_001 using st tags("dev_001")')
+
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_002 using st tags("dev")')
+
+ print("==============step2")
+ tdSql.query("show tables")
+ tdSql.checkRows(1)
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/tag_lite/int.py b/tests/pytest/tag_lite/int.py
index 99d4a69624..d5a6917389 100644
--- a/tests/pytest/tag_lite/int.py
+++ b/tests/pytest/tag_lite/int.py
@@ -574,6 +574,24 @@ class TDTestCase:
# TSIM: system sh/exec.sh -n dnode1 -s stop -x SIGINT
# convert end
+ tdSql.execute("create database db")
+ tdSql.execute("use db")
+ tdSql.execute(
+ "create table if not exists st (ts timestamp, tagtype int) tags(dev int)")
+ tdSql.error(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % pow(2, 31))
+ tdSql.error(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (-1 * pow(2, 31)))
+
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (pow(2, 31) - 1))
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_002 using st tags(%d)' % (-1 * pow(2, 31) + 1))
+
+ print("==============step2")
+ tdSql.query("show tables")
+ tdSql.checkRows(2)
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/tag_lite/nchar.py b/tests/pytest/tag_lite/nchar.py
new file mode 100644
index 0000000000..851cc32b56
--- /dev/null
+++ b/tests/pytest/tag_lite/nchar.py
@@ -0,0 +1,48 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def run(self):
+ tdSql.prepare()
+
+ print("==============step1")
+ tdSql.execute(
+ "create table if not exists st (ts timestamp, tagtype int) tags(dev nchar(5))")
+ tdSql.error(
+ 'CREATE TABLE if not exists dev_001 using st tags("dev_001")')
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_002 using st tags("dev")')
+
+
+ print("==============step2")
+ tdSql.query("show tables")
+ tdSql.checkRows(1)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tag_lite/smallint.py b/tests/pytest/tag_lite/smallint.py
index 089af55a34..c060e3f82b 100644
--- a/tests/pytest/tag_lite/smallint.py
+++ b/tests/pytest/tag_lite/smallint.py
@@ -574,6 +574,23 @@ class TDTestCase:
# TSIM:
# TSIM: system sh/exec.sh -n dnode1 -s stop -x SIGINT
# convert end
+ tdSql.execute("create database db")
+ tdSql.execute("use db")
+ tdSql.execute(
+ "create table if not exists st (ts timestamp, tagtype int) tags(dev smallint)")
+ tdSql.error(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % pow(2, 15))
+ tdSql.error(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (-1 * pow(2, 15)))
+
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (pow(2, 15) - 1))
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_002 using st tags(%d)' % (-1 * pow(2, 15) + 1))
+
+ print("==============step2")
+ tdSql.query("show tables")
+ tdSql.checkRows(2)
def stop(self):
tdSql.close()
diff --git a/tests/pytest/tag_lite/tinyint.py b/tests/pytest/tag_lite/tinyint.py
index 55e33d013b..089dd46569 100644
--- a/tests/pytest/tag_lite/tinyint.py
+++ b/tests/pytest/tag_lite/tinyint.py
@@ -575,6 +575,24 @@ class TDTestCase:
# TSIM: system sh/exec.sh -n dnode1 -s stop -x SIGINT
# convert end
+ tdSql.execute("create database db")
+ tdSql.execute("use db")
+ tdSql.execute(
+ "create table if not exists st (ts timestamp, tagtype int) tags(dev tinyint)")
+ tdSql.error(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % pow(2, 7))
+ tdSql.error(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (-1 * pow(2, 7)))
+
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (pow(2, 7) - 1))
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_002 using st tags(%d)' % (-1 * pow(2, 7) + 1))
+
+ print("==============step2")
+ tdSql.query("show tables")
+ tdSql.checkRows(2)
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
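
The four tag_lite additions share one boundary rule: for an n-bit signed tag type, int, smallint and tinyint reject 2^(n-1) and -(2^(n-1)) while accepting ±(2^(n-1) - 1); bigint exercises only the accepted extremes. This is consistent with each type's minimum being reserved (TDengine keeping the extreme value as a NULL sentinel — stated here as an assumption, not shown in this diff). The accepted ranges, spelled out with stdint.h constants:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Valid tag ranges implied by the boundary tests: minimum + 1 .. maximum.
 * (Assumption: the minimum of each type is reserved as the NULL sentinel.) */
int main(void) {
  printf("tinyint : %" PRId8  " .. %" PRId8  "\n", (int8_t)(INT8_MIN + 1),   INT8_MAX);
  printf("smallint: %" PRId16 " .. %" PRId16 "\n", (int16_t)(INT16_MIN + 1), INT16_MAX);
  printf("int     : %" PRId32 " .. %" PRId32 "\n", INT32_MIN + 1,            INT32_MAX);
  printf("bigint  : %" PRId64 " .. %" PRId64 "\n", INT64_MIN + 1,            INT64_MAX);
  return 0;
}
```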
diff --git a/tests/script/fullGeneralSuite.sim b/tests/script/fullGeneralSuite.sim
index 15cc2954e8..d137e53d27 100644
--- a/tests/script/fullGeneralSuite.sim
+++ b/tests/script/fullGeneralSuite.sim
@@ -24,6 +24,7 @@ run general/compute/diff2.sim
run general/compute/first.sim
run general/compute/interval.sim
run general/compute/last.sim
+run general/compute/last_row.sim
run general/compute/leastsquare.sim
run general/compute/max.sim
run general/compute/min.sim
diff --git a/tests/script/general/compute/last_row.sim b/tests/script/general/compute/last_row.sim
new file mode 100644
index 0000000000..cc5cc3edbb
--- /dev/null
+++ b/tests/script/general/compute/last_row.sim
@@ -0,0 +1,175 @@
+system sh/stop_dnodes.sh
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+$dbPrefix = m_la_db
+$tbPrefix = m_la_tb
+$mtPrefix = m_la_mt
+$tbNum = 10
+$rowNum = 20
+$totalNum = 200
+
+print =============== step1
+$i = 0
+$db = $dbPrefix . $i
+$mt = $mtPrefix . $i
+
+sql drop database $db -x step1
+step1:
+sql create database $db
+sql use $db
+sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int)
+
+$i = 0
+while $i < $tbNum
+ $tb = $tbPrefix . $i
+ sql create table $tb using $mt tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ms = $x . m
+ sql insert into $tb values (now + $ms , $x )
+ $x = $x + 1
+ endw
+
+ $i = $i + 1
+endw
+
+sleep 100
+
+print =============== step2
+$i = 1
+$tb = $tbPrefix . $i
+
+sql select last_row(tbcol) from $tb
+print ===> $data00
+if $data00 != 19 then
+ return -1
+endi
+
+print =============== step3
+sql select last_row(tbcol) from $tb where ts < now + 4m
+print ===> $data00
+if $data00 != 4 then
+ return -1
+endi
+
+print =============== step4
+sql select last_row(tbcol) as b from $tb
+print ===> $data00
+if $data00 != 19 then
+ return -1
+endi
+
+
+
+print =============== step7
+sql select last_row(tbcol) from $mt
+print ===> $data00
+if $data00 != 19 then
+ return -1
+endi
+
+print =============== step8
+sql select last_row(tbcol) as c from $mt where ts < now + 4m
+print ===> $data00
+if $data00 != 4 then
+ return -1
+endi
+
+sql select last_row(tbcol) as c from $mt where tgcol < 5
+print ===> $data00
+if $data00 != 19 then
+ return -1
+endi
+
+sql select last_row(tbcol) as c from $mt where tgcol < 5 and ts < now + 4m
+print ===> $data00
+if $data00 != 4 then
+ return -1
+endi
+
+
+
+print =============== step10
+sql select last_row(tbcol) as b from $mt group by tgcol
+print ===> $data00
+if $data00 != 19 then
+ return -1
+endi
+
+if $rows != $tbNum then
+ return -1
+endi
+
+print =============== step11
+
+sql insert into $tb values(now + 1h, 10)
+sql insert into $tb values(now + 3h, null)
+sql insert into $tb values(now + 5h, -1)
+sql insert into $tb values(now + 7h, null)
+
+## for super table
+sql select last_row(*) from $mt where ts < now + 6h
+if $data01 != -1 then
+ return -1
+endi
+
+sql select last_row(*) from $mt where ts < now + 8h
+if $data01 != NULL then
+ return -1
+endi
+
+sql select last_row(*) from $mt
+if $data01 != NULL then
+ return -1
+endi
+
+sql select last_row(*) from $mt where ts < now + 4h
+if $data01 != NULL then
+ return -1
+endi
+
+sql select last_row(*) from $mt where ts > now + 1h and ts < now + 4h
+if $data01 != NULL then
+ return -1
+endi
+
+## for table
+sql select last_row(*) from $tb where ts < now + 6h
+if $data01 != -1 then
+ return -1
+endi
+
+sql select last_row(*) from $tb where ts < now + 8h
+if $data01 != NULL then
+ return -1
+endi
+
+sql select last_row(*) from $tb
+if $data01 != NULL then
+ return -1
+endi
+
+sql select last_row(*) from $tb where ts < now + 4h
+if $data01 != NULL then
+ return -1
+endi
+
+sql select last_row(*) from $tb where ts > now + 1h and ts < now + 4h
+if $data01 != NULL then
+ return -1
+endi
+
+print =============== clear
+sql drop database $db
+sql show databases
+if $rows != 0 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
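
The step11 block above encodes the semantic difference worth remembering: last_row(*) returns the newest row even when its value is NULL, whereas last() skips NULLs per TDengine's aggregate semantics — hence -1 at ts < now + 6h, but NULL once the now + 7h NULL row is in range. A sketch of the two selection rules over the same four inserted values:

```c
#include <stdio.h>

/* NULL is modeled as a flag alongside the value, just for illustration. */
typedef struct { long ts; int val; int isNull; } Row;

int main(void) {
  /* the step11 inserts: 10, NULL, -1, NULL at increasing timestamps */
  Row rows[] = {{1, 10, 0}, {3, 0, 1}, {5, -1, 0}, {7, 0, 1}};
  int n = 4;

  Row lastRow = rows[n - 1];                     /* last_row: newest, NULL or not */
  int lastVal = 0, found = 0;
  for (int i = n - 1; i >= 0 && !found; i--)     /* last: newest non-NULL value */
    if (!rows[i].isNull) { lastVal = rows[i].val; found = 1; }

  printf("last_row: %s, last: %d\n", lastRow.isNull ? "NULL" : "value", lastVal);
  return 0;
}
```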
diff --git a/tests/script/general/http/restful_full.sim b/tests/script/general/http/restful_full.sim
index 88e7dece4c..b7f98e49e0 100644
--- a/tests/script/general/http/restful_full.sim
+++ b/tests/script/general/http/restful_full.sim
@@ -119,7 +119,7 @@ endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' used1' 127.0.0.1:7111/rest/sql
print 17-> $system_content
-if $system_content != @{"status":"error","code":512,"desc":"invalid SQL: invalid SQL: syntax error near 'used1'"}@ then
+if $system_content != @{"status":"error","code":534,"desc":"Syntax errr in SQL"}@ then
return -1
endi
@@ -230,4 +230,4 @@ if $system_content != @{"status":"succ","head":["ts","speed"],"data":[["2017-12-
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/constCol.sim b/tests/script/general/parser/constCol.sim
index a196ba2b50..13b4455779 100644
--- a/tests/script/general/parser/constCol.sim
+++ b/tests/script/general/parser/constCol.sim
@@ -347,6 +347,8 @@ if $rows != 3 then
return -1
endi
+print ======================udc with normal column group by
+
sql_error select from t1
sql_error select abc from t1
sql_error select abc as tu from t1
diff --git a/tests/script/general/parser/join.sim b/tests/script/general/parser/join.sim
index 1bb0ff5448..f17e28c1da 100644
--- a/tests/script/general/parser/join.sim
+++ b/tests/script/general/parser/join.sim
@@ -482,15 +482,31 @@ sql insert into um2 using m2 tags(9) values(1000001, 10)(2000000, 20);
sql_error select count(*) from m1,m2 where m1.a=m2.a and m1.ts=m2.ts;
-#empty table join test, add for no result join test
+print ====> empty table/empty super-table join test, added for the no-result join case
sql create database ux1;
sql use ux1;
sql create table m1(ts timestamp, k int) tags(a binary(12), b int);
sql create table tm0 using m1 tags('abc', 1);
sql create table m2(ts timestamp, k int) tags(a int, b binary(12));
+
+sql select count(*) from m1, m2 where m1.ts=m2.ts and m1.b=m2.a;
+if $rows != 0 then
+ return -1
+endi
+
sql create table tm2 using m2 tags(2, 'abc');
sql select count(*) from tm0, tm2 where tm0.ts=tm2.ts;
-sql select count(*) from m1, m2 where m1.ts=m2.ts and m1.b=m2.a
+if $rows != 0 then
+ return -1
+endi
+
+sql select count(*) from m1, m2 where m1.ts=m2.ts and m1.b=m2.a;
+if $rows != 0 then
+ return -1
+endi
+
+sql drop table tm2;
+sql select count(*) from m1, m2 where m1.ts=m2.ts and m1.b=m2.a;
sql drop database ux1;
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/general/parser/lastrow_query.sim b/tests/script/general/parser/lastrow_query.sim
index 7954a8d228..5fc47ed15d 100644
--- a/tests/script/general/parser/lastrow_query.sim
+++ b/tests/script/general/parser/lastrow_query.sim
@@ -152,3 +152,23 @@ sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000'
if $rows != 46 then
return -1
endi
+
+print ========>td-1317, empty table last_row query crashed
+sql create table m1(ts timestamp, k int) tags (a int);
+sql create table t1 using m1 tags(1);
+sql create table t2 using m1 tags(2);
+
+sql select last_row(*) from t1
+if $rows != 0 then
+ return -1
+endi
+
+sql select last_row(*) from m1
+if $rows != 0 then
+ return -1
+endi
+
+sql select last_row(*) from m1 where tbname in ('t1')
+if $rows != 0 then
+ return -1
+endi
diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim
index 4e26d14cfd..6790564cc7 100644
--- a/tests/script/general/parser/testSuite.sim
+++ b/tests/script/general/parser/testSuite.sim
@@ -99,6 +99,8 @@ run general/parser/union.sim
sleep 2000
run general/parser/constCol.sim
sleep 2000
+run general/parser/timestamp.sim
+sleep 2000
run general/parser/sliding.sim
#sleep 2000
diff --git a/tests/script/general/parser/timestamp.sim b/tests/script/general/parser/timestamp.sim
index 0a86e39de0..28bbc9df0e 100644
--- a/tests/script/general/parser/timestamp.sim
+++ b/tests/script/general/parser/timestamp.sim
@@ -20,7 +20,7 @@ $db = $dbPrefix . $i
$stb = $stbPrefix . $i
sql drop database if exists $db
-sql create database $db maxrows 200 cache 1024 tblocks 200 maxTables 4
+sql create database $db maxrows 200 maxTables 4
print ====== create tables
sql use $db
sql create table $stb (ts timestamp, c1 timestamp, c2 int) tags(t1 binary(20))
diff --git a/tests/script/general/parser/timestamp_query.sim b/tests/script/general/parser/timestamp_query.sim
index 63e40d0bf7..6994b2d295 100644
--- a/tests/script/general/parser/timestamp_query.sim
+++ b/tests/script/general/parser/timestamp_query.sim
@@ -22,12 +22,29 @@ $tsu = $tsu - $delta
$tsu = $tsu + $ts0
##### select from supertable
-
$tb = $tbPrefix . 0
-sql select first(c1), last(c1) from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, -1)
+sql select first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, -1)
$res = $rowNum * 2
-$res = $res - 1
-if $rows != $res then
+$n = $res - 2
+print ============>$n
+if $rows != $n then
+ print expect $n, actual $rows
return -1
endi
+if $data03 != 598.000000000 then
+ print expect 598.000000000, actual $data03
+ return -1
+endi
+
+
+if $data13 != 598.000000000 then
+  print expect 598.000000000, actual $data13
+ return -1
+endi
+
+sql select first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, NULL)
+if $data13 != 598.000000000 then
+  print expect 598.000000000, actual $data13
+ return -1
+endi
\ No newline at end of file
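
The constant column added to the fill queries checks out by hand: (1537325400 - 1537146000) / (5 * 60) = 179400 / 300 = 598, which is why every asserted cell is 598.000000000 under both fill(value, -1) and fill(value, NULL). As a one-liner:

```c
#include <stdio.h>

int main(void) {
  /* the constant expression from the sim script above */
  printf("%.9f\n", (1537325400.0 - 1537146000.0) / (5 * 60)); /* 598.000000000 */
  return 0;
}
```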
diff --git a/tests/script/general/parser/topbot.sim b/tests/script/general/parser/topbot.sim
index fdda79451d..5616f8ed16 100644
--- a/tests/script/general/parser/topbot.sim
+++ b/tests/script/general/parser/topbot.sim
@@ -118,4 +118,23 @@ if $data21 != 2.10000 then
return -1
endi
+print =====================td-1302 case
+sql create database t1 keep 36500;
+sql use t1;
+sql create table test(ts timestamp, k int);
+sql insert into test values(29999, 1)(70000, 2)(80000, 3)
+
+print ================== restart server to commit data into disk
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+sleep 5000
+system sh/exec.sh -n dnode1 -s start
+print ================== server restart completed
+sql connect
+sleep 3000
+
+sql select count(*) from t1.test where ts>10000 and ts<90000 interval(5000a)
+if $rows != 3 then
+ return -1
+endi
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/tmp/182.sim b/tests/script/tmp/182.sim
new file mode 100644
index 0000000000..a178282cf8
--- /dev/null
+++ b/tests/script/tmp/182.sim
@@ -0,0 +1,41 @@
+system sh/stop_dnodes.sh
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c activeCode -v eglxDLzRpslJWl7OxrPZ2K3sQ5631AP9SVpezsaz2dhJWl7OxrPZ2ElaXs7Gs9nYSVpezsaz2djGIj5StnQ3ZvLHcsE8cwcN
+
+system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
+system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
+system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4
+system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 4
+
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 100000
+system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 100000
+system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 100000
+system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 100000
+
+system sh/cfg.sh -n dnode1 -c http -v 1
+system sh/cfg.sh -n dnode2 -c http -v 1
+system sh/cfg.sh -n dnode3 -c http -v 1
+system sh/cfg.sh -n dnode4 -c http -v 1
+
+system sh/cfg.sh -n dnode1 -c httpMaxThreads -v 4
+
+system sh/cfg.sh -n dnode1 -c firstEp -v 127.0.0.1:6030
+system sh/cfg.sh -n dnode1 -c secondEp -v 127.0.0.1:6030
+system sh/cfg.sh -n dnode1 -c serverPort -v 6030
+system sh/cfg.sh -n dnode1 -c fqdn -v 127.0.0.1
+system sh/exec.sh -n dnode1 -s start
diff --git a/tests/script/unique/account/usage.sim b/tests/script/unique/account/usage.sim
index 8d611d6fb6..7fde365201 100644
--- a/tests/script/unique/account/usage.sim
+++ b/tests/script/unique/account/usage.sim
@@ -129,6 +129,25 @@ print no write auth
sql_error insert into d1.t1 values(now + 7s, 2)
sql_error insert into d1.t1 values(now + 8s, 2)
+print =============== step5
+sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state all
+sleep 10000
+
+sql insert into d1.t1 values(now + 11s, 1)
+sql insert into d1.t1 values(now + 12s, 2)
+
+sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state no
+sleep 10000
+print no write auth
+sql_error insert into d1.t1 values(now + 13s, 2)
+sql_error insert into d1.t1 values(now + 14s, 2)
+
+sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state all
+sleep 10000
+print has write auth
+sql insert into d1.t1 values(now + 15s, 1)
+sql insert into d1.t1 values(now + 16s, 2)
+
print =============== check grant
sql_error create database d6
diff --git a/tests/test/c/CMakeLists.txt b/tests/test/c/CMakeLists.txt
index 9d11895104..e1fedaee3c 100644
--- a/tests/test/c/CMakeLists.txt
+++ b/tests/test/c/CMakeLists.txt
@@ -25,6 +25,9 @@ IF (TD_LINUX)
#add_executable(createTablePerformance createTablePerformance.c)
#target_link_libraries(createTablePerformance taos_static tutil common pthread)
- add_executable(createNormalTable createNormalTable.c)
- target_link_libraries(createNormalTable taos_static tutil common pthread)
+ #add_executable(createNormalTable createNormalTable.c)
+ #target_link_libraries(createNormalTable taos_static tutil common pthread)
+
+ add_executable(queryPerformance queryPerformance.c)
+ target_link_libraries(queryPerformance taos_static tutil common pthread)
ENDIF()
diff --git a/tests/test/c/queryPerformance.c b/tests/test/c/queryPerformance.c
new file mode 100644
index 0000000000..eda082dd4f
--- /dev/null
+++ b/tests/test/c/queryPerformance.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define _DEFAULT_SOURCE
+#include "os.h"
+#include "taos.h"
+#include "tulog.h"
+#include "ttimer.h"
+#include "tutil.h"
+#include "tglobal.h"
+
+#define MAX_RANDOM_POINTS 20000
+#define GREEN "\033[1;32m"
+#define NC "\033[0m"
+
+typedef struct {
+ int64_t startTimeMs;
+ int64_t endTimeMs;
+ int threadIndex;
+ pthread_t thread;
+} SInfo;
+
+void *syncTest(void *param);
+void shellParseArgument(int argc, char *argv[]);
+void queryData();
+
+int numOfThreads = 10;
+int useGlobalConn = 1;
+int requestPerThread = 10000;
+char requestSql[10240] = "show dnodes";
+TAOS *globalConn;
+
+int main(int argc, char *argv[]) {
+ shellParseArgument(argc, argv);
+ taos_init();
+ queryData();
+}
+
+void queryData() {
+ struct timeval systemTime;
+ int64_t st, et;
+ char fqdn[TSDB_FQDN_LEN];
+ uint16_t port;
+
+ if (useGlobalConn) {
+ taosGetFqdnPortFromEp(tsFirst, fqdn, &port);
+
+ globalConn = taos_connect(fqdn, "root", "taosdata", NULL, port);
+ if (globalConn == NULL) {
+ pError("failed to connect to DB, reason:%s", taos_errstr(globalConn));
+ exit(1);
+ }
+ }
+
+ pPrint("%d threads are spawned to query", numOfThreads);
+
+ gettimeofday(&systemTime, NULL);
+ st = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
+
+ pthread_attr_t thattr;
+ pthread_attr_init(&thattr);
+ pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+ SInfo *pInfo = (SInfo *)malloc(sizeof(SInfo) * numOfThreads);
+
+  // Start threads to query
+ for (int i = 0; i < numOfThreads; ++i) {
+ pInfo[i].threadIndex = i;
+ pthread_create(&(pInfo[i].thread), &thattr, syncTest, (void *)(pInfo + i));
+ }
+
+ taosMsleep(300);
+ for (int i = 0; i < numOfThreads; i++) {
+ pthread_join(pInfo[i].thread, NULL);
+ }
+
+ gettimeofday(&systemTime, NULL);
+ et = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
+ double totalTimeMs = (et - st) / 1000.0;
+
+ int totalReq = requestPerThread * numOfThreads;
+ float rspTime = totalTimeMs / requestPerThread;
+ float qps = totalReq / (totalTimeMs / 1000);
+
+ pPrint("%s threads:%d, totalTime %.1fms totalReq:%d qps:%.1f rspTime:%.3fms %s", GREEN, numOfThreads, totalTimeMs,
+ totalReq, qps, rspTime, NC);
+
+ pthread_attr_destroy(&thattr);
+ free(pInfo);
+}
+
+void *syncTest(void *param) {
+ TAOS * con;
+ SInfo * pInfo = (SInfo *)param;
+ char fqdn[TSDB_FQDN_LEN];
+ uint16_t port;
+
+ if (useGlobalConn) {
+    pPrint("thread:%d, start to run using the global connection", pInfo->threadIndex);
+ con = globalConn;
+ } else {
+    pPrint("thread:%d, start to run, creating a new connection", pInfo->threadIndex);
+ taosGetFqdnPortFromEp(tsFirst, fqdn, &port);
+
+ con = taos_connect(fqdn, "root", "taosdata", NULL, port);
+ if (con == NULL) {
+ pError("index:%d, failed to connect to DB, reason:%s", pInfo->threadIndex, taos_errstr(con));
+ exit(1);
+ }
+ }
+
+ for (int i = 0; i < requestPerThread; ++i) {
+ void *tres = taos_query(con, requestSql);
+
+ TAOS_ROW row = taos_fetch_row(tres);
+ if (row == NULL) {
+ taos_free_result(tres);
+ exit(0);
+ }
+
+ do {
+ row = taos_fetch_row(tres);
+ } while (row != NULL);
+
+ taos_free_result(tres);
+  }
+
+  if (!useGlobalConn) taos_close(con); // per-thread connections are not shared; close before exiting
+  return NULL;
+}
+
+void printHelp() {
+ char indent[10] = " ";
+ printf("Used to test the query performance of TDengine\n");
+
+ printf("%s%s\n", indent, "-c");
+ printf("%s%s%s%s\n", indent, indent, "Configuration directory, default is ", configDir);
+ printf("%s%s\n", indent, "-s");
+ printf("%s%s%s%s\n", indent, indent, "The sql to be executed, default is ", requestSql);
+ printf("%s%s\n", indent, "-r");
+ printf("%s%s%s%d\n", indent, indent, "Request per thread, default is ", requestPerThread);
+ printf("%s%s\n", indent, "-t");
+ printf("%s%s%s%d\n", indent, indent, "Number of threads to be used, default is ", numOfThreads);
+ printf("%s%s\n", indent, "-g");
+ printf("%s%s%s%d\n", indent, indent, "Whether to share connections between threads, default is ", useGlobalConn);
+
+ exit(EXIT_SUCCESS);
+}
+
+void shellParseArgument(int argc, char *argv[]) {
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) {
+ printHelp();
+ exit(0);
+ } else if (strcmp(argv[i], "-c") == 0) {
+ strcpy(configDir, argv[++i]);
+ } else if (strcmp(argv[i], "-s") == 0) {
+ strcpy(requestSql, argv[++i]);
+ } else if (strcmp(argv[i], "-r") == 0) {
+ requestPerThread = atoi(argv[++i]);
+ } else if (strcmp(argv[i], "-t") == 0) {
+ numOfThreads = atoi(argv[++i]);
+ } else if (strcmp(argv[i], "-g") == 0) {
+ useGlobalConn = atoi(argv[++i]);
+ } else {
+ }
+ }
+
+ pPrint("%s sql:%s %s", GREEN, requestSql, NC);
+ pPrint("%s requestPerThread:%d %s", GREEN, requestPerThread, NC);
+ pPrint("%s numOfThreads:%d %s", GREEN, numOfThreads, NC);
+ pPrint("%s useGlobalConn:%d %s", GREEN, useGlobalConn, NC);
+ pPrint("%s start to run %s", GREEN, NC);
+}