diff --git a/cmake/define.inc b/cmake/define.inc
index a15e0aecbb..d6a3f2b915 100755
--- a/cmake/define.inc
+++ b/cmake/define.inc
@@ -57,7 +57,7 @@ IF (TD_LINUX_64)
ADD_DEFINITIONS(-D_M_X64)
ADD_DEFINITIONS(-D_TD_LINUX_64)
MESSAGE(STATUS "linux64 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ADD_DEFINITIONS(-DUSE_LIBICONV)
IF (JEMALLOC_ENABLED)
@@ -70,7 +70,7 @@ IF (TD_LINUX_32)
ADD_DEFINITIONS(-D_TD_LINUX_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "linux32 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_ARM_64)
@@ -78,7 +78,7 @@ IF (TD_ARM_64)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm64 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_ARM_32)
@@ -86,7 +86,7 @@ IF (TD_ARM_32)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm32 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
ENDIF ()
IF (TD_MIPS_64)
@@ -94,7 +94,7 @@ IF (TD_MIPS_64)
ADD_DEFINITIONS(-D_TD_MIPS_64)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips64 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_MIPS_32)
@@ -102,7 +102,7 @@ IF (TD_MIPS_32)
ADD_DEFINITIONS(-D_TD_MIPS_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips32 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_APLHINE)
@@ -147,7 +147,7 @@ IF (TD_DARWIN_64)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "darwin64 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
IF (TD_MEMORY_SANITIZER)
SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -O0 -g3 -DDEBUG")
ELSE ()
diff --git a/cmake/env.inc b/cmake/env.inc
index 6c1ce8fd89..fa15ec6aee 100755
--- a/cmake/env.inc
+++ b/cmake/env.inc
@@ -35,13 +35,13 @@ ENDIF ()
# Set compiler options
SET(COMMON_C_FLAGS "${COMMON_FLAGS} -std=gnu99")
-SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_FLAGS} ${DEBUG_FLAGS}")
-SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_FLAGS} ${RELEASE_FLAGS}")
+SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_C_FLAGS} ${DEBUG_FLAGS}")
+SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_C_FLAGS} ${RELEASE_FLAGS}")
# Set c++ compiler options
-# SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11")
-# SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COMMON_CXX_FLAGS} ${DEBUG_FLAGS}")
-# SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COMMON_CXX_FLAGS} ${RELEASE_FLAGS}")
+SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11 -Wno-unused-function")
+SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COMMON_CXX_FLAGS} ${DEBUG_FLAGS}")
+SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COMMON_CXX_FLAGS} ${RELEASE_FLAGS}")
IF (${CMAKE_BUILD_TYPE} MATCHES "Debug")
SET(CMAKE_BUILD_TYPE "Debug")
diff --git a/cmake/version.inc b/cmake/version.inc
index ed8e7a156c..134f09f179 100755
--- a/cmake/version.inc
+++ b/cmake/version.inc
@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "2.1.1.0")
+ SET(TD_VER_NUMBER "2.1.3.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md
index 4c37ce598c..c900cd373d 100644
--- a/documentation20/cn/00.index/docs.md
+++ b/documentation20/cn/00.index/docs.md
@@ -42,7 +42,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
* [数据写入](/taos-sql#insert):支持单表单条、多条、多表多条写入,支持历史数据写入
* [数据查询](/taos-sql#select):支持时间段、值过滤、排序、查询结果手动分页等
* [SQL函数](/taos-sql#functions):支持各种聚合函数、选择函数、计算函数,如avg, min, diff等
-* [时间维度聚合](/taos-sql#aggregation):将表中数据按照时间段进行切割后聚合,降维处理
+* [窗口切分聚合](/taos-sql#aggregation):将表中数据按照时间段等方式进行切割后聚合,降维处理
* [边界限制](/taos-sql#limitation):库、表、SQL等边界限制条件
* [错误码](/taos-sql/error-code):TDengine 2.0 错误码以及对应的十进制码
diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md
index f26928eec7..2d76c866d1 100644
--- a/documentation20/cn/08.connector/docs.md
+++ b/documentation20/cn/08.connector/docs.md
@@ -805,7 +805,7 @@ C#连接器支持的系统有:Linux 64/Windows x64/Windows x86
* 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。
* .NET接口文件TDengineDrivercs.cs和参考程序示例TDengineTest.cs均位于Windows客户端install_directory/examples/C#目录下。
-* 在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(dapper)框架驱动。
+* 在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(Dapper)框架驱动。
### 安装验证
diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md
index 62d709c279..db20ca4edb 100644
--- a/documentation20/cn/10.cluster/docs.md
+++ b/documentation20/cn/10.cluster/docs.md
@@ -85,7 +85,7 @@ taos>
将后续的数据节点添加到现有集群,具体有以下几步:
-1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd;
+1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd;(注意:每个物理节点都需要在 taos.cfg 文件中将 firstEp 参数配置为新集群首个节点的 End Point——在本例中是 h1.taos.com:6030)
2. 在第一个数据节点,使用CLI程序taos, 登录进TDengine系统, 执行命令:
diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md
index ae13a36f76..10951ed1fb 100644
--- a/documentation20/cn/11.administrator/docs.md
+++ b/documentation20/cn/11.administrator/docs.md
@@ -131,7 +131,7 @@ taosd -C
- quorum:多副本环境下指令执行的确认数要求。取值范围:1、2,单位为个,默认值:1。(可通过 alter database 修改)
- precision:时间戳精度标识。ms表示毫秒,us表示微秒,默认值:ms。(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。)
- cacheLast:是否在内存中缓存子表的最近数据。0:关闭;1:缓存子表最近一行数据;2:缓存子表每一列的最近的非NULL值;3:同时打开缓存最近行和列功能。默认值:0。(可通过 alter database 修改)(从 2.1.2.0 版本开始此参数支持 0~3 的取值范围,在此之前取值只能是 [0, 1];而 2.0.11.0 之前的版本在 SQL 指令中不支持此参数。)(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。)
-- update:是否允许更新。0:不允许;1:允许。默认值:0。(可通过 alter database 修改)
+- update:是否允许更新。0:不允许;1:允许。默认值:0。
对于一个应用场景,可能有多种数据特征的数据并存,最佳的设计是将具有相同数据特征的表放在一个库里,这样一个应用有多个库,而每个库可以配置不同的存储参数,从而保证系统有最优的性能。TDengine允许应用在创建库时指定上述存储参数,如果指定,该参数就将覆盖对应的系统配置参数。举例,有下述SQL:
diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index b0268a9ed4..5904abbbaa 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -139,11 +139,6 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
```
FSYNC 参数控制执行 fsync 操作的周期。缺省值为 3000,单位是毫秒,取值范围为 [0, 180000]。如果设置为 0,表示每次写入,立即执行 fsync。该设置项主要用于调节 WAL 参数设为 2 时的系统行为。
- ```mysql
- ALTER DATABASE db_name UPDATE 0;
- ```
- UPDATE 参数控制是否允许更新数据。缺省值为 0,取值范围为 [0, 1]。0 表示会直接丢弃后写入的相同时间戳的数据;1 表示会使用后写入的数据覆盖已有的相同时间戳的数据。
-
**Tips**: 以上所有参数修改后都可以用show databases来确认是否修改成功。另外,从 2.1.3.0 版本开始,修改这些参数后无需重启服务器即可生效。
- **显示系统所有数据库**
@@ -481,9 +476,10 @@ Query OK, 1 row(s) in set (0.001091s)
SELECT select_expr [, select_expr ...]
FROM {tb_name_list}
[WHERE where_condition]
- [INTERVAL (interval_val [, interval_offset])]
- [SLIDING sliding_val]
- [FILL fill_val]
+ [SESSION(ts_col, tol_val)]
+ [STATE_WINDOW(col)]
+ [INTERVAL(interval_val [, interval_offset]) [SLIDING sliding_val]]
+ [FILL(fill_mod_and_val)]
[GROUP BY col_list]
[ORDER BY col_list { DESC | ASC }]
[SLIMIT limit_val [SOFFSET offset_val]]
@@ -1289,39 +1285,45 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
Query OK, 3 row(s) in set (0.001046s)
```
-## 时间维度聚合
+## 按窗口切分聚合
-TDengine支持按时间段进行聚合,可以将表中数据按照时间段进行切割后聚合生成结果,比如温度传感器每秒采集一次数据,但需查询每隔10分钟的温度平均值。这个聚合适合于降维(down sample)操作, 语法如下:
+TDengine 支持按时间段等窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这类聚合适合于降维(down sample)操作,语法如下:
```mysql
SELECT function_list FROM tb_name
[WHERE where_condition]
- INTERVAL (interval [, offset])
- [SLIDING sliding]
- [FILL ({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
+ [SESSION(ts_col, tol_val)]
+ [STATE_WINDOW(col)]
+ [INTERVAL(interval [, offset]) [SLIDING sliding]]
+ [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
SELECT function_list FROM stb_name
[WHERE where_condition]
- INTERVAL (interval [, offset])
- [SLIDING sliding]
- [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})]
+ [SESSION(ts_col, tol_val)]
+ [STATE_WINDOW(col)]
+ [INTERVAL(interval [, offset]) [SLIDING sliding]]
+ [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
[GROUP BY tags]
```
-- 聚合时间段的长度由关键词INTERVAL指定,最短时间间隔10毫秒(10a),并且支持偏移(偏移必须小于间隔)。聚合查询中,能够同时执行的聚合和选择函数仅限于单个输出的函数:count、avg、sum 、stddev、leastsquares、percentile、min、max、first、last,不能使用具有多行输出结果的函数(例如:top、bottom、diff以及四则运算)。
-- WHERE语句可以指定查询的起止时间和其他过滤条件。
-- SLIDING语句用于指定聚合时间段的前向增量。
-- FILL语句指定某一时间区间数据缺失的情况下的填充模式。填充模式包括以下几种:
- 1. 不进行填充:NONE(默认填充模式)。
- 2. VALUE填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。
- 3. NULL填充:使用NULL填充数据。例如:FILL(NULL)。
- 4. PREV填充:使用前一个非NULL值填充数据。例如:FILL(PREV)。
- 5. NEXT填充:使用下一个非NULL值填充数据。例如:FILL(NEXT)。
+- 在聚合查询中,function_list 位置允许使用聚合和选择函数,并要求每个函数仅输出单个结果(例如:COUNT、AVG、SUM、STDDEV、LEASTSQUARES、PERCENTILE、MIN、MAX、FIRST、LAST),而不能使用具有多行输出结果的函数(例如:TOP、BOTTOM、DIFF 以及四则运算)。
+- 查询过滤、聚合等操作按照每个切分窗口为独立的单位执行。聚合查询目前支持三种窗口的划分方式:
+ 1. 时间窗口:聚合时间段的窗口宽度由关键词 INTERVAL 指定,最短时间间隔 10 毫秒(10a);并且支持偏移 offset(偏移必须小于间隔),也即时间窗口划分与“UTC 时刻 0”相比的偏移量。SLIDING 语句用于指定聚合时间段的前向增量,也即每次窗口向前滑动的时长。当 SLIDING 与 INTERVAL 取值相等的时候,滑动窗口即为翻转窗口。
+  2. 状态窗口:使用整数(布尔值)或字符串来标识产生记录时设备的状态量,产生的记录如果具有相同的状态量取值则归属于同一个状态窗口,数值改变后该窗口关闭。状态量所对应的列作为 STATE_WINDOW 语句的参数来指定。
+ 3. 会话窗口:时间戳所在的列由 SESSION 语句的 ts_col 参数指定,会话窗口根据相邻两条记录的时间戳差值来确定是否属于同一个会话——如果时间戳差异在 tol_val 以内,则认为记录仍属于同一个窗口;如果时间变化超过 tol_val,则自动开启下一个窗口。
+- WHERE 语句可以指定查询的起止时间和其他过滤条件。
+- FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填充模式包括以下几种:
+ 1. 不进行填充:NONE(默认填充模式)。
+ 2. VALUE 填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。
+ 3. PREV 填充:使用前一个非 NULL 值填充数据。例如:FILL(PREV)。
+ 4. NULL 填充:使用 NULL 填充数据。例如:FILL(NULL)。
+ 5. LINEAR 填充:根据前后距离最近的非 NULL 值做线性插值填充。例如:FILL(LINEAR)。
+ 6. NEXT 填充:使用下一个非 NULL 值填充数据。例如:FILL(NEXT)。
说明:
- 1. 使用FILL语句的时候可能生成大量的填充输出,务必指定查询的时间区间。针对每次查询,系统可返回不超过1千万条具有插值的结果。
+ 1. 使用 FILL 语句的时候可能生成大量的填充输出,务必指定查询的时间区间。针对每次查询,系统可返回不超过 1 千万条具有插值的结果。
2. 在时间维度聚合中,返回的结果中时间序列严格单调递增。
- 3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用GROUP BY语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了GROUP BY语句分组,则返回结果中每个GROUP内不按照时间序列严格单调递增。
+ 3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用 GROUP BY 语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了 GROUP BY 语句分组,则返回结果中每个 GROUP 内不按照时间序列严格单调递增。
时间聚合也常被用于连续查询场景,可以参考文档 [连续查询(Continuous Query)](https://www.taosdata.com/cn/documentation/advanced-features#continuous-query)。
@@ -1331,7 +1333,7 @@ SELECT function_list FROM stb_name
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
```
-针对智能电表采集的数据,以10分钟为一个阶段,计算过去24小时的电流数据的平均值、最大值、电流的中位数、以及随着时间变化的电流走势拟合直线。如果没有计算值,用前一个非NULL值填充。使用的查询语句如下:
+针对智能电表采集的数据,以 10 分钟为一个阶段,计算过去 24 小时的电流数据的平均值、最大值、电流的中位数、以及随着时间变化的电流走势拟合直线。如果没有计算值,用前一个非 NULL 值填充。使用的查询语句如下:
```mysql
SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), PERCENTILE(current, 50) FROM meters
diff --git a/documentation20/en/02.getting-started/docs.md b/documentation20/en/02.getting-started/docs.md
index fcfb88a6fe..19af3b5f31 100644
--- a/documentation20/en/02.getting-started/docs.md
+++ b/documentation20/en/02.getting-started/docs.md
@@ -2,7 +2,7 @@
## Quick Install
-TDegnine software consists of 3 parts: server, client, and alarm module. At the moment, TDengine server only runs on Linux (Windows, mac OS and more OS supports will come soon), but client can run on either Windows or Linux. TDengine client can be installed and run on Windows or Linux. Applications based-on any OSes can all connect to server taosd via a RESTful interface. About CPU, TDegnine supports X64/ARM64/MIPS64/Alpha64, and ARM32、RISC-V, other more CPU architectures will be supported soon. You can set up and install TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package).
+TDengine software consists of 3 parts: server, client, and alarm module. At the moment, TDengine server only runs on Linux (Windows, mac OS and more OS supports will come soon), but client can run on either Windows or Linux. TDengine client can be installed and run on Windows or Linux. Applications based-on any OSes can all connect to server taosd via a RESTful interface. About CPU, TDengine supports X64/ARM64/MIPS64/Alpha64, and ARM32、RISC-V, other more CPU architectures will be supported soon. You can set up and install TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package).
### Install from Source
@@ -14,7 +14,7 @@ Please visit our [TDengine Official Docker Image: Distribution, Downloading, and
### Install from Package
-It’s extremely easy to install for TDegnine, which takes only a few seconds from downloaded to successful installed. The server installation package includes clients and connectors. We provide 3 installation packages, which you can choose according to actual needs:
+It’s extremely easy to install for TDengine, which takes only a few seconds from downloaded to successful installed. The server installation package includes clients and connectors. We provide 3 installation packages, which you can choose according to actual needs:
Click [here](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) to download the install package.
@@ -39,8 +39,8 @@ If the service is running successfully, you can play around through TDengine she
**Note:**
- The `systemctl` command needs the **root** privilege. Use **sudo** if you are not the **root** user.
-- To get better product feedback and improve our solution, TDegnine will collect basic usage information, but you can modify the configuration parameter **telemetryReporting** in the system configuration file taos.cfg, and set it to 0 to turn it off.
-- TDegnine uses FQDN (usually hostname) as the node ID. In order to ensure normal operation, you need to set hostname for the server running taosd, and configure DNS service or hosts file for the machine running client application, to ensure the FQDN can be resolved.
+- To get better product feedback and improve our solution, TDengine will collect basic usage information, but you can modify the configuration parameter **telemetryReporting** in the system configuration file taos.cfg, and set it to 0 to turn it off.
+- TDengine uses FQDN (usually hostname) as the node ID. In order to ensure normal operation, you need to set hostname for the server running taosd, and configure DNS service or hosts file for the machine running client application, to ensure the FQDN can be resolved.
- TDengine supports installation on Linux systems with[ systemd ](https://en.wikipedia.org/wiki/Systemd)as the process service management, and uses `which systemctl` command to detect whether `systemd` packages exist in the system:
```bash
diff --git a/documentation20/en/08.connector/docs.md b/documentation20/en/08.connector/docs.md
index 36dc06a36e..3da9937f98 100644
--- a/documentation20/en/08.connector/docs.md
+++ b/documentation20/en/08.connector/docs.md
@@ -6,7 +6,7 @@ TDengine provides many connectors for development, including C/C++, JAVA, Python
At present, TDengine connectors support a wide range of platforms, including hardware platforms such as X64/X86/ARM64/ARM32/MIPS/Alpha, and development environments such as Linux/Win64/Win32. The comparison matrix is as follows:
-| **CPU** | **X64 64bit** | **X64 64bit** | **X64 64bit** | **X86 32bit** | **ARM64** | **ARM32** | **MIPS Godson** | **Alpha Whenwei** | **X64 TimecomTech** |
+| **CPU** | **X64 64bit** | **X64 64bit** | **X64 64bit** | **X86 32bit** | **ARM64** | **ARM32** | **MIPS Godson** | **Alpha Sunway** | **X64 TimecomTech** |
| ----------- | ------------- | ------------- | ------------- | ------------- | --------- | --------- | --------------- | ----------------- | ------------------- |
| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** | **Linux** |
| **C/C++** | ● | ● | ● | ○ | ● | ● | ○ | ○ | ○ |
@@ -75,9 +75,9 @@ Edit the taos.cfg file (default path/etc/taos/taos.cfg) and change firstEP to En
* X64 hardware environment: TDengine-client-2.X.X.X-Windows-x64.exe
* X86 hardware environment: TDengine-client-2.X.X.X-Windows-x86.exe
-**2. Execute installation, select default vales as prompted to complete**
+**2. Execute installation, select default values as prompted to complete**
-**3. Installatino path**
+**3. Installation path**
Default installation path is: C:\TDengine, with following files(directories):
@@ -327,7 +327,7 @@ typedef struct TAOS_BIND {
} TAOS_BIND;
```
-Add the curren bound parameters to the batch. After calling this function, you can call `taos_stmt_bind_param` again to bind the new parameters. It should be noted that this function only supports insert/import statements, and if it is other SQL statements such as select, it will return errors.
+Add the current bound parameters to the batch. After calling this function, you can call `taos_stmt_bind_param` again to bind the new parameters. It should be noted that this function only supports insert/import statements, and if it is other SQL statements such as select, it will return errors.
- `int taos_stmt_execute(TAOS_STMT *stmt)`
@@ -523,7 +523,7 @@ Users can directly view the usage information of the module through Python's hel
Refer to help (taos.TDEngineConnection) in python. This class corresponds to a connection between the client and TDengine. In the scenario of client multithreading, it is recommended that each thread apply for an independent connection instance, but not recommended that multiple threads share a connection.
-- *TDegnineCursor* class
+- *TDengineCursor* class
Refer to help (taos.TDengineCursor) in python. This class corresponds to the write and query operations performed by the client. In the scenario of client multithreading, this cursor instance must be kept exclusive to threads and cannot be used by threads, otherwise errors will occur in the returned results.
@@ -685,7 +685,7 @@ Return value:
}
```
-- Craete a database demo:
+- Create a database demo:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6041/rest/sql
@@ -771,7 +771,7 @@ The C # connector supports: Linux 64/Windows x64/Windows x86.
- For application driver installation, please refer to the[ steps of installing connector driver](https://www.taosdata.com/en/documentation/connector#driver).
- . NET interface file TDengineDrivercs.cs and reference sample TDengineTest.cs are both located in the Windows client install_directory/examples/C# directory.
-- On Windows, C # applications can use the native C interface of TDengine to perform all database operations, and future versions will provide the ORM (dapper) framework driver.
+- On Windows, C # applications can use the native C interface of TDengine to perform all database operations, and future versions will provide the ORM (Dapper) framework driver.
### Installation verification
@@ -908,7 +908,7 @@ Use Microsoft [windows-build-tools](https://github.com/felixrieseberg/windows-bu
#### Solution 2
-Mannually install the following tools:
+Manually install the following tools:
- Install Visual Studio related tools: [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) or [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community)
- Install [Python](https://www.python.org/downloads/) 2.7 (not supported in v3.x.x) and execute npm config set python python2.7
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index a63225ab32..65a8e6b684 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -1,6 +1,6 @@
name: tdengine
base: core18
-version: '2.1.1.0'
+version: '2.1.3.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- - usr/lib/libtaos.so.2.1.1.0
+ - usr/lib/libtaos.so.2.1.3.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so
diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h
index 21941d81d4..64ae966e8e 100644
--- a/src/client/inc/tscUtil.h
+++ b/src/client/inc/tscUtil.h
@@ -211,7 +211,8 @@ int32_t createProjectionExpr(SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaI
void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta);
SColumn* tscColumnClone(const SColumn* src);
-bool tscColumnExists(SArray* pColumnList, int32_t columnIndex, uint64_t uid);
+void tscColumnCopy(SColumn* pDest, const SColumn* pSrc);
+int32_t tscColumnExists(SArray* pColumnList, int32_t columnId, uint64_t uid);
SColumn* tscColumnListInsert(SArray* pColumnList, int32_t columnIndex, uint64_t uid, SSchema* pSchema);
void tscColumnListDestroy(SArray* pColList);
void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid);
@@ -341,7 +342,7 @@ char* strdup_throw(const char* str);
bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src);
SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg);
-SCond* tsGetTableFilter(SArray* filters, uint64_t uid);
+STblCond* tsGetTableFilter(SArray* filters, uint64_t uid, int16_t idx);
#ifdef __cplusplus
}
diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c
index bac8920d8f..8bb776ffee 100644
--- a/src/client/src/tscPrepare.c
+++ b/src/client/src/tscPrepare.c
@@ -1156,27 +1156,6 @@ static void insertBatchClean(STscStmt* pStmt) {
tfree(pCmd->insertParam.pTableNameList);
-/*
- STableDataBlocks** p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, NULL);
-
- STableDataBlocks* pOneTableBlock = *p;
-
- while (1) {
- SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
-
- pOneTableBlock->size = sizeof(SSubmitBlk);
-
- pBlocks->numOfRows = 0;
-
- p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, p);
- if (p == NULL) {
- break;
- }
-
- pOneTableBlock = *p;
- }
-*/
-
pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pCmd->insertParam.pDataBlocks);
pCmd->insertParam.numOfTables = 0;
@@ -1499,7 +1478,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
pRes->numOfRows = 1;
strtolower(pSql->sqlstr, sql);
- tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
+ tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
if (tscIsInsertData(pSql->sqlstr)) {
pStmt->isInsert = true;
@@ -1604,7 +1583,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
if (taosHashGetSize(pCmd->insertParam.pTableBlockHashList) > 0) {
SHashObj* hashList = pCmd->insertParam.pTableBlockHashList;
pCmd->insertParam.pTableBlockHashList = NULL;
- tscResetSqlCmd(pCmd, true);
+ tscResetSqlCmd(pCmd, false);
pCmd->insertParam.pTableBlockHashList = hashList;
}
@@ -1663,7 +1642,7 @@ int taos_stmt_close(TAOS_STMT* stmt) {
} else {
if (pStmt->multiTbInsert) {
taosHashCleanup(pStmt->mtb.pTableHash);
- pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, true);
+ pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, false);
taosHashCleanup(pStmt->pSql->cmd.insertParam.pTableBlockHashList);
pStmt->pSql->cmd.insertParam.pTableBlockHashList = NULL;
taosArrayDestroy(pStmt->mtb.tags);
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 315125d411..830bfa3993 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -66,7 +66,6 @@ static char* getAccountId(SSqlObj* pSql);
static int convertTimestampStrToInt64(tVariant *pVar, int32_t precision);
static bool serializeExprListToVariant(SArray* pList, tVariant **dest, int16_t colType, uint8_t precision);
-static int32_t validateParamOfRelationIn(tVariant *pVar, int32_t colType);
static bool has(SArray* pFieldList, int32_t startIdx, const char* name);
static char* cloneCurrentDBName(SSqlObj* pSql);
@@ -158,80 +157,79 @@ bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType,
return ret;
}
- tSqlExprItem* item = (tSqlExprItem *)taosArrayGet(pList, 0);
- int32_t firstTokenType = item->pNode->token.type;
- int32_t type = firstTokenType;
+ tSqlExpr* item = ((tSqlExprItem*)(taosArrayGet(pList, 0)))->pNode;
+ int32_t firstVarType = item->value.nType;
- //nchar to binary and other xxint to bigint
- toTSDBType(type);
- if (colType != TSDB_DATA_TYPE_TIMESTAMP && !IS_UNSIGNED_NUMERIC_TYPE(colType)) {
- if (type != colType && (type != TSDB_DATA_TYPE_BINARY || colType != TSDB_DATA_TYPE_NCHAR)) {
- return false;
- }
- }
- type = colType;
-
SBufferWriter bw = tbufInitWriter( NULL, false);
-
tbufEnsureCapacity(&bw, 512);
+ if (colType == TSDB_DATA_TYPE_TIMESTAMP) {
+ tbufWriteUint32(&bw, TSDB_DATA_TYPE_BIGINT);
+ } else {
+ tbufWriteUint32(&bw, colType);
+ }
+ tbufWriteInt32(&bw, (int32_t)(pList->size));
- int32_t size = (int32_t)(pList->size);
- tbufWriteUint32(&bw, type);
- tbufWriteInt32(&bw, size);
-
- for (int32_t i = 0; i < size; i++) {
+ for (int32_t i = 0; i < (int32_t)pList->size; i++) {
tSqlExpr* pSub = ((tSqlExprItem*)(taosArrayGet(pList, i)))->pNode;
+ tVariant* var = &pSub->value;
// check all the token type in expr list same or not
- if (firstTokenType != pSub->token.type) {
+ if (firstVarType != var->nType) {
break;
}
-
- uint32_t tType = pSub->token.type;
- toTSDBType(pSub->token.type);
-
- tVariant var;
- tVariantCreate(&var, &pSub->token);
- pSub->token.type = tType;
- if (type == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(type)) {
- tbufWriteInt64(&bw, var.i64);
- } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
- // ugly code, refactor later
- if (IS_UNSIGNED_NUMERIC_TYPE(pSub->token.type) || IS_SIGNED_NUMERIC_TYPE(pSub->token.type)) {
- tbufWriteUint64(&bw, var.i64);
- } else {
- tVariantDestroy(&var);
+ if ((colType == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(colType))) {
+ if (var->nType != TSDB_DATA_TYPE_BOOL && !IS_SIGNED_NUMERIC_TYPE(var->nType)) {
break;
}
- } else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) {
- tbufWriteDouble(&bw, var.dKey);
- } else if (type == TSDB_DATA_TYPE_BINARY){
- tbufWriteBinary(&bw, var.pz, var.nLen);
- } else if (type == TSDB_DATA_TYPE_NCHAR) {
- char *buf = (char *)calloc(1, (var.nLen + 1)*TSDB_NCHAR_SIZE);
- if (tVariantDump(&var, buf, type, false) != TSDB_CODE_SUCCESS) {
+ if (colType == TSDB_DATA_TYPE_BOOL && (var->i64 > 1 ||var->i64 < 0)) {
+ break;
+ }
+ tbufWriteInt64(&bw, var->i64);
+ } else if (IS_UNSIGNED_NUMERIC_TYPE(colType)) {
+ if (IS_SIGNED_NUMERIC_TYPE(var->nType) && IS_UNSIGNED_NUMERIC_TYPE(var->nType)) {
+ break;
+ }
+ tbufWriteUint64(&bw, var->u64);
+ } else if (colType == TSDB_DATA_TYPE_DOUBLE || colType == TSDB_DATA_TYPE_FLOAT) {
+ if (IS_SIGNED_NUMERIC_TYPE(var->nType) || IS_UNSIGNED_NUMERIC_TYPE(var->nType)) {
+ tbufWriteDouble(&bw, (double)(var->i64));
+ } else if (var->nType == TSDB_DATA_TYPE_DOUBLE || var->nType == TSDB_DATA_TYPE_FLOAT){
+ tbufWriteDouble(&bw, var->dKey);
+ } else {
+ break;
+ }
+ } else if (colType == TSDB_DATA_TYPE_BINARY) {
+ if (var->nType != TSDB_DATA_TYPE_BINARY) {
+ break;
+ }
+ tbufWriteBinary(&bw, var->pz, var->nLen);
+ } else if (colType == TSDB_DATA_TYPE_NCHAR) {
+ if (var->nType != TSDB_DATA_TYPE_BINARY) {
+ break;
+ }
+ char *buf = (char *)calloc(1, (var->nLen + 1)*TSDB_NCHAR_SIZE);
+ if (tVariantDump(var, buf, colType, false) != TSDB_CODE_SUCCESS) {
free(buf);
- tVariantDestroy(&var);
break;
}
tbufWriteBinary(&bw, buf, twcslen((wchar_t *)buf) * TSDB_NCHAR_SIZE);
free(buf);
- } else if (type == TSDB_DATA_TYPE_TIMESTAMP) {
- if (var.nType == TSDB_DATA_TYPE_BINARY) {
- if (convertTimestampStrToInt64(&var, precision) < 0) {
- tVariantDestroy(&var);
+ } else if (colType == TSDB_DATA_TYPE_TIMESTAMP) {
+ if (var->nType == TSDB_DATA_TYPE_BINARY) {
+ if (convertTimestampStrToInt64(var, precision) < 0) {
break;
}
- tbufWriteInt64(&bw, var.i64);
- } else if (var.nType == TSDB_DATA_TYPE_BIGINT) {
- tbufWriteInt64(&bw, var.i64);
+ tbufWriteInt64(&bw, var->i64);
+ } else if (var->nType == TSDB_DATA_TYPE_BIGINT) {
+ tbufWriteInt64(&bw, var->i64);
+ } else {
+ break;
}
+ } else {
+ break;
}
- tVariantDestroy(&var);
-
- if (i == size - 1) { ret = true;}
- }
-
+ if (i == (int32_t)(pList->size - 1)) { ret = true;}
+ }
if (ret == true) {
if ((*dst = calloc(1, sizeof(tVariant))) != NULL) {
tVariantCreateFromBinary(*dst, tbufGetData(&bw, false), tbufTell(&bw), TSDB_DATA_TYPE_BINARY);
@@ -243,13 +241,6 @@ bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType,
return ret;
}
-static int32_t validateParamOfRelationIn(tVariant *pVar, int32_t colType) {
- if (pVar->nType != TSDB_DATA_TYPE_BINARY) {
- return -1;
- }
- SBufferReader br = tbufInitReader(pVar->pz, pVar->nLen, false);
- return colType == TSDB_DATA_TYPE_NCHAR ? 0 : (tbufReadUint32(&br) == colType ? 0: -1);
-}
static uint8_t convertOptr(SStrToken *pToken) {
switch (pToken->type) {
@@ -1703,7 +1694,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32
// arithmetic expression always return result in the format of double float
pExprInfo->base.resBytes = sizeof(double);
- pExprInfo->base.interBytes = sizeof(double);
+ pExprInfo->base.interBytes = 0;
pExprInfo->base.resType = TSDB_DATA_TYPE_DOUBLE;
pExprInfo->base.functionId = TSDB_FUNC_ARITHM;
@@ -1938,14 +1929,14 @@ SExprInfo* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tab
index.columnIndex = colIndex;
}
- return tscExprAppend(pQueryInfo, functionId, &index, pSchema->type, pSchema->bytes, colId, pSchema->bytes,
+ return tscExprAppend(pQueryInfo, functionId, &index, pSchema->type, pSchema->bytes, colId, 0,
(functionId == TSDB_FUNC_TAGPRJ));
}
SExprInfo* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
SColumnIndex* pIndex, SSchema* pColSchema, int16_t flag, int16_t colId) {
SExprInfo* pExpr = tscExprInsert(pQueryInfo, outputColIndex, functionId, pIndex, pColSchema->type,
- pColSchema->bytes, colId, pColSchema->bytes, TSDB_COL_IS_TAG(flag));
+ pColSchema->bytes, colId, 0, TSDB_COL_IS_TAG(flag));
tstrncpy(pExpr->base.aliasName, pColSchema->name, sizeof(pExpr->base.aliasName));
tstrncpy(pExpr->base.token, pColSchema->name, sizeof(pExpr->base.token));
@@ -2100,7 +2091,7 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS
type = pSchema->type;
bytes = pSchema->bytes;
}
-
+
SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, getNewResColId(pCmd), bytes, false);
tstrncpy(pExpr->base.aliasName, name, tListLen(pExpr->base.aliasName));
@@ -2154,7 +2145,10 @@ void setResultColName(char* name, tSqlExprItem* pItem, int32_t functionId, SStrT
}
static void updateLastScanOrderIfNeeded(SQueryInfo* pQueryInfo) {
- if (pQueryInfo->sessionWindow.gap > 0 || tscGroupbyColumn(pQueryInfo)) {
+ if (pQueryInfo->sessionWindow.gap > 0 ||
+ pQueryInfo->stateWindow ||
+ taosArrayGetSize(pQueryInfo->pUpstream) > 0 ||
+ tscGroupbyColumn(pQueryInfo)) {
size_t numOfExpr = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExpr; ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
@@ -2169,6 +2163,17 @@ static void updateLastScanOrderIfNeeded(SQueryInfo* pQueryInfo) {
}
}
+static UNUSED_FUNC void updateFunctionInterBuf(SQueryInfo* pQueryInfo, bool superTable) {
+ size_t numOfExpr = tscNumOfExprs(pQueryInfo);
+ for (int32_t i = 0; i < numOfExpr; ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+
+ int32_t param = (int32_t)pExpr->base.param[0].i64;
+ getResultDataInfo(pExpr->base.colType, pExpr->base.colBytes, pExpr->base.functionId, param, &pExpr->base.resType, &pExpr->base.resBytes,
+ &pExpr->base.interBytes, 0, superTable);
+ }
+}
+
int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSqlExprItem* pItem, bool finalResult) {
STableMetaInfo* pTableMetaInfo = NULL;
int32_t functionId = pItem->pNode->functionId;
@@ -2278,10 +2283,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
case TSDB_FUNC_LEASTSQR: {
// 1. valid the number of parameters
int32_t numOfParams = (pItem->pNode->pParam == NULL)? 0: (int32_t) taosArrayGetSize(pItem->pNode->pParam);
+
+ // no parameters or more than one parameter for function
if (pItem->pNode->pParam == NULL ||
(functionId != TSDB_FUNC_LEASTSQR && functionId != TSDB_FUNC_DERIVATIVE && numOfParams != 1) ||
((functionId == TSDB_FUNC_LEASTSQR || functionId == TSDB_FUNC_DERIVATIVE) && numOfParams != 3)) {
- /* no parameters or more than one parameter for function */
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@@ -2295,14 +2301,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta);
+
+ // functions can not be applied to tags
+ if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta))) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
// 2. check if sql function can be applied on this column data type
- pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
- STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta);
-
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
if (!IS_NUMERIC_TYPE(pSchema->type)) {
@@ -2331,11 +2338,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr);
}
- // functions can not be applied to tags
- if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
- }
-
SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), intermediateResSize, false);
if (functionId == TSDB_FUNC_LEASTSQR) { // set the leastsquares parameters
@@ -2364,9 +2366,9 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
if (info.precision == TSDB_TIME_PRECISION_MILLI) {
- tickPerSec /= 1000000;
+ tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO);
} else if (info.precision == TSDB_TIME_PRECISION_MICRO) {
- tickPerSec /= 1000;
+ tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI);
}
if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) {
@@ -2414,7 +2416,9 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
// NOTE: has time range condition or normal column filter condition, the last_row query will be transferred to last query
SConvertFunc cvtFunc = {.originFuncId = functionId, .execFuncId = functionId};
- if (functionId == TSDB_FUNC_LAST_ROW && ((!TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER)) || (hasNormalColumnFilter(pQueryInfo)))) {
+ if (functionId == TSDB_FUNC_LAST_ROW && ((!TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER)) ||
+ (hasNormalColumnFilter(pQueryInfo)) ||
+ taosArrayGetSize(pQueryInfo->pUpstream)>0)) {
cvtFunc.execFuncId = TSDB_FUNC_LAST;
}
@@ -2597,7 +2601,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
// set the first column ts for top/bottom query
SColumnIndex index1 = {index.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pCmd),
- TSDB_KEYSIZE, false);
+ 0, false);
tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS].name, sizeof(pExpr->base.aliasName));
const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX;
@@ -3112,15 +3116,10 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo)
return true;
}
- if (pQueryInfo->groupbyExpr.numOfGroupCols != 1) {
+ SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
+ if (pColIndex->colIndex != TSDB_TBNAME_COLUMN_INDEX) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return true;
- } else {
- SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
- if (pColIndex->colIndex != TSDB_TBNAME_COLUMN_INDEX) {
- invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
- return true;
- }
}
} else if (tscIsSessionWindowQuery(pQueryInfo)) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
@@ -3370,11 +3369,6 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
if (pRight->tokenId != TK_SET || !serializeExprListToVariant(pRight->pParam, &pVal, colType, timePrecision)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
- if (validateParamOfRelationIn(pVal, colType) != TSDB_CODE_SUCCESS) {
- tVariantDestroy(pVal);
- free(pVal);
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
- }
pColumnFilter->pz = (int64_t)calloc(1, pVal->nLen + 1);
pColumnFilter->len = pVal->nLen;
pColumnFilter->filterstr = 1;
@@ -3639,8 +3633,9 @@ static int32_t getColQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlEx
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);
int64_t uid = pTableMetaInfo->pTableMeta->id.uid;
- SCond cond = {
+ STblCond cond = {
.uid = uid,
+ .idx = i,
.len = (int32_t)(tbufTell(&bw)),
.cond = tbufGetData(&bw, true)
};
@@ -3727,7 +3722,7 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
- if (!tscColumnExists(pTableMetaInfo->tagColList, index.columnIndex, pTableMetaInfo->pTableMeta->id.uid)) {
+ if (tscColumnExists(pTableMetaInfo->tagColList, pTagSchema1->colId, pTableMetaInfo->pTableMeta->id.uid) < 0) {
tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema1);
if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) {
@@ -3759,7 +3754,7 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMeta);
- if (!tscColumnExists(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid)) {
+ if (tscColumnExists(pTableMetaInfo->tagColList, pTagSchema2->colId, pTableMeta->id.uid) < 0) {
tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema2);
if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) {
@@ -4128,7 +4123,7 @@ static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t
}
static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SCondExpr* pCondExpr,
- int32_t* type, int32_t parentOptr, tSqlExpr** columnExpr, tSqlExpr** tsExpr) {
+ int32_t* type, int32_t* tbIdx, int32_t parentOptr, tSqlExpr** columnExpr, tSqlExpr** tsExpr) {
const char* msg1 = "table query cannot use tags filter";
const char* msg2 = "illegal column name";
const char* msg4 = "too many join tables";
@@ -4146,6 +4141,8 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
+ *tbIdx = index.tableIndex;
+
assert(tSqlExprIsParentOfLeaf(*pExpr));
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -4284,7 +4281,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
}
int32_t getQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SCondExpr* pCondExpr,
- int32_t* type, int32_t parentOptr, tSqlExpr** columnExpr, tSqlExpr** tsExpr) {
+ int32_t* type, int32_t* tbIdx, int32_t parentOptr, tSqlExpr** columnExpr, tSqlExpr** tsExpr) {
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
@@ -4297,6 +4294,7 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr
int32_t ret = 0;
const char* msg1 = "query condition between columns and tags and timestamp must use 'AND'";
+ const char* msg2 = "query condition between tables must use 'AND'";
if ((*pExpr)->flags & (1 << EXPR_FLAG_TS_ERROR)) {
return TSDB_CODE_TSC_INVALID_OPERATION;
@@ -4311,14 +4309,16 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr
int32_t leftType = 0;
int32_t rightType = 0;
+ int32_t leftTbIdx = 0;
+ int32_t rightTbIdx = 0;
if (!tSqlExprIsParentOfLeaf(*pExpr)) {
- ret = getQueryCondExpr(pCmd, pQueryInfo, &(*pExpr)->pLeft, pCondExpr, &leftType, (*pExpr)->tokenId, &columnLeft, &tsLeft);
+ ret = getQueryCondExpr(pCmd, pQueryInfo, &(*pExpr)->pLeft, pCondExpr, &leftType, &leftTbIdx, (*pExpr)->tokenId, &columnLeft, &tsLeft);
if (ret != TSDB_CODE_SUCCESS) {
goto err_ret;
}
- ret = getQueryCondExpr(pCmd, pQueryInfo, &(*pExpr)->pRight, pCondExpr, &rightType, (*pExpr)->tokenId, &columnRight, &tsRight);
+ ret = getQueryCondExpr(pCmd, pQueryInfo, &(*pExpr)->pRight, pCondExpr, &rightType, &rightTbIdx, (*pExpr)->tokenId, &columnRight, &tsRight);
if (ret != TSDB_CODE_SUCCESS) {
goto err_ret;
}
@@ -4332,6 +4332,11 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr
goto err_ret;
}
+ if (((leftTbIdx != rightTbIdx) || (leftTbIdx == -1 || rightTbIdx == -1)) && ((*pExpr)->tokenId == TK_OR)) {
+ ret = invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ goto err_ret;
+ }
+
if (columnLeft && columnRight) {
setNormalExprToCond(&columnLeft, columnRight, (*pExpr)->tokenId);
@@ -4349,6 +4354,7 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr
}
*type = leftType|rightType;
+ *tbIdx = (leftTbIdx == rightTbIdx) ? leftTbIdx : -1;
return TSDB_CODE_SUCCESS;
}
@@ -4365,7 +4371,7 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr
goto err_ret;
}
- ret = handleExprInQueryCond(pCmd, pQueryInfo, pExpr, pCondExpr, type, parentOptr, columnExpr, tsExpr);
+ ret = handleExprInQueryCond(pCmd, pQueryInfo, pExpr, pCondExpr, type, tbIdx, parentOptr, columnExpr, tsExpr);
if (ret) {
goto err_ret;
}
@@ -4380,6 +4386,11 @@ err_ret:
}
static void doExtractExprForSTable(SSqlCmd* pCmd, tSqlExpr** pExpr, SQueryInfo* pQueryInfo, tSqlExpr** pOut, int32_t tableIndex) {
+ if (*pExpr == NULL) {
+ *pOut = NULL;
+ return;
+ }
+
if (tSqlExprIsParentOfLeaf(*pExpr)) {
tSqlExpr* pLeft = (*pExpr)->pLeft;
@@ -4944,7 +4955,8 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq
}
int32_t type = 0;
- if ((ret = getQueryCondExpr(&pSql->cmd, pQueryInfo, pExpr, &condExpr, &type, (*pExpr)->tokenId, &condExpr.pColumnCond, &condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) {
+ int32_t tbIdx = 0;
+ if ((ret = getQueryCondExpr(&pSql->cmd, pQueryInfo, pExpr, &condExpr, &type, &tbIdx, (*pExpr)->tokenId, &condExpr.pColumnCond, &condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -4983,6 +4995,14 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq
goto PARSE_WHERE_EXIT;
}
+/*
+ if (taosArrayGetSize(pQueryInfo->pUpstream) > 0 ) {
+ if ((ret = getColumnQueryCondInfo(&pSql->cmd, pQueryInfo, condExpr.pTimewindow, TK_AND)) != TSDB_CODE_SUCCESS) {
+ goto PARSE_WHERE_EXIT;
+ }
+ }
+*/
+
if ((ret = getColQueryCondExpr(&pSql->cmd, pQueryInfo, &condExpr.pColumnCond)) != TSDB_CODE_SUCCESS) {
goto PARSE_WHERE_EXIT;
}
@@ -7948,7 +7968,8 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
const char* msg5 = "only tag query not compatible with normal column filter";
const char* msg6 = "not support stddev/percentile in outer query yet";
const char* msg7 = "drivative requires timestamp column exists in subquery";
-
+ const char* msg8 = "condition missing for join query";
+
int32_t code = TSDB_CODE_SUCCESS;
SSqlCmd* pCmd = &pSql->cmd;
@@ -7983,17 +8004,29 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
return code;
}
}
+
+ int32_t timeWindowQuery =
+ (TPARSER_HAS_TOKEN(pSqlNode->interval.interval) || TPARSER_HAS_TOKEN(pSqlNode->sessionVal.gap));
- if (validateSelectNodeList(pCmd, pQueryInfo, pSqlNode->pSelNodeList, false, false, false) != TSDB_CODE_SUCCESS) {
+ if (validateSelectNodeList(pCmd, pQueryInfo, pSqlNode->pSelNodeList, false, false, timeWindowQuery) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+ // parse the window_state
+ if (validateStateWindowNode(pCmd, pQueryInfo, pSqlNode, false) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
-
// todo NOT support yet
for(int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
int32_t f = pExpr->base.functionId;
if (f == TSDB_FUNC_STDDEV || f == TSDB_FUNC_PERCT) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ }
+
+ if ((timeWindowQuery || pQueryInfo->stateWindow) && f == TSDB_FUNC_LAST) {
+ pExpr->base.numOfParams = 1;
+ pExpr->base.param[0].i64 = TSDB_ORDER_ASC;
+ pExpr->base.param[0].nType = TSDB_DATA_TYPE_INT;
}
}
@@ -8013,10 +8046,9 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
-
- if (pTableMeta->tableInfo.precision == TSDB_TIME_PRECISION_MILLI) {
- pQueryInfo->window.skey = pQueryInfo->window.skey / 1000;
- pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000;
+ } else {
+ if (pQueryInfo->numOfTables > 1) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
}
@@ -8024,7 +8056,10 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
if (validateIntervalNode(pSql, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
} else {
- if (isTimeWindowQuery(pQueryInfo) || pQueryInfo->sessionWindow.gap > 0) {
+ if (validateSessionNode(pCmd, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+ if (isTimeWindowQuery(pQueryInfo)) {
// check if the first column of the nest query result is timestamp column
SColumn* pCol = taosArrayGetP(pQueryInfo->colList, 0);
if (pCol->info.type != TSDB_DATA_TYPE_TIMESTAMP) {
@@ -8045,6 +8080,9 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
if ((code = doFunctionsCompatibleCheck(pCmd, pQueryInfo, tscGetErrorMsgPayload(pCmd))) != TSDB_CODE_SUCCESS) {
return code;
}
+
+// updateFunctionInterBuf(pQueryInfo, false);
+ updateLastScanOrderIfNeeded(pQueryInfo);
} else {
pQueryInfo->command = TSDB_SQL_SELECT;
@@ -8129,8 +8167,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
return TSDB_CODE_TSC_INVALID_OPERATION;
}
- if ((isTimeWindowQuery(pQueryInfo) || pQueryInfo->sessionWindow.gap > 0) &&
- (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
+ if (isTimeWindowQuery(pQueryInfo) && (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
@@ -8169,6 +8206,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
updateLastScanOrderIfNeeded(pQueryInfo);
tscFieldInfoUpdateOffset(pQueryInfo);
+// updateFunctionInterBuf(pQueryInfo, isSTable);
if ((code = validateFillNode(pCmd, pQueryInfo, pSqlNode)) != TSDB_CODE_SUCCESS) {
return code;
@@ -8210,6 +8248,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, uint64_t *uid) {
tExprNode* pLeft = NULL;
tExprNode* pRight= NULL;
+ SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (pSqlExpr->pLeft != NULL) {
int32_t ret = exprTreeFromSqlExpr(pCmd, &pLeft, pSqlExpr->pLeft, pQueryInfo, pCols, uid);
@@ -8277,13 +8316,13 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
}
}
} else if (pSqlExpr->type == SQL_NODE_TABLE_COLUMN) { // column name, normal column arithmetic expression
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
int32_t ret = getColumnIndexByName(pCmd, &pSqlExpr->colInfo, pQueryInfo, &index);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
- STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
+ pQueryInfo->curTableIdx = index.tableIndex;
+ STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, index.tableIndex)->pTableMeta;
int32_t numOfColumns = tscGetNumOfColumns(pTableMeta);
*pExpr = calloc(1, sizeof(tExprNode));
@@ -8306,7 +8345,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
return TSDB_CODE_SUCCESS;
} else if (pSqlExpr->tokenId == TK_SET) {
int32_t colType = -1;
- STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
+ STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, pQueryInfo->curTableIdx)->pTableMeta;
size_t colSize = taosArrayGetSize(pCols);
if (pCols != NULL && colSize > 0) {
SColIndex* idx = taosArrayGet(pCols, colSize - 1);
@@ -8321,7 +8360,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
} else if (colType == TSDB_DATA_TYPE_FLOAT || colType == TSDB_DATA_TYPE_DOUBLE) {
colType = TSDB_DATA_TYPE_DOUBLE;
}
- STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pQueryInfo->curTableIdx);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
if (serializeExprListToVariant(pSqlExpr->pParam, &pVal, colType, tinfo.precision) == false) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "not support filter expression");
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index 45e131d8a5..0bb56587e7 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -795,6 +795,7 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo,
pSqlExpr->colBytes = htons(pExpr->colBytes);
pSqlExpr->resType = htons(pExpr->resType);
pSqlExpr->resBytes = htons(pExpr->resBytes);
+ pSqlExpr->interBytes = htonl(pExpr->interBytes);
pSqlExpr->functionId = htons(pExpr->functionId);
pSqlExpr->numOfParams = htons(pExpr->numOfParams);
pSqlExpr->resColId = htons(pExpr->resColId);
@@ -915,7 +916,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
if (pQueryInfo->colCond && taosArrayGetSize(pQueryInfo->colCond) > 0 && !onlyQueryTags(&query) ) {
- SCond *pCond = tsGetTableFilter(pQueryInfo->colCond, pTableMeta->id.uid);
+ STblCond *pCond = tsGetTableFilter(pQueryInfo->colCond, pTableMeta->id.uid, 0);
if (pCond != NULL && pCond->cond != NULL) {
pQueryMsg->colCondLen = htons(pCond->len);
memcpy(pMsg, pCond->cond, pCond->len);
@@ -1506,7 +1507,9 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg = (char *)pSchema;
pAlterTableMsg->tagValLen = htonl(pAlterInfo->tagData.dataLen);
- memcpy(pMsg, pAlterInfo->tagData.data, pAlterInfo->tagData.dataLen);
+ if (pAlterInfo->tagData.dataLen > 0) {
+ memcpy(pMsg, pAlterInfo->tagData.data, pAlterInfo->tagData.dataLen);
+ }
pMsg += pAlterInfo->tagData.dataLen;
msgLen = (int32_t)(pMsg - (char*)pAlterTableMsg);
diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c
index ef46b4068e..52ba424fa5 100644
--- a/src/client/src/tscSub.c
+++ b/src/client/src/tscSub.c
@@ -512,6 +512,7 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
pSub->pSql = pSql;
pSql->pSubscription = pSub;
+ pSub->lastSyncTime = 0;
// no table list now, force to update it
tscDebug("begin table synchronization");
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 06809a6406..b57664061a 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -103,13 +103,6 @@ bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) {
pthread_mutex_lock(&subState->mutex);
-// bool done = allSubqueryDone(pParentSql);
-// if (done) {
-// tscDebug("0x%"PRIx64" subquery:0x%"PRIx64",%d all subs already done", pParentSql->self, pSql->self, idx);
-// pthread_mutex_unlock(&subState->mutex);
-// return false;
-// }
-
tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index:%d state set to 1", pParentSql->self, pSql->self, idx);
subState->states[idx] = 1;
@@ -2398,8 +2391,14 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
SColumn *pCol = taosArrayGetP(pColList, i);
if (pCol->info.flist.numOfFilters > 0) { // copy to the pNew->cmd.colList if it is filtered.
- SColumn *p = tscColumnClone(pCol);
- taosArrayPush(pNewQueryInfo->colList, &p);
+ int32_t index1 = tscColumnExists(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid);
+ if (index1 >= 0) {
+ SColumn* x = taosArrayGetP(pNewQueryInfo->colList, index1);
+ tscColumnCopy(x, pCol);
+ } else {
+ SColumn *p = tscColumnClone(pCol);
+ taosArrayPush(pNewQueryInfo->colList, &p);
+ }
}
}
diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c
index ac6fc62adb..8b1d1889a8 100644
--- a/src/client/src/tscSystem.c
+++ b/src/client/src/tscSystem.c
@@ -45,6 +45,7 @@ int32_t tscNumOfObj = 0; // number of sqlObj in current process.
static void *tscCheckDiskUsageTmr;
void *tscRpcCache; // cache to keep rpc obj
int32_t tscNumOfThreads = 1; // num of rpc threads
+char tscLogFileName[12] = "taoslog";
static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently
static pthread_once_t tscinit = PTHREAD_ONCE_INIT;
static volatile int tscInitRes = 0;
@@ -132,7 +133,7 @@ void taos_init_imp(void) {
printf("failed to create log dir:%s\n", tsLogDir);
}
- sprintf(temp, "%s/taoslog", tsLogDir);
+ sprintf(temp, "%s/%s", tsLogDir, tscLogFileName);
if (taosInitLog(temp, tsNumOfLogLines, 10) < 0) {
printf("failed to open log file in directory:%s\n", tsLogDir);
}
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index d5d5292e2b..a6a25269fc 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -118,16 +118,16 @@ SCond* tsGetSTableQueryCond(STagCond* pTagCond, uint64_t uid) {
return NULL;
}
-SCond* tsGetTableFilter(SArray* filters, uint64_t uid) {
+STblCond* tsGetTableFilter(SArray* filters, uint64_t uid, int16_t idx) {
if (filters == NULL) {
return NULL;
}
size_t size = taosArrayGetSize(filters);
for (int32_t i = 0; i < size; ++i) {
- SCond* cond = taosArrayGet(filters, i);
+ STblCond* cond = taosArrayGet(filters, i);
- if (uid == cond->uid) {
+ if (uid == cond->uid && (idx >= 0 && cond->idx == idx)) {
return cond;
}
}
@@ -743,8 +743,7 @@ typedef struct SDummyInputInfo {
SSDataBlock *block;
STableQueryInfo *pTableQueryInfo;
SSqlObj *pSql; // refactor: remove it
- int32_t numOfFilterCols;
- SSingleColumnFilterInfo *pFilterInfo;
+ SFilterInfo *pFilterInfo;
} SDummyInputInfo;
typedef struct SJoinStatus {
@@ -760,7 +759,7 @@ typedef struct SJoinOperatorInfo {
SRspResultInfo resultInfo; // todo refactor, add this info for each operator
} SJoinOperatorInfo;
-static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols) {
+static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, SFilterInfo* pFilterInfo) {
int32_t offset = 0;
char* pData = pRes->data;
@@ -776,10 +775,12 @@ static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, SSingleColumnF
}
// filter data if needed
- if (numOfFilterCols > 0) {
- doSetFilterColumnInfo(pFilterInfo, numOfFilterCols, pBlock);
+ if (pFilterInfo) {
+ //doSetFilterColumnInfo(pFilterInfo, numOfFilterCols, pBlock);
+ doSetFilterColInfo(pFilterInfo, pBlock);
int8_t* p = calloc(pBlock->info.rows, sizeof(int8_t));
- bool all = doFilterDataBlock(pFilterInfo, numOfFilterCols, pBlock->info.rows, p);
+ //bool all = doFilterDataBlock(pFilterInfo, numOfFilterCols, pBlock->info.rows, p);
+ bool all = filterExecute(pFilterInfo, pBlock->info.rows, p);
if (!all) {
doCompactSDataBlock(pBlock, pBlock->info.rows, p);
}
@@ -816,7 +817,7 @@ SSDataBlock* doGetDataBlock(void* param, bool* newgroup) {
pBlock->info.rows = pRes->numOfRows;
if (pRes->numOfRows != 0) {
- doSetupSDataBlock(pRes, pBlock, pInput->pFilterInfo, pInput->numOfFilterCols);
+ doSetupSDataBlock(pRes, pBlock, pInput->pFilterInfo);
*newgroup = false;
return pBlock;
}
@@ -831,7 +832,7 @@ SSDataBlock* doGetDataBlock(void* param, bool* newgroup) {
}
pBlock->info.rows = pRes->numOfRows;
- doSetupSDataBlock(pRes, pBlock, pInput->pFilterInfo, pInput->numOfFilterCols);
+ doSetupSDataBlock(pRes, pBlock, pInput->pFilterInfo);
*newgroup = false;
return pBlock;
}
@@ -871,25 +872,40 @@ SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) {
if (pOperator->status == OP_EXEC_DONE) {
return pJoinInfo->pRes;
}
-
+
SJoinStatus* st0 = &pJoinInfo->status[0];
SColumnInfoData* p0 = taosArrayGet(st0->pBlock->pDataBlock, 0);
int64_t* ts0 = (int64_t*) p0->pData;
+ if (st0->index >= st0->pBlock->info.rows) {
+ continue;
+ }
+
bool prefixEqual = true;
while(1) {
prefixEqual = true;
for (int32_t i = 1; i < pJoinInfo->numOfUpstream; ++i) {
SJoinStatus* st = &pJoinInfo->status[i];
+ ts0 = (int64_t*) p0->pData;
SColumnInfoData* p = taosArrayGet(st->pBlock->pDataBlock, 0);
int64_t* ts = (int64_t*)p->pData;
+ if (st->index >= st->pBlock->info.rows || st0->index >= st0->pBlock->info.rows) {
+ fetchNextBlockIfCompleted(pOperator, newgroup);
+ if (pOperator->status == OP_EXEC_DONE) {
+ return pJoinInfo->pRes;
+ }
+
+ prefixEqual = false;
+ break;
+ }
+
if (ts[st->index] < ts0[st0->index]) { // less than the first
prefixEqual = false;
- if ((++(st->index)) >= st->pBlock->info.rows) {
+ if ((++(st->index)) >= st->pBlock->info.rows) {
fetchNextBlockIfCompleted(pOperator, newgroup);
if (pOperator->status == OP_EXEC_DONE) {
return pJoinInfo->pRes;
@@ -1009,15 +1025,14 @@ static void destroyDummyInputOperator(void* param, int32_t numOfOutput) {
}
// todo this operator servers as the adapter for Operator tree and SqlRes result, remove it later
-SOperatorInfo* createDummyInputOperator(SSqlObj* pSql, SSchema* pSchema, int32_t numOfCols, SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols) {
+SOperatorInfo* createDummyInputOperator(SSqlObj* pSql, SSchema* pSchema, int32_t numOfCols, SFilterInfo* pFilters) {
assert(numOfCols > 0);
STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};
SDummyInputInfo* pInfo = calloc(1, sizeof(SDummyInputInfo));
pInfo->pSql = pSql;
- pInfo->pFilterInfo = pFilterInfo;
- pInfo->numOfFilterCols = numOfFilterCols;
+ pInfo->pFilterInfo = pFilters;
pInfo->pTableQueryInfo = createTmpTableQueryInfo(win);
pInfo->block = calloc(numOfCols, sizeof(SSDataBlock));
@@ -1105,6 +1120,7 @@ void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo, uint64_t objId) {
pRes->completed = (pRes->numOfRows == 0);
}
+/*
static void createInputDataFilterInfo(SQueryInfo* px, int32_t numOfCol1, int32_t* numOfFilterCols, SSingleColumnFilterInfo** pFilterInfo) {
SColumnInfo* tableCols = calloc(numOfCol1, sizeof(SColumnInfo));
for(int32_t i = 0; i < numOfCol1; ++i) {
@@ -1122,6 +1138,7 @@ static void createInputDataFilterInfo(SQueryInfo* px, int32_t numOfCol1, int32_t
tfree(tableCols);
}
+*/
void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQueryInfo* px, SSqlObj* pSql) {
SSqlRes* pOutput = &pSql->res;
@@ -1150,11 +1167,17 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
// if it is a join query, create join operator here
int32_t numOfCol1 = pTableMeta->tableInfo.numOfColumns;
- int32_t numOfFilterCols = 0;
- SSingleColumnFilterInfo* pFilterInfo = NULL;
- createInputDataFilterInfo(px, numOfCol1, &numOfFilterCols, &pFilterInfo);
+ SFilterInfo *pFilters = NULL;
+ STblCond *pCond = NULL;
+ if (px->colCond) {
+ pCond = tsGetTableFilter(px->colCond, pTableMeta->id.uid, 0);
+ if (pCond && pCond->cond) {
+ createQueryFilter(pCond->cond, pCond->len, &pFilters);
+ }
+ //createInputDataFlterInfo(px, numOfCol1, &numOfFilterCols, &pFilterInfo);
+ }
- SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilterInfo, numOfFilterCols);
+ SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilters);
pOutput->precision = pSqlObjList[0]->res.precision;
@@ -1171,15 +1194,21 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
for(int32_t i = 1; i < px->numOfTables; ++i) {
STableMeta* pTableMeta1 = tscGetMetaInfo(px, i)->pTableMeta;
+ numOfCol1 = pTableMeta1->tableInfo.numOfColumns;
+ SFilterInfo *pFilters1 = NULL;
SSchema* pSchema1 = tscGetTableSchema(pTableMeta1);
int32_t n = pTableMeta1->tableInfo.numOfColumns;
- int32_t numOfFilterCols1 = 0;
- SSingleColumnFilterInfo* pFilterInfo1 = NULL;
- createInputDataFilterInfo(px, numOfCol1, &numOfFilterCols1, &pFilterInfo1);
+ if (px->colCond) {
+ pCond = tsGetTableFilter(px->colCond, pTableMeta1->id.uid, i);
+ if (pCond && pCond->cond) {
+ createQueryFilter(pCond->cond, pCond->len, &pFilters1);
+ }
+ //createInputDataFilterInfo(px, numOfCol1, &numOfFilterCols1, &pFilterInfo1);
+ }
- p[i] = createDummyInputOperator(pSqlObjList[i], pSchema1, n, pFilterInfo1, numOfFilterCols1);
+ p[i] = createDummyInputOperator(pSqlObjList[i], pSchema1, n, pFilters1);
memcpy(&schema[offset], pSchema1, n * sizeof(SSchema));
offset += n;
}
@@ -1322,7 +1351,7 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) {
if (pCmd->pTableMetaMap != NULL) {
STableMetaVgroupInfo* p = taosHashIterate(pCmd->pTableMetaMap, NULL);
while (p) {
- tfree(p->pVgroupInfo);
+ tscVgroupInfoClear(p->pVgroupInfo);
tfree(p->pTableMeta);
p = taosHashIterate(pCmd->pTableMetaMap, p);
}
@@ -1350,7 +1379,7 @@ void tscFreeSubobj(SSqlObj* pSql) {
tscDebug("0x%"PRIx64" start to free sub SqlObj, numOfSub:%d", pSql->self, pSql->subState.numOfSub);
for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
- tscDebug("0x%"PRIx64" free sub SqlObj:%p, index:%d", pSql->self, pSql->pSubs[i], i);
+ tscDebug("0x%"PRIx64" free sub SqlObj:0x%"PRIx64", index:%d", pSql->self, pSql->pSubs[i]->self, i);
taos_free_result(pSql->pSubs[i]);
pSql->pSubs[i] = NULL;
}
@@ -1802,7 +1831,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
tscSortRemoveDataBlockDupRows(pOneTableBlock);
char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
- tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName),
+ tscDebug("0x%"PRIx64" name:%s, tid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName),
pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
@@ -2293,18 +2322,14 @@ int32_t tscExprCopyAll(SArray* dst, const SArray* src, bool deepcopy) {
return 0;
}
-bool tscColumnExists(SArray* pColumnList, int32_t columnIndex, uint64_t uid) {
- // ignore the tbname columnIndex to be inserted into source list
- if (columnIndex < 0) {
- return false;
- }
-
+// ignore the tbname columnIndex to be inserted into source list
+int32_t tscColumnExists(SArray* pColumnList, int32_t columnId, uint64_t uid) {
size_t numOfCols = taosArrayGetSize(pColumnList);
int32_t i = 0;
while (i < numOfCols) {
SColumn* pCol = taosArrayGetP(pColumnList, i);
- if ((pCol->columnIndex != columnIndex) || (pCol->tableUid != uid)) {
+ if ((pCol->info.colId != columnId) || (pCol->tableUid != uid)) {
++i;
continue;
} else {
@@ -2313,10 +2338,10 @@ bool tscColumnExists(SArray* pColumnList, int32_t columnIndex, uint64_t uid) {
}
if (i >= numOfCols || numOfCols == 0) {
- return false;
+ return -1;
}
- return true;
+ return i;
}
void tscExprAssign(SExprInfo* dst, const SExprInfo* src) {
@@ -2402,13 +2427,7 @@ SColumn* tscColumnClone(const SColumn* src) {
return NULL;
}
- dst->columnIndex = src->columnIndex;
- dst->tableUid = src->tableUid;
- dst->info.flist.numOfFilters = src->info.flist.numOfFilters;
- dst->info.flist.filterInfo = tFilterInfoDup(src->info.flist.filterInfo, src->info.flist.numOfFilters);
- dst->info.type = src->info.type;
- dst->info.colId = src->info.colId;
- dst->info.bytes = src->info.bytes;
+ tscColumnCopy(dst, src);
return dst;
}
@@ -2417,6 +2436,18 @@ static void tscColumnDestroy(SColumn* pCol) {
free(pCol);
}
+void tscColumnCopy(SColumn* pDest, const SColumn* pSrc) {
+ destroyFilterInfo(&pDest->info.flist);
+
+ pDest->columnIndex = pSrc->columnIndex;
+ pDest->tableUid = pSrc->tableUid;
+ pDest->info.flist.numOfFilters = pSrc->info.flist.numOfFilters;
+ pDest->info.flist.filterInfo = tFilterInfoDup(pSrc->info.flist.filterInfo, pSrc->info.flist.numOfFilters);
+ pDest->info.type = pSrc->info.type;
+ pDest->info.colId = pSrc->info.colId;
+ pDest->info.bytes = pSrc->info.bytes;
+}
+
void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid) {
assert(src != NULL && dst != NULL);
@@ -2717,11 +2748,12 @@ int32_t tscColCondCopy(SArray** dest, const SArray* src) {
*dest = taosArrayInit(s, sizeof(SCond));
for (int32_t i = 0; i < s; ++i) {
- SCond* pCond = taosArrayGet(src, i);
+ STblCond* pCond = taosArrayGet(src, i);
- SCond c = {0};
+ STblCond c = {0};
c.len = pCond->len;
c.uid = pCond->uid;
+ c.idx = pCond->idx;
if (pCond->len > 0) {
assert(pCond->cond != NULL);
@@ -2746,7 +2778,7 @@ void tscColCondRelease(SArray** pCond) {
size_t s = taosArrayGetSize(*pCond);
for (int32_t i = 0; i < s; ++i) {
- SCond* p = taosArrayGet(*pCond, i);
+ STblCond* p = taosArrayGet(*pCond, i);
tfree(p->cond);
}
@@ -4157,7 +4189,10 @@ SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo) {
size_t size = sizeof(SVgroupInfo) * pVgroupsInfo->numOfVgroups + sizeof(SVgroupsInfo);
SVgroupsInfo* pInfo = calloc(1, size);
- memcpy(pInfo, pVgroupsInfo, size);
+ pInfo->numOfVgroups = pVgroupsInfo->numOfVgroups;
+ for (int32_t m = 0; m < pVgroupsInfo->numOfVgroups; ++m) {
+ tscSVgroupInfoCopy(&pInfo->vgroups[m], &pVgroupsInfo->vgroups[m]);
+ }
return pInfo;
}
diff --git a/src/client/tests/timeParseTest.cpp b/src/client/tests/timeParseTest.cpp
index 692398e3b7..3cc6d541e0 100644
--- a/src/client/tests/timeParseTest.cpp
+++ b/src/client/tests/timeParseTest.cpp
@@ -98,7 +98,7 @@ TEST(testCase, parse_time) {
taosParseTime(t41, &time, strlen(t41), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 852048000999);
-// int64_t k = timezone;
+ // int64_t k = timezone;
char t42[] = "1997-1-1T0:0:0.999999999Z";
taosParseTime(t42, &time, strlen(t42), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 852048000999 - timezone * MILLISECOND_PER_SECOND);
diff --git a/src/connector/go b/src/connector/go
index 7a26c432f8..b8f76da4a7 160000
--- a/src/connector/go
+++ b/src/connector/go
@@ -1 +1 @@
-Subproject commit 7a26c432f8b4203e42344ff3290b9b9b01b983d5
+Subproject commit b8f76da4a708d158ec3cc4b844571dc4414e36b4
diff --git a/src/connector/hivemq-tdengine-extension b/src/connector/hivemq-tdengine-extension
index b62a26ecc1..ce52010141 160000
--- a/src/connector/hivemq-tdengine-extension
+++ b/src/connector/hivemq-tdengine-extension
@@ -1 +1 @@
-Subproject commit b62a26ecc164a310104df57691691b237e091c89
+Subproject commit ce5201014136503d34fecbd56494b67b4961056c
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BadLocaleSettingTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BadLocaleSettingTest.java
new file mode 100644
index 0000000000..b351ee94bb
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BadLocaleSettingTest.java
@@ -0,0 +1,58 @@
+package com.taosdata.jdbc.cases;
+
+
+import com.taosdata.jdbc.TSDBDriver;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import java.io.IOException;
+
+import java.sql.*;
+import java.util.Properties;
+
+public class BadLocaleSettingTest {
+
+ private static final String host = "127.0.0.1";
+ private static final String dbName = "bad_locale_test";
+ private static Connection conn;
+
+ @Test
+ public void canSetLocale() {
+ try {
+ Properties properties = new Properties();
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+
+ String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
+ conn = DriverManager.getConnection(url, properties);
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists " + dbName);
+ stmt.execute("create database if not exists " + dbName);
+ stmt.execute("use " + dbName);
+ stmt.execute("drop table if exists weather");
+ stmt.execute("create table weather(ts timestamp, temperature float, humidity int)");
+ stmt.executeUpdate("insert into weather values(1624071506435, 12.3, 4)");
+ stmt.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @BeforeClass
+ public static void beforeClass() {
+ System.setProperty("sun.jnu.encoding", "ANSI_X3.4-1968");
+ System.setProperty("file.encoding", "ANSI_X3.4-1968");
+ }
+
+ @AfterClass
+ public static void afterClass() {
+ try {
+ if (conn != null)
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java
new file mode 100644
index 0000000000..042d76d576
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java
@@ -0,0 +1,570 @@
+package com.taosdata.jdbc.cases;
+
+
+import com.taosdata.jdbc.TSDBDriver;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.After;
+import org.junit.Test;
+
+import java.sql.*;
+import java.util.Properties;
+import java.text.Format;
+import java.text.SimpleDateFormat;
+
+public class TimestampPrecisionInNanoInJniTest {
+
+ private static final String host = "127.0.0.1";
+ private static final String ns_timestamp_db = "ns_precision_test";
+ private static final long timestamp1 = System.currentTimeMillis();
+ private static final long timestamp2 = timestamp1 * 1000_000 + 123455;
+ private static final long timestamp3 = (timestamp1 + 10) * 1000_000 + 123456;
+ private static final Format format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
+ private static final String date1 = format.format(new Date(timestamp1));
+ private static final String date4 = format.format(new Date(timestamp1 + 10l));
+ private static final String date2 = date1 + "123455";
+ private static final String date3 = date4 + "123456";
+
+
+ private static Connection conn;
+
+ @BeforeClass
+ public static void beforeClass() throws SQLException {
+ Properties properties = new Properties();
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+
+ String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
+ conn = DriverManager.getConnection(url, properties);
+
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists " + ns_timestamp_db);
+ stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'");
+ stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)");
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)");
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 127)");
+ stmt.close();
+ }
+
+ @After
+ public void afterEach() throws SQLException {
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists " + ns_timestamp_db);
+ stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'");
+ stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)");
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)");
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 127)");
+ stmt.close();
+ }
+
+ @AfterClass
+ public static void afterClass() {
+ try {
+ if (conn != null)
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ private void checkCount(long count, ResultSet rs) throws SQLException {
+ if (count == 0) {
+ Assert.fail();
+ }
+ rs.next();
+ long test_count = rs.getLong(1);
+ Assert.assertEquals(count, test_count);
+ }
+
+ private void checkTime(long ts, ResultSet rs) throws SQLException {
+ rs.next();
+ int nanos = rs.getTimestamp(1).getNanos();
+ Assert.assertEquals(ts % 1000_000_000l, nanos);
+ long test_ts = rs.getLong(1);
+ Assert.assertEquals(ts / 1000_000l, test_ts);
+ }
+
+ @Test
+ public void canInsertTimestampAndQueryByEqualToInDateTypeInBothFirstAndSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + date3 + "'");
+ checkTime(timestamp3, rs);
+ rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + date3 + "'");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canImportTimestampAndQueryByEqualToInDateTypeInBothFirstAndSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date1 + "123123\", \"" + date1 + "123123\", 127)");
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'");
+ checkTime(timestamp1 * 1000_000l + 123123l, rs);
+ rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "123123'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "123123'");
+ checkTime(timestamp1 * 1000_000l + 123123l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canInsertTimestampAndQueryByEqualToInNumberTypeInBothFirstAndSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + timestamp2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + timestamp2 + "'");
+ checkTime(timestamp2, rs);
+ rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp2 + "'");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canImportTimestampAndQueryByEqualToInNumberTypeInBothFirstAndSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ long timestamp4 = timestamp1 * 1000_000 + 123123;
+ stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp4 + ", " + timestamp4 + ", 127)");
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'");
+ checkTime(timestamp4, rs);
+ rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp4 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp4 + "'");
+ checkTime(timestamp4, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canSelectLastRowFromWeatherForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select last(ts) from " + ns_timestamp_db + ".weather");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canSelectLastRowFromWeatherForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select last(ts2) from " + ns_timestamp_db + ".weather");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canSelectFirstRowFromWeatherForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select first(ts) from " + ns_timestamp_db + ".weather");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canSelectFirstRowFromWeatherForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select first(ts2) from " + ns_timestamp_db + ".weather");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLargerThanInDateTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts > '" + date2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts > '" + date2 + "'");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLargerThanInDateTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 > '" + date2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 > '" + date2 + "'");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLargerThanInNumberTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts > '" + timestamp2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts > '" + timestamp2 + "'");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLargerThanInNumberTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 > '" + timestamp2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 > '" + timestamp2 + "'");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLargerThanOrEqualToInDateTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts >= '" + date2 + "'");
+ checkCount(2l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLargerThanOrEqualToInDateTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 >= '" + date2 + "'");
+ checkCount(2l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLargerThanOrEqualToInNumberTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts >= '" + timestamp2 + "'");
+ checkCount(2l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLargerThanOrEqualToInNumberTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 >= '" + timestamp2 + "'");
+ checkCount(2l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLessThanInDateTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts < '" + date3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts < '" + date3 + "'");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLessThanInDateTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 < '" + date3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 < '" + date3 + "'");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLessThanInNumberTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts < '" + timestamp3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts < '" + timestamp3 + "'");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLessThanInNumberTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 < '" + timestamp3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 < '" + timestamp3 + "'");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLessThanOrEqualToInDateTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "'");
+ checkCount(2l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLessThanOrEqualToInDateTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "'");
+ checkCount(2l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLessThanOrEqualToInNumberTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "'");
+ checkCount(2l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLessThanOrEqualToInNumberTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "'");
+ checkCount(2l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryBetweenAndInDateTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "' AND ts > '" + date2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "' AND ts > '" + date2 + "'");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryBetweenAndInDateTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "' AND ts2 > '" + date2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "' AND ts2 > '" + date2 + "'");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryBetweenAndInNumberTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "' AND ts > '" + timestamp2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "' AND ts > '" + timestamp2 + "'");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryBetweenAndInNumberTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "' AND ts2 > '" + timestamp2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "' AND ts2 > '" + timestamp2 + "'");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryNotEqualToInDateTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <> '" + date3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <> '" + date3 + "'");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryNotEqualToInNumberTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <> '" + timestamp3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <> '" + timestamp3 + "'");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryNotEqualInDateTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 != '" + date3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 != '" + date3 + "'");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryNotEqualInNumberTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 != '" + timestamp3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 != '" + timestamp3 + "'");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canInsertTimestampWithNowAndNsOffsetInBothFirstAndSecondCol(){
+ try (Statement stmt = conn.createStatement()) {
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(now + 1000b, now - 1000b, 128)");
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather");
+ checkCount(3l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canIntervalAndSlidingAcceptNsUnitForFirstCol(){
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)");
+ rs.next();
+ long sum = rs.getLong(2);
+ Assert.assertEquals(127l, sum);
+ rs.next();
+ sum = rs.getLong(2);
+ Assert.assertEquals(128l, sum);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canIntervalAndSlidingAcceptNsUnitForSecondCol(){
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts2 >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)");
+ rs.next();
+ long sum = rs.getLong(2);
+ Assert.assertEquals(127l, sum);
+ rs.next();
+ sum = rs.getLong(2);
+ Assert.assertEquals(128l, sum);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void testDataOutOfRangeExceptionForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(123456789012345678, 1234567890123456789, 127)");
+ } catch (SQLException e) {
+ Assert.assertEquals("TDengine ERROR (8000060b): Timestamp data out of range", e.getMessage());
+ }
+ }
+
+ @Test
+ public void testDataOutOfRangeExceptionForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(1234567890123456789, 123456789012345678, 127)");
+ } catch (SQLException e) {
+ Assert.assertEquals("TDengine ERROR (8000060b): Timestamp data out of range", e.getMessage());
+ }
+ }
+
+ @Test
+ public void willAutomaticallyFillToNsUnitWithZerosForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "', '" + date1 + "', 127)");
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "000000'");
+ checkCount(1l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void willAutomaticallyFillToNsUnitWithZerosForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "', '" + date1 + "', 127)");
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "000000'");
+ checkCount(1l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void willAutomaticallyDropDigitExceedNsDigitNumberForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "999999999', '" + date1 + "999999999', 127)");
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "999999'");
+ checkCount(1l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void willAutomaticallyDropDigitExceedNsDigitNumberForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "999999999', '" + date1 + "999999999', 127)");
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "999999'");
+ checkCount(1l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+}
diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c
index cf633502c1..61b11579bf 100644
--- a/src/dnode/src/dnodeMain.c
+++ b/src/dnode/src/dnodeMain.c
@@ -303,6 +303,8 @@ static int32_t dnodeInitStorage() {
dnodeCheckDataDirOpenned(tsDnodeDir);
+ taosGetDisk();
+ taosPrintDiskInfo();
dInfo("dnode storage is initialized at %s", tsDnodeDir);
return 0;
}
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index ac0cb02a3a..6213602032 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -101,7 +101,7 @@ extern const int32_t TYPE_BYTES[15];
#define TSDB_TIME_PRECISION_MICRO_STR "us"
#define TSDB_TIME_PRECISION_NANO_STR "ns"
-#define TSDB_TICK_PER_SECOND(precision) ((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L))
+#define TSDB_TICK_PER_SECOND(precision) ((int64_t)((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L)))
#define T_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#define T_APPEND_MEMBER(dst, ptr, type, member) \
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 865224d2c3..38f0ecb219 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -79,10 +79,9 @@ enum TEST_MODE {
#define MAX_SQL_SIZE 65536
#define BUFFER_SIZE (65536*2)
-#define COND_BUF_LEN BUFFER_SIZE - 30
+#define COND_BUF_LEN (BUFFER_SIZE - 30)
#define MAX_USERNAME_SIZE 64
#define MAX_PASSWORD_SIZE 64
-#define MAX_DB_NAME_SIZE 64
#define MAX_HOSTNAME_SIZE 64
#define MAX_TB_NAME_SIZE 64
#define MAX_DATA_SIZE (16*1024)+20 // max record len: 16*1024, timestamp string and ,('') need extra space
@@ -90,7 +89,7 @@ enum TEST_MODE {
#define OPT_ABORT 1 /* –abort */
#define STRING_LEN 60000
#define MAX_PREPARED_RAND 1000000
-#define MAX_FILE_NAME_LEN 256
+#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255.
#define MAX_SAMPLES_ONCE_FROM_FILE 10000
#define MAX_NUM_DATATYPE 10
@@ -195,13 +194,6 @@ enum _describe_table_index {
TSDB_MAX_DESCRIBE_METRIC
};
-typedef struct {
- char field[TSDB_COL_NAME_LEN + 1];
- char type[16];
- int length;
- char note[128];
-} SColDes;
-
/* Used by main to communicate with parse_opt. */
static char *g_dupstr = NULL;
@@ -247,16 +239,16 @@ typedef struct SArguments_S {
} SArguments;
typedef struct SColumn_S {
- char field[TSDB_COL_NAME_LEN + 1];
- char dataType[MAX_TB_NAME_SIZE];
+ char field[TSDB_COL_NAME_LEN];
+ char dataType[16];
uint32_t dataLen;
char note[128];
} StrColumn;
typedef struct SSuperTable_S {
- char sTblName[MAX_TB_NAME_SIZE+1];
- char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample
- char childTblPrefix[MAX_TB_NAME_SIZE];
+ char sTblName[TSDB_TABLE_NAME_LEN];
+ char dataSource[MAX_TB_NAME_SIZE]; // rand_gen or sample
+ char childTblPrefix[TSDB_TABLE_NAME_LEN - 20]; // 20 characters reserved for seq
char insertMode[MAX_TB_NAME_SIZE]; // taosc, rest
uint16_t childTblExists;
int64_t childTblCount;
@@ -277,8 +269,8 @@ typedef struct SSuperTable_S {
int64_t timeStampStep;
char startTimestamp[MAX_TB_NAME_SIZE];
char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json
- char sampleFile[MAX_FILE_NAME_LEN+1];
- char tagsFile[MAX_FILE_NAME_LEN+1];
+ char sampleFile[MAX_FILE_NAME_LEN];
+ char tagsFile[MAX_FILE_NAME_LEN];
uint32_t columnCount;
StrColumn columns[MAX_COLUMN_COUNT];
@@ -305,7 +297,7 @@ typedef struct SSuperTable_S {
} SSuperTable;
typedef struct {
- char name[TSDB_DB_NAME_LEN + 1];
+ char name[TSDB_DB_NAME_LEN];
char create_time[32];
int64_t ntables;
int32_t vgroups;
@@ -341,11 +333,11 @@ typedef struct SDbCfg_S {
int cache;
int blocks;
int quorum;
- char precision[MAX_TB_NAME_SIZE];
+ char precision[8];
} SDbCfg;
typedef struct SDataBase_S {
- char dbName[MAX_DB_NAME_SIZE];
+ char dbName[TSDB_DB_NAME_LEN];
bool drop; // 0: use exists, 1: if exists, drop then new create
SDbCfg dbCfg;
uint64_t superTblCount;
@@ -353,14 +345,14 @@ typedef struct SDataBase_S {
} SDataBase;
typedef struct SDbs_S {
- char cfgDir[MAX_FILE_NAME_LEN+1];
+ char cfgDir[MAX_FILE_NAME_LEN];
char host[MAX_HOSTNAME_SIZE];
struct sockaddr_in serv_addr;
uint16_t port;
char user[MAX_USERNAME_SIZE];
char password[MAX_PASSWORD_SIZE];
- char resultFile[MAX_FILE_NAME_LEN+1];
+ char resultFile[MAX_FILE_NAME_LEN];
bool use_metric;
bool insert_only;
bool do_aggreFunc;
@@ -387,7 +379,7 @@ typedef struct SpecifiedQueryInfo_S {
bool subscribeRestart;
int subscribeKeepProgress;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
- char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
+ char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
int resubAfterConsume[MAX_QUERY_SQL_COUNT];
int endAfterConsume[MAX_QUERY_SQL_COUNT];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
@@ -398,7 +390,7 @@ typedef struct SpecifiedQueryInfo_S {
} SpecifiedQueryInfo;
typedef struct SuperQueryInfo_S {
- char sTblName[MAX_TB_NAME_SIZE+1];
+ char sTblName[TSDB_TABLE_NAME_LEN];
uint64_t queryInterval; // 0: unlimit > 0 loop/s
uint32_t threadCnt;
uint32_t asyncMode; // 0: sync, 1: async
@@ -407,10 +399,10 @@ typedef struct SuperQueryInfo_S {
int subscribeKeepProgress;
uint64_t queryTimes;
int64_t childTblCount;
- char childTblPrefix[MAX_TB_NAME_SIZE];
+ char childTblPrefix[TSDB_TABLE_NAME_LEN - 20]; // 20 characters reserved for seq
int sqlCount;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
- char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
+ char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
int resubAfterConsume;
int endAfterConsume;
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
@@ -420,13 +412,13 @@ typedef struct SuperQueryInfo_S {
} SuperQueryInfo;
typedef struct SQueryMetaInfo_S {
- char cfgDir[MAX_FILE_NAME_LEN+1];
+ char cfgDir[MAX_FILE_NAME_LEN];
char host[MAX_HOSTNAME_SIZE];
uint16_t port;
struct sockaddr_in serv_addr;
char user[MAX_USERNAME_SIZE];
char password[MAX_PASSWORD_SIZE];
- char dbName[MAX_DB_NAME_SIZE+1];
+ char dbName[TSDB_DB_NAME_LEN];
char queryMode[MAX_TB_NAME_SIZE]; // taosc, rest
SpecifiedQueryInfo specifiedQueryInfo;
@@ -438,11 +430,11 @@ typedef struct SThreadInfo_S {
TAOS * taos;
TAOS_STMT *stmt;
int threadID;
- char db_name[MAX_DB_NAME_SIZE+1];
+ char db_name[TSDB_DB_NAME_LEN];
uint32_t time_precision;
char filePath[4096];
FILE *fp;
- char tb_prefix[MAX_TB_NAME_SIZE];
+ char tb_prefix[TSDB_TABLE_NAME_LEN];
uint64_t start_table_from;
uint64_t end_table_to;
int64_t ntables;
@@ -608,7 +600,7 @@ SArguments g_args = {
1, // query_times
0, // interlace_rows;
30000, // num_of_RPR
- (1024*1024), // max_sql_len
+ (1024*1024), // max_sql_len
10000, // num_of_tables
10000, // num_of_DPT
0, // abort
@@ -2501,6 +2493,13 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
char* pTblName = childTblName;
while((row = taos_fetch_row(res)) != NULL) {
int32_t* len = taos_fetch_lengths(res);
+
+ if (0 == strlen((char *)row[0])) {
+ errorPrint("%s() LN%d, No.%"PRId64" table return empty name\n",
+ __func__, __LINE__, count);
+ exit(-1);
+ }
+
tstrncpy(pTblName, (char *)row[0], len[0]+1);
//printf("==== sub table name: %s\n", pTblName);
count++;
@@ -3035,7 +3034,7 @@ static int startMultiThreadCreateChildTable(
for (int64_t i = 0; i < threads; i++) {
threadInfo *pThreadInfo = infos + i;
pThreadInfo->threadID = i;
- tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE);
+ tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN);
pThreadInfo->superTblInfo = superTblInfo;
verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name);
pThreadInfo->taos = taos_connect(
@@ -3326,7 +3325,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
goto PARSE_OVER;
}
//tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
- tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
+ tstrncpy(columnCase.dataType, dataType->valuestring, strlen(dataType->valuestring) + 1);
cJSON* dataLen = cJSON_GetObjectItem(column, "len");
if (dataLen && dataLen->type == cJSON_Number) {
@@ -3341,7 +3340,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
for (int n = 0; n < count; ++n) {
tstrncpy(superTbls->columns[index].dataType,
- columnCase.dataType, MAX_TB_NAME_SIZE);
+ columnCase.dataType, strlen(columnCase.dataType) + 1);
superTbls->columns[index].dataLen = columnCase.dataLen;
index++;
}
@@ -3397,7 +3396,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
__func__, __LINE__);
goto PARSE_OVER;
}
- tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
+ tstrncpy(columnCase.dataType, dataType->valuestring, strlen(dataType->valuestring) + 1);
cJSON* dataLen = cJSON_GetObjectItem(tag, "len");
if (dataLen && dataLen->type == cJSON_Number) {
@@ -3412,7 +3411,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
for (int n = 0; n < count; ++n) {
tstrncpy(superTbls->tags[index].dataType, columnCase.dataType,
- MAX_TB_NAME_SIZE);
+ strlen(columnCase.dataType) + 1);
superTbls->tags[index].dataLen = columnCase.dataLen;
index++;
}
@@ -3635,7 +3634,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
printf("ERROR: failed to read json, db name not found\n");
goto PARSE_OVER;
}
- tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, MAX_DB_NAME_SIZE);
+ tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, TSDB_DB_NAME_LEN);
cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop");
if (drop && drop->type == cJSON_String && drop->valuestring != NULL) {
@@ -3656,10 +3655,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (precision && precision->type == cJSON_String
&& precision->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring,
- MAX_DB_NAME_SIZE);
+ 8);
} else if (!precision) {
- //tstrncpy(g_Dbs.db[i].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
- memset(g_Dbs.db[i].dbCfg.precision, 0, MAX_DB_NAME_SIZE);
+ memset(g_Dbs.db[i].dbCfg.precision, 0, 8);
} else {
printf("ERROR: failed to read json, precision not found\n");
goto PARSE_OVER;
@@ -3836,7 +3834,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER;
}
tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring,
- MAX_TB_NAME_SIZE);
+ TSDB_TABLE_NAME_LEN);
cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix");
if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) {
@@ -3844,7 +3842,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER;
}
tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring,
- MAX_DB_NAME_SIZE);
+ TSDB_TABLE_NAME_LEN - 20);
cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table");
if (autoCreateTbl
@@ -3912,9 +3910,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (dataSource && dataSource->type == cJSON_String
&& dataSource->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].dataSource,
- dataSource->valuestring, MAX_DB_NAME_SIZE);
+ dataSource->valuestring, TSDB_DB_NAME_LEN);
} else if (!dataSource) {
- tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE);
+ tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", TSDB_DB_NAME_LEN);
} else {
errorPrint("%s() LN%d, failed to read json, data_source not found\n",
__func__, __LINE__);
@@ -3972,10 +3970,10 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp");
if (ts && ts->type == cJSON_String && ts->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp,
- ts->valuestring, MAX_DB_NAME_SIZE);
+ ts->valuestring, TSDB_DB_NAME_LEN);
} else if (!ts) {
tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp,
- "now", MAX_DB_NAME_SIZE);
+ "now", TSDB_DB_NAME_LEN);
} else {
printf("ERROR: failed to read json, start_timestamp not found\n");
goto PARSE_OVER;
@@ -3995,9 +3993,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (sampleFormat && sampleFormat->type
== cJSON_String && sampleFormat->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat,
- sampleFormat->valuestring, MAX_DB_NAME_SIZE);
+ sampleFormat->valuestring, TSDB_DB_NAME_LEN);
} else if (!sampleFormat) {
- tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", MAX_DB_NAME_SIZE);
+ tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", TSDB_DB_NAME_LEN);
} else {
printf("ERROR: failed to read json, sample_format not found\n");
goto PARSE_OVER;
@@ -4242,7 +4240,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* dbs = cJSON_GetObjectItem(root, "databases");
if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) {
- tstrncpy(g_queryInfo.dbName, dbs->valuestring, MAX_DB_NAME_SIZE);
+ tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN);
} else if (!dbs) {
printf("ERROR: failed to read json, databases not found\n");
goto PARSE_OVER;
@@ -4492,7 +4490,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (stblname && stblname->type == cJSON_String
&& stblname->valuestring != NULL) {
tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring,
- MAX_TB_NAME_SIZE);
+ TSDB_TABLE_NAME_LEN);
} else {
errorPrint("%s() LN%d, failed to read json, super table name input error\n",
__func__, __LINE__);
@@ -5103,7 +5101,7 @@ static int32_t generateStbDataTail(
} else {
retLen = getRowDataFromSample(
data,
- remainderBufLen,
+ remainderBufLen < MAX_DATA_SIZE ? remainderBufLen : MAX_DATA_SIZE,
startTime + superTblInfo->timeStampStep * k,
superTblInfo,
pSamplePos);
@@ -6004,6 +6002,12 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n",
__func__, __LINE__,
pThreadInfo->threadID, tableSeq, tableName);
+ if (0 == strlen(tableName)) {
+ errorPrint("[%d] %s() LN%d, getTableName return null\n",
+ pThreadInfo->threadID, __func__, __LINE__);
+ free(pThreadInfo->buffer);
+ return NULL;
+ }
int64_t remainderBufLen = maxSqlLen;
char *pstr = pThreadInfo->buffer;
@@ -6296,16 +6300,6 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
}
- // read sample data from file first
- if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource,
- "sample", strlen("sample")))) {
- if (0 != prepareSampleDataForSTable(superTblInfo)) {
- errorPrint("%s() LN%d, prepare sample data for stable failed!\n",
- __func__, __LINE__);
- exit(-1);
- }
- }
-
TAOS* taos0 = taos_connect(
g_Dbs.host, g_Dbs.user,
g_Dbs.password, db_name, g_Dbs.port);
@@ -6411,7 +6405,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
for (int i = 0; i < threads; i++) {
threadInfo *pThreadInfo = infos + i;
pThreadInfo->threadID = i;
- tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE);
+ tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN);
pThreadInfo->time_precision = timePrec;
pThreadInfo->superTblInfo = superTblInfo;
@@ -6855,7 +6849,7 @@ static void *specifiedTableQuery(void *sarg) {
}
}
- char sqlStr[MAX_DB_NAME_SIZE + 5];
+ char sqlStr[TSDB_DB_NAME_LEN + 5];
sprintf(sqlStr, "use %s", g_queryInfo.dbName);
if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
taos_close(pThreadInfo->taos);
@@ -7331,12 +7325,6 @@ static void *superSubscribe(void *sarg) {
performancePrint("st: %"PRIu64" et: %"PRIu64" delta: %"PRIu64"\n", st, et, (et - st));
if (res) {
- if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
- sprintf(pThreadInfo->filePath, "%s-%d",
- g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
- pThreadInfo->threadID);
- fetchResult(res, pThreadInfo);
- }
if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
@@ -7443,10 +7431,10 @@ static void *specifiedSubscribe(void *sarg) {
sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
- fetchResult(
- g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID],
- pThreadInfo);
}
+ fetchResult(
+ g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID],
+ pThreadInfo);
g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++;
if ((g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq] != -1)
@@ -7683,9 +7671,9 @@ static void setParaFromArg(){
g_Dbs.dbCount = 1;
g_Dbs.db[0].drop = true;
- tstrncpy(g_Dbs.db[0].dbName, g_args.database, MAX_DB_NAME_SIZE);
+ tstrncpy(g_Dbs.db[0].dbName, g_args.database, TSDB_DB_NAME_LEN);
g_Dbs.db[0].dbCfg.replica = g_args.replica;
- tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
+ tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", 8);
tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN);
@@ -7707,7 +7695,7 @@ static void setParaFromArg(){
if (g_args.use_metric) {
g_Dbs.db[0].superTblCount = 1;
- tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE);
+ tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", TSDB_TABLE_NAME_LEN);
g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables;
g_Dbs.threadCount = g_args.num_of_threads;
g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
@@ -7718,7 +7706,7 @@ static void setParaFromArg(){
g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange;
g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio;
tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix,
- g_args.tb_prefix, MAX_TB_NAME_SIZE);
+ g_args.tb_prefix, TSDB_TABLE_NAME_LEN - 20);
tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].iface = g_args.iface;
tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp,
@@ -7735,7 +7723,7 @@ static void setParaFromArg(){
}
tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
- data_type[i], MAX_TB_NAME_SIZE);
+ data_type[i], strlen(data_type[i]) + 1);
g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary;
g_Dbs.db[0].superTbls[0].columnCount++;
}
@@ -7746,18 +7734,18 @@ static void setParaFromArg(){
for (int i = g_Dbs.db[0].superTbls[0].columnCount;
i < g_args.num_of_CPR; i++) {
tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
- "INT", MAX_TB_NAME_SIZE);
+ "INT", strlen("INT") + 1);
g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0;
g_Dbs.db[0].superTbls[0].columnCount++;
}
}
tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType,
- "INT", MAX_TB_NAME_SIZE);
+ "INT", strlen("INT") + 1);
g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0;
tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType,
- "BINARY", MAX_TB_NAME_SIZE);
+ "BINARY", strlen("BINARY") + 1);
g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary;
g_Dbs.db[0].superTbls[0].tagCount = 2;
} else {
@@ -7893,11 +7881,11 @@ static void queryResult() {
pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
tstrncpy(pThreadInfo->tb_prefix,
- g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE);
+ g_Dbs.db[0].superTbls[0].childTblPrefix, TSDB_TABLE_NAME_LEN - 20);
} else {
pThreadInfo->ntables = g_args.num_of_tables;
pThreadInfo->end_table_to = g_args.num_of_tables -1;
- tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
+ tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN);
}
pThreadInfo->taos = taos_connect(
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
index 15db83297c..05c6b1efbb 100644
--- a/src/kit/taosdump/taosdump.c
+++ b/src/kit/taosdump/taosdump.c
@@ -29,6 +29,9 @@
#define COMMAND_SIZE 65536
//#define DEFAULT_DUMP_FILE "taosdump.sql"
+// clamp lengths passed to strncpy so they never exceed the destination buffer (prevents overflow)
+#define min(a, b) (((a) < (b)) ? (a) : (b))
+
int converStringToReadable(char *str, int size, char *buf, int bufsize);
int convertNCharToReadable(char *str, int size, char *buf, int bufsize);
void taosDumpCharset(FILE *fp);
@@ -1119,12 +1122,11 @@ int taosGetTableDes(
TAOS_FIELD *fields = taos_fetch_fields(res);
tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
-
while ((row = taos_fetch_row(res)) != NULL) {
strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
- fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
+ min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
tableDes->cols[count].length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
strncpy(tableDes->cols[count].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
@@ -1575,7 +1577,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
- fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
+ min(TSDB_TABLE_NAME_LEN, fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes));
taosWrite(fd, &tableRecord, sizeof(STableRecord));
@@ -1858,13 +1860,13 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
//pstr = stpcpy(pstr, tbuf);
//*(pstr++) = '\'';
- pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
break;
}
case TSDB_DATA_TYPE_NCHAR: {
char tbuf[COMMAND_SIZE] = {0};
convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
- pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
break;
}
case TSDB_DATA_TYPE_TIMESTAMP:
@@ -1897,7 +1899,8 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
total_sqlstr_len += curr_sqlstr_len;
- if ((count >= arguments->data_batch) || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) {
+ if ((count >= arguments->data_batch)
+ || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) {
fprintf(fp, ";\n");
count = 0;
} //else {
@@ -1905,6 +1908,8 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
//}
}
+ printf("total_sqlstr_len: %d\n", total_sqlstr_len);
+
fprintf(fp, "\n");
atomic_add_fetch_64(&totalDumpOutRows, totalRows);
diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c
index a087b076a5..e3feea7d3a 100644
--- a/src/mnode/src/mnodeDnode.c
+++ b/src/mnode/src/mnodeDnode.c
@@ -101,6 +101,8 @@ static int32_t mnodeDnodeActionInsert(SSdbRow *pRow) {
pDnode->offlineReason = TAOS_DN_OFF_STATUS_NOT_RECEIVED;
}
+ pDnode->customScore = 0;
+
dnodeUpdateEp(pDnode->dnodeId, pDnode->dnodeEp, pDnode->dnodeFqdn, &pDnode->dnodePort);
mnodeUpdateDnodeEps();
@@ -1296,4 +1298,4 @@ int32_t mnodeCompactDnodes() {
mInfo("end to compact dnodes table...");
return 0;
-}
\ No newline at end of file
+}
diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c
index 032c6ee94b..20edb02c38 100644
--- a/src/mnode/src/mnodeTable.c
+++ b/src/mnode/src/mnodeTable.c
@@ -1068,7 +1068,9 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
pStable->info.tableId = strdup(pCreate->tableName);
pStable->info.type = TSDB_SUPER_TABLE;
pStable->createdTime = taosGetTimestampMs();
- pStable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
+ int64_t x = (us&0x000000FFFFFFFFFF);
+ x = x<<24;
+ pStable->uid = x + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
pStable->sversion = 0;
pStable->tversion = 0;
pStable->numOfColumns = numOfColumns;
@@ -1740,16 +1742,22 @@ static int32_t mnodeGetSuperTableMeta(SMnodeMsg *pMsg) {
return TSDB_CODE_SUCCESS;
}
-static int32_t calculateVgroupMsgLength(SSTableVgroupMsg* pInfo, int32_t numOfTable) {
+static int32_t doGetVgroupInfoLength(char* name) {
+ SSTableObj *pTable = mnodeGetSuperTable(name);
+ int32_t len = 0;
+ if (pTable != NULL && pTable->vgHash != NULL) {
+ len = (taosHashGetSize(pTable->vgHash) * sizeof(SVgroupMsg) + sizeof(SVgroupsMsg));
+ }
+
+ mnodeDecTableRef(pTable);
+ return len;
+}
+
+static int32_t getVgroupInfoLength(SSTableVgroupMsg* pInfo, int32_t numOfTable) {
int32_t contLen = sizeof(SSTableVgroupRspMsg) + 32 * sizeof(SVgroupMsg) + sizeof(SVgroupsMsg);
for (int32_t i = 0; i < numOfTable; ++i) {
char *stableName = (char *)pInfo + sizeof(SSTableVgroupMsg) + (TSDB_TABLE_FNAME_LEN)*i;
- SSTableObj *pTable = mnodeGetSuperTable(stableName);
- if (pTable != NULL && pTable->vgHash != NULL) {
- contLen += (taosHashGetSize(pTable->vgHash) * sizeof(SVgroupMsg) + sizeof(SVgroupsMsg));
- }
-
- mnodeDecTableRef(pTable);
+ contLen += doGetVgroupInfoLength(stableName);
}
return contLen;
@@ -1820,7 +1828,7 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) {
int32_t numOfTable = htonl(pInfo->numOfTables);
// calculate the required space.
- int32_t contLen = calculateVgroupMsgLength(pInfo, numOfTable);
+ int32_t contLen = getVgroupInfoLength(pInfo, numOfTable);
SSTableVgroupRspMsg *pRsp = rpcMallocCont(contLen);
if (pRsp == NULL) {
return TSDB_CODE_MND_OUT_OF_MEMORY;
@@ -2860,6 +2868,27 @@ static void mnodeProcessAlterTableRsp(SRpcMsg *rpcMsg) {
}
}
+static SMultiTableMeta* ensureMsgBufferSpace(SMultiTableMeta *pMultiMeta, SArray* pList, int32_t* totalMallocLen, int32_t numOfVgroupList) {
+ int32_t len = 0;
+ for (int32_t i = 0; i < numOfVgroupList; ++i) {
+ char *name = taosArrayGetP(pList, i);
+ len += doGetVgroupInfoLength(name);
+ }
+
+ if (len + pMultiMeta->contLen > (*totalMallocLen)) {
+ while (len + pMultiMeta->contLen > (*totalMallocLen)) {
+ (*totalMallocLen) *= 2;
+ }
+
+ pMultiMeta = rpcReallocCont(pMultiMeta, *totalMallocLen);
+ if (pMultiMeta == NULL) {
+ return NULL;
+ }
+ }
+
+ return pMultiMeta;
+}
+
static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
SMultiTableInfoMsg *pInfo = pMsg->rpcMsg.pCont;
@@ -2950,8 +2979,6 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
}
}
- char* msg = (char*) pMultiMeta + pMultiMeta->contLen;
-
// add the additional super table names that needs the vgroup info
for(;t < num; ++t) {
taosArrayPush(pList, &nameList[t]);
@@ -2961,6 +2988,13 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
int32_t numOfVgroupList = (int32_t) taosArrayGetSize(pList);
pMultiMeta->numOfVgroup = htonl(numOfVgroupList);
+ pMultiMeta = ensureMsgBufferSpace(pMultiMeta, pList, &totalMallocLen, numOfVgroupList);
+ if (pMultiMeta == NULL) {
+ code = TSDB_CODE_MND_OUT_OF_MEMORY;
+ goto _end;
+ }
+
+ char* msg = (char*) pMultiMeta + pMultiMeta->contLen;
for(int32_t i = 0; i < numOfVgroupList; ++i) {
char* name = taosArrayGetP(pList, i);
diff --git a/src/os/inc/osDef.h b/src/os/inc/osDef.h
index 80617eec56..9176da5b8e 100644
--- a/src/os/inc/osDef.h
+++ b/src/os/inc/osDef.h
@@ -132,10 +132,9 @@ extern "C" {
#define ASSERT(x)
#endif
-#ifdef UNUSED
-#undefine UNUSED
-#endif
+#ifndef UNUSED
#define UNUSED(x) ((void)(x))
+#endif
#ifdef UNUSED_FUNC
#undefine UNUSED_FUNC
diff --git a/src/os/inc/osSysinfo.h b/src/os/inc/osSysinfo.h
index d136f9664c..5f0bc2950c 100644
--- a/src/os/inc/osSysinfo.h
+++ b/src/os/inc/osSysinfo.h
@@ -27,18 +27,20 @@ typedef struct {
} SysDiskSize;
int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize);
-void taosGetSystemInfo();
-bool taosGetProcIO(float *readKB, float *writeKB);
-bool taosGetBandSpeed(float *bandSpeedKb);
-void taosGetDisk();
-bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage);
-bool taosGetProcMemory(float *memoryUsedMB);
-bool taosGetSysMemory(float *memoryUsedMB);
-void taosPrintOsInfo();
-int taosSystem(const char *cmd);
-void taosKillSystem();
-bool taosGetSystemUid(char *uid);
-char * taosGetCmdlineByPID(int pid);
+
+void taosGetSystemInfo();
+bool taosGetProcIO(float *readKB, float *writeKB);
+bool taosGetBandSpeed(float *bandSpeedKb);
+void taosGetDisk();
+bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) ;
+bool taosGetProcMemory(float *memoryUsedMB) ;
+bool taosGetSysMemory(float *memoryUsedMB);
+void taosPrintOsInfo();
+void taosPrintDiskInfo();
+int taosSystem(const char * cmd) ;
+void taosKillSystem();
+bool taosGetSystemUid(char *uid);
+char *taosGetCmdlineByPID(int pid);
void taosSetCoreDump();
diff --git a/src/os/src/darwin/dwSysInfo.c b/src/os/src/darwin/dwSysInfo.c
index b3c9bd528e..10e0acc130 100644
--- a/src/os/src/darwin/dwSysInfo.c
+++ b/src/os/src/darwin/dwSysInfo.c
@@ -136,9 +136,6 @@ void taosPrintOsInfo() {
// uInfo(" os openMax: %" PRId64, tsOpenMax);
// uInfo(" os streamMax: %" PRId64, tsStreamMax);
uInfo(" os numOfCores: %d", tsNumOfCores);
- uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
- uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
- uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
struct utsname buf;
@@ -154,6 +151,14 @@ void taosPrintOsInfo() {
uInfo("==================================");
}
+void taosPrintDiskInfo() {
+ uInfo("==================================");
+ uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
+ uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
+ uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
+ uInfo("==================================");
+}
+
void taosKillSystem() {
uError("function taosKillSystem, exit!");
exit(0);
diff --git a/src/os/src/detail/osSysinfo.c b/src/os/src/detail/osSysinfo.c
index d0b284e1ca..891dccaf97 100644
--- a/src/os/src/detail/osSysinfo.c
+++ b/src/os/src/detail/osSysinfo.c
@@ -506,9 +506,6 @@ void taosPrintOsInfo() {
uInfo(" os openMax: %" PRId64, tsOpenMax);
uInfo(" os streamMax: %" PRId64, tsStreamMax);
uInfo(" os numOfCores: %d", tsNumOfCores);
- uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
- uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
- uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
struct utsname buf;
@@ -523,6 +520,14 @@ void taosPrintOsInfo() {
uInfo(" os machine: %s", buf.machine);
}
+void taosPrintDiskInfo() {
+ uInfo("==================================");
+ uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
+ uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
+ uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
+ uInfo("==================================");
+}
+
void taosKillSystem() {
// SIGINT
uInfo("taosd will shut down soon");
diff --git a/src/os/src/windows/wSysinfo.c b/src/os/src/windows/wSysinfo.c
index 8a81e3079a..72793a1049 100644
--- a/src/os/src/windows/wSysinfo.c
+++ b/src/os/src/windows/wSysinfo.c
@@ -205,10 +205,15 @@ void taosGetSystemInfo() {
void taosPrintOsInfo() {
uInfo(" os numOfCores: %d", tsNumOfCores);
+ uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
+ uInfo("==================================");
+}
+
+void taosPrintDiskInfo() {
+ uInfo("==================================");
uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
- uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
uInfo("==================================");
}
diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h
index 57e7d2982f..044c538f47 100644
--- a/src/query/inc/qAggMain.h
+++ b/src/query/inc/qAggMain.h
@@ -204,7 +204,7 @@ typedef struct SAggFunctionInfo {
bool (*init)(SQLFunctionCtx *pCtx); // setup the execute environment
void (*xFunction)(SQLFunctionCtx *pCtx); // blocks version function
- void (*xFunctionF)(SQLFunctionCtx *pCtx, int32_t position); // single-row function version, todo merge with blockwise function
+// void (*xFunctionF)(SQLFunctionCtx *pCtx, int32_t position); // single-row function version, todo merge with blockwise function
// finalizer must be called after all xFunction has been executed to generated final result.
void (*xFinalize)(SQLFunctionCtx *pCtx);
diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h
index 59e32bc754..08355cf4ae 100644
--- a/src/query/inc/qExecutor.h
+++ b/src/query/inc/qExecutor.h
@@ -297,7 +297,7 @@ enum OPERATOR_TYPE_E {
OP_MultiTableAggregate = 14,
OP_MultiTableTimeInterval = 15,
OP_DummyInput = 16, //TODO remove it after fully refactor.
- OP_MultiwayMergeSort = 17, // multi-way data merge into one input stream.
+ OP_MultiwayMergeSort = 17, // multi-way data merge into one input stream.
OP_GlobalAggregate = 18, // global merge for the multi-way data sources.
OP_Filter = 19,
OP_Distinct = 20,
diff --git a/src/query/inc/qTableMeta.h b/src/query/inc/qTableMeta.h
index d2316a8c9b..0cac15875b 100644
--- a/src/query/inc/qTableMeta.h
+++ b/src/query/inc/qTableMeta.h
@@ -17,6 +17,14 @@ typedef struct SCond {
char * cond;
} SCond;
+typedef struct STblCond {
+ uint64_t uid;
+ int16_t idx; //table index
+ int32_t len; // length of tag query condition data
+ char * cond;
+} STblCond;
+
+
typedef struct SJoinNode {
uint64_t uid;
int16_t tagColId;
@@ -116,6 +124,7 @@ typedef struct SQueryInfo {
SOrderVal order;
int16_t fillType; // final result fill type
int16_t numOfTables;
+ int16_t curTableIdx;
STableMetaInfo **pTableMetaInfo;
struct STSBuf *tsBuf;
int64_t * fillVal; // default value for fill
diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y
index a54d46974a..ce2c3e3616 100644
--- a/src/query/inc/sql.y
+++ b/src/query/inc/sql.y
@@ -470,7 +470,7 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). {
//////////////////////// The SELECT statement /////////////////////////////////
%type select {SSqlNode*}
%destructor select {destroySqlNode($$);}
-select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) windowstate_option(D) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). {
+select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) windowstate_option(D) fill_opt(F) sliding_opt(S) groupby_opt(P) having_opt(N) orderby_opt(Z) slimit_opt(G) limit_opt(L). {
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &D, &S, F, &L, &G, N);
}
diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c
index bc14c75af5..8efc4aad4c 100644
--- a/src/query/src/qAggMain.c
+++ b/src/query/src/qAggMain.c
@@ -456,20 +456,6 @@ static void count_function(SQLFunctionCtx *pCtx) {
SET_VAL(pCtx, numOfElem, 1);
}
-static void count_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- char *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SET_VAL(pCtx, 1, 1);
- *((int64_t *)pCtx->pOutput) += pCtx->size;
-
- // do not need it actually
- SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx);
- pInfo->hasResult = DATA_SET_FLAG;
-}
-
static void count_func_merge(SQLFunctionCtx *pCtx) {
int64_t *pData = (int64_t *)GET_INPUT_DATA_LIST(pCtx);
for (int32_t i = 0; i < pCtx->size; ++i) {
@@ -609,46 +595,6 @@ static void do_sum(SQLFunctionCtx *pCtx) {
}
}
-static void do_sum_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SET_VAL(pCtx, 1, 1);
- int64_t *res = (int64_t*) pCtx->pOutput;
-
- if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) {
- *res += GET_INT8_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) {
- *res += GET_INT16_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) {
- *res += GET_INT32_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) {
- *res += GET_INT64_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_UTINYINT) {
- uint64_t *r = (uint64_t *)pCtx->pOutput;
- *r += GET_UINT8_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_USMALLINT) {
- uint64_t *r = (uint64_t *)pCtx->pOutput;
- *r += GET_UINT16_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_UINT) {
- uint64_t *r = (uint64_t *)pCtx->pOutput;
- *r += GET_UINT32_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_UBIGINT) {
- uint64_t *r = (uint64_t *)pCtx->pOutput;
- *r += GET_UINT64_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) {
- double *retVal = (double*) pCtx->pOutput;
- *retVal += GET_DOUBLE_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
- double *retVal = (double*) pCtx->pOutput;
- *retVal += GET_FLOAT_VAL(pData);
- }
-
- GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG;
-}
-
static void sum_function(SQLFunctionCtx *pCtx) {
do_sum(pCtx);
@@ -661,17 +607,6 @@ static void sum_function(SQLFunctionCtx *pCtx) {
}
}
-static void sum_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- do_sum_f(pCtx, index);
-
- // keep the result data in output buffer, not in the intermediate buffer
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) {
- SSumInfo *pSum = (SSumInfo *)pCtx->pOutput;
- pSum->hasResult = DATA_SET_FLAG;
- }
-}
-
static void sum_func_merge(SQLFunctionCtx *pCtx) {
int32_t notNullElems = 0;
@@ -847,53 +782,6 @@ static void avg_function(SQLFunctionCtx *pCtx) {
}
}
-static void avg_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SET_VAL(pCtx, 1, 1);
-
- // NOTE: keep the intermediate result into the interResultBuf
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
-
- SAvgInfo *pAvgInfo = (SAvgInfo *)GET_ROWCELL_INTERBUF(pResInfo);
-
- if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) {
- pAvgInfo->sum += GET_INT8_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) {
- pAvgInfo->sum += GET_INT16_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) {
- pAvgInfo->sum += GET_INT32_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) {
- pAvgInfo->sum += GET_INT64_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) {
- pAvgInfo->sum += GET_DOUBLE_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
- pAvgInfo->sum += GET_FLOAT_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_UTINYINT) {
- pAvgInfo->sum += GET_UINT8_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_USMALLINT) {
- pAvgInfo->sum += GET_UINT16_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_UINT) {
- pAvgInfo->sum += GET_UINT32_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_UBIGINT) {
- pAvgInfo->sum += GET_UINT64_VAL(pData);
- }
-
- // restore sum and count of elements
- pAvgInfo->num += 1;
-
- // set has result flag
- pResInfo->hasResult = DATA_SET_FLAG;
-
- // keep the data into the final output buffer for super table query since this execution may be the last one
- if (pCtx->stableQuery) {
- memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SAvgInfo));
- }
-}
-
static void avg_func_merge(SQLFunctionCtx *pCtx) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
@@ -1307,78 +1195,6 @@ static void max_func_merge(SQLFunctionCtx *pCtx) {
}
}
-static void minMax_function_f(SQLFunctionCtx *pCtx, int32_t index, int32_t isMin) {
- char *pData = GET_INPUT_DATA(pCtx, index);
- TSKEY key = GET_TS_DATA(pCtx, index);
-
- int32_t num = 0;
- if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) {
- int8_t *output = (int8_t *)pCtx->pOutput;
- int8_t i = GET_INT8_VAL(pData);
-
- UPDATE_DATA(pCtx, *output, i, num, isMin, key);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) {
- int16_t *output = (int16_t*) pCtx->pOutput;
- int16_t i = GET_INT16_VAL(pData);
-
- UPDATE_DATA(pCtx, *output, i, num, isMin, key);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) {
- int32_t *output = (int32_t*) pCtx->pOutput;
- int32_t i = GET_INT32_VAL(pData);
-
- UPDATE_DATA(pCtx, *output, i, num, isMin, key);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) {
- int64_t *output = (int64_t*) pCtx->pOutput;
- int64_t i = GET_INT64_VAL(pData);
-
- UPDATE_DATA(pCtx, *output, i, num, isMin, key);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
- float *output = (float*) pCtx->pOutput;
- float i = GET_FLOAT_VAL(pData);
-
- UPDATE_DATA(pCtx, *output, i, num, isMin, key);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) {
- double *output = (double*) pCtx->pOutput;
- double i = GET_DOUBLE_VAL(pData);
-
- UPDATE_DATA(pCtx, *output, i, num, isMin, key);
- }
-
- GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG;
-}
-
-static void max_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- char *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SET_VAL(pCtx, 1, 1);
- minMax_function_f(pCtx, index, 0);
-
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) {
- char *flag = pCtx->pOutput + pCtx->inputBytes;
- *flag = DATA_SET_FLAG;
- }
-}
-
-static void min_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- char *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SET_VAL(pCtx, 1, 1);
- minMax_function_f(pCtx, index, 1);
-
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) {
- char *flag = pCtx->pOutput + pCtx->inputBytes;
- *flag = DATA_SET_FLAG;
- }
-}
-
#define LOOP_STDDEV_IMPL(type, r, d, ctx, delta, _type, num) \
for (int32_t i = 0; i < (ctx)->size; ++i) { \
if ((ctx)->hasNull && isNull((char *)&((type *)d)[i], (_type))) { \
@@ -1472,114 +1288,6 @@ static void stddev_function(SQLFunctionCtx *pCtx) {
}
}
-static void stddev_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- // the second stage to calculate standard deviation
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- SStddevInfo *pStd = GET_ROWCELL_INTERBUF(pResInfo);
-
- if (pCtx->currentStage == REPEAT_SCAN && pStd->stage == 0) {
- pStd->stage++;
- avg_finalizer(pCtx);
-
- pResInfo->initialized = true; // set it initialized to avoid re-initialization
-
- // save average value into tmpBuf, for second stage scan
- SAvgInfo *pAvg = GET_ROWCELL_INTERBUF(pResInfo);
-
- pStd->avg = GET_DOUBLE_VAL(pCtx->pOutput);
- assert((isnan(pAvg->sum) && pAvg->num == 0) || (pStd->num == pAvg->num && pStd->avg == pAvg->sum));
- }
-
- /* the first stage is to calculate average value */
- if (pStd->stage == 0) {
- avg_function_f(pCtx, index);
- } else if (pStd->num > 0) {
- double avg = pStd->avg;
- void * pData = GET_INPUT_DATA(pCtx, index);
-
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- switch (pCtx->inputType) {
- case TSDB_DATA_TYPE_INT: {
- pStd->res += POW2(GET_INT32_VAL(pData) - avg);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- pStd->res += POW2(GET_FLOAT_VAL(pData) - avg);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- pStd->res += POW2(GET_DOUBLE_VAL(pData) - avg);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- pStd->res += POW2(GET_INT64_VAL(pData) - avg);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- pStd->res += POW2(GET_INT16_VAL(pData) - avg);
- break;
- }
- case TSDB_DATA_TYPE_TINYINT: {
- pStd->res += POW2(GET_INT8_VAL(pData) - avg);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- pStd->res += POW2(GET_UINT32_VAL(pData) - avg);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- pStd->res += POW2(GET_UINT64_VAL(pData) - avg);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- pStd->res += POW2(GET_UINT16_VAL(pData) - avg);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- pStd->res += POW2(GET_UINT8_VAL(pData) - avg);
- break;
- }
- default:
- qError("stddev function not support data type:%d", pCtx->inputType);
- }
-
- SET_VAL(pCtx, 1, 1);
- }
-}
-
-static UNUSED_FUNC void stddev_next_step(SQLFunctionCtx *pCtx) {
- /*
- * the stddevInfo and the average info struct share the same buffer area
- * And the position of each element in their struct is exactly the same matched
- */
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- SStddevInfo *pStd = GET_ROWCELL_INTERBUF(pResInfo);
-
- if (pStd->stage == 0) {
- /*
- * stddev is calculated in two stage:
- * 1. get the average value of all data;
- * 2. get final result, based on the average values;
- * so, if this routine is in second stage, no further step is required
- */
- pStd->stage++;
- avg_finalizer(pCtx);
-
- pResInfo->initialized = true; // set it initialized to avoid re-initialization
-
- // save average value into tmpBuf, for second stage scan
- SAvgInfo *pAvg = GET_ROWCELL_INTERBUF(pResInfo);
-
- pStd->avg = GET_DOUBLE_VAL(pCtx->pOutput);
- assert((isnan(pAvg->sum) && pAvg->num == 0) || (pStd->num == pAvg->num && pStd->avg == pAvg->sum));
- } else {
- pResInfo->complete = true;
- }
-}
-
static void stddev_finalizer(SQLFunctionCtx *pCtx) {
SStddevInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
@@ -1696,97 +1404,6 @@ static void stddev_dst_function(SQLFunctionCtx *pCtx) {
memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)), sizeof(SAvgInfo));
}
-static void stddev_dst_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- // the second stage to calculate standard deviation
- SStddevdstInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
- double *retVal = &pStd->res;
-
- // all data are null, no need to proceed
- SArray* resList = (SArray*) pCtx->param[0].pz;
- if (resList == NULL) {
- return;
- }
-
- // find the correct group average results according to the tag value
- int32_t len = (int32_t) taosArrayGetSize(resList);
- assert(len > 0);
-
- double avg = 0;
- if (len == 1) {
- SResPair* p = taosArrayGet(resList, 0);
- avg = p->avg;
- } else { // todo opt performance by using iterator since the timestamp lsit is matched with the output result
- SResPair* p = bsearch(&pCtx->startTs, resList->pData, len, sizeof(SResPair), tsCompare);
- assert(p != NULL);
-
- avg = p->avg;
- }
-
- int32_t num = 0;
- switch (pCtx->inputType) {
- case TSDB_DATA_TYPE_INT: {
- for (int32_t i = 0; i < pCtx->size; ++i) {
- if (pCtx->hasNull && isNull((const char*) (&((int32_t *)pData)[i]), pCtx->inputType)) {
- continue;
- }
- num += 1;
- *retVal += POW2(((int32_t *)pData)[i] - avg);
- }
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- LOOP_STDDEV_IMPL(float, *retVal, pData, pCtx, avg, pCtx->inputType, num);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- LOOP_STDDEV_IMPL(double, *retVal, pData, pCtx, avg, pCtx->inputType, num);
- break;
- }
- case TSDB_DATA_TYPE_TINYINT: {
- LOOP_STDDEV_IMPL(int8_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- LOOP_STDDEV_IMPL(int8_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- LOOP_STDDEV_IMPL(int16_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- LOOP_STDDEV_IMPL(uint16_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- LOOP_STDDEV_IMPL(uint32_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- LOOP_STDDEV_IMPL(int64_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- LOOP_STDDEV_IMPL(uint64_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
- break;
- }
- default:
- qError("stddev function not support data type:%d", pCtx->inputType);
- }
-
- pStd->num += num;
- SET_VAL(pCtx, num, 1);
-
- // copy to the final output buffer for super table
- memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)), sizeof(SAvgInfo));
-}
-
-
static void stddev_dst_merge(SQLFunctionCtx *pCtx) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
SStddevdstInfo* pRes = GET_ROWCELL_INTERBUF(pResInfo);
@@ -1833,7 +1450,7 @@ static bool first_last_function_setup(SQLFunctionCtx *pCtx) {
// todo opt for null block
static void first_function(SQLFunctionCtx *pCtx) {
- if (pCtx->order == TSDB_ORDER_DESC /*|| pCtx->preAggVals.dataBlockLoaded == false*/) {
+ if (pCtx->order == TSDB_ORDER_DESC) {
return;
}
@@ -1862,27 +1479,6 @@ static void first_function(SQLFunctionCtx *pCtx) {
SET_VAL(pCtx, notNullElems, 1);
}
-static void first_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- if (pCtx->order == TSDB_ORDER_DESC) {
- return;
- }
-
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SET_VAL(pCtx, 1, 1);
- memcpy(pCtx->pOutput, pData, pCtx->inputBytes);
-
- TSKEY ts = GET_TS_DATA(pCtx, index);
- DO_UPDATE_TAG_COLUMNS(pCtx, ts);
-
- SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx);
- pInfo->hasResult = DATA_SET_FLAG;
- pInfo->complete = true; // get the first not-null data, completed
-}
-
static void first_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t index) {
int64_t *timestamp = GET_TS_LIST(pCtx);
@@ -1932,21 +1528,6 @@ static void first_dist_function(SQLFunctionCtx *pCtx) {
SET_VAL(pCtx, notNullElems, 1);
}
-static void first_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- char *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- if (pCtx->order == TSDB_ORDER_DESC) {
- return;
- }
-
- first_data_assign_impl(pCtx, pData, index);
-
- SET_VAL(pCtx, 1, 1);
-}
-
static void first_dist_func_merge(SQLFunctionCtx *pCtx) {
assert(pCtx->stableQuery);
@@ -1978,70 +1559,55 @@ static void first_dist_func_merge(SQLFunctionCtx *pCtx) {
* least one data in this block that is not null.(TODO opt for this case)
*/
static void last_function(SQLFunctionCtx *pCtx) {
- if (pCtx->order != pCtx->param[0].i64/* || pCtx->preAggVals.dataBlockLoaded == false*/) {
- return;
- }
-
- int32_t notNullElems = 0;
-
- for (int32_t i = pCtx->size - 1; i >= 0; --i) {
- char *data = GET_INPUT_DATA(pCtx, i);
- if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
- if (!pCtx->requireNull) {
- continue;
- }
- }
-
- memcpy(pCtx->pOutput, data, pCtx->inputBytes);
-
- TSKEY ts = GET_TS_DATA(pCtx, i);
- DO_UPDATE_TAG_COLUMNS(pCtx, ts);
-
- SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx);
- pInfo->hasResult = DATA_SET_FLAG;
-
- pInfo->complete = true; // set query completed on this column
- notNullElems++;
- break;
- }
-
- SET_VAL(pCtx, notNullElems, 1);
-}
-
-static void last_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- // the scan order is not the required order, ignore it
if (pCtx->order != pCtx->param[0].i64) {
return;
}
+ SResultRowCellInfo* pResInfo = GET_RES_INFO(pCtx);
+
+ int32_t notNullElems = 0;
if (pCtx->order == TSDB_ORDER_DESC) {
- SET_VAL(pCtx, 1, 1);
- memcpy(pCtx->pOutput, pData, pCtx->inputBytes);
- TSKEY ts = GET_TS_DATA(pCtx, index);
- DO_UPDATE_TAG_COLUMNS(pCtx, ts);
+ for (int32_t i = pCtx->size - 1; i >= 0; --i) {
+ char *data = GET_INPUT_DATA(pCtx, i);
+ if (pCtx->hasNull && isNull(data, pCtx->inputType) && (!pCtx->requireNull)) {
+ continue;
+ }
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- pResInfo->hasResult = DATA_SET_FLAG;
- pResInfo->complete = true; // set query completed
- } else { // in case of ascending order check, all data needs to be checked
- SResultRowCellInfo* pResInfo = GET_RES_INFO(pCtx);
- TSKEY ts = GET_TS_DATA(pCtx, index);
+ memcpy(pCtx->pOutput, data, pCtx->inputBytes);
- char* buf = GET_ROWCELL_INTERBUF(pResInfo);
- if (pResInfo->hasResult != DATA_SET_FLAG || (*(TSKEY*)buf) < ts) {
- pResInfo->hasResult = DATA_SET_FLAG;
- memcpy(pCtx->pOutput, pData, pCtx->inputBytes);
-
- *(TSKEY*)buf = ts;
+ TSKEY ts = GET_TS_DATA(pCtx, i);
DO_UPDATE_TAG_COLUMNS(pCtx, ts);
+
+ pResInfo->hasResult = DATA_SET_FLAG;
+ pResInfo->complete = true; // set query completed on this column
+ notNullElems++;
+ break;
+ }
+ } else { // ascending order
+ for (int32_t i = pCtx->size - 1; i >= 0; --i) {
+ char *data = GET_INPUT_DATA(pCtx, i);
+ if (pCtx->hasNull && isNull(data, pCtx->inputType) && (!pCtx->requireNull)) {
+ continue;
+ }
+
+ TSKEY ts = GET_TS_DATA(pCtx, i);
+
+ char* buf = GET_ROWCELL_INTERBUF(pResInfo);
+ if (pResInfo->hasResult != DATA_SET_FLAG || (*(TSKEY*)buf) < ts) {
+ pResInfo->hasResult = DATA_SET_FLAG;
+ memcpy(pCtx->pOutput, data, pCtx->inputBytes);
+
+ *(TSKEY*)buf = ts;
+ DO_UPDATE_TAG_COLUMNS(pCtx, ts);
+ }
+
+ notNullElems++;
+ break;
}
}
+
+ SET_VAL(pCtx, notNullElems, 1);
}
static void last_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t index) {
@@ -2092,29 +1658,6 @@ static void last_dist_function(SQLFunctionCtx *pCtx) {
SET_VAL(pCtx, notNullElems, 1);
}
-static void last_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- if (pCtx->size == 0) {
- return;
- }
-
- char *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- /*
- * 1. for scan data in asc order, no need to check data
- * 2. for data blocks that are not loaded, no need to check data
- */
- if (pCtx->order != pCtx->param[0].i64) {
- return;
- }
-
- last_data_assign_impl(pCtx, pData, index);
-
- SET_VAL(pCtx, 1, 1);
-}
-
/*
* in the secondary merge(local reduce), the output is limited by the
* final output size, so the main difference between last_dist_func_merge and second_merge
@@ -2616,28 +2159,6 @@ static void top_function(SQLFunctionCtx *pCtx) {
}
}
-static void top_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- char *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- STopBotInfo *pRes = getTopBotOutputInfo(pCtx);
- assert(pRes->num >= 0);
-
- if ((void *)pRes->res[0] != (void *)((char *)pRes + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) {
- buildTopBotStruct(pRes, pCtx);
- }
-
- SET_VAL(pCtx, 1, 1);
- TSKEY ts = GET_TS_DATA(pCtx, index);
-
- do_top_function_add(pRes, (int32_t)pCtx->param[0].i64, pData, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0);
-
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- pResInfo->hasResult = DATA_SET_FLAG;
-}
-
static void top_func_merge(SQLFunctionCtx *pCtx) {
STopBotInfo *pInput = (STopBotInfo *)GET_INPUT_DATA_LIST(pCtx);
@@ -2695,27 +2216,6 @@ static void bottom_function(SQLFunctionCtx *pCtx) {
}
}
-static void bottom_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- char *pData = GET_INPUT_DATA(pCtx, index);
- TSKEY ts = GET_TS_DATA(pCtx, index);
-
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- STopBotInfo *pRes = getTopBotOutputInfo(pCtx);
-
- if ((void *)pRes->res[0] != (void *)((char *)pRes + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) {
- buildTopBotStruct(pRes, pCtx);
- }
-
- SET_VAL(pCtx, 1, 1);
- do_bottom_function_add(pRes, (int32_t)pCtx->param[0].i64, pData, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0);
-
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- pResInfo->hasResult = DATA_SET_FLAG;
-}
-
static void bottom_func_merge(SQLFunctionCtx *pCtx) {
STopBotInfo *pInput = (STopBotInfo *)GET_INPUT_DATA_LIST(pCtx);
@@ -2868,50 +2368,6 @@ static void percentile_function(SQLFunctionCtx *pCtx) {
pResInfo->hasResult = DATA_SET_FLAG;
}
-static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- SPercentileInfo *pInfo = (SPercentileInfo *)GET_ROWCELL_INTERBUF(pResInfo);
-
- if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
- pInfo->stage += 1;
-
- // all data are null, set it completed
- if (pInfo->numOfElems == 0) {
- pResInfo->complete = true;
-
- return;
- } else {
- pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
- }
- }
-
- if (pInfo->stage == 0) {
- double v = 0;
- GET_TYPED_DATA(v, double, pCtx->inputType, pData);
-
- if (v < GET_DOUBLE_VAL(&pInfo->minval)) {
- SET_DOUBLE_VAL(&pInfo->minval, v);
- }
-
- if (v > GET_DOUBLE_VAL(&pInfo->maxval)) {
- SET_DOUBLE_VAL(&pInfo->maxval, v);
- }
-
- pInfo->numOfElems += 1;
- return;
- }
-
- tMemBucketPut(pInfo->pMemBucket, pData, 1);
-
- SET_VAL(pCtx, 1, 1);
- pResInfo->hasResult = DATA_SET_FLAG;
-}
-
static void percentile_finalizer(SQLFunctionCtx *pCtx) {
double v = pCtx->param[0].nType == TSDB_DATA_TYPE_INT ? pCtx->param[0].i64 : pCtx->param[0].dKey;
@@ -2930,24 +2386,6 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) {
doFinalizer(pCtx);
}
-static UNUSED_FUNC void percentile_next_step(SQLFunctionCtx *pCtx) {
- SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx);
- SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
-
- if (pInfo->stage == 0) {
- // all data are null, set it completed
- if (pInfo->numOfElems == 0) {
- pResInfo->complete = true;
- } else {
- pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
- }
-
- pInfo->stage += 1;
- } else {
- pResInfo->complete = true;
- }
-}
-
//////////////////////////////////////////////////////////////////////////////////
static void buildHistogramInfo(SAPercentileInfo* pInfo) {
pInfo->pHisto = (SHistogramInfo*) ((char*) pInfo + sizeof(SAPercentileInfo));
@@ -3012,24 +2450,6 @@ static void apercentile_function(SQLFunctionCtx *pCtx) {
}
}
-static void apercentile_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx);
- SAPercentileInfo *pInfo = getAPerctInfo(pCtx);
-
- double v = 0;
- GET_TYPED_DATA(v, double, pCtx->inputType, pData);
-
- tHistogramAdd(&pInfo->pHisto, v);
-
- SET_VAL(pCtx, 1, 1);
- pResInfo->hasResult = DATA_SET_FLAG;
-}
-
static void apercentile_func_merge(SQLFunctionCtx *pCtx) {
SAPercentileInfo *pInput = (SAPercentileInfo *)GET_INPUT_DATA_LIST(pCtx);
@@ -3213,60 +2633,6 @@ static void leastsquares_function(SQLFunctionCtx *pCtx) {
SET_VAL(pCtx, numOfElem, 1);
}
-static void leastsquares_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- SLeastsquaresInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
-
- double(*param)[3] = pInfo->mat;
-
- switch (pCtx->inputType) {
- case TSDB_DATA_TYPE_INT: {
- int32_t *p = pData;
- LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey);
- break;
- }
- case TSDB_DATA_TYPE_TINYINT: {
- int8_t *p = pData;
- LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- int16_t *p = pData;
- LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- int64_t *p = pData;
- LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- float *p = pData;
- LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- double *p = pData;
- LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey);
- break;
- }
- default:
- qError("error data type in leastsquare function:%d", pCtx->inputType);
- };
-
- SET_VAL(pCtx, 1, 1);
- pInfo->num += 1;
-
- if (pInfo->num > 0) {
- pResInfo->hasResult = DATA_SET_FLAG;
- }
-}
-
static void leastsquares_finalizer(SQLFunctionCtx *pCtx) {
// no data in query
SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx);
@@ -3304,25 +2670,23 @@ static void date_col_output_function(SQLFunctionCtx *pCtx) {
*(int64_t *)(pCtx->pOutput) = pCtx->startTs;
}
-static FORCE_INLINE void date_col_output_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- date_col_output_function(pCtx);
-}
-
static void col_project_function(SQLFunctionCtx *pCtx) {
// the number of output rows should not affect the final number of rows, so set it to be 0
if (pCtx->numOfParams == 2) {
return;
}
+
+ // only one row is required.
if (pCtx->param[0].i64 == 1) {
SET_VAL(pCtx, pCtx->size, 1);
} else {
INC_INIT_VAL(pCtx, pCtx->size);
}
-
char *pData = GET_INPUT_DATA_LIST(pCtx);
if (pCtx->order == TSDB_ORDER_ASC) {
- memcpy(pCtx->pOutput, pData, (size_t) pCtx->size * pCtx->inputBytes);
+ int32_t numOfRows = (pCtx->param[0].i64 == 1)? 1:pCtx->size;
+ memcpy(pCtx->pOutput, pData, (size_t) numOfRows * pCtx->inputBytes);
} else {
for(int32_t i = 0; i < pCtx->size; ++i) {
memcpy(pCtx->pOutput + (pCtx->size - 1 - i) * pCtx->inputBytes, pData + i * pCtx->inputBytes,
@@ -3331,22 +2695,6 @@ static void col_project_function(SQLFunctionCtx *pCtx) {
}
}
-static void col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- if (pCtx->numOfParams == 2) { // the number of output rows should not affect the final number of rows, so set it to be 0
- return;
- }
-
- // only one output
- if (pCtx->param[0].i64 == 1 && pResInfo->numOfRes >= 1) {
- return;
- }
-
- INC_INIT_VAL(pCtx, 1);
- char *pData = GET_INPUT_DATA(pCtx, index);
- memcpy(pCtx->pOutput, pData, pCtx->inputBytes);
-}
-
/**
* only used for tag projection query in select clause
* @param pCtx
@@ -3368,13 +2716,6 @@ static void tag_project_function(SQLFunctionCtx *pCtx) {
}
}
-static void tag_project_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- INC_INIT_VAL(pCtx, 1);
-
- tVariantDump(&pCtx->tag, pCtx->pOutput, pCtx->tag.nType, true);
- pCtx->pOutput += pCtx->outputBytes;
-}
-
/**
* used in group by clause. when applying group by tags, the tags value is
* assign by using tag function.
@@ -3393,11 +2734,6 @@ static void tag_function(SQLFunctionCtx *pCtx) {
}
}
-static void tag_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- SET_VAL(pCtx, 1, 1);
- tVariantDump(&pCtx->tag, pCtx->pOutput, pCtx->outputType, true);
-}
-
static void copy_function(SQLFunctionCtx *pCtx) {
SET_VAL(pCtx, pCtx->size, 1);
@@ -3793,61 +3129,6 @@ static void diff_function(SQLFunctionCtx *pCtx) {
}
}
-static void diff_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- char *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- // the output start from the second source element
- if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is set
- GET_RES_INFO(pCtx)->numOfRes += 1;
- }
-
- int32_t step = 1/*GET_FORWARD_DIRECTION_FACTOR(pCtx->order)*/;
-
- switch (pCtx->inputType) {
- case TSDB_DATA_TYPE_INT: {
- if (pCtx->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet
- pCtx->param[1].nType = pCtx->inputType;
- pCtx->param[1].i64 = *(int32_t *)pData;
- } else {
- *(int32_t *)pCtx->pOutput = *(int32_t *)pData - (int32_t)pCtx->param[1].i64;
- pCtx->param[1].i64 = *(int32_t *)pData;
- *(int64_t *)pCtx->ptsOutputBuf = GET_TS_DATA(pCtx, index);
- }
- break;
- };
- case TSDB_DATA_TYPE_BIGINT: {
- DIFF_IMPL(pCtx, pData, int64_t);
- break;
- };
- case TSDB_DATA_TYPE_DOUBLE: {
- DIFF_IMPL(pCtx, pData, double);
- break;
- };
- case TSDB_DATA_TYPE_FLOAT: {
- DIFF_IMPL(pCtx, pData, float);
- break;
- };
- case TSDB_DATA_TYPE_SMALLINT: {
- DIFF_IMPL(pCtx, pData, int16_t);
- break;
- };
- case TSDB_DATA_TYPE_TINYINT: {
- DIFF_IMPL(pCtx, pData, int8_t);
- break;
- };
- default:
- qError("error input type");
- }
-
- if (GET_RES_INFO(pCtx)->numOfRes > 0) {
- pCtx->pOutput += pCtx->outputBytes * step;
- pCtx->ptsOutputBuf = (char *)pCtx->ptsOutputBuf + TSDB_KEYSIZE * step;
- }
-}
-
char *getArithColumnData(void *param, const char* name, int32_t colId) {
SArithmeticSupport *pSupport = (SArithmeticSupport *)param;
@@ -3870,16 +3151,6 @@ static void arithmetic_function(SQLFunctionCtx *pCtx) {
arithmeticTreeTraverse(sas->pExprInfo->pExpr, pCtx->size, pCtx->pOutput, sas, pCtx->order, getArithColumnData);
}
-static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- INC_INIT_VAL(pCtx, 1);
- SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[1].pz;
-
- sas->offset = index;
- arithmeticTreeTraverse(sas->pExprInfo->pExpr, 1, pCtx->pOutput, sas, pCtx->order, getArithColumnData);
-
- pCtx->pOutput += pCtx->outputBytes;
-}
-
#define LIST_MINMAX_N(ctx, minOutput, maxOutput, elemCnt, data, type, tsdbType, numOfNotNullElem) \
{ \
type *inputData = (type *)data; \
@@ -3998,49 +3269,6 @@ static void spread_function(SQLFunctionCtx *pCtx) {
}
}
-static void spread_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SET_VAL(pCtx, 1, 1);
-
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- SSpreadInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
-
- double val = 0.0;
- if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) {
- val = GET_INT8_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) {
- val = GET_INT16_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) {
- val = GET_INT32_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT || pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) {
- val = (double)(GET_INT64_VAL(pData));
- } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) {
- val = GET_DOUBLE_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
- val = GET_FLOAT_VAL(pData);
- }
-
- // keep the result data in output buffer, not in the intermediate buffer
- if (val > pInfo->max) {
- pInfo->max = val;
- }
-
- if (val < pInfo->min) {
- pInfo->min = val;
- }
-
- pResInfo->hasResult = DATA_SET_FLAG;
- pInfo->hasResult = DATA_SET_FLAG;
-
- if (pCtx->stableQuery) {
- memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SSpreadInfo));
- }
-}
-
/*
* here we set the result value back to the intermediate buffer, to apply the finalize the function
* the final result is generated in spread_function_finalizer
@@ -4393,26 +3621,6 @@ static void twa_function(SQLFunctionCtx *pCtx) {
}
}
-static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- int32_t notNullElems = twa_function_impl(pCtx, index, 1);
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
-
- SET_VAL(pCtx, notNullElems, 1);
-
- if (notNullElems > 0) {
- pResInfo->hasResult = DATA_SET_FLAG;
- }
-
- if (pCtx->stableQuery) {
- memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(STwaInfo));
- }
-}
-
/*
* To copy the input to interResBuf to avoid the input buffer space be over writen
* by next input data. The TWA function only applies to each table, so no merge procedure
@@ -4590,23 +3798,6 @@ static void ts_comp_function(SQLFunctionCtx *pCtx) {
pResInfo->hasResult = DATA_SET_FLAG;
}
-static void ts_comp_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- STSCompInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
-
- STSBuf *pTSbuf = pInfo->pTSBuf;
-
- tsBufAppend(pTSbuf, (int32_t)pCtx->param[0].i64, &pCtx->tag, pData, TSDB_KEYSIZE);
- SET_VAL(pCtx, pCtx->size, 1);
-
- pResInfo->hasResult = DATA_SET_FLAG;
-}
-
static void ts_comp_finalize(SQLFunctionCtx *pCtx) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
@@ -4736,46 +3927,6 @@ static void rate_function(SQLFunctionCtx *pCtx) {
}
}
-static void rate_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- // NOTE: keep the intermediate result into the interResultBuf
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- SRateInfo *pRateInfo = (SRateInfo *)GET_ROWCELL_INTERBUF(pResInfo);
- TSKEY *primaryKey = GET_TS_LIST(pCtx);
-
- double v = 0;
- GET_TYPED_DATA(v, double, pCtx->inputType, pData);
-
- if ((INT64_MIN == pRateInfo->firstValue) || (INT64_MIN == pRateInfo->firstKey)) {
- pRateInfo->firstValue = v;
- pRateInfo->firstKey = primaryKey[index];
- }
-
- if (INT64_MIN == pRateInfo->lastValue) {
- pRateInfo->lastValue = v;
- } else if (v < pRateInfo->lastValue) {
- pRateInfo->correctionValue += pRateInfo->lastValue;
- }
-
- pRateInfo->lastValue = v;
- pRateInfo->lastKey = primaryKey[index];
-
- SET_VAL(pCtx, 1, 1);
-
- // set has result flag
- pRateInfo->hasResult = DATA_SET_FLAG;
- pResInfo->hasResult = DATA_SET_FLAG;
-
- // keep the data into the final output buffer for super table query since this execution may be the last one
- if (pCtx->stableQuery) {
- memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo));
- }
-}
-
static void rate_func_copy(SQLFunctionCtx *pCtx) {
assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY);
@@ -4793,7 +3944,7 @@ static void rate_finalizer(SQLFunctionCtx *pCtx) {
return;
}
- *(double*) pCtx->pOutput = do_calc_rate(pRateInfo, TSDB_TICK_PER_SECOND(pCtx->param[0].i64));
+ *(double*) pCtx->pOutput = do_calc_rate(pRateInfo, (double) TSDB_TICK_PER_SECOND(pCtx->param[0].i64));
// cannot set the numOfIteratedElems again since it is set during previous iteration
pResInfo->numOfRes = 1;
@@ -4846,39 +3997,6 @@ static void irate_function(SQLFunctionCtx *pCtx) {
}
}
-static void irate_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- // NOTE: keep the intermediate result into the interResultBuf
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- SRateInfo *pRateInfo = (SRateInfo *)GET_ROWCELL_INTERBUF(pResInfo);
- TSKEY *primaryKey = GET_TS_LIST(pCtx);
-
- double v = 0;
- GET_TYPED_DATA(v, double, pCtx->inputType, pData);
-
- pRateInfo->firstKey = pRateInfo->lastKey;
- pRateInfo->firstValue = pRateInfo->lastValue;
-
- pRateInfo->lastValue = v;
- pRateInfo->lastKey = primaryKey[index];
-
-// qDebug("====%p irate_function_f() index:%d lastValue:%" PRId64 " lastKey:%" PRId64 " firstValue:%" PRId64 " firstKey:%" PRId64, pCtx, index, pRateInfo->lastValue, pRateInfo->lastKey, pRateInfo->firstValue , pRateInfo->firstKey);
- SET_VAL(pCtx, 1, 1);
-
- // set has result flag
- pRateInfo->hasResult = DATA_SET_FLAG;
- pResInfo->hasResult = DATA_SET_FLAG;
-
- // keep the data into the final output buffer for super table query since this execution may be the last one
- if (pCtx->stableQuery) {
- memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo));
- }
-}
-
void blockInfo_func(SQLFunctionCtx* pCtx) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
STableBlockDist* pDist = (STableBlockDist*) GET_ROWCELL_INTERBUF(pResInfo);
@@ -5047,8 +4165,7 @@ void blockinfo_func_finalizer(SQLFunctionCtx* pCtx) {
* function compatible list.
* tag and ts are not involved in the compatibility check
*
- * 1. functions that are not simultaneously present with any other functions. e.g.,
- * diff/ts_z/top/bottom
+ * 1. functions that are not simultaneously present with any other functions. e.g., diff/ts_z/top/bottom
* 2. functions that are only allowed to be present only with same functions. e.g., last_row, interp
* 3. functions that are allowed to be present with other functions.
* e.g., count/sum/avg/min/max/stddev/percentile/apercentile/first/last...
@@ -5062,7 +4179,7 @@ int32_t functionCompatList[] = {
// tag, colprj, tagprj, arithmetic, diff, first_dist, last_dist, stddev_dst, interp rate irate
1, 1, 1, 1, -1, 1, 1, 1, 5, 1, 1,
// tid_tag, blk_info
- 6, 7
+ 6, 7
};
SAggFunctionInfo aAggs[] = {{
@@ -5073,7 +4190,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO,
function_setup,
count_function,
- count_function_f,
doFinalizer,
count_func_merge,
countRequired,
@@ -5086,7 +4202,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO,
function_setup,
sum_function,
- sum_function_f,
function_finalizer,
sum_func_merge,
statisRequired,
@@ -5099,7 +4214,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO,
function_setup,
avg_function,
- avg_function_f,
avg_finalizer,
avg_func_merge,
statisRequired,
@@ -5112,7 +4226,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY,
min_func_setup,
min_function,
- min_function_f,
function_finalizer,
min_func_merge,
statisRequired,
@@ -5125,7 +4238,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY,
max_func_setup,
max_function,
- max_function_f,
function_finalizer,
max_func_merge,
statisRequired,
@@ -5138,7 +4250,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF,
function_setup,
stddev_function,
- stddev_function_f,
stddev_finalizer,
noop1,
dataBlockRequired,
@@ -5151,7 +4262,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF,
percentile_function_setup,
percentile_function,
- percentile_function_f,
percentile_finalizer,
noop1,
dataBlockRequired,
@@ -5164,7 +4274,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_STABLE,
apercentile_function_setup,
apercentile_function,
- apercentile_function_f,
apercentile_finalizer,
apercentile_func_merge,
dataBlockRequired,
@@ -5177,7 +4286,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY,
function_setup,
first_function,
- first_function_f,
function_finalizer,
noop1,
firstFuncRequired,
@@ -5190,7 +4298,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY,
function_setup,
last_function,
- last_function_f,
function_finalizer,
noop1,
lastFuncRequired,
@@ -5204,7 +4311,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SELECTIVITY,
first_last_function_setup,
last_row_function,
- noop2,
last_row_finalizer,
last_dist_func_merge,
dataBlockRequired,
@@ -5218,7 +4324,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SELECTIVITY,
top_bottom_function_setup,
top_function,
- top_function_f,
top_bottom_func_finalizer,
top_func_merge,
dataBlockRequired,
@@ -5232,7 +4337,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SELECTIVITY,
top_bottom_function_setup,
bottom_function,
- bottom_function_f,
top_bottom_func_finalizer,
bottom_func_merge,
dataBlockRequired,
@@ -5245,7 +4349,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO,
spread_function_setup,
spread_function,
- spread_function_f,
spread_function_finalizer,
spread_func_merge,
countRequired,
@@ -5258,7 +4361,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
twa_function_setup,
twa_function,
- twa_function_f,
twa_function_finalizer,
twa_function_copy,
dataBlockRequired,
@@ -5271,7 +4373,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF,
leastsquares_function_setup,
leastsquares_function,
- leastsquares_function_f,
leastsquares_finalizer,
noop1,
dataBlockRequired,
@@ -5284,7 +4385,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
function_setup,
date_col_output_function,
- date_col_output_function_f,
doFinalizer,
copy_function,
noDataRequired,
@@ -5297,7 +4397,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
function_setup,
noop1,
- noop2,
doFinalizer,
copy_function,
dataBlockRequired,
@@ -5310,7 +4409,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO,
function_setup,
tag_function,
- noop2,
doFinalizer,
copy_function,
noDataRequired,
@@ -5323,7 +4421,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_NEED_TS,
ts_comp_function_setup,
ts_comp_function,
- ts_comp_function_f,
ts_comp_finalize,
copy_function,
dataBlockRequired,
@@ -5336,7 +4433,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO,
function_setup,
tag_function,
- tag_function_f,
doFinalizer,
copy_function,
noDataRequired,
@@ -5349,7 +4445,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_MO | TSDB_FUNCSTATE_NEED_TS,
function_setup,
col_project_function,
- col_project_function_f,
doFinalizer,
copy_function,
dataBlockRequired,
@@ -5362,7 +4457,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_MO,
function_setup,
tag_project_function,
- tag_project_function_f,
doFinalizer,
copy_function,
noDataRequired,
@@ -5375,7 +4469,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS,
function_setup,
arithmetic_function,
- arithmetic_function_f,
doFinalizer,
copy_function,
dataBlockRequired,
@@ -5388,7 +4481,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY,
diff_function_setup,
diff_function,
- diff_function_f,
doFinalizer,
noop1,
dataBlockRequired,
@@ -5402,7 +4494,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY,
first_last_function_setup,
first_dist_function,
- first_dist_function_f,
function_finalizer,
first_dist_func_merge,
firstDistFuncRequired,
@@ -5415,7 +4506,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY,
first_last_function_setup,
last_dist_function,
- last_dist_function_f,
function_finalizer,
last_dist_func_merge,
lastDistFuncRequired,
@@ -5428,7 +4518,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STABLE,
function_setup,
stddev_dst_function,
- stddev_dst_function_f,
stddev_dst_finalizer,
stddev_dst_merge,
dataBlockRequired,
@@ -5441,7 +4530,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS ,
function_setup,
interp_function,
- do_sum_f, // todo filter handle
doFinalizer,
copy_function,
dataBlockRequired,
@@ -5454,7 +4542,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
rate_function_setup,
rate_function,
- rate_function_f,
rate_finalizer,
rate_func_copy,
dataBlockRequired,
@@ -5467,7 +4554,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
rate_function_setup,
irate_function,
- irate_function_f,
rate_finalizer,
rate_func_copy,
dataBlockRequired,
@@ -5480,7 +4566,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE,
function_setup,
noop1,
- noop2,
noop1,
noop1,
dataBlockRequired,
@@ -5492,7 +4577,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY,
deriv_function_setup,
deriv_function,
- noop2,
doFinalizer,
noop1,
dataBlockRequired,
@@ -5505,7 +4589,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STABLE,
function_setup,
blockInfo_func,
- noop2,
blockinfo_func_finalizer,
block_func_merge,
dataBlockRequired,
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 458c956cea..afc9eab0b0 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -161,7 +161,7 @@ static void setResultOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRow* pResul
int32_t numOfCols, int32_t* rowCellInfoOffset);
void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowCellInfoOffset);
-static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId);
+static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx);
static void setBlockStatisInfo(SQLFunctionCtx *pCtx, SSDataBlock* pSDataBlock, SColIndex* pColIndex);
@@ -309,7 +309,7 @@ static bool isProjQuery(SQueryAttr *pQueryAttr) {
return true;
}
-static bool hasNullRv(SColIndex* pColIndex, SDataStatis *pStatis) {
+static bool hasNull(SColIndex* pColIndex, SDataStatis *pStatis) {
if (TSDB_COL_IS_TAG(pColIndex->flag) || TSDB_COL_IS_UD_COL(pColIndex->flag) || pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return false;
}
@@ -708,12 +708,13 @@ static int32_t getNumOfRowsInTimeWindow(SQueryRuntimeEnv* pRuntimeEnv, SDataBloc
static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, STimeWindow* pWin, int32_t offset,
int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput) {
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
- bool hasPrev = pCtx[0].preAggVals.isSet;
+ bool hasAggregates = pCtx[0].preAggVals.isSet;
for (int32_t k = 0; k < numOfOutput; ++k) {
- pCtx[k].size = forwardStep;
+ pCtx[k].size = forwardStep;
pCtx[k].startTs = pWin->skey;
+ // keep it temprarily
char* start = pCtx[k].pInput;
int32_t pos = (QUERY_IS_ASC_QUERY(pQueryAttr)) ? offset : offset - (forwardStep - 1);
@@ -725,20 +726,18 @@ static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx
pCtx[k].ptsList = &tsCol[pos];
}
- int32_t functionId = pCtx[k].functionId;
-
// not a whole block involved in query processing, statistics data can not be used
// NOTE: the original value of isSet have been changed here
if (pCtx[k].preAggVals.isSet && forwardStep < numOfTotal) {
pCtx[k].preAggVals.isSet = false;
}
- if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
- aAggs[functionId].xFunction(&pCtx[k]);
+ if (functionNeedToExecute(pRuntimeEnv, &pCtx[k])) {
+ aAggs[pCtx[k].functionId].xFunction(&pCtx[k]);
}
// restore it
- pCtx[k].preAggVals.isSet = hasPrev;
+ pCtx[k].preAggVals.isSet = hasAggregates;
pCtx[k].pInput = start;
}
}
@@ -847,9 +846,6 @@ static void setNotInterpoWindowKey(SQLFunctionCtx* pCtx, int32_t numOfOutput, in
}
}
-// window start key interpolation
-
-
static void saveDataBlockLastRow(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pDataBlockInfo, SArray* pDataBlock,
int32_t rowIndex) {
if (pDataBlock == NULL) {
@@ -975,10 +971,9 @@ static void doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SQLFunction
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
for (int32_t k = 0; k < pOperator->numOfOutput; ++k) {
- int32_t functionId = pCtx[k].functionId;
- if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
+ if (functionNeedToExecute(pRuntimeEnv, &pCtx[k])) {
pCtx[k].startTs = startTs;// this can be set during create the struct
- aAggs[functionId].xFunction(&pCtx[k]);
+ aAggs[pCtx[k].functionId].xFunction(&pCtx[k]);
}
}
}
@@ -1287,6 +1282,15 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
return;
}
+ int64_t* tsList = NULL;
+ SColumnInfoData* pFirstColData = taosArrayGet(pSDataBlock->pDataBlock, 0);
+ if (pFirstColData->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
+ tsList = (int64_t*) pFirstColData->pData;
+ }
+
+ STimeWindow w = TSWINDOW_INITIALIZER;
+
+ int32_t num = 0;
for (int32_t j = 0; j < pSDataBlock->info.rows; ++j) {
char* val = ((char*)pColInfoData->pData) + bytes * j;
if (isNull(val, type)) {
@@ -1294,33 +1298,59 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
}
// Compare with the previous row of this column, and do not set the output buffer again if they are identical.
- if (pInfo->prevData == NULL || (memcmp(pInfo->prevData, val, bytes) != 0)) {
- if (pInfo->prevData == NULL) {
- pInfo->prevData = malloc(bytes);
- }
-
+ if (pInfo->prevData == NULL) {
+ pInfo->prevData = malloc(bytes);
memcpy(pInfo->prevData, val, bytes);
+ num++;
+ continue;
+ }
- if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) {
- setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val, bytes);
+ if (IS_VAR_DATA_TYPE(type)) {
+ int32_t len = varDataLen(val);
+ if(len == varDataLen(pInfo->prevData) && memcmp(varDataVal(pInfo->prevData), varDataVal(val), len) == 0) {
+ num++;
+ continue;
}
-
- int32_t ret =
- setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, val, type, bytes, item->groupIndex);
- if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
- longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
+ } else {
+ if (memcmp(pInfo->prevData, val, bytes) == 0) {
+ num++;
+ continue;
}
}
-
- // todo opt perf
- for (int32_t k = 0; k < pOperator->numOfOutput; ++k) {
- pInfo->binfo.pCtx[k].size = 1;
- int32_t functionId = pInfo->binfo.pCtx[k].functionId;
- if (functionNeedToExecute(pRuntimeEnv, &pInfo->binfo.pCtx[k], functionId)) {
- aAggs[functionId].xFunctionF(&pInfo->binfo.pCtx[k], j);
- }
+ if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) {
+ setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo->prevData,
+ bytes);
}
+
+ int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, bytes,
+ item->groupIndex);
+ if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
+ longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
+ }
+
+ doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, j - num, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput);
+
+ num = 1;
+ memcpy(pInfo->prevData, val, bytes);
+ }
+
+ if (num > 0) {
+ char* val = ((char*)pColInfoData->pData) + bytes * (pSDataBlock->info.rows - num);
+ memcpy(pInfo->prevData, val, bytes);
+
+ if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) {
+ setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val,
+ bytes);
+ }
+
+ int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, val, type, bytes,
+ item->groupIndex);
+ if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
+ longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
+ }
+
+ doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, pSDataBlock->info.rows - num, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput);
}
}
@@ -1394,9 +1424,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf
}
static void setResultRowKey(SResultRow* pResultRow, char* pData, int16_t type) {
- int64_t v = -1;
- GET_TYPED_DATA(v, int64_t, type, pData);
- if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
+ if (IS_VAR_DATA_TYPE(type)) {
if (pResultRow->key == NULL) {
pResultRow->key = malloc(varDataTLen(pData));
varDataCopy(pResultRow->key, pData);
@@ -1404,6 +1432,9 @@ static void setResultRowKey(SResultRow* pResultRow, char* pData, int16_t type) {
assert(memcmp(pResultRow->key, pData, varDataTLen(pData)) == 0);
}
} else {
+ int64_t v = -1;
+ GET_TYPED_DATA(v, int64_t, type, pData);
+
pResultRow->win.skey = v;
pResultRow->win.ekey = v;
}
@@ -1419,7 +1450,7 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasic
// not assign result buffer yet, add new result buffer, TODO remove it
char* d = pData;
int16_t len = bytes;
- if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
+ if (IS_VAR_DATA_TYPE(type)) {
d = varDataVal(pData);
len = varDataLen(pData);
}
@@ -1461,11 +1492,12 @@ static int32_t getGroupbyColumnIndex(SGroupbyExpr *pGroupbyExpr, SSDataBlock* pD
return -1;
}
-static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId) {
+static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
// in case of timestamp column, always generated results.
+ int32_t functionId = pCtx->functionId;
if (functionId == TSDB_FUNC_TS) {
return true;
}
@@ -1505,7 +1537,7 @@ void setBlockStatisInfo(SQLFunctionCtx *pCtx, SSDataBlock* pSDataBlock, SColInde
pCtx->preAggVals.isSet = false;
}
- pCtx->hasNull = hasNullRv(pColIndex, pStatis);
+ pCtx->hasNull = hasNull(pColIndex, pStatis);
// set the statistics data for primary time stamp column
if (pCtx->functionId == TSDB_FUNC_SPREAD && pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
@@ -1751,7 +1783,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
case OP_SessionWindow: {
pRuntimeEnv->proot =
createSWindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
- setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
+ int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType;
+ if (opType != OP_DummyInput) {
+ setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
+ }
break;
}
case OP_MultiTableAggregate: {
@@ -1787,7 +1822,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
}
case OP_StateWindow: {
pRuntimeEnv->proot = createStatewindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
- setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
+ int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType;
+ if (opType != OP_DummyInput) {
+ setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
+ }
break;
}
@@ -3524,6 +3562,7 @@ int32_t setTimestampListJoinInfo(SQueryRuntimeEnv* pRuntimeEnv, tVariant* pTag,
return 0;
}
+// TODO refactor: this funciton should be merged with setparamForStableStddevColumnData function.
void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExprInfo) {
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
@@ -4729,8 +4768,7 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv,
SMultiwayMergeInfo* pInfo = calloc(1, sizeof(SMultiwayMergeInfo));
pInfo->resultRowFactor =
- (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery,
- false));
+ (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery, false));
pRuntimeEnv->scanFlag = MERGE_STAGE; // TODO init when creating pCtx
@@ -5302,6 +5340,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
doApplyFunctions(pRuntimeEnv, pBInfo->pCtx, &pInfo->curWindow, pInfo->start, pInfo->numOfRows, tsList,
pSDataBlock->info.rows, pOperator->numOfOutput);
}
+
static SSDataBlock* doStateWindowAgg(void *param, bool* newgroup) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) {
@@ -6314,7 +6353,7 @@ static bool validateQueryMsg(SQueryTableMsg *pQueryMsg) {
return true;
}
-static UNUSED_FUNC bool validateQueryTableCols(SQueriedTableInfo* pTableInfo, SSqlExpr** pExpr, int32_t numOfOutput,
+static bool validateQueryTableCols(SQueriedTableInfo* pTableInfo, SSqlExpr** pExpr, int32_t numOfOutput,
SColumnInfo* pTagCols, void* pMsg) {
int32_t numOfTotal = pTableInfo->numOfCols + pTableInfo->numOfTags;
if (pTableInfo->numOfCols < 0 || pTableInfo->numOfTags < 0 || numOfTotal > TSDB_MAX_COLUMNS) {
@@ -6514,6 +6553,7 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) {
pExprMsg->resType = htons(pExprMsg->resType);
pExprMsg->resBytes = htons(pExprMsg->resBytes);
+ pExprMsg->interBytes = htonl(pExprMsg->interBytes);
pExprMsg->functionId = htons(pExprMsg->functionId);
pExprMsg->numOfParams = htons(pExprMsg->numOfParams);
@@ -6721,41 +6761,41 @@ _cleanup:
return code;
}
- int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) {
- if (filterNum <= 0) {
- return TSDB_CODE_SUCCESS;
- }
-
- *dst = calloc(filterNum, sizeof(*src));
- if (*dst == NULL) {
- return TSDB_CODE_QRY_OUT_OF_MEMORY;
- }
-
- memcpy(*dst, src, sizeof(*src) * filterNum);
-
- for (int32_t i = 0; i < filterNum; i++) {
- if ((*dst)[i].filterstr && dst[i]->len > 0) {
- void *pz = calloc(1, (size_t)(*dst)[i].len + 1);
-
- if (pz == NULL) {
- if (i == 0) {
- free(*dst);
- } else {
- freeColumnFilterInfo(*dst, i);
- }
-
- return TSDB_CODE_QRY_OUT_OF_MEMORY;
- }
-
- memcpy(pz, (void *)src->pz, (size_t)src->len + 1);
-
- (*dst)[i].pz = (int64_t)pz;
- }
- }
-
+int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) {
+ if (filterNum <= 0) {
return TSDB_CODE_SUCCESS;
}
+ *dst = calloc(filterNum, sizeof(*src));
+ if (*dst == NULL) {
+ return TSDB_CODE_QRY_OUT_OF_MEMORY;
+ }
+
+ memcpy(*dst, src, sizeof(*src) * filterNum);
+
+ for (int32_t i = 0; i < filterNum; i++) {
+ if ((*dst)[i].filterstr && dst[i]->len > 0) {
+ void *pz = calloc(1, (size_t)(*dst)[i].len + 1);
+
+ if (pz == NULL) {
+ if (i == 0) {
+ free(*dst);
+ } else {
+ freeColumnFilterInfo(*dst, i);
+ }
+
+ return TSDB_CODE_QRY_OUT_OF_MEMORY;
+ }
+
+ memcpy(pz, (void *)src->pz, (size_t)src->len + 1);
+
+ (*dst)[i].pz = (int64_t)pz;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t buildArithmeticExprFromMsg(SExprInfo *pExprInfo, void *pQueryMsg) {
qDebug("qmsg:%p create arithmetic expr from binary", pQueryMsg);
@@ -6814,8 +6854,8 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp
for (int32_t i = 0; i < numOfOutput; ++i) {
pExprs[i].base = *pExprMsg[i];
- memset(pExprs[i].base.param, 0, sizeof(tVariant) * tListLen(pExprs[i].base.param));
+ memset(pExprs[i].base.param, 0, sizeof(tVariant) * tListLen(pExprs[i].base.param));
for (int32_t j = 0; j < pExprMsg[i]->numOfParams; ++j) {
tVariantAssign(&pExprs[i].base.param[j], &pExprMsg[i]->param[j]);
}
@@ -6890,6 +6930,7 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp
return TSDB_CODE_QRY_INVALID_MSG;
}
+ // todo remove it
if (getResultDataInfo(type, bytes, pExprs[i].base.functionId, param, &pExprs[i].base.resType, &pExprs[i].base.resBytes,
&pExprs[i].base.interBytes, 0, isSuperTable) != TSDB_CODE_SUCCESS) {
tfree(pExprs);
@@ -7376,11 +7417,16 @@ int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo*
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
STSBuf *pTsBuf = NULL;
- if (pTsBufInfo->tsLen > 0) { // open new file to save the result
- char *tsBlock = start + pTsBufInfo->tsOffset;
- pTsBuf = tsBufCreateFromCompBlocks(tsBlock, pTsBufInfo->tsNumOfBlocks, pTsBufInfo->tsLen, pTsBufInfo->tsOrder,
- pQueryAttr->vgId);
+ if (pTsBufInfo->tsLen > 0) { // open new file to save the result
+ char* tsBlock = start + pTsBufInfo->tsOffset;
+ pTsBuf = tsBufCreateFromCompBlocks(tsBlock, pTsBufInfo->tsNumOfBlocks, pTsBufInfo->tsLen, pTsBufInfo->tsOrder,
+ pQueryAttr->vgId);
+
+ if (pTsBuf == NULL) {
+ code = TSDB_CODE_QRY_NO_DISKSPACE;
+ goto _error;
+ }
tsBufResetPos(pTsBuf);
bool ret = tsBufNextPos(pTsBuf);
UNUSED(ret);
diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c
index cf67e37cf2..27c877fafa 100644
--- a/src/query/src/qTsbuf.c
+++ b/src/query/src/qTsbuf.c
@@ -2,6 +2,7 @@
#include "taoserror.h"
#include "tscompression.h"
#include "tutil.h"
+#include "queryLog.h"
static int32_t getDataStartOffset();
static void TSBufUpdateGroupInfo(STSBuf* pTSBuf, int32_t index, STSGroupBlockInfo* pBlockInfo);
@@ -633,10 +634,15 @@ int32_t STSBufUpdateHeader(STSBuf* pTSBuf, STSBufFileHeader* pHeader) {
int32_t r = fseek(pTSBuf->f, 0, SEEK_SET);
if (r != 0) {
+ qError("fseek failed, errno:%d", errno);
+ return -1;
+ }
+
+ size_t ws = fwrite(pHeader, sizeof(STSBufFileHeader), 1, pTSBuf->f);
+ if (ws != 1) {
+ qError("ts update header fwrite failed, size:%d, expected size:%d", (int32_t)ws, (int32_t)sizeof(STSBufFileHeader));
return -1;
}
-
- fwrite(pHeader, sizeof(STSBufFileHeader), 1, pTSBuf->f);
return 0;
}
@@ -853,9 +859,17 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_
TSBufUpdateGroupInfo(pTSBuf, pTSBuf->numOfGroups - 1, pBlockInfo);
int32_t ret = fseek(pTSBuf->f, pBlockInfo->offset, SEEK_SET);
- UNUSED(ret);
+ if (ret == -1) {
+ qError("fseek failed, errno:%d", errno);
+ tsBufDestroy(pTSBuf);
+ return NULL;
+ }
size_t sz = fwrite((void*)pData, 1, len, pTSBuf->f);
- UNUSED(sz);
+ if (sz != len) {
+ qError("ts data fwrite failed, write size:%d, expected size:%d", (int32_t)sz, len);
+ tsBufDestroy(pTSBuf);
+ return NULL;
+ }
pTSBuf->fileSize += len;
pTSBuf->tsOrder = order;
@@ -863,9 +877,16 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_
STSBufFileHeader header = {
.magic = TS_COMP_FILE_MAGIC, .numOfGroup = pTSBuf->numOfGroups, .tsOrder = pTSBuf->tsOrder};
- STSBufUpdateHeader(pTSBuf, &header);
+ if (STSBufUpdateHeader(pTSBuf, &header) < 0) {
+ tsBufDestroy(pTSBuf);
+ return NULL;
+ }
- taosFsync(fileno(pTSBuf->f));
+ if (taosFsync(fileno(pTSBuf->f)) == -1) {
+ qError("fsync failed, errno:%d", errno);
+ tsBufDestroy(pTSBuf);
+ return NULL;
+ }
return pTSBuf;
}
diff --git a/src/query/tests/astTest.cpp b/src/query/tests/astTest.cpp
index ce7b2f94a1..1143d00e8d 100644
--- a/src/query/tests/astTest.cpp
+++ b/src/query/tests/astTest.cpp
@@ -10,6 +10,7 @@
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wwrite-strings"
+#pragma GCC diagnostic ignored "-Wunused-function"
typedef struct ResultObj {
int32_t numOfResult;
diff --git a/src/query/tests/histogramTest.cpp b/src/query/tests/histogramTest.cpp
index 3088d6f807..0266ecffc1 100644
--- a/src/query/tests/histogramTest.cpp
+++ b/src/query/tests/histogramTest.cpp
@@ -5,6 +5,10 @@
#include "taos.h"
#include "qHistogram.h"
+
+#pragma GCC diagnostic ignored "-Wunused-function"
+#pragma GCC diagnostic ignored "-Wunused-variable"
+
namespace {
void doHistogramAddTest() {
SHistogramInfo* pHisto = NULL;
diff --git a/src/query/tests/patternMatchTest.cpp b/src/query/tests/patternMatchTest.cpp
index f3e0d3e119..091604c65c 100644
--- a/src/query/tests/patternMatchTest.cpp
+++ b/src/query/tests/patternMatchTest.cpp
@@ -6,6 +6,9 @@
#include "qAggMain.h"
#include "tcompare.h"
+#pragma GCC diagnostic ignored "-Wunused-function"
+#pragma GCC diagnostic ignored "-Wunused-variable"
+
TEST(testCase, patternMatchTest) {
SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER;
diff --git a/src/query/tests/percentileTest.cpp b/src/query/tests/percentileTest.cpp
index 104bfb3c06..1b6951201a 100644
--- a/src/query/tests/percentileTest.cpp
+++ b/src/query/tests/percentileTest.cpp
@@ -7,6 +7,9 @@
#include "qPercentile.h"
+#pragma GCC diagnostic ignored "-Wunused-function"
+#pragma GCC diagnostic ignored "-Wunused-variable"
+
namespace {
tMemBucket *createBigIntDataBucket(int32_t start, int32_t end) {
tMemBucket *pBucket = tMemBucketCreate(sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, start, end);
diff --git a/src/query/tests/resultBufferTest.cpp b/src/query/tests/resultBufferTest.cpp
index 491d75ccb9..54ac0bf4e5 100644
--- a/src/query/tests/resultBufferTest.cpp
+++ b/src/query/tests/resultBufferTest.cpp
@@ -6,6 +6,9 @@
#include "taos.h"
#include "tsdb.h"
+#pragma GCC diagnostic ignored "-Wunused-function"
+#pragma GCC diagnostic ignored "-Wunused-variable"
+
namespace {
// simple test
void simpleTest() {
diff --git a/src/query/tests/tsBufTest.cpp b/src/query/tests/tsBufTest.cpp
index dd7f03a494..04c5a15252 100644
--- a/src/query/tests/tsBufTest.cpp
+++ b/src/query/tests/tsBufTest.cpp
@@ -9,6 +9,10 @@
#include "ttoken.h"
#include "tutil.h"
+#pragma GCC diagnostic ignored "-Wunused-function"
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+
namespace {
/**
*
diff --git a/src/query/tests/unitTest.cpp b/src/query/tests/unitTest.cpp
index 33ba8200d3..e5487a061d 100644
--- a/src/query/tests/unitTest.cpp
+++ b/src/query/tests/unitTest.cpp
@@ -6,14 +6,17 @@
#include "taos.h"
#include "tsdb.h"
+#pragma GCC diagnostic ignored "-Wwrite-strings"
+#pragma GCC diagnostic ignored "-Wunused-function"
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+#pragma GCC diagnostic ignored "-Wsign-compare"
+
#include "../../client/inc/tscUtil.h"
#include "tutil.h"
#include "tvariant.h"
#include "ttokendef.h"
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wwrite-strings"
-
namespace {
int32_t testValidateName(char* name) {
SStrToken token = {0};
diff --git a/src/sync/inc/syncInt.h b/src/sync/inc/syncInt.h
index ec6dfcbc82..411e706fb1 100644
--- a/src/sync/inc/syncInt.h
+++ b/src/sync/inc/syncInt.h
@@ -132,6 +132,7 @@ void * syncRestoreData(void *param);
int32_t syncSaveIntoBuffer(SSyncPeer *pPeer, SWalHead *pHead);
void syncRestartConnection(SSyncPeer *pPeer);
void syncBroadcastStatus(SSyncNode *pNode);
+uint32_t syncResolvePeerFqdn(SSyncPeer *pPeer);
SSyncPeer *syncAcquirePeer(int64_t rid);
void syncReleasePeer(SSyncPeer *pPeer);
diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c
index d34536543c..e9b24315d0 100644
--- a/src/sync/src/syncMain.c
+++ b/src/sync/src/syncMain.c
@@ -559,7 +559,8 @@ static void syncClosePeerConn(SSyncPeer *pPeer) {
static void syncRemovePeer(SSyncPeer *pPeer) {
sInfo("%s, it is removed", pPeer->id);
- pPeer->ip = 0;
+ //pPeer->ip = 0;
+ pPeer->fqdn[0] = '\0';
syncClosePeerConn(pPeer);
//taosRemoveRef(tsPeerRefId, pPeer->rid);
syncReleasePeer(pPeer);
@@ -585,20 +586,31 @@ static void syncStopCheckPeerConn(SSyncPeer *pPeer) {
sDebug("%s, stop check peer connection", pPeer->id);
}
+uint32_t syncResolvePeerFqdn(SSyncPeer *pPeer) {
+ uint32_t ip = taosGetIpv4FromFqdn(pPeer->fqdn);
+ if (ip == 0xFFFFFFFF) {
+ sError("failed to resolve peer fqdn:%s since %s", pPeer->fqdn, strerror(errno));
+ terrno = TSDB_CODE_RPC_FQDN_ERROR;
+ return 0;
+ }
+
+ return ip;
+}
+
static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) {
- uint32_t ip = taosGetIpv4FromFqdn(pInfo->nodeFqdn);
+ /*uint32_t ip = taosGetIpv4FromFqdn(pInfo->nodeFqdn);
if (ip == 0xFFFFFFFF) {
sError("failed to add peer, can resolve fqdn:%s since %s", pInfo->nodeFqdn, strerror(errno));
terrno = TSDB_CODE_RPC_FQDN_ERROR;
return NULL;
}
-
+ */
SSyncPeer *pPeer = calloc(1, sizeof(SSyncPeer));
if (pPeer == NULL) return NULL;
pPeer->nodeId = pInfo->nodeId;
tstrncpy(pPeer->fqdn, pInfo->nodeFqdn, sizeof(pPeer->fqdn));
- pPeer->ip = ip;
+ //pPeer->ip = ip;
pPeer->port = pInfo->nodePort;
pPeer->fqdn[sizeof(pPeer->fqdn) - 1] = 0;
snprintf(pPeer->id, sizeof(pPeer->id), "vgId:%d, nodeId:%d", pNode->vgId, pPeer->nodeId);
@@ -857,14 +869,14 @@ static void syncRestartPeer(SSyncPeer *pPeer) {
sDebug("%s, peer conn is restart and set sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]);
int32_t ret = strcmp(pPeer->fqdn, tsNodeFqdn);
- if (ret > 0 || (ret == 0 && pPeer->port > tsSyncPort)) {
+ if (pPeer->nodeId == 0 || ret > 0 || (ret == 0 && pPeer->port > tsSyncPort)) {
sDebug("%s, check peer connection in 1000 ms", pPeer->id);
taosTmrReset(syncCheckPeerConnection, SYNC_CHECK_INTERVAL, (void *)pPeer->rid, tsSyncTmrCtrl, &pPeer->timer);
}
}
void syncRestartConnection(SSyncPeer *pPeer) {
- if (pPeer->ip == 0) return;
+ if (pPeer->fqdn[0] == '\0') return;
if (syncAcquirePeer(pPeer->rid) == NULL) return;
@@ -878,7 +890,7 @@ static void syncProcessSyncRequest(char *msg, SSyncPeer *pPeer) {
SSyncNode *pNode = pPeer->pSyncNode;
sInfo("%s, sync-req is received", pPeer->id);
- if (pPeer->ip == 0) return;
+ if (pPeer->fqdn[0] == '\0') return;
if (nodeRole != TAOS_SYNC_ROLE_MASTER) {
sError("%s, I am not master anymore", pPeer->id);
@@ -1090,7 +1102,7 @@ static int32_t syncProcessPeerMsg(int64_t rid, void *buffer) {
}
static int32_t syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack, int8_t type, uint16_t tranId) {
- if (pPeer->peerFd < 0 || pPeer->ip == 0) {
+ if (pPeer->peerFd < 0 || pPeer->fqdn[0] == '\0') {
sDebug("%s, failed to send status msg, restart fd:%d", pPeer->id, pPeer->peerFd);
syncRestartConnection(pPeer);
return -1;
@@ -1135,7 +1147,13 @@ static void syncSetupPeerConnection(SSyncPeer *pPeer) {
return;
}
- SOCKET connFd = taosOpenTcpClientSocket(pPeer->ip, pPeer->port, 0);
+ uint32_t ip = syncResolvePeerFqdn(pPeer);
+ if (!ip) {
+ taosTmrReset(syncCheckPeerConnection, SYNC_CHECK_INTERVAL, (void *)pPeer->rid, tsSyncTmrCtrl, &pPeer->timer);
+ return;
+ }
+
+ SOCKET connFd = taosOpenTcpClientSocket(ip, pPeer->port, 0);
if (connFd <= 0) {
sDebug("%s, failed to open tcp socket since %s", pPeer->id, strerror(errno));
taosTmrReset(syncCheckPeerConnection, SYNC_CHECK_INTERVAL, (void *)pPeer->rid, tsSyncTmrCtrl, &pPeer->timer);
diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c
index 505ba68c41..c86ab85499 100644
--- a/src/sync/src/syncRetrieve.c
+++ b/src/sync/src/syncRetrieve.c
@@ -422,6 +422,12 @@ void *syncRetrieveData(void *param) {
return NULL;
}
+ uint32_t ip = syncResolvePeerFqdn(pPeer);
+ if (!ip) {
+ syncReleasePeer(pPeer);
+ return NULL;
+ }
+
SSyncNode *pNode = pPeer->pSyncNode;
taosBlockSIGPIPE();
@@ -430,7 +436,7 @@ void *syncRetrieveData(void *param) {
if (pNode->notifyFlowCtrlFp) (*pNode->notifyFlowCtrlFp)(pNode->vgId, pPeer->numOfRetrieves);
- pPeer->syncFd = taosOpenTcpClientSocket(pPeer->ip, pPeer->port, 0);
+ pPeer->syncFd = taosOpenTcpClientSocket(ip, pPeer->port, 0);
if (pPeer->syncFd < 0) {
sError("%s, failed to open socket to sync", pPeer->id);
} else {
diff --git a/src/tfs/src/tfs.c b/src/tfs/src/tfs.c
index f78535b8ed..9dc68dcdfd 100644
--- a/src/tfs/src/tfs.c
+++ b/src/tfs/src/tfs.c
@@ -480,11 +480,13 @@ static int tfsFormatDir(char *idir, char *odir) {
return -1;
}
- if (realpath(wep.we_wordv[0], odir) == NULL) {
+ char tmp[PATH_MAX] = {0};
+ if (realpath(wep.we_wordv[0], tmp) == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
wordfree(&wep);
return -1;
}
+ strcpy(odir, tmp);
wordfree(&wep);
return 0;
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index 92edd4d160..92a0d489b3 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -3364,7 +3364,7 @@ static bool tableFilterFp(const void* pNode, void* param) {
GET_TYPED_DATA(v, uint64_t, pInfo->sch.type, val);
return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v));
}
- else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_DOUBLE) {
+ else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) {
double v;
GET_TYPED_DATA(v, double, pInfo->sch.type, val);
return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v));
diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c
index c4bd577602..442e83bb4f 100644
--- a/src/util/src/tconfig.c
+++ b/src/util/src/tconfig.c
@@ -151,7 +151,7 @@ static bool taosReadDirectoryConfig(SGlobalCfg *cfg, char *input_value) {
wordfree(&full_path);
- char tmp[1025] = {0};
+ char tmp[PATH_MAX] = {0};
if (realpath(option, tmp) != NULL) {
strcpy(option, tmp);
}
diff --git a/tests/comparisonTest/tdengine/tdengineTest.c b/tests/comparisonTest/tdengine/tdengineTest.c
index d1cf3a1f98..0de419e036 100644
--- a/tests/comparisonTest/tdengine/tdengineTest.c
+++ b/tests/comparisonTest/tdengine/tdengineTest.c
@@ -181,8 +181,8 @@ void writeDataImp(void *param) {
if (lastMachineid != machineid) {
lastMachineid = machineid;
- sqlLen += sprintf(sql + sqlLen, " dev%d using devices tags(%d,'%s',%d) values",
- machineid, machineid, machinename, machinegroup);
+ sqlLen += sprintf(sql + sqlLen, " dev%d values",
+ machineid);
}
sqlLen += sprintf(sql + sqlLen, "(%" PRId64 ",%d,%f)", timestamp, temperature, humidity);
@@ -192,7 +192,8 @@ void writeDataImp(void *param) {
result = taos_query(taos, sql);
code = taos_errno(result);
if (code != 0) {
- printf("thread:%d error:%d reason:%s\n", pThread->threadId, code, taos_errstr(taos));
+ printf("insert into dev%d values (%" PRId64 ",%d,%f)\n",machineid, timestamp, temperature, humidity);
+ printf("thread:%d error:%d reason:%s\n", pThread->threadId, code, taos_errstr(result));
}
taos_free_result(result);
@@ -210,6 +211,7 @@ void writeDataImp(void *param) {
result = taos_query(taos, sql);
code = taos_errno(result);
if (code != 0) {
+ // printf("insert into dev%d using devices tags(%d,'%s',%d) values (%" PRId64 ",%d,%f)",machineid, machineid, machinename, machinegroup, timestamp, temperature, humidity);
printf("thread:%d error:%d reason:%s\n", pThread->threadId, code, taos_errstr(taos));
}
taos_free_result(result);
@@ -246,7 +248,7 @@ void writeData() {
taos_free_result(result);
result = taos_query(taos,
- "create table if not exists db.devices(ts timestamp, temperature int, humidity float) "
+ "create stable if not exists db.devices(ts timestamp, temperature int, humidity float) "
"tags(devid int, devname binary(16), devgroup int)");
code = taos_errno(result);
if (code != 0) {
@@ -254,6 +256,77 @@ void writeData() {
}
taos_free_result(result);
+ //create tables before insert the data
+ result = taos_query(taos, "use db");
+ code = taos_errno(result);
+ if (code != 0) {
+ taos_error(result, taos);
+ }
+ taos_free_result(result);
+
+ char *sql = calloc(1, 8*1024*1024);
+ int sqlLen = 0;
+ int lastMachineid = 0;
+ int counter = 0;
+ int totalRecords = 0;
+ for (int i = 0; i < arguments.filesNum; i++) {
+ char fileName[300];
+ sprintf(fileName, "%s/testdata%d.csv", arguments.dataDir, i);
+
+ FILE *fp = fopen(fileName, "r");
+ if (fp == NULL) {
+ printf("failed to open file %s\n", fileName);
+ exit(1);
+ }
+ printf("open file %s success\n", fileName);
+
+ char *line = NULL;
+ size_t len = 0;
+ while (!feof(fp)) {
+ free(line);
+ line = NULL;
+ len = 0;
+
+ getline(&line, &len, fp);
+ if (line == NULL) break;
+
+ if (strlen(line) < 10) continue;
+
+ int machineid;
+ char machinename[16];
+ int machinegroup;
+ int64_t timestamp;
+ int temperature;
+ float humidity;
+ sscanf(line, "%d%s%d%" PRId64 "%d%f", &machineid, machinename, &machinegroup, ×tamp, &temperature, &humidity);
+
+ if (counter == 0) {
+ sqlLen = sprintf(sql, "create table if not exists");
+ }
+
+ if (lastMachineid != machineid) {
+ lastMachineid = machineid;
+ sqlLen += sprintf(sql + sqlLen, " dev%d using devices tags(%d,'%s',%d)", machineid, machineid, machinename, machinegroup);
+ }
+ counter++;
+
+ if (counter >= arguments.rowsPerRequest) {
+ result = taos_query(taos, sql);
+ code = taos_errno(result);
+ if (code != 0) {
+ printf("create table error:%d reason:%s\n", code, taos_errstr(result));
+ }
+ taos_free_result(result);
+
+ totalRecords += counter;
+ counter = 0;
+ lastMachineid = -1;
+ sqlLen = 0;
+ }
+ }
+ fclose(fp);
+ }
+
int64_t st = getTimeStampMs();
int a = arguments.filesNum / arguments.clients;
@@ -379,5 +452,4 @@ void readData() {
}
free(threads);
-}
-
+}
\ No newline at end of file
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index e68d868b70..c66ccc5477 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -235,6 +235,8 @@ python3 ./test.py -f query/queryTscomputWithNow.py
python3 ./test.py -f query/computeErrorinWhere.py
python3 ./test.py -f query/queryTsisNull.py
python3 ./test.py -f query/subqueryFilter.py
+# python3 ./test.py -f query/nestedQuery/queryInterval.py
+python3 ./test.py -f query/queryStateWindow.py
#stream
@@ -322,8 +324,10 @@ python3 ./test.py -f query/last_row_cache.py
python3 ./test.py -f account/account_create.py
python3 ./test.py -f alter/alter_table.py
python3 ./test.py -f query/queryGroupbySort.py
+python3 ./test.py -f functions/queryTestCases.py
python3 ./test.py -f functions/function_stateWindow.py
python3 ./test.py -f functions/function_derivative.py
+python3 ./test.py -f functions/function_irate.py
python3 ./test.py -f insert/unsignedInt.py
python3 ./test.py -f insert/unsignedBigint.py
diff --git a/tests/pytest/functions/function_irate.py b/tests/pytest/functions/function_irate.py
new file mode 100644
index 0000000000..2c85e1bbdd
--- /dev/null
+++ b/tests/pytest/functions/function_irate.py
@@ -0,0 +1,228 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 100
+ self.ts = 1537146000000
+ self.ts1 = 1537146000000000
+
+
+ def run(self):
+ # db precison ms
+ tdSql.prepare()
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20), tag1 int)''')
+ tdSql.execute("create table test1 using test tags('beijing', 10)")
+ tdSql.execute("create table gtest1 (ts timestamp, col1 float)")
+ tdSql.execute("create table gtest2 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest3 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest4 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest5 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest6 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest7 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest8 (ts timestamp, col1 tinyint)")
+
+
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ % (self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+
+ tdSql.execute("insert into gtest1 values(1537146000000,0);")
+ tdSql.execute("insert into gtest1 values(1537146001100,1.2);")
+ tdSql.execute("insert into gtest2 values(1537146001001,1);")
+ tdSql.execute("insert into gtest2 values(1537146001101,2);")
+ tdSql.execute("insert into gtest3 values(1537146001101,2);")
+ tdSql.execute("insert into gtest4(ts) values(1537146001101);")
+ tdSql.execute("insert into gtest5 values(1537146001002,4);")
+ tdSql.execute("insert into gtest5 values(1537146002202,4);")
+ tdSql.execute("insert into gtest6 values(1537146000000,5);")
+ tdSql.execute("insert into gtest6 values(1537146001000,2);")
+ tdSql.execute("insert into gtest7 values(1537146001000,1);")
+ tdSql.execute("insert into gtest7 values(1537146008000,2);")
+ tdSql.execute("insert into gtest7 values(1537146009000,6);")
+ tdSql.execute("insert into gtest7 values(1537146012000,3);")
+ tdSql.execute("insert into gtest7 values(1537146015000,3);")
+ tdSql.execute("insert into gtest7 values(1537146017000,1);")
+ tdSql.execute("insert into gtest7 values(1537146019000,3);")
+ tdSql.execute("insert into gtest8 values(1537146000002,4);")
+ tdSql.execute("insert into gtest8 values(1537146002202,4);")
+
+    # irate verification
+ tdSql.query("select irate(col1) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col1) from test1 interval(10s);")
+ tdSql.checkData(0, 1, 1)
+ tdSql.query("select irate(col1) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col2) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col3) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col4) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col5) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col6) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col11) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col12) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col13) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col14) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col2) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col2) from test1;")
+ tdSql.checkData(0, 0, 1)
+
+ tdSql.query("select irate(col1) from gtest1;")
+ tdSql.checkData(0, 0, 1.2/1.1)
+ tdSql.query("select irate(col1) from gtest2;")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select irate(col1) from gtest3;")
+ tdSql.checkData(0, 0, 0)
+ tdSql.query("select irate(col1) from gtest4;")
+ tdSql.checkRows(0)
+ tdSql.query("select irate(col1) from gtest5;")
+ tdSql.checkData(0, 0, 0)
+ tdSql.query("select irate(col1) from gtest6;")
+ tdSql.checkData(0, 0, 2)
+ tdSql.query("select irate(col1) from gtest7;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts asc;")
+ tdSql.checkData(1, 1, 4)
+ tdSql.checkData(2, 1, 0)
+ tdSql.checkData(3, 1, 1)
+ tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts desc ;")
+ tdSql.checkData(1, 1, 0)
+ tdSql.checkData(2, 1, 4)
+ tdSql.checkData(3, 1, 0)
+
+ #error
+ tdSql.error("select irate(col1) from test")
+ tdSql.error("select irate(ts) from test1")
+ tdSql.error("select irate(col7) from test1")
+ tdSql.error("select irate(col8) from test1")
+ tdSql.error("select irate(col9) from test1")
+ tdSql.error("select irate(loc) from test1")
+ tdSql.error("select irate(tag1) from test1")
+
+ # use db1 precision us
+ tdSql.execute("create database db1 precision 'us' keep 3650 UPDATE 1")
+ tdSql.execute("use db1 ")
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ tdSql.execute("create table gtest1 (ts timestamp, col1 float)")
+ tdSql.execute("create table gtest2 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest3 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest4 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest5 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest6 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest7 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest8 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest9 (ts timestamp, col1 tinyint)")
+
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ % (self.ts1 + i*1000000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+
+ tdSql.execute("insert into gtest1 values(1537146000000000,0);")
+ tdSql.execute("insert into gtest1 values(1537146001100000,1.2);")
+ tdSql.execute("insert into gtest2 values(1537146001001000,1);")
+ tdSql.execute("insert into gtest2 values(1537146001101000,2);")
+ tdSql.execute("insert into gtest3 values(1537146001101000,2);")
+ tdSql.execute("insert into gtest4(ts) values(1537146001101000);")
+ tdSql.execute("insert into gtest5 values(1537146001002000,4);")
+ tdSql.execute("insert into gtest5 values(1537146002202000,4);")
+ tdSql.execute("insert into gtest6 values(1537146000000000,5);")
+ tdSql.execute("insert into gtest6 values(1537146001000000,2);")
+ tdSql.execute("insert into gtest7 values(1537146001000000,1);")
+ tdSql.execute("insert into gtest7 values(1537146008000000,2);")
+ tdSql.execute("insert into gtest7 values(1537146009000000,6);")
+ tdSql.execute("insert into gtest7 values(1537146012000000,3);")
+ tdSql.execute("insert into gtest7 values(1537146015000000,3);")
+ tdSql.execute("insert into gtest7 values(1537146017000000,1);")
+ tdSql.execute("insert into gtest7 values(1537146019000000,3);")
+ tdSql.execute("insert into gtest8 values(1537146000002000,3);")
+ tdSql.execute("insert into gtest8 values(1537146001003000,4);")
+ tdSql.execute("insert into gtest9 values(1537146000000000,4);")
+ tdSql.execute("insert into gtest9 values(1537146000000001,5);")
+
+
+        # irate verification
+ tdSql.query("select irate(col1) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col1) from test1 interval(10s);")
+ tdSql.checkData(0, 1, 1)
+ tdSql.query("select irate(col1) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col1) from gtest1;")
+ tdSql.checkData(0, 0, 1.2/1.1)
+ tdSql.query("select irate(col1) from gtest2;")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select irate(col1) from gtest3;")
+ tdSql.checkData(0, 0, 0)
+ tdSql.query("select irate(col1) from gtest4;")
+ tdSql.checkRows(0)
+ tdSql.query("select irate(col1) from gtest5;")
+ tdSql.checkData(0, 0, 0)
+ tdSql.query("select irate(col1) from gtest6;")
+ tdSql.checkData(0, 0, 2)
+ tdSql.query("select irate(col1) from gtest7;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts asc;")
+ tdSql.checkData(1, 1, 4)
+ tdSql.checkData(2, 1, 0)
+ tdSql.checkData(3, 1, 1)
+ tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts desc ;")
+ tdSql.checkData(1, 1, 0)
+ tdSql.checkData(2, 1, 4)
+ tdSql.checkData(3, 1, 0)
+ tdSql.query("select irate(col1) from gtest8;")
+ tdSql.checkData(0, 0, 1/1.001)
+ tdSql.query("select irate(col1) from gtest9;")
+ tdSql.checkData(0, 0, 1000000)
+
+ #error
+ tdSql.error("select irate(col1) from test")
+ tdSql.error("select irate(ts) from test1")
+ tdSql.error("select irate(col7) from test1")
+ tdSql.error("select irate(col8) from test1")
+ tdSql.error("select irate(col9) from test1")
+ tdSql.error("select irate(loc) from test1")
+ tdSql.error("select irate(tag1) from test1")
+
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py
new file mode 100644
index 0000000000..b7480fdbd5
--- /dev/null
+++ b/tests/pytest/functions/queryTestCases.py
@@ -0,0 +1,364 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import subprocess
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to execute {__file__}")
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self) -> str:
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/debug/build/bin")]
+ break
+ return buildPath
+
+ def getCfgDir(self) -> str:
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg"
+ else:
+ cfgDir = self.getBuildPath() + "/sim/dnode1/cfg"
+ return cfgDir
+
+ def getCfgFile(self) -> str:
+ return self.getCfgDir()+"/taos.cfg"
+
+ def td3690(self):
+ tdLog.printNoPrefix("==========TD-3690==========")
+ tdSql.query("show variables")
+ tdSql.checkData(51, 1, 864000)
+
+ def td4082(self):
+ tdLog.printNoPrefix("==========TD-4082==========")
+ cfgfile = self.getCfgFile()
+ max_compressMsgSize = 100000000
+
+ tdSql.query("show variables")
+ tdSql.checkData(26, 1, -1)
+
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$a compressMSgSize {max_compressMsgSize}' {cfgfile} "
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.query("show variables")
+ tdSql.checkData(26, 1, 100000000)
+
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} "
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.query("show variables")
+ tdSql.checkData(26, 1, -1)
+
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$d' {cfgfile}"
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+
+ def td4097(self):
+ tdLog.printNoPrefix("==========TD-4097==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("drop database if exists db1")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("create database if not exists db1 keep 3650")
+
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)")
+ tdSql.execute("create stable db1.stb3 (ts timestamp, c1 int) tags(t1 int)")
+
+ tdSql.execute("create table db.t10 using db.stb1 tags(1)")
+ tdSql.execute("create table db.t11 using db.stb1 tags(2)")
+ tdSql.execute("create table db.t20 using db.stb2 tags(3)")
+ tdSql.execute("create table db1.t30 using db1.stb3 tags(4)")
+
+ tdLog.printNoPrefix("==========TD-4097==========")
+ # 插入数据,然后进行show create 操作
+
+ # p1 不进入指定数据库
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.error("show create database ")
+ tdSql.error("show create databases db ")
+ tdSql.error("show create database db.stb1")
+ tdSql.error("show create database db0")
+ tdSql.error("show create database db db1")
+ tdSql.error("show create database db, db1")
+ tdSql.error("show create database stb1")
+ tdSql.error("show create database * ")
+
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+ tdSql.error("show create stable db.t10")
+ tdSql.error("show create stable db.stb0")
+ tdSql.error("show create stable stb1")
+ tdSql.error("show create stable ")
+ tdSql.error("show create stable *")
+ tdSql.error("show create stable db.stb1 db.stb2")
+ tdSql.error("show create stable db.stb1, db.stb2")
+
+ tdSql.query("show create table db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db.t10")
+ tdSql.checkRows(1)
+ tdSql.error("show create table db.stb0")
+ tdSql.error("show create table stb1")
+ tdSql.error("show create table ")
+ tdSql.error("show create table *")
+ tdSql.error("show create table db.stb1 db.stb2")
+ tdSql.error("show create table db.stb1, db.stb2")
+
+ # p2 进入指定数据库
+ tdSql.execute("use db")
+
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db1")
+ tdSql.checkRows(1)
+ tdSql.error("show create database ")
+ tdSql.error("show create databases db ")
+ tdSql.error("show create database db.stb1")
+ tdSql.error("show create database db0")
+ tdSql.error("show create database db db1")
+ tdSql.error("show create database db, db1")
+ tdSql.error("show create database stb1")
+ tdSql.error("show create database * ")
+
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db1.stb3")
+ tdSql.checkRows(1)
+ tdSql.error("show create stable db.t10")
+ tdSql.error("show create stable db")
+ tdSql.error("show create stable t10")
+ tdSql.error("show create stable db.stb0")
+ tdSql.error("show create stables stb1")
+ tdSql.error("show create stable ")
+ tdSql.error("show create stable *")
+ tdSql.error("show create stable db.stb1 db.stb2")
+ tdSql.error("show create stable stb1 stb2")
+ tdSql.error("show create stable db.stb1, db.stb2")
+ tdSql.error("show create stable stb1, stb2")
+
+ tdSql.query("show create table db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db.t10")
+ tdSql.checkRows(1)
+ tdSql.query("show create table t10")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db1.t30")
+ tdSql.checkRows(1)
+ tdSql.error("show create table t30")
+ tdSql.error("show create table db.stb0")
+ tdSql.error("show create table db.t0")
+ tdSql.error("show create table db")
+ tdSql.error("show create tables stb1")
+ tdSql.error("show create tables t10")
+ tdSql.error("show create table ")
+ tdSql.error("show create table *")
+ tdSql.error("show create table db.stb1 db.stb2")
+ tdSql.error("show create table db.t11 db.t10")
+ tdSql.error("show create table db.stb1, db.stb2")
+ tdSql.error("show create table db.t11, db.t10")
+ tdSql.error("show create table stb1 stb2")
+ tdSql.error("show create table t11 t10")
+ tdSql.error("show create table stb1, stb2")
+ tdSql.error("show create table t11, t10")
+
+ # p3 删库删表后进行查询
+ tdSql.execute("drop table if exists t11")
+
+ tdSql.error("show create table t11")
+ tdSql.error("show create table db.t11")
+ tdSql.query("show create stable stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table t10")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop stable if exists stb2")
+
+ tdSql.error("show create table stb2")
+ tdSql.error("show create table db.stb2")
+ tdSql.error("show create stable stb2")
+ tdSql.error("show create stable db.stb2")
+ tdSql.error("show create stable db.t20")
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop database if exists db1")
+ tdSql.error("show create database db1")
+ tdSql.error("show create stable db1.t31")
+ tdSql.error("show create stable db1.stb3")
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+
+ def td4153(self):
+ tdLog.printNoPrefix("==========TD-4153==========")
+
+ pass
+
+ def td4288(self):
+ tdLog.printNoPrefix("==========TD-4288==========")
+ # keep ~ [days,365000]
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db")
+ tdSql.query("show variables")
+ tdSql.checkData(36, 1, 3650)
+ tdSql.query("show databases")
+ tdSql.checkData(0,7,"3650,3650,3650")
+
+ days = tdSql.getData(0, 6)
+ tdSql.error("alter database db keep 3650001")
+ tdSql.error("alter database db keep 9")
+ tdSql.error("alter database db keep 0b")
+ tdSql.error("alter database db keep 3650,9,36500")
+ tdSql.error("alter database db keep 3650,3650,365001")
+ tdSql.error("alter database db keep 36500,a,36500")
+ tdSql.error("alter database db keep (36500,3650,3650)")
+ tdSql.error("alter database db keep [36500,3650,36500]")
+ tdSql.error("alter database db keep 36500,0xff,3650")
+ tdSql.error("alter database db keep 36500,0o365,3650")
+ tdSql.error("alter database db keep 36500,0A3Ch,3650")
+ tdSql.error("alter database db keep")
+ tdSql.error("alter database db keep0 36500")
+
+ tdSql.execute("alter database db keep 36500")
+ tdSql.query("show databases")
+ tdSql.checkData(0, 7, "3650,3650,36500")
+ tdSql.execute("drop database if exists db")
+
+ tdSql.execute("create database if not exists db1")
+ tdSql.query("show databases")
+ tdSql.checkData(0, 7, "3650,3650,3650")
+ tdSql.query("show variables")
+ tdSql.checkData(36, 1, 3650)
+
+ tdSql.execute("alter database db1 keep 365")
+ tdSql.execute("drop database if exists db1")
+
+
+ pass
+
+ def td4724(self):
+ tdLog.printNoPrefix("==========TD-4724==========")
+ cfgfile = self.getCfgFile()
+ minTablesPerVnode = 5
+ maxTablesPerVnode = 10
+ maxVgroupsPerDb = 100
+
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+
+ tdDnodes.stop(index)
+ vnode_cmd = f"sed -i '$a maxVgroupsPerDb {maxVgroupsPerDb}' {cfgfile} "
+ min_cmd = f"sed -i '$a minTablesPerVnode {minTablesPerVnode}' {cfgfile} "
+ max_cmd = f"sed -i '$a maxTablesPerVnode {maxTablesPerVnode}' {cfgfile} "
+ try:
+ _ = subprocess.check_output(vnode_cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(min_cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(max_cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+ insert_sql = "insert into "
+ for i in range(100):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ insert_sql += f" t1{i} values({1604298064000 + i*1000}, {i})"
+ tdSql.query("show dnodes")
+ vnode_count = tdSql.getData(0, 2)
+ if vnode_count <= 1:
+ tdLog.exit("vnode is less than 2")
+
+ tdSql.execute(insert_sql)
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$d' {cfgfile}"
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+
+ pass
+
+ def run(self):
+
+ # master branch
+ # self.td3690()
+ # self.td4082()
+ # self.td4288()
+ self.td4724()
+
+ # develop branch
+ # self.td4097()
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
+
+
+
diff --git a/tests/pytest/insert/binary.py b/tests/pytest/insert/binary.py
index 0d583aa2cc..0cbb7876c6 100644
--- a/tests/pytest/insert/binary.py
+++ b/tests/pytest/insert/binary.py
@@ -4,6 +4,8 @@ import sys
from util.log import *
from util.cases import *
from util.sql import *
+import subprocess
+import os
class TDTestCase:
@@ -50,6 +52,10 @@ class TDTestCase:
tdLog.info('==> $data00')
tdLog.info("tdSql.checkData(0, 0, '34567')")
tdSql.checkData(0, 0, '34567')
+ tdLog.info("insert into tb values (now+4a, \"'';\")")
+ config_dir = subprocess.check_output(str("ps -ef |grep dnode1|grep -v grep |awk '{print $NF}'"), stderr=subprocess.STDOUT, shell=True).decode('utf-8').replace('\n', '')
+ result = ''.join(os.popen(r"""taos -s "insert into db.tb values (now+4a, \"'';\")" -c %s"""%(config_dir)).readlines())
+ if "Query OK" not in result: tdLog.exit("err:insert '';")
tdLog.info('drop database db')
tdSql.execute('drop database db')
tdLog.info('show databases')
diff --git a/tests/pytest/insert/in_function.py b/tests/pytest/insert/in_function.py
index d1fbfd702a..263c8a78aa 100644
--- a/tests/pytest/insert/in_function.py
+++ b/tests/pytest/insert/in_function.py
@@ -698,24 +698,24 @@ class TDTestCase:
tdSql.execute(cmd1)
cmd2 = 'select * from normal_in_float_double_1 where in_float in (\'888\');'
- tdLog.info(cmd2)
- tdSql.error(cmd2)
- try:
- tdSql.execute(cmd2)
- tdLog.exit("invalid operation: not supported filter condition")
- except Exception as e:
- tdLog.info(repr(e))
- tdLog.info("invalid operation: not supported filter condition")
-
- cmd3 = 'select * from normal_in_float_double_1 where in_double in (\'66666\');'
- tdLog.info(cmd3)
- tdSql.error(cmd3)
- try:
- tdSql.execute(cmd3)
- tdLog.exit("invalid operation: not supported filter condition")
- except Exception as e:
- tdLog.info(repr(e))
- tdLog.info("invalid operation: not supported filter condition")
+ #tdLog.info(cmd2)
+ #tdSql.error(cmd2)
+ #try:
+ # tdSql.execute(cmd2)
+ # tdLog.exit("invalid operation: not supported filter condition")
+ #except Exception as e:
+ # tdLog.info(repr(e))
+ # tdLog.info("invalid operation: not supported filter condition")
+ #
+ #cmd3 = 'select * from normal_in_float_double_1 where in_double in (\'66666\');'
+ #tdLog.info(cmd3)
+ #tdSql.error(cmd3)
+ #try:
+ # tdSql.execute(cmd3)
+ # tdLog.exit("invalid operation: not supported filter condition")
+ #except Exception as e:
+ # tdLog.info(repr(e))
+ # tdLog.info("invalid operation: not supported filter condition")
def stop(self):
tdSql.close()
diff --git a/tests/pytest/query/nestedQuery/insertData.json b/tests/pytest/query/nestedQuery/insertData.json
new file mode 100644
index 0000000000..d4ef8dbe97
--- /dev/null
+++ b/tests/pytest/query/nestedQuery/insertData.json
@@ -0,0 +1,62 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file":"./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 1,
+ "childtable_prefix": "stb0_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 100000,
+ "childtable_limit": -1,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1,
+ "timestamp_step": 1000,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./tools/taosdemoAllTest/sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BOOL"}],
+ "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":1}]
+ }]
+ }]
+}
diff --git a/tests/pytest/query/nestedQuery/nestedQueryJson.py b/tests/pytest/query/nestedQuery/nestedQueryJson.py
new file mode 100644
index 0000000000..36a231a916
--- /dev/null
+++ b/tests/pytest/query/nestedQuery/nestedQueryJson.py
@@ -0,0 +1,81 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
+
+ def run(self):
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+ binPath = buildPath+ "/build/bin/"
+
+ # insert: create one or mutiple tables per sql and insert multiple rows per sql
+ os.system("%staosdemo -f query/nestedQuery/insertData.json -y " % binPath)
+ tdSql.execute("use db")
+ tdSql.query("select count (tbname) from stb0")
+ tdSql.checkData(0, 0, 1000)
+ tdSql.query("select count (tbname) from stb1")
+ tdSql.checkData(0, 0, 1000)
+ tdSql.query("select count(*) from stb00_0")
+ tdSql.checkData(0, 0, 100)
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 100000)
+ tdSql.query("select count(*) from stb01_1")
+ tdSql.checkData(0, 0, 200)
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkData(0, 0, 200000)
+
+
+
+ testcaseFilename = os.path.split(__file__)[-1]
+ os.system("rm -rf ./insert_res.txt")
+ os.system("rm -rf query/nestedQuery/%s.sql" % testcaseFilename )
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/nestedQuery/queryInterval.py b/tests/pytest/query/nestedQuery/queryInterval.py
new file mode 100644
index 0000000000..11c42c463e
--- /dev/null
+++ b/tests/pytest/query/nestedQuery/queryInterval.py
@@ -0,0 +1,106 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+from util.dnodes import tdDnodes
+import random
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.ts1 = 1593548685000
+ self.ts2 = 1593548785000
+
+
+ def run(self):
+ # tdSql.execute("drop database db ")
+ tdSql.prepare()
+ tdSql.execute("create table st (ts timestamp, num int, value int , t_instance int) tags (loc nchar(30))")
+ node = 5
+ number = 10
+ for n in range(node):
+ for m in range(number):
+ dt= m*300000+n*60000 # per-row offset in ms: 5 min per m, 1 min per n
+ args1=(n,n,self.ts1+dt,n,100+2*m+2*n,10+m+n)
+ # args2=(n,self.ts2+dt,n,120+n,15+n)
+ tdSql.execute("insert into t%d using st tags('beijing%d') values(%d, %d, %d, %d)" % args1)
+ # tdSql.execute("insert into t1 using st tags('shanghai') values(%d, %d, %d, %d)" % args2)
+
+ # interval function
+ tdSql.query("select avg(value) from st interval(10m)")
+ # print(tdSql.queryResult)
+ tdSql.checkRows(6)
+ tdSql.checkData(0, 0, "2020-07-01 04:20:00")
+ tdSql.checkData(1, 1, 107.4)
+
+ # subquery with interval
+ tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(10m));")
+ tdSql.checkData(0, 0, 109.0)
+
+ # subquery with interval and select two Column in parent query
+ tdSql.error("select ts,avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(10m));")
+
+ # subquery with interval and sliding
+ tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(8m) sliding(30s) limit 1;")
+ tdSql.checkData(0, 0, "2020-07-01 04:17:00")
+ tdSql.checkData(0, 1, 100)
+ tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(30s));")
+ tdSql.checkData(0, 0, 111)
+
+ # subquery with interval and offset
+ tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(5m,1m);")
+ tdSql.checkData(0, 0, "2020-07-01 04:21:00")
+ tdSql.checkData(0, 1, 100)
+ tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(5m,1m) group by loc);")
+ tdSql.checkData(0, 0, 109)
+
+ # subquery with interval,sliding and group by ; parent query with interval
+ tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(8m) sliding(1m) group by loc limit 1 offset 52 ;")
+ tdSql.checkData(0, 0, "2020-07-01 05:09:00")
+ tdSql.checkData(0, 1, 118)
+ tdSql.query("select avg(avg_val) as ncst from(select avg(value) as avg_val from st where loc!='beijing0' interval(8m) sliding(1m) group by loc ) interval(5m);")
+ tdSql.checkData(1, 1, 105)
+
+ # # subquery and parent query with interval and sliding
+ tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(5m)) interval(10m) sliding(2m);")
+ tdSql.checkData(29, 0, "2020-07-01 05:10:00.000")
+
+ # subquery and parent query with top and bottom
+ tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num) order by avg_val desc;")
+ tdSql.checkData(0, 1, 117)
+ tdSql.query("select bottom(avg_val,3) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num) order by avg_val asc;")
+ tdSql.checkData(0, 1, 111)
+
+ #
+ tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(3m));")
+ tdSql.checkData(0, 1, 120)
+
+ # clear env
+ testcaseFilename = os.path.split(__file__)[-1]
+ os.system("rm -rf ./insert_res.txt")
+ os.system("rm -rf wal/%s.sql" % testcaseFilename )
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/querySession.py b/tests/pytest/query/querySession.py
index 620f755bcb..216ff68b71 100644
--- a/tests/pytest/query/querySession.py
+++ b/tests/pytest/query/querySession.py
@@ -51,38 +51,73 @@ class TDTestCase:
tdSql.checkRows(15)
tdSql.checkData(0, 1, 2)
+ # session(ts,5a) main query
+ tdSql.query("select count(*) from (select * from dev_001) session(ts,5a)")
+ tdSql.checkRows(15)
+ tdSql.checkData(0, 1, 2)
+
# session(ts,1s)
tdSql.query("select count(*) from dev_001 session(ts,1s)")
tdSql.checkRows(12)
tdSql.checkData(0, 1, 5)
+ # session(ts,1s) main query
+ tdSql.query("select count(*) from (select * from dev_001) session(ts,1s)")
+ tdSql.checkRows(12)
+ tdSql.checkData(0, 1, 5)
+
tdSql.query("select count(*) from dev_001 session(ts,1000a)")
tdSql.checkRows(12)
tdSql.checkData(0, 1, 5)
+ tdSql.query("select count(*) from (select * from dev_001) session(ts,1000a)")
+ tdSql.checkRows(12)
+ tdSql.checkData(0, 1, 5)
+
# session(ts,1m)
tdSql.query("select count(*) from dev_001 session(ts,1m)")
tdSql.checkRows(9)
tdSql.checkData(0, 1, 8)
+ # session(ts,1m)
+ tdSql.query("select count(*) from (select * from dev_001) session(ts,1m)")
+ tdSql.checkRows(9)
+ tdSql.checkData(0, 1, 8)
+
# session(ts,1h)
tdSql.query("select count(*) from dev_001 session(ts,1h)")
tdSql.checkRows(6)
tdSql.checkData(0, 1, 11)
+ # session(ts,1h)
+ tdSql.query("select count(*) from (select * from dev_001) session(ts,1h)")
+ tdSql.checkRows(6)
+ tdSql.checkData(0, 1, 11)
+
# session(ts,1d)
tdSql.query("select count(*) from dev_001 session(ts,1d)")
tdSql.checkRows(4)
tdSql.checkData(0, 1, 13)
+ # session(ts,1d)
+ tdSql.query("select count(*) from (select * from dev_001) session(ts,1d)")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 1, 13)
+
# session(ts,1w)
tdSql.query("select count(*) from dev_001 session(ts,1w)")
tdSql.checkRows(2)
tdSql.checkData(0, 1, 15)
+ # session(ts,1w)
+ tdSql.query("select count(*) from (select * from dev_001) session(ts,1w)")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 15)
+
# session with where
tdSql.query("select count(*),first(tagtype),last(tagtype),avg(tagtype),sum(tagtype),min(tagtype),max(tagtype),leastsquares(tagtype, 1, 1),spread(tagtype),stddev(tagtype),percentile(tagtype,0) from dev_001 where ts <'2020-05-20 0:0:0' session(ts,1d)")
+
tdSql.checkRows(2)
tdSql.checkData(0, 1, 13)
tdSql.checkData(0, 2, 1)
@@ -97,6 +132,20 @@ class TDTestCase:
tdSql.checkData(0, 11, 1)
tdSql.checkData(1, 11, 14)
+ # session with where main
+
+ tdSql.query("select count(*),first(tagtype),last(tagtype),avg(tagtype),sum(tagtype),min(tagtype),max(tagtype),leastsquares(tagtype, 1, 1) from (select * from dev_001 where ts <'2020-05-20 0:0:0') session(ts,1d)")
+
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 13)
+ tdSql.checkData(0, 2, 1)
+ tdSql.checkData(0, 3, 13)
+ tdSql.checkData(0, 4, 7)
+ tdSql.checkData(0, 5, 91)
+ tdSql.checkData(0, 6, 1)
+ tdSql.checkData(0, 7, 13)
+ tdSql.checkData(0, 8, '{slop:1.000000, intercept:0.000000}')
+
# tdsql err
tdSql.error("select * from dev_001 session(ts,1w)")
tdSql.error("select count(*) from st session(ts,1w)")
diff --git a/tests/pytest/query/queryStateWindow.py b/tests/pytest/query/queryStateWindow.py
new file mode 100644
index 0000000000..251dbef658
--- /dev/null
+++ b/tests/pytest/query/queryStateWindow.py
@@ -0,0 +1,111 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+ self.rowNum = 100000
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ print("==============step1")
+ tdSql.execute(
+ "create table if not exists st (ts timestamp, t1 int, t2 timestamp, t3 bigint, t4 float, t5 double, t6 binary(10), t7 smallint, t8 tinyint, t9 bool, t10 nchar(10), t11 int unsigned, t12 bigint unsigned, t13 smallint unsigned, t14 tinyint unsigned ,t15 int) tags(dev nchar(50), tag2 binary(16))")
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_001 using st tags("dev_01", "tag_01")')
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_002 using st tags("dev_02", "tag_02")')
+
+ print("==============step2")
+
+ tdSql.execute(
+ "INSERT INTO dev_001 VALUES('2020-05-13 10:00:00.000', 1, '2020-05-13 10:00:00.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 254, 1)('2020-05-13 10:00:01.000', 1, '2020-05-13 10:00:01.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 253, 5)('2020-05-13 10:00:02.000', 10, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', 10, -127, false, '测试', 15, 10, 65534, 253, 10)('2020-05-13 10:00:03.000', 1, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', -10, -126, true, '测试', 14, 12, 65532, 254, 15)")
+
+ for i in range(self.rowNum):
+ tdSql.execute("insert into dev_002 (ts,t1) values(%d, %d)" % (self.ts + i, i + 1))
+
+ tdSql.query("select count(ts) from dev_001 state_window(t1)")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, 2)
+ tdSql.query("select count(ts) from dev_001 state_window(t3)")
+ tdSql.checkRows(2)
+ tdSql.checkData(1, 0, 2)
+ tdSql.query("select count(ts) from dev_001 state_window(t7)")
+ tdSql.checkRows(3)
+ tdSql.checkData(1, 0, 1)
+ tdSql.query("select count(ts) from dev_001 state_window(t8)")
+ tdSql.checkRows(3)
+ tdSql.checkData(2, 0, 1)
+ tdSql.query("select count(ts) from dev_001 state_window(t11)")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, 3)
+ tdSql.query("select count(ts) from dev_001 state_window(t12)")
+ tdSql.checkRows(2)
+ tdSql.checkData(1, 0, 1)
+ tdSql.query("select count(ts) from dev_001 state_window(t13)")
+ tdSql.checkRows(2)
+ tdSql.checkData(1, 0, 1)
+ tdSql.query("select count(ts) from dev_001 state_window(t14)")
+ tdSql.checkRows(3)
+ tdSql.checkData(1, 0, 2)
+ tdSql.query("select count(ts) from dev_002 state_window(t1)")
+ tdSql.checkRows(100000)
+
+ # with all aggregate function
+ tdSql.query("select count(*),sum(t1),avg(t1),twa(t1),stddev(t15),leastsquares(t15,1,1),first(t15),last(t15),spread(t15),percentile(t15,90),t9 from dev_001 state_window(t9);")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, 2)
+ tdSql.checkData(1, 1, 10)
+ tdSql.checkData(0, 2, 1)
+ # tdSql.checkData(0, 3, 1)
+ tdSql.checkData(0, 4, np.std([1,5]))
+ # tdSql.checkData(0, 5, 1)
+ tdSql.checkData(0, 6, 1)
+ tdSql.checkData(0, 7, 5)
+ tdSql.checkData(0, 8, 4)
+ tdSql.checkData(0, 9, 4.6)
+ tdSql.checkData(0, 10, 'True')
+
+ # with where
+ tdSql.query("select avg(t15),t9 from dev_001 where t9='true' state_window(t9);")
+ tdSql.checkData(0, 0, 7)
+ tdSql.checkData(0, 1, 'True')
+
+ # error
+ tdSql.error("select count(*) from dev_001 state_window(t2)")
+ tdSql.error("select count(*) from st state_window(t3)")
+ tdSql.error("select count(*) from dev_001 state_window(t4)")
+ tdSql.error("select count(*) from dev_001 state_window(t5)")
+ tdSql.error("select count(*) from dev_001 state_window(t6)")
+ tdSql.error("select count(*) from dev_001 state_window(t10)")
+ tdSql.error("select count(*) from dev_001 state_window(tag2)")
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/util/taosdemoCfg.py b/tests/pytest/util/taosdemoCfg.py
new file mode 100644
index 0000000000..5071e915a5
--- /dev/null
+++ b/tests/pytest/util/taosdemoCfg.py
@@ -0,0 +1,450 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import time
+import datetime
+import inspect
+import psutil
+import shutil
+import json
+from util.log import *
+from multiprocessing import cpu_count
+
+
+# TODO: fully test the function. Handle exceptions.
+# Handle json format not accepted by taosdemo
+class TDTaosdemoCfg:
+ def __init__(self):
+ self.insert_cfg = {
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": cpu_count(),
+ "thread_count_create_tbl": cpu_count(),
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "num_of_records_per_req": 32766,
+ "max_sql_len": 32766,
+ "databases": None
+ }
+
+ self.db = {
+ "name": 'db',
+ "drop": 'yes',
+ "replica": 1,
+ "days": 10,
+ "cache": 16,
+ "blocks": 6,
+ "precision": "ms",
+ "keep": 3650,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp": 2,
+ "walLevel": 1,
+ "cachelast": 0,
+ "quorum": 1,
+ "fsync": 3000,
+ "update": 0
+ }
+
+ self.query_cfg = {
+ "filetype": "query",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "confirm_parameter_prompt": "no",
+ "databases": "db",
+ "query_times": 2,
+ "query_mode": "taosc",
+ "specified_table_query": None,
+ "super_table_query": None
+ }
+
+ self.table_query = {
+ "query_interval": 1,
+ "concurrent": 3,
+ "sqls": None
+ }
+
+ self.stable_query = {
+ "stblname": "stb",
+ "query_interval": 1,
+ "threads": 3,
+ "sqls": None
+ }
+
+ self.sub_cfg = {
+ "filetype": "subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query": None,
+ "super_table_query": None
+ }
+
+ self.table_sub = {
+ "concurrent": 1,
+ "mode": "sync",
+ "interval": 10000,
+ "restart": "yes",
+ "keepProgress": "yes",
+ "sqls": None
+ }
+
+ self.stable_sub = {
+ "stblname": "stb",
+ "threads": 1,
+ "mode": "sync",
+ "interval": 10000,
+ "restart": "yes",
+ "keepProgress": "yes",
+ "sqls": None
+ }
+
+ self.stbs = []
+ self.stb_template = {
+ "name": "stb",
+ "child_table_exists": "no",
+ "childtable_count": 100,
+ "childtable_prefix": "stb_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 5,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 100,
+ "childtable_limit": 10,
+ "childtable_offset": 0,
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 32766,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT", "count": 1}],
+ "tags": [{"type": "BIGINT", "count": 1}]
+ }
+
+ self.tb_query_sql = []
+ self.tb_query_sql_template = {
+ "sql": "select last_row(*) from stb_0 ",
+ "result": "temp/query_res0.txt"
+ }
+
+ self.stb_query_sql = []
+ self.stb_query_sql_template = {
+ "sql": "select last_row(ts) from xxxx",
+ "result": "temp/query_res2.txt"
+ }
+
+ self.tb_sub_sql = []
+ self.tb_sub_sql_template = {
+ "sql": "select * from stb_0 ;",
+ "result": "temp/subscribe_res0.txt"
+ }
+
+ self.stb_sub_sql = []
+ self.stb_sub_sql_template = {
+ "sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;",
+ "result": "temp/subscribe_res1.txt"
+ }
+
+ # The following functions are import functions for different dicts and lists
+ # except import_sql, all other import functions take a dict and overwrite the original dict
+ # dict_in: the dict used to overwrite the target
+ def import_insert_cfg(self, dict_in):
+ self.insert_cfg = dict_in
+
+ def import_db(self, dict_in):
+ self.db = dict_in
+
+ def import_stbs(self, dict_in):
+ self.stbs = dict_in
+
+ def import_query_cfg(self, dict_in):
+ self.query_cfg = dict_in
+
+ def import_table_query(self, dict_in):
+ self.table_query = dict_in
+
+ def import_stable_query(self, dict_in):
+ self.stable_query = dict_in
+
+ def import_sub_cfg(self, dict_in):
+ self.sub_cfg = dict_in
+
+ def import_table_sub(self, dict_in):
+ self.table_sub = dict_in
+
+ def import_stable_sub(self, dict_in):
+ self.stable_sub = dict_in
+
+ def import_sql(self, Sql_in, mode):
+ """used for importing the sql later used
+
+ Args:
+ Sql_in (dict): the imported sql dict
+ mode (str): the sql storing location within TDTaosdemoCfg
+ format: 'fileType_tableType'
+ fileType: query, sub
+ tableType: table, stable
+ """
+ if mode == 'query_table':
+ self.tb_query_sql = Sql_in
+ elif mode == 'query_stable':
+ self.stb_query_sql = Sql_in
+ elif mode == 'sub_table':
+ self.tb_sub_sql = Sql_in
+ elif mode == 'sub_stable':
+ self.stb_sub_sql = Sql_in
+ # import functions end
+
+ # The following functions are alter functions for different dicts
+ # Args:
+ # key: the key that is going to be modified
+ # value: the value of the key that is going to be modified
+ # if key = 'databases' | "specified_table_query" | "super_table_query"|"sqls"
+ # value will not be used
+
+ def alter_insert_cfg(self, key, value):
+
+ if key == 'databases':
+ self.insert_cfg[key] = [
+ {
+ 'dbinfo': self.db,
+ 'super_tables': self.stbs
+ }
+ ]
+ else:
+ self.insert_cfg[key] = value
+
+ def alter_db(self, key, value):
+ self.db[key] = value
+
+ def alter_query_tb(self, key, value):
+ if key == "sqls":
+ self.table_query[key] = self.tb_query_sql
+ else:
+ self.table_query[key] = value
+
+ def alter_query_stb(self, key, value):
+ if key == "sqls":
+ self.stable_query[key] = self.stb_query_sql
+ else:
+ self.stable_query[key] = value
+
+ def alter_query_cfg(self, key, value):
+ if key == "specified_table_query":
+ self.query_cfg["specified_table_query"] = self.table_query
+ elif key == "super_table_query":
+ self.query_cfg["super_table_query"] = self.stable_query
+ else:
+ self.query_cfg[key] = value
+
+ def alter_sub_cfg(self, key, value):
+ if key == "specified_table_query":
+ self.sub_cfg["specified_table_query"] = self.table_sub
+ elif key == "super_table_query":
+ self.sub_cfg["super_table_query"] = self.stable_sub
+ else:
+ self.sub_cfg[key] = value
+
+ def alter_sub_stb(self, key, value):
+ if key == "sqls":
+ self.stable_sub[key] = self.stb_sub_sql
+ else:
+ self.stable_sub[key] = value
+
+ def alter_sub_tb(self, key, value):
+ if key == "sqls":
+ self.table_sub[key] = self.tb_sub_sql
+ else:
+ self.table_sub[key] = value
+ # alter function ends
+
+ # the following functions are for handling the sql lists
+ def append_sql_stb(self, target, value):
+ """for appending sql dict into specific sql list
+
+ Args:
+ target (str): the target append list
+ format: 'fileType_tableType'
+ fileType: query, sub
+ tableType: table, stable
+ unique: 'insert_stbs'
+ value (dict): the sql dict going to be appended
+ """
+ if target == 'insert_stbs':
+ self.stbs.append(value)
+ elif target == 'query_table':
+ self.tb_query_sql.append(value)
+ elif target == 'query_stable':
+ self.stb_query_sql.append(value)
+ elif target == 'sub_table':
+ self.tb_sub_sql.append(value)
+ elif target == 'sub_stable':
+ self.stb_sub_sql.append(value)
+
+ def pop_sql_stb(self, target, index):
+ """for popping a sql dict from specific sql list
+
+ Args:
+ target (str): the target append list
+ format: 'fileType_tableType'
+ fileType: query, sub
+ tableType: table, stable
+ unique: 'insert_stbs'
+ index (int): the sql dict that is going to be popped
+ """
+ if target == 'insert_stbs':
+ self.stbs.pop(index)
+ elif target == 'query_table':
+ self.tb_query_sql.pop(index)
+ elif target == 'query_stable':
+ self.stb_query_sql.pop(index)
+ elif target == 'sub_table':
+ self.tb_sub_sql.pop(index)
+ elif target == 'sub_stable':
+ self.stb_sub_sql.pop(index)
+ # sql list modification function end
+
+ # The following functions are get functions for different dicts
+ def get_db(self):
+ return self.db
+
+ def get_stb(self):
+ return self.stbs
+
+ def get_insert_cfg(self):
+ return self.insert_cfg
+
+ def get_query_cfg(self):
+ return self.query_cfg
+
+ def get_tb_query(self):
+ return self.table_query
+
+ def get_stb_query(self):
+ return self.stable_query
+
+ def get_sub_cfg(self):
+ return self.sub_cfg
+
+ def get_tb_sub(self):
+ return self.table_sub
+
+ def get_stb_sub(self):
+ return self.stable_sub
+
+ def get_sql(self, target):
+ """general get function for all sql lists
+
+ Args:
+ target (str): the sql list want to get
+ format: 'fileType_tableType'
+ fileType: query, sub
+ tableType: table, stable
+ unique: 'insert_stbs'
+ """
+ if target == 'query_table':
+ return self.tb_query_sql
+ elif target == 'query_stable':
+ return self.stb_query_sql
+ elif target == 'sub_table':
+ return self.tb_sub_sql
+ elif target == 'sub_stable':
+ return self.stb_sub_sql
+
+ def get_template(self, target):
+ """general get function for the default sql template
+
+ Args:
+ target (str): the sql list want to get
+ format: 'fileType_tableType'
+ fileType: query, sub
+ tableType: table, stable
+ unique: 'insert_stbs'
+ """
+ if target == 'insert_stbs':
+ return self.stb_template
+ elif target == 'query_table':
+ return self.tb_query_sql_template
+ elif target == 'query_stable':
+ return self.stb_query_sql_template
+ elif target == 'sub_table':
+ return self.tb_sub_sql_template
+ elif target == 'sub_stable':
+ return self.stb_sub_sql_template
+ else:
+ print(f'did not find {target}')
+
+ # the following are the file generation functions
+ """default documentation:
+ generator function for generating taosdemo json file
+ will assemble the dicts and dump the final json
+
+ Args:
+ pathName (str): the directory in which to place the json file
+ fileName (str): the name suffix of the json file
+ Returns:
+ str: [pathName]/[filetype]_[fileName].json
+ """
+
+ def generate_insert_cfg(self, pathName, fileName):
+ cfgFileName = f'{pathName}/insert_{fileName}.json'
+ self.alter_insert_cfg('databases', None)
+ with open(cfgFileName, 'w') as file:
+ json.dump(self.insert_cfg, file)
+ return cfgFileName
+
+ def generate_query_cfg(self, pathName, fileName):
+ cfgFileName = f'{pathName}/query_{fileName}.json'
+ self.alter_query_tb('sqls', None)
+ self.alter_query_stb('sqls', None)
+ self.alter_query_cfg('specified_table_query', None)
+ self.alter_query_cfg('super_table_query', None)
+ with open(cfgFileName, 'w') as file:
+ json.dump(self.query_cfg, file)
+ return cfgFileName
+
+ def generate_subscribe_cfg(self, pathName, fileName):
+ cfgFileName = f'{pathName}/subscribe_{fileName}.json'
+ self.alter_sub_tb('sqls', None)
+ self.alter_sub_stb('sqls', None)
+ self.alter_sub_cfg('specified_table_query', None)
+ self.alter_sub_cfg('super_table_query', None)
+ with open(cfgFileName, 'w') as file:
+ json.dump(self.sub_cfg, file)
+ return cfgFileName
+ # file generation functions ends
+
+ def drop_cfg_file(self, fileName):
+ os.remove(f'{fileName}')
+
+
+taosdemoCfg = TDTaosdemoCfg()
diff --git a/tests/script/general/parser/condition.sim b/tests/script/general/parser/condition.sim
index ddba3040c2..1da93c8761 100644
--- a/tests/script/general/parser/condition.sim
+++ b/tests/script/general/parser/condition.sim
@@ -85,12 +85,9 @@ if $rows != 28 then
endi
sql_error select * from stb1 where c8 > 0
-sql_error select * from stb1 where c2 in (0,1);
-sql_error select * from stb1 where c6 in (0,2,3,1);
sql_error select * from stb1 where c7 in (0,2,3,1);
sql_error select * from stb1 where c8 in (true);
sql_error select * from stb1 where c8 in (1,2);
-sql_error select * from stb1 where t3 in (3);
sql_error select * from stb1 where t2 in (3.0);
sql_error select ts,c1,c7 from stb1 where c7 > false
sql_error select * from stb1 where c1 > NULL;
@@ -105,6 +102,7 @@ sql_error select * from stb1 where c6 <= 'null';
sql_error select * from stb1 where c7 < 'nuLl';
sql_error select * from stb1 where c8 < 'nuLl';
sql_error select * from stb1 where c9 > 'nuLl';
+sql_error select * from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b;
sql select * from stb1 where c2 > 3.0 or c2 < 60;
if $rows != 28 then
@@ -1425,7 +1423,74 @@ if $rows != 0 then
return -1
endi
-#sql select * from (select * from stb1 where (c1 > 60 or c1 < 10) and (c7 = true or c5 > 2 and c5 < 63)) where (c3 > 61 or c3 < 3);
+sql select * from stb1 where c2 in (0,1);
+if $rows != 1 then
+ return -1
+endi
+if $data00 != @21-05-05 18:19:00.000@ then
+ return -1
+endi
+sql select * from stb1 where c6 in (0,2,3,1);
+if $rows != 3 then
+ return -1
+endi
+if $data00 != @21-05-05 18:19:00.000@ then
+ return -1
+endi
+if $data10 != @21-05-05 18:19:01.000@ then
+ return -1
+endi
+if $data20 != @21-05-05 18:19:02.000@ then
+ return -1
+endi
+sql select ts,c1 from (select * from stb1 where (c1 > 60 or c1 < 10) and (c7 = true or c5 > 2 and c5 < 63)) where (c3 > 61 or c3 < 3);
+if $rows != 3 then
+ return -1
+endi
+if $data00 != @21-05-05 18:19:00.000@ then
+ return -1
+endi
+if $data10 != @21-05-05 18:19:01.000@ then
+ return -1
+endi
+if $data20 != @21-05-05 18:19:25.000@ then
+ return -1
+endi
+
+#sql select a.* from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50;
+sql select a.ts from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50;
+if $rows != 4 then
+ return -1
+endi
+if $data00 != @21-05-05 18:19:20.000@ then
+ return -1
+endi
+if $data10 != @21-05-05 18:19:21.000@ then
+ return -1
+endi
+if $data20 != @21-05-05 18:19:24.000@ then
+ return -1
+endi
+if $data30 != @21-05-05 18:19:25.000@ then
+ return -1
+endi
+
+#sql select a.ts,a.c1,a.c8,a.c9 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 and b.c1 < 60;
+sql select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 and b.c1 < 60;
+if $rows != 2 then
+ return -1
+endi
+if $data00 != @21-05-05 18:19:20.000@ then
+ return -1
+endi
+if $data10 != @21-05-05 18:19:21.000@ then
+ return -1
+endi
+
+sql_error select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 or b.c1 < 60;
+sql_error select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and ((a.c1 > 50 and a.c1 < 60) or (b.c2 > 60));
+
+#!!!!!!!!!!!select a.* from (select * from tb1) a, (select * from tb2) b where a.ts = b.ts and a.f1 > 0 and b.f1 > 0;
print "ts test"
diff --git a/tests/script/general/parser/groupby.sim b/tests/script/general/parser/groupby.sim
index 124e76e85c..507431f536 100644
--- a/tests/script/general/parser/groupby.sim
+++ b/tests/script/general/parser/groupby.sim
@@ -654,53 +654,91 @@ if $data31 != @20-03-27 05:10:19.000@ then
return -1
endi
-#sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2;
-#if $rows != 40 then
-# return -1
-#endi
-#
-#if $data01 != 1.000000000 then
-# return -1
-#endi
-#if $data02 != t1 then
-# return -1
-#endi
-#if $data03 != 1 then
-# return -1
-#endi
-#if $data04 != 1 then
-# return -1
-#endi
-#
-#if $data11 != 1.000000000 then
-# return -1
-#endi
-#if $data12 != t1 then
-# return -1
-#endi
-#if $data13 != 1 then
-# return -1
-#endi
-#if $data14 != 1 then
-# return -1
-#endi
-#
-#sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2 limit 1;
-#if $rows != 2 then
-# return -1
-#endi
-#
-#if $data11 != 1.000000000 then
-# return -1
-#endi
-#if $data12 != t2 then
-# return -1
-#endi
-#if $data13 != 1 then
-# return -1
-#endi
-#if $data14 != 2 then
-# return -1
-#endi
+print ===============>
+sql select stddev(c),c from st where t2=1 or t2=2 group by c;
+if $rows != 4 then
+ return -1
+endi
+
+if $data00 != 0.000000000 then
+ return -1
+endi
+
+if $data01 != 1 then
+ return -1
+endi
+
+if $data10 != 0.000000000 then
+ return -1
+endi
+
+if $data11 != 2 then
+ return -1
+endi
+
+if $data20 != 0.000000000 then
+ return -1
+endi
+
+if $data21 != 3 then
+ return -1
+endi
+
+if $data30 != 0.000000000 then
+ return -1
+endi
+
+if $data31 != 4 then
+ return -1
+endi
+
+sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2;
+if $rows != 40 then
+ return -1
+endi
+
+if $data01 != 1.000000000 then
+ return -1
+endi
+if $data02 != t1 then
+ return -1
+endi
+if $data03 != 1 then
+ return -1
+endi
+if $data04 != 1 then
+ return -1
+endi
+
+if $data11 != 1.000000000 then
+ return -1
+endi
+if $data12 != t1 then
+ return -1
+endi
+if $data13 != 1 then
+ return -1
+endi
+if $data14 != 1 then
+ return -1
+endi
+
+sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2 limit 1;
+if $rows != 2 then
+ return -1
+endi
+
+if $data11 != 1.000000000 then
+ return -1
+endi
+if $data12 != t2 then
+ return -1
+endi
+if $data13 != 1 then
+ return -1
+endi
+if $data14 != 2 then
+ return -1
+endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/having.sim b/tests/script/general/parser/having.sim
index ddafdd7329..a8d2102bef 100644
--- a/tests/script/general/parser/having.sim
+++ b/tests/script/general/parser/having.sim
@@ -1,6 +1,6 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2
system sh/exec.sh -n dnode1 -s start
diff --git a/tests/script/general/parser/lastrow.sim b/tests/script/general/parser/lastrow.sim
index 2b8f294d5d..fea322ec16 100644
--- a/tests/script/general/parser/lastrow.sim
+++ b/tests/script/general/parser/lastrow.sim
@@ -70,4 +70,17 @@ sleep 100
run general/parser/lastrow_query.sim
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+print =================== last_row + nested query
+sql use $db
+sql create table lr_nested(ts timestamp, f int)
+sql insert into lr_nested values(now, 1)
+sql insert into lr_nested values(now+1s, null)
+sql select last_row(*) from (select * from lr_nested)
+if $rows != 1 then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/select_with_tags.sim b/tests/script/general/parser/select_with_tags.sim
index 45f6f5c49f..f5c94d2ae6 100644
--- a/tests/script/general/parser/select_with_tags.sim
+++ b/tests/script/general/parser/select_with_tags.sim
@@ -68,6 +68,27 @@ endw
sleep 100
+
+#======================= only check first table tag, TD-4827
+sql select count(*) from $mt where t1 in (0)
+if $rows != 1 then
+ return -1
+endi
+if $data00 != $rowNum then
+ return -1;
+endi
+
+$secTag = ' . abc
+$secTag = $secTag . 0
+$secTag = $secTag . '
+sql select count(*) from $mt where t2 =$secTag and t1 in (0)
+if $rows != 1 then
+ return -1
+endi
+if $data00 != $rowNum then
+ return -1;
+endi
+
#================================
sql select ts from select_tags_mt0
print $rows
diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim
index 545e19edec..5f71138966 100644
--- a/tests/script/general/parser/testSuite.sim
+++ b/tests/script/general/parser/testSuite.sim
@@ -63,4 +63,3 @@ run general/parser/between_and.sim
run general/parser/last_cache.sim
run general/parser/nestquery.sim
run general/parser/precision_ns.sim
-
diff --git a/tests/script/general/parser/where.sim b/tests/script/general/parser/where.sim
index 682c5c688c..2c6be78e13 100644
--- a/tests/script/general/parser/where.sim
+++ b/tests/script/general/parser/where.sim
@@ -146,18 +146,18 @@ sql_error select * from $mt where c1 like 1
sql create table wh_mt1 (ts timestamp, c1 smallint, c2 int, c3 bigint, c4 float, c5 double, c6 tinyint, c7 binary(10), c8 nchar(10), c9 bool, c10 timestamp) tags (t1 binary(10), t2 smallint, t3 int, t4 bigint, t5 float, t6 double)
sql create table wh_mt1_tb1 using wh_mt1 tags ('tb11', 1, 1, 1, 1, 1)
sql insert into wh_mt1_tb1 values (now, 1, 1, 1, 1, 1, 1, 'binary', 'nchar', true, '2019-01-01 00:00:00.000')
-sql_error select last(*) from wh_mt1 where c1 in ('1')
-sql_error select last(*) from wh_mt1_tb1 where c1 in ('1')
-sql_error select last(*) from wh_mt1 where c2 in ('1')
-sql_error select last(*) from wh_mt1_tb1 where c2 in ('1')
-sql_error select last(*) from wh_mt1 where c3 in ('1')
-sql_error select last(*) from wh_mt1_tb1 where c3 in ('1')
-sql_error select last(*) from wh_mt1 where c4 in ('1')
-sql_error select last(*) from wh_mt1_tb1 where c4 in ('1')
-sql_error select last(*) from wh_mt1 where c5 in ('1')
-sql_error select last(*) from wh_mt1_tb1 where c5 in ('1')
-sql_error select last(*) from wh_mt1 where c6 in ('1')
-sql_error select last(*) from wh_mt1_tb1 where c6 in ('1')
+#sql_error select last(*) from wh_mt1 where c1 in ('1')
+#sql_error select last(*) from wh_mt1_tb1 where c1 in ('1')
+#sql_error select last(*) from wh_mt1 where c2 in ('1')
+#sql_error select last(*) from wh_mt1_tb1 where c2 in ('1')
+#sql_error select last(*) from wh_mt1 where c3 in ('1')
+#sql_error select last(*) from wh_mt1_tb1 where c3 in ('1')
+#sql_error select last(*) from wh_mt1 where c4 in ('1')
+#sql_error select last(*) from wh_mt1_tb1 where c4 in ('1')
+#sql_error select last(*) from wh_mt1 where c5 in ('1')
+#sql_error select last(*) from wh_mt1_tb1 where c5 in ('1')
+#sql_error select last(*) from wh_mt1 where c6 in ('1')
+#sql_error select last(*) from wh_mt1_tb1 where c6 in ('1')
#sql_error select last(*) from wh_mt1 where c7 in ('binary')
#sql_error select last(*) from wh_mt1_tb1 where c7 in ('binary')
#sql_error select last(*) from wh_mt1 where c8 in ('nchar')
@@ -340,5 +340,18 @@ if $rows != 0 then
return -1
endi
-
+print ==========================> td-4783
+sql create table where_ts(ts timestamp, f int)
+sql insert into where_ts values('2021-06-19 16:22:00', 1);
+sql insert into where_ts values('2021-06-19 16:23:00', 2);
+sql insert into where_ts values('2021-06-19 16:24:00', 3);
+sql insert into where_ts values('2021-06-19 16:25:00', 1);
+sql select * from (select * from where_ts) where ts<'2021-06-19 16:25:00' and ts>'2021-06-19 16:22:00'
+if $row != 2 then
+ return -1
+endi
+print $data00, $data01
+if $data01 != 2 then
+ return -1
+endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/tsim/src/simMain.c b/tests/tsim/src/simMain.c
index 6a9d96bc3b..7d74c62c7d 100644
--- a/tests/tsim/src/simMain.c
+++ b/tests/tsim/src/simMain.c
@@ -35,7 +35,7 @@ int32_t main(int32_t argc, char *argv[]) {
for (int32_t i = 1; i < argc; ++i) {
if (strcmp(argv[i], "-c") == 0 && i < argc - 1) {
- tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN);
+ tstrncpy(configDir, argv[++i], 128);
} else if (strcmp(argv[i], "-f") == 0 && i < argc - 1) {
strcpy(scriptFile, argv[++i]);
} else if (strcmp(argv[i], "-a") == 0) {
@@ -75,4 +75,4 @@ int32_t main(int32_t argc, char *argv[]) {
simInfo("execute result %d", ret);
return ret;
-}
\ No newline at end of file
+}