diff --git a/CMakeLists.txt b/CMakeLists.txt
index b572d7bd16..553da9245b 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -36,7 +36,7 @@ ENDIF ()
IF (NOT DEFINED TD_CLUSTER)
MESSAGE(STATUS "Build the Lite Version")
SET(TD_CLUSTER FALSE)
- SET(TD_LITE TRUE)
+ SET(TD_EDGE TRUE)
SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
MESSAGE(STATUS "Community directory: " ${TD_COMMUNITY_DIR})
@@ -53,12 +53,18 @@ IF (NOT DEFINED TD_CLUSTER)
SET(TD_MIPS_32 FALSE)
SET(TD_DARWIN_64 FALSE)
SET(TD_WINDOWS_64 FALSE)
+ SET(TD_PAGMODE_LITE FALSE)
+
+ IF (${PAGMODE} MATCHES "lite")
+ SET(TD_PAGMODE_LITE TRUE)
+ ENDIF ()
# if generate ARM version:
# cmake -DCPUTYPE=aarch32 .. or cmake -DCPUTYPE=aarch64
IF (${CPUTYPE} MATCHES "aarch32")
SET(TD_ARM TRUE)
SET(TD_ARM_32 TRUE)
+ SET(TD_PAGMODE_LITE TRUE)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-D_TD_ARM_32_)
ELSEIF (${CPUTYPE} MATCHES "aarch64")
diff --git a/README.md b/README.md
index 299dd0190d..8d60a892e0 100644
--- a/README.md
+++ b/README.md
@@ -45,13 +45,15 @@ mkdir build && cd build
cmake .. && cmake --build .
```
-if compiling on an ARM processor(aarch64 or aarch32), you need add one parameter:
+To compile on an ARM processor (aarch64 or aarch32), please add the CPUTYPE option as below:
-```cmd
aarch64:
+```cmd
cmake .. -DCPUTYPE=aarch64 && cmake --build .
+```
aarch32:
+```cmd
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```
@@ -128,3 +130,8 @@ The TDengine community has also kindly built some of their own connectors! Follo
# Contribute to TDengine
Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to the project.
+
+# Join TDengine WeChat Group
+
+Add WeChat account “tdengine” to join the group, where you can communicate with other users.
+
diff --git a/documentation/tdenginedocs-cn/super-table/index.html b/documentation/tdenginedocs-cn/super-table/index.html
index 828a69bb0c..42d54ce7e2 100644
--- a/documentation/tdenginedocs-cn/super-table/index.html
+++ b/documentation/tdenginedocs-cn/super-table/index.html
@@ -32,7 +32,7 @@ tags (location binary(20), type int)
查看数据库内全部STable,及其相关信息,包括STable的名称、创建时间、列数量、标签(TAG)数量、通过该STable建表的数量。
删除超级表
DROP TABLE <stable_name>
-Note: 删除STable不会级联删除通过STable创建的表;相反删除STable时要求通过该STable创建的表都已经被删除。
+Note: 删除STable时,所有通过该STable创建的表都将被删除。
查看属于某STable并满足查询条件的表
SELECT TBNAME,[TAG_NAME,…] FROM <stable_name> WHERE <tag_name> <[=|=<|>=|<>] values..> ([AND|OR] …)
查看属于某STable并满足查询条件的表。说明:TBNAME为关键词,显示通过STable建立的子表表名,查询过程中可以使用针对标签的条件。
diff --git a/documentation/tdenginedocs-en/super-table/index.html b/documentation/tdenginedocs-en/super-table/index.html
index 21e7669a19..0e47a7bb9b 100644
--- a/documentation/tdenginedocs-en/super-table/index.html
+++ b/documentation/tdenginedocs-en/super-table/index.html
@@ -73,7 +73,7 @@ INTERVAL(10M)
It lists the STable's schema and tags
Drop a STable
DROP TABLE <stable_name>
-To delete a STable, all the tables created via this STable shall be deleted first, otherwise, it will fail.
+To delete a STable, all the tables created via this STable will be deleted.
List the Associated Tables of a STable
SELECT TBNAME,[TAG_NAME, ...] FROM <stable_name> WHERE <tag_name> <[=|=<|>=|<>] values..> ([AND|OR] ...)
It will list all the tables which satisfy the tag filter conditions. The tables are all created from this specific STable. TBNAME is a new keyword introduced, it is the table name associated with the STable.
diff --git a/documentation/webdocs/markdowndocs/Super Table-ch.md b/documentation/webdocs/markdowndocs/Super Table-ch.md
index e75a8d46c3..38e6f8c17f 100644
--- a/documentation/webdocs/markdowndocs/Super Table-ch.md
+++ b/documentation/webdocs/markdowndocs/Super Table-ch.md
@@ -1,6 +1,6 @@
# 超级表STable:多表聚合
-TDengine要求每个数据采集点单独建表,这样能极大提高数据的插入/查询性能,但是导致系统中表的数量猛增,让应用对表的维护以及聚合、统计操作难度加大。为降低应用的开发难度,TDengine引入了超级表STable (Super Table)的概念。
+TDengine要求每个数据采集点单独建表。独立建表的模式能够避免写入过程中的同步加锁,因此能够极大地提升数据的插入/查询性能。但是独立建表意味着系统中表的数量与采集点的数量在同一个量级。如果采集点众多,将导致系统中表的数量也非常庞大,让应用对表的维护以及聚合、统计操作难度加大。为降低应用的开发难度,TDengine引入了超级表(Super Table, 简称为STable)的概念。
## 什么是超级表
@@ -9,14 +9,14 @@ STable是同一类型数据采集点的抽象,是同类型采集实例的集
TDengine扩展标准SQL语法用于定义STable,使用关键词tags指定标签信息。语法如下:
```mysql
-CREATE TABLE ( TIMESTAMP, field_name1 field_type,…) TAGS(tag_name tag_type, …)
+CREATE TABLE ( TIMESTAMP, field_name1 field_type,…) TAGS(tag_name tag_type, …)
```
-其中tag_name是标签名,tag_type是标签的数据类型。标签可以使用时间戳之外的其他TDengine支持的数据类型,标签的个数最多为6个,名字不能与系统关键词相同,也不能与其他列名相同。如:
+其中tag_name是标签名,tag_type是标签的数据类型。标签可以使用时间戳之外的其他TDengine支持的数据类型,标签的个数最多为32个,名字不能与系统关键词相同,也不能与其他列名相同。如:
```mysql
-create table thermometer (ts timestamp, degree float)
-tags (location binary(20), type int)
+CREATE TABLE thermometer (ts timestamp, degree float)
+TAGS (location binary(20), type int)
```
上述SQL创建了一个名为thermometer的STable,带有标签location和标签type。
@@ -30,7 +30,7 @@ CREATE TABLE USING TAGS (tag_value1,...)
沿用上面温度计的例子,使用超级表thermometer建立单个温度计数据表的语句如下:
```mysql
-create table t1 using thermometer tags ('beijing', 10)
+CREATE TABLE t1 USING thermometer TAGS ('beijing', 10)
```
上述SQL以thermometer为模板,创建了名为t1的表,这张表的Schema就是thermometer的Schema,但标签location值为'beijing',标签type值为10。
@@ -72,7 +72,7 @@ STable从属于库,一个STable只属于一个库,但一个库可以有一
DROP TABLE
```
- Note: 删除STable不会级联删除通过STable创建的表;相反删除STable时要求通过该STable创建的表都已经被删除。
+ Note: 删除STable时,所有通过该STable创建的表都将被删除。
- 查看属于某STable并满足查询条件的表
diff --git a/documentation/webdocs/markdowndocs/Super Table.md b/documentation/webdocs/markdowndocs/Super Table.md
index 609dd11bd2..efc95c5f79 100644
--- a/documentation/webdocs/markdowndocs/Super Table.md
+++ b/documentation/webdocs/markdowndocs/Super Table.md
@@ -142,7 +142,7 @@ It lists the STable's schema and tags
DROP TABLE
```
-To delete a STable, all the tables created via this STable shall be deleted first, otherwise, it will fail.
+To delete a STable, all the tables created via this STable will be deleted.
### List the Associated Tables of a STable
diff --git a/documentation/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation/webdocs/markdowndocs/TAOS SQL-ch.md
index 1e9383c40c..cd184cbc71 100644
--- a/documentation/webdocs/markdowndocs/TAOS SQL-ch.md
+++ b/documentation/webdocs/markdowndocs/TAOS SQL-ch.md
@@ -191,9 +191,10 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
```
同时向表tb1_name和tb2_name中按列分别插入多条记录
-注意:对同一张表,插入的新记录的时间戳必须递增,否则会跳过插入该条记录。如果时间戳为0,系统将自动使用服务器当前时间作为该记录的时间戳。
+注意:1、对同一张表,插入的新记录的时间戳必须递增,否则会跳过插入该条记录。如果时间戳为0,系统将自动使用服务器当前时间作为该记录的时间戳。
+ 2、允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的keep值(数据保留的天数),允许插入的最新记录的时间戳,是相对于当前服务器时间,加上配置的days值(数据文件存储数据的时间跨度,单位为天)。keep和days都是可以在创建数据库时指定的,缺省值分别是3650天和10天。
-**IMPORT**:如果需要将时间戳小于最后一条记录时间的记录写入到数据库中,可使用IMPORT替代INSERT命令,IMPORT的语法与INSERT完全一样。如果同时IMPORT多条记录,需要保证一批记录是按时间戳排序好的。
+**IMPORT**:如果需要将时间戳小于最后一条记录时间的记录写入到数据库中,可使用IMPORT替代INSERT命令,IMPORT的语法与INSERT完全一样。
## 数据查询
diff --git a/documentation/webdocs/markdowndocs/TAOS SQL.md b/documentation/webdocs/markdowndocs/TAOS SQL.md
index 870529417f..99aa73b435 100644
--- a/documentation/webdocs/markdowndocs/TAOS SQL.md
+++ b/documentation/webdocs/markdowndocs/TAOS SQL.md
@@ -181,9 +181,10 @@ All the keywords in a SQL statement are case-insensitive, but strings values are
tb2_name (tb2_field1_name, ...) VALUES(field1_value1, ...) (field1_value2, ...)
```
-Note: For a table, the new record must have a timestamp bigger than the last data record, otherwise, it will be discarded and not inserted. If the timestamp is 0, the time stamp will be set to the system time on the server.
-
-**IMPORT**: If you do want to insert a historical data record into a table, use IMPORT command instead of INSERT. IMPORT has the same syntax as INSERT. If you want to import a batch of historical records, the records must be ordered by the timestamp, otherwise, TDengine won't handle it in the right way.
+Note: 1. For a table, the new record must have a timestamp bigger than the last data record, otherwise, it will be discarded and not inserted. If the timestamp is 0, the time stamp will be set to the system time on the server.
+      2. The timestamp of the oldest record allowed to be inserted is relative to the current server time, minus the configured keep value (the number of days the data is retained), and the timestamp of the latest record allowed to be inserted is relative to the current server time, plus the configured days value (the time span in which the data file stores data, in days). Both keep and days can be specified when creating the database. The default values are 3650 days and 10 days, respectively.
+
+**IMPORT**: If you do want to insert a historical data record into a table, use IMPORT command instead of INSERT. IMPORT has the same syntax as INSERT.
## Data Query
diff --git a/documentation/webdocs/markdowndocs/advanced features-ch.md b/documentation/webdocs/markdowndocs/advanced features-ch.md
index 4d01eaf364..9dc289a8d5 100644
--- a/documentation/webdocs/markdowndocs/advanced features-ch.md
+++ b/documentation/webdocs/markdowndocs/advanced features-ch.md
@@ -67,7 +67,7 @@ TDengine内嵌支持轻量级的消息订阅与推送服务。使用系统提供
TDengine的订阅与推送服务的状态是客户端维持,TDengine服务器并不维持。因此如果应用重启,从哪个时间点开始获取最新数据,由应用决定。
-订阅相关API请见 [连接器](https://www.taosdata.com/cn/documentation/connector/)。
+订阅相关API文档请见 [C/C++ 数据订阅接口](https://www.taosdata.com/cn/documentation/connector/#C/C++-%E6%95%B0%E6%8D%AE%E8%AE%A2%E9%98%85%E6%8E%A5%E5%8F%A3),《[TDEngine中订阅的用途和用法](https://www.taosdata.com/blog/2020/02/12/1277.html)》则以一个示例详细介绍了这些API的用法。
## 缓存 (Cache)
TDengine采用时间驱动缓存管理策略(First-In-First-Out,FIFO),又称为写驱动的缓存管理机制。这种策略有别于读驱动的数据缓存模式(Least-Recent-Use,LRU),直接将最近写入的数据保存在系统的缓存中。当缓存达到临界值的时候,将最早的数据批量写入磁盘。一般意义上来说,对于物联网数据的使用,用户最为关心最近产生的数据,即当前状态。TDengine充分利用了这一特性,将最近到达的(当前状态)数据保存在缓存中。
@@ -76,7 +76,7 @@ TDengine通过查询函数向用户提供毫秒级的数据获取能力。直接
TDengine分配固定大小的内存空间作为缓存空间,缓存空间可根据应用的需求和硬件资源配置。通过适当的设置缓存空间,TDengine可以提供极高性能的写入和查询的支持。TDengine中每个虚拟节点(virtual node)创建时分配独立的缓存池。每个虚拟节点管理自己的缓存池,不同虚拟节点间不共享缓存池。每个虚拟节点内部所属的全部表共享该虚拟节点的缓存池。
-TDengine将内存池按块划分进行管理,数据在内存块里按照列式存储。一个vnode的内存池是在vnode创建时按块分配好的,而且每个内存块按照先进先出的原则进行管理。一张表所需要的内存块是从vnode的内存池中进行分配的,块的大小由系统配置参数cache决定。每张表最大内存块的数目由配置参数tblocks决定,每张表平均的内存块的个数由配置参数ablocks决定。因此对于一个vnode, 总的内存大小为: cache*ablocks*tables。内存块参数cache不宜过小,一个cache block需要能存储至少几十条以上记录,才会有效率。参数ablocks最小为2,保证每张表平均至少能分配两个内存块。
+TDengine将内存池按块划分进行管理,数据在内存块里按照列式存储。一个vnode的内存池是在vnode创建时按块分配好的,而且每个内存块按照先进先出的原则进行管理。一张表所需要的内存块是从vnode的内存池中进行分配的,块的大小由系统配置参数cache决定。每张表最大内存块的数目由配置参数tblocks决定,每张表平均的内存块的个数由配置参数ablocks决定。因此对于一个vnode, 总的内存大小为: `cache * ablocks * tables`。内存块参数cache不宜过小,一个cache block需要能存储至少几十条以上记录,才会有效率。参数ablocks最小为2,保证每张表平均至少能分配两个内存块。
你可以通过函数last_row快速获取一张表或一张超级表的最后一条记录,这样很便于在大屏显示各设备的实时状态或采集值。例如:
diff --git a/documentation/webdocs/markdowndocs/advanced features.md b/documentation/webdocs/markdowndocs/advanced features.md
index e841a5a6a5..3eae454da5 100644
--- a/documentation/webdocs/markdowndocs/advanced features.md
+++ b/documentation/webdocs/markdowndocs/advanced features.md
@@ -62,7 +62,7 @@ Time series data is a sequence of data points over time. Inside a table, the dat
To reduce the development complexity and improve data consistency, TDengine provides the pub/sub functionality. To publish a message, you simply insert a record into a table. Compared with popular messaging tool Kafka, you subscribe to a table or a SQL query statement, instead of a topic. Once new data points arrive, TDengine will notify the application. The process is just like Kafka.
-The detailed API will be introduced in the [connectors](https://www.taosdata.com/en/documentation/connector/) section.
+The API documentation is at the [C/C++ subscription API](https://www.taosdata.com/en/documentation/connector/#C/C++-subscription-API) section, and you can find more information in the blog article (Chinese only at present) [The usage of subscription](https://www.taosdata.com/blog/2020/02/12/1277.html).
##Caching
TDengine allocates a fixed-size buffer in memory, the newly arrived data will be written into the buffer first. Every device or table gets one or more memory blocks. For typical IoT scenarios, the hot data shall always be newly arrived data, they are more important for timely analysis. Based on this observation, TDengine manages the cache blocks in First-In-First-Out strategy. If no enough space in the buffer, the oldest data will be saved into hard disk first, then be overwritten by newly arrived data. TDengine also guarantees every device can keep at least one block of data in the buffer.
diff --git a/minidevops/README.MD b/minidevops/README.MD
index f9ec4a8f19..9937ad04ad 100644
--- a/minidevops/README.MD
+++ b/minidevops/README.MD
@@ -7,8 +7,10 @@
- grafana/grafana Grafana的镜像,一个广泛应用的开源可视化监控软件
- telegraf:latest 一个广泛应用的开源数据采集程序
- prom/prometheus:latest 一个广泛应用的k8s领域的开源数据采集程序
+
## 说明
本文中的图片链接在Github上显示不出来,建议将MD文件下载后用vscode或其他md文件浏览工具进行查看
+
## 前提条件
1. 一台linux服务器或运行linux操作系统的虚拟机或者运行MacOS的计算机
2. 安装了Docker软件。Docker软件的安装方法请参考linux下安装Docker
diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh
index faa29aeac8..55fbd96d3f 100755
--- a/packaging/deb/makedeb.sh
+++ b/packaging/deb/makedeb.sh
@@ -72,10 +72,10 @@ sed -i "2c$debver" ${pkg_dir}/DEBIAN/control
if [ "$verMode" == "cluster" ]; then
debname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
-elif [ "$verMode" == "lite" ]; then
+elif [ "$verMode" == "edge" ]; then
debname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType}
else
- echo "unknow verMode, nor cluster or lite"
+ echo "unknown verMode, neither cluster nor edge"
exit 1
fi
diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile
index b01f375db0..22c1bc1902 100644
--- a/packaging/docker/Dockerfile
+++ b/packaging/docker/Dockerfile
@@ -4,7 +4,7 @@ WORKDIR /root
COPY tdengine.tar.gz /root/
RUN tar -zxf tdengine.tar.gz
-WORKDIR /root/tdengine/
+WORKDIR /root/TDengine-server/
RUN sh install.sh
diff --git a/packaging/docker/dockerbuild.sh b/packaging/docker/dockerbuild.sh
index 280c27d7aa..aeea6a2a95 100755
--- a/packaging/docker/dockerbuild.sh
+++ b/packaging/docker/dockerbuild.sh
@@ -1,12 +1,6 @@
#!/bin/bash
set -x
$1
-tar -zxf $1
-DIR=`echo $1|awk -F . '{print($1"."$2"."$3"."$4)}'`
-mv $DIR tdengine
-tar -czf tdengine.tar.gz tdengine
-TMP=`echo $1|awk -F . '{print($2"."$3"."$4)}'`
-TAG="1."$TMP
-docker build --rm -f "Dockerfile" -t tdengine/tdengine:$TAG "."
+docker build --rm -f "Dockerfile" -t tdengine/tdengine:$1 "."
docker login -u tdengine -p ******** #replace the docker registry username and password
-docker push tdengine/tdengine:$TAG
\ No newline at end of file
+docker push tdengine/tdengine:$1
\ No newline at end of file
diff --git a/packaging/release.sh b/packaging/release.sh
index 378a7fe203..a4562d21d2 100755
--- a/packaging/release.sh
+++ b/packaging/release.sh
@@ -5,15 +5,20 @@
set -e
#set -x
-# releash.sh -v [cluster | lite] -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] -V [stable | beta]
+# releash.sh -v [cluster | edge]
+# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...]
+# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...]
+# -V [stable | beta]
+# -l [full | lite]
# set parameters by default value
-verMode=lite # [cluster, lite]
+verMode=edge # [cluster, edge]
verType=stable # [stable, beta]
cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...]
+pagMode=full # [full | lite]
-while getopts "hv:V:c:o:" arg
+while getopts "hv:V:c:o:l:" arg
do
case $arg in
v)
@@ -28,12 +33,16 @@ do
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
+ l)
+ #echo "pagMode=$OPTARG"
+ pagMode=$(echo $OPTARG)
+ ;;
o)
#echo "osType=$OPTARG"
osType=$(echo $OPTARG)
;;
h)
- echo "Usage: `basename $0` -v [cluster | lite] -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] -V [stable | beta]"
+ echo "Usage: `basename $0` -v [cluster | edge] -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] -V [stable | beta] -l [full | lite]"
exit 0
;;
?) #unknow option
@@ -43,7 +52,7 @@ do
esac
done
-echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType}"
+echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode}"
curr_dir=$(pwd)
@@ -193,9 +202,9 @@ cd ${compile_dir}
# check support cpu type
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then
if [ "$verMode" != "cluster" ]; then
- cmake ../ -DCPUTYPE=${cpuType}
+ cmake ../ -DCPUTYPE=${cpuType} -DPAGMODE=${pagMode}
else
- cmake ../../ -DCPUTYPE=${cpuType}
+ cmake ../../ -DCPUTYPE=${cpuType}
fi
else
echo "input cpuType=${cpuType} error!!!"
@@ -235,8 +244,8 @@ if [ "$osType" != "Darwin" ]; then
echo "====do tar.gz package for all systems===="
cd ${script_dir}/tools
- ${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType}
- ${csudo} ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType}
+ ${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
+ ${csudo} ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
else
cd ${script_dir}/tools
./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType}
diff --git a/packaging/rpm/makerpm.sh b/packaging/rpm/makerpm.sh
index cc31a6b8d4..678e75c500 100755
--- a/packaging/rpm/makerpm.sh
+++ b/packaging/rpm/makerpm.sh
@@ -65,10 +65,10 @@ cp_rpm_package ${pkg_dir}/RPMS
if [ "$verMode" == "cluster" ]; then
rpmname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
-elif [ "$verMode" == "lite" ]; then
+elif [ "$verMode" == "edge" ]; then
rpmname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType}
else
- echo "unknow verMode, nor cluster or lite"
+ echo "unknown verMode, neither cluster nor edge"
exit 1
fi
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index beca20e68d..c573a2086f 100644
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -6,7 +6,8 @@
set -e
#set -x
-verMode=lite
+verMode=edge
+pagMode=full
# -----------------------Variables definition---------------------
script_dir=$(dirname $(readlink -f "$0"))
@@ -479,7 +480,9 @@ function update_TDengine() {
install_log
install_header
install_lib
- install_connector
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
install_examples
if [ -z $1 ]; then
install_bin
@@ -554,7 +557,9 @@ function install_TDengine() {
install_log
install_header
install_lib
- install_connector
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
install_examples
if [ -z $1 ]; then # install service and client
diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh
index c5ecf5e5b9..605944e9b3 100755
--- a/packaging/tools/install_client.sh
+++ b/packaging/tools/install_client.sh
@@ -9,6 +9,7 @@ set -e
# -----------------------Variables definition---------------------
osType=Linux
+pagMode=full
if [ "$osType" != "Darwin" ]; then
script_dir=$(dirname $(readlink -f "$0"))
@@ -180,7 +181,9 @@ function update_TDengine() {
install_log
install_header
install_lib
- install_connector
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
install_examples
install_bin
install_config
@@ -205,7 +208,9 @@ function install_TDengine() {
install_log
install_header
install_lib
- install_connector
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
install_examples
install_bin
install_config
diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh
index 58ce6183c1..6120f9fcc2 100755
--- a/packaging/tools/makeclient.sh
+++ b/packaging/tools/makeclient.sh
@@ -12,6 +12,7 @@ cpuType=$4
osType=$5
verMode=$6
verType=$7
+pagMode=$8
if [ "$osType" != "Darwin" ]; then
script_dir="$(dirname $(readlink -f $0))"
@@ -39,11 +40,17 @@ fi
# Directories and files.
if [ "$osType" != "Darwin" ]; then
- bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh"
- lib_files="${build_dir}/lib/libtaos.so.${version}"
-else
+ if [ "$pagMode" == "lite" ]; then
+ strip ${build_dir}/bin/taosd
+ strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
- lib_files="${build_dir}/lib/libtaos.${version}.dylib"
+ else
+ bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh"
+ fi
+ lib_files="${build_dir}/lib/libtaos.so.${version}"
+else
+ bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
+ lib_files="${build_dir}/lib/libtaos.${version}.dylib"
fi
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
@@ -74,18 +81,23 @@ if [ "$osType" == "Darwin" ]; then
sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client.sh >> install_client_temp.sh
mv install_client_temp.sh ${install_dir}/install_client.sh
fi
+if [ "$pagMode" == "lite" ]; then
+ sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client.sh >> install_client_temp.sh
+ mv install_client_temp.sh ${install_dir}/install_client.sh
+fi
chmod a+x ${install_dir}/install_client.sh
# Copy example code
mkdir -p ${install_dir}/examples
examples_dir="${top_dir}/tests/examples"
cp -r ${examples_dir}/c ${install_dir}/examples
-cp -r ${examples_dir}/JDBC ${install_dir}/examples
-cp -r ${examples_dir}/matlab ${install_dir}/examples
-cp -r ${examples_dir}/python ${install_dir}/examples
-cp -r ${examples_dir}/R ${install_dir}/examples
-cp -r ${examples_dir}/go ${install_dir}/examples
-
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ cp -r ${examples_dir}/JDBC ${install_dir}/examples
+ cp -r ${examples_dir}/matlab ${install_dir}/examples
+ cp -r ${examples_dir}/python ${install_dir}/examples
+ cp -r ${examples_dir}/R ${install_dir}/examples
+ cp -r ${examples_dir}/go ${install_dir}/examples
+fi
# Copy driver
mkdir -p ${install_dir}/driver
cp ${lib_files} ${install_dir}/driver
@@ -94,13 +106,14 @@ cp ${lib_files} ${install_dir}/driver
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
-if [ "$osType" != "Darwin" ]; then
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
+ fi
+ cp -r ${connector_dir}/grafana ${install_dir}/connector/
+ cp -r ${connector_dir}/python ${install_dir}/connector/
+ cp -r ${connector_dir}/go ${install_dir}/connector
fi
-cp -r ${connector_dir}/grafana ${install_dir}/connector/
-cp -r ${connector_dir}/python ${install_dir}/connector/
-cp -r ${connector_dir}/go ${install_dir}/connector
-
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
@@ -110,13 +123,17 @@ cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
-elif [ "$verMode" == "lite" ]; then
+elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
else
- echo "unknow verMode, nor cluster or lite"
+ echo "unknown verMode, neither cluster nor edge"
exit 1
fi
+if [ "$pagMode" == "lite" ]; then
+ pkg_name=${pkg_name}-Lite
+fi
+
if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType}
elif [ "$verType" == "stable" ]; then
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh
index 1b095a4c76..d39cf41843 100755
--- a/packaging/tools/makepkg.sh
+++ b/packaging/tools/makepkg.sh
@@ -13,6 +13,7 @@ cpuType=$4
osType=$5
verMode=$6
verType=$7
+pagMode=$8
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
@@ -30,7 +31,14 @@ else
fi
# Directories and files.
-bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdump ${script_dir}/remove.sh"
+if [ "$pagMode" == "lite" ]; then
+ strip ${build_dir}/bin/taosd
+ strip ${build_dir}/bin/taos
+ bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
+else
+ bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdump ${script_dir}/remove.sh"
+fi
+
lib_files="${build_dir}/lib/libtaos.so.${version}"
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
cfg_dir="${top_dir}/packaging/cfg"
@@ -56,6 +64,9 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/taos
mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taosd.rpm
if [ "$verMode" == "cluster" ]; then
+ sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >> remove_temp.sh
+ mv remove_temp.sh ${install_dir}/bin/remove.sh
+
mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
rm -rf ${install_dir}/nginxd/png
@@ -70,25 +81,35 @@ fi
cd ${install_dir}
tar -zcv -f taos.tar.gz * --remove-files || :
+exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar taos.tar.gz error !!!"
+ exit $exitcode
+fi
cd ${curr_dir}
cp ${install_files} ${install_dir}
if [ "$verMode" == "cluster" ]; then
- sed 's/verMode=lite/verMode=cluster/g' ${install_dir}/install.sh >> install_temp.sh
- mv install_temp.sh ${install_dir}/install.sh
+ sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install.sh >> install_temp.sh
+ mv install_temp.sh ${install_dir}/install.sh
+fi
+if [ "$pagMode" == "lite" ]; then
+ sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install.sh >> install_temp.sh
+ mv install_temp.sh ${install_dir}/install.sh
fi
chmod a+x ${install_dir}/install.sh
# Copy example code
mkdir -p ${install_dir}/examples
examples_dir="${top_dir}/tests/examples"
-cp -r ${examples_dir}/c ${install_dir}/examples
-cp -r ${examples_dir}/JDBC ${install_dir}/examples
-cp -r ${examples_dir}/matlab ${install_dir}/examples
-cp -r ${examples_dir}/python ${install_dir}/examples
-cp -r ${examples_dir}/R ${install_dir}/examples
-cp -r ${examples_dir}/go ${install_dir}/examples
-
+cp -r ${examples_dir}/c ${install_dir}/examples
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ cp -r ${examples_dir}/JDBC ${install_dir}/examples
+ cp -r ${examples_dir}/matlab ${install_dir}/examples
+ cp -r ${examples_dir}/python ${install_dir}/examples
+ cp -r ${examples_dir}/R ${install_dir}/examples
+ cp -r ${examples_dir}/go ${install_dir}/examples
+fi
# Copy driver
mkdir -p ${install_dir}/driver
cp ${lib_files} ${install_dir}/driver
@@ -96,11 +117,12 @@ cp ${lib_files} ${install_dir}/driver
# Copy connector
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
-cp ${build_dir}/lib/*.jar ${install_dir}/connector
-cp -r ${connector_dir}/grafana ${install_dir}/connector/
-cp -r ${connector_dir}/python ${install_dir}/connector/
-cp -r ${connector_dir}/go ${install_dir}/connector
-
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ cp ${build_dir}/lib/*.jar ${install_dir}/connector
+ cp -r ${connector_dir}/grafana ${install_dir}/connector/
+ cp -r ${connector_dir}/python ${install_dir}/connector/
+ cp -r ${connector_dir}/go ${install_dir}/connector
+fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
@@ -110,13 +132,17 @@ cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
-elif [ "$verMode" == "lite" ]; then
+elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
else
- echo "unknow verMode, nor cluster or lite"
+ echo "unknown verMode, neither cluster nor edge"
exit 1
fi
+if [ "$pagMode" == "lite" ]; then
+ pkg_name=${pkg_name}-Lite
+fi
+
if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType}
elif [ "$verType" == "stable" ]; then
@@ -127,5 +153,10 @@ else
fi
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
+exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar ${pkg_name}.tar.gz error !!!"
+ exit $exitcode
+fi
cd ${curr_dir}
diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh
index 81507e1aa9..28cc835f30 100755
--- a/packaging/tools/remove.sh
+++ b/packaging/tools/remove.sh
@@ -5,7 +5,7 @@
set -e
#set -x
-verMode=lite
+verMode=edge
RED='\033[0;31m'
GREEN='\033[1;32m'
diff --git a/src/client/src/taos.def b/src/client/src/taos.def
index f6de4e8665..39906c7486 100644
--- a/src/client/src/taos.def
+++ b/src/client/src/taos.def
@@ -24,8 +24,6 @@ taos_fetch_row_a
taos_subscribe
taos_consume
taos_unsubscribe
-taos_subfields_count
-taos_fetch_subfields
taos_open_stream
taos_close_stream
taos_fetch_block
diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c
index c6d4b8ff23..f202818578 100644
--- a/src/client/src/tscFunctionImpl.c
+++ b/src/client/src/tscFunctionImpl.c
@@ -138,6 +138,19 @@ typedef struct STSCompInfo {
STSBuf *pTSBuf;
} STSCompInfo;
+typedef struct SRateInfo {
+ int64_t CorrectionValue;
+ int64_t firstValue;
+ TSKEY firstKey;
+ int64_t lastValue;
+ TSKEY lastKey;
+ int8_t hasResult; // flag to denote has value
+ bool isIRate; // true for IRate functions, false for Rate functions
+ int64_t num; // for sum/avg
+ double sum; // for sum/avg
+} SRateInfo;
+
+
int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type,
int16_t *bytes, int16_t *intermediateResBytes, int16_t extLength, bool isSuperTable) {
if (!isValidDataType(dataType, dataBytes)) {
@@ -192,7 +205,12 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
*type = TSDB_DATA_TYPE_BINARY;
*bytes = sizeof(SAvgInfo);
*intermediateResBytes = *bytes;
+ return TSDB_CODE_SUCCESS;
+ } else if (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE) {
+ *type = TSDB_DATA_TYPE_DOUBLE;
+ *bytes = sizeof(SRateInfo);
+ *intermediateResBytes = sizeof(SRateInfo);
return TSDB_CODE_SUCCESS;
} else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
*type = TSDB_DATA_TYPE_BINARY;
@@ -253,6 +271,10 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
*type = TSDB_DATA_TYPE_DOUBLE;
*bytes = sizeof(double);
*intermediateResBytes = sizeof(SAvgInfo);
+ } else if (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE) {
+ *type = TSDB_DATA_TYPE_DOUBLE;
+ *bytes = sizeof(double);
+ *intermediateResBytes = sizeof(SRateInfo);
} else if (functionId == TSDB_FUNC_STDDEV) {
*type = TSDB_DATA_TYPE_DOUBLE;
*bytes = sizeof(double);
@@ -4360,6 +4382,462 @@ static void ts_comp_finalize(SQLFunctionCtx *pCtx) {
doFinalizer(pCtx);
}
+//////////////////////////////////////////////////////////////////////////////////////////////
+// RATE functions
+
+static double do_calc_rate(const SRateInfo* pRateInfo) {
+ if ((INT64_MIN == pRateInfo->lastKey) || (INT64_MIN == pRateInfo->firstKey) || (pRateInfo->firstKey >= pRateInfo->lastKey)) {
+ return 0;
+ }
+
+ int64_t diff = 0;
+
+ if (pRateInfo->isIRate) {
+ diff = pRateInfo->lastValue;
+ if (diff >= pRateInfo->firstValue) {
+ diff -= pRateInfo->firstValue;
+ }
+ } else {
+ diff = pRateInfo->CorrectionValue + pRateInfo->lastValue - pRateInfo->firstValue;
+ if (diff <= 0) {
+ return 0;
+ }
+ }
+
+ int64_t duration = pRateInfo->lastKey - pRateInfo->firstKey;
+ duration = (duration + 500) / 1000;
+
+ double resultVal = ((double)diff) / duration;
+
+ pTrace("do_calc_rate() isIRate:%d firstKey:%" PRId64 " lastKey:%" PRId64 " firstValue:%" PRId64 " lastValue:%" PRId64 " CorrectionValue:%" PRId64 " resultVal:%f",
+ pRateInfo->isIRate, pRateInfo->firstKey, pRateInfo->lastKey, pRateInfo->firstValue, pRateInfo->lastValue, pRateInfo->CorrectionValue, resultVal);
+
+ return resultVal;
+}
+
+
+static bool rate_function_setup(SQLFunctionCtx *pCtx) {
+ if (!function_setup(pCtx)) {
+ return false;
+ }
+
+ SResultInfo *pResInfo = GET_RES_INFO(pCtx); //->aOutputBuf + pCtx->outputBytes;
+ SRateInfo * pInfo = pResInfo->interResultBuf;
+
+ pInfo->CorrectionValue = 0;
+ pInfo->firstKey = INT64_MIN;
+ pInfo->lastKey = INT64_MIN;
+ pInfo->firstValue = INT64_MIN;
+ pInfo->lastValue = INT64_MIN;
+ pInfo->num = 0;
+ pInfo->sum = 0;
+
+ pInfo->hasResult = 0;
+ pInfo->isIRate = ((pCtx->functionId == TSDB_FUNC_IRATE) || (pCtx->functionId == TSDB_FUNC_SUM_IRATE) || (pCtx->functionId == TSDB_FUNC_AVG_IRATE));
+ return true;
+}
+
+
+static void rate_function(SQLFunctionCtx *pCtx) {
+
+ assert(IS_DATA_BLOCK_LOADED(pCtx->blockStatus));
+
+ int32_t notNullElems = 0;
+ SResultInfo *pResInfo = GET_RES_INFO(pCtx);
+ SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf;
+ TSKEY *primaryKey = pCtx->ptsList;
+
+ pTrace("%p rate_function() size:%d, hasNull:%d", pCtx, pCtx->size, pCtx->hasNull);
+
+ for (int32_t i = 0; i < pCtx->size; ++i) {
+ char *pData = GET_INPUT_CHAR_INDEX(pCtx, i);
+ if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
+ pTrace("%p rate_function() index of null data:%d", pCtx, i);
+ continue;
+ }
+
+ notNullElems++;
+
+ int64_t v = 0;
+ switch (pCtx->inputType) {
+ case TSDB_DATA_TYPE_TINYINT:
+ v = (int64_t)GET_INT8_VAL(pData);
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ v = (int64_t)GET_INT16_VAL(pData);
+ break;
+ case TSDB_DATA_TYPE_INT:
+ v = (int64_t)GET_INT32_VAL(pData);
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ v = (int64_t)GET_INT64_VAL(pData);
+ break;
+ default:
+ assert(0);
+ }
+
+ if ((INT64_MIN == pRateInfo->firstValue) || (INT64_MIN == pRateInfo->firstKey)) {
+ pRateInfo->firstValue = v;
+ pRateInfo->firstKey = primaryKey[i];
+
+ pTrace("firstValue:%" PRId64 " firstKey:%" PRId64, pRateInfo->firstValue, pRateInfo->firstKey);
+ }
+
+ if (INT64_MIN == pRateInfo->lastValue) {
+ pRateInfo->lastValue = v;
+ } else if (v < pRateInfo->lastValue) {
+ pRateInfo->CorrectionValue += pRateInfo->lastValue;
+ pTrace("CorrectionValue:%" PRId64, pRateInfo->CorrectionValue);
+ }
+
+ pRateInfo->lastValue = v;
+ pRateInfo->lastKey = primaryKey[i];
+ pTrace("lastValue:%" PRId64 " lastKey:%" PRId64, pRateInfo->lastValue, pRateInfo->lastKey);
+ }
+
+ if (!pCtx->hasNull) {
+ assert(pCtx->size == notNullElems);
+ }
+
+ SET_VAL(pCtx, notNullElems, 1);
+
+ if (notNullElems > 0) {
+ pRateInfo->hasResult = DATA_SET_FLAG;
+ pResInfo->hasResult = DATA_SET_FLAG;
+ }
+
+  // copy the intermediate result into the final output buffer for the super table query, since this execution may be the last one
+ if (pResInfo->superTableQ) {
+ memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SRateInfo));
+ }
+}
+
+static void rate_function_f(SQLFunctionCtx *pCtx, int32_t index) {
+ void *pData = GET_INPUT_CHAR_INDEX(pCtx, index);
+ if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
+ return;
+ }
+
+  // NOTE: store the intermediate result in the interResultBuf
+ SResultInfo *pResInfo = GET_RES_INFO(pCtx);
+ SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf;
+ TSKEY *primaryKey = pCtx->ptsList;
+
+ int64_t v = 0;
+ switch (pCtx->inputType) {
+ case TSDB_DATA_TYPE_TINYINT:
+ v = (int64_t)GET_INT8_VAL(pData);
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ v = (int64_t)GET_INT16_VAL(pData);
+ break;
+ case TSDB_DATA_TYPE_INT:
+ v = (int64_t)GET_INT32_VAL(pData);
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ v = (int64_t)GET_INT64_VAL(pData);
+ break;
+ default:
+ assert(0);
+ }
+
+ if ((INT64_MIN == pRateInfo->firstValue) || (INT64_MIN == pRateInfo->firstKey)) {
+ pRateInfo->firstValue = v;
+ pRateInfo->firstKey = primaryKey[index];
+ }
+
+ if (INT64_MIN == pRateInfo->lastValue) {
+ pRateInfo->lastValue = v;
+ } else if (v < pRateInfo->lastValue) {
+ pRateInfo->CorrectionValue += pRateInfo->lastValue;
+ }
+
+ pRateInfo->lastValue = v;
+ pRateInfo->lastKey = primaryKey[index];
+
+ pTrace("====%p rate_function_f() index:%d lastValue:%" PRId64 " lastKey:%" PRId64 " CorrectionValue:%" PRId64, pCtx, index, pRateInfo->lastValue, pRateInfo->lastKey, pRateInfo->CorrectionValue);
+
+ SET_VAL(pCtx, 1, 1);
+
+ // set has result flag
+ pRateInfo->hasResult = DATA_SET_FLAG;
+ pResInfo->hasResult = DATA_SET_FLAG;
+
+  // copy the intermediate result into the final output buffer for the super table query, since this execution may be the last one
+ if (pResInfo->superTableQ) {
+ memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SRateInfo));
+ }
+}
+
+
+
+static void rate_func_merge(SQLFunctionCtx *pCtx) {
+ SResultInfo *pResInfo = GET_RES_INFO(pCtx);
+ assert(pResInfo->superTableQ);
+
+ pTrace("rate_func_merge() size:%d", pCtx->size);
+
+ //SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf;
+ SRateInfo *pBuf = (SRateInfo *)pCtx->aOutputBuf;
+ char *indicator = pCtx->aInputElemBuf;
+
+ assert(1 == pCtx->size);
+
+ int32_t numOfNotNull = 0;
+ for (int32_t i = 0; i < pCtx->size; ++i, indicator += sizeof(SRateInfo)) {
+ SRateInfo *pInput = (SRateInfo *)indicator;
+ if (DATA_SET_FLAG != pInput->hasResult) {
+ continue;
+ }
+
+ numOfNotNull++;
+ memcpy(pBuf, pInput, sizeof(SRateInfo));
+ pTrace("%p rate_func_merge() isIRate:%d firstKey:%" PRId64 " lastKey:%" PRId64 " firstValue:%" PRId64 " lastValue:%" PRId64 " CorrectionValue:%" PRId64,
+ pCtx, pInput->isIRate, pInput->firstKey, pInput->lastKey, pInput->firstValue, pInput->lastValue, pInput->CorrectionValue);
+ }
+
+ SET_VAL(pCtx, numOfNotNull, 1);
+
+ if (numOfNotNull > 0) {
+ pBuf->hasResult = DATA_SET_FLAG;
+ }
+
+ return;
+}
+
+
+
+static void rate_func_copy(SQLFunctionCtx *pCtx) {
+ assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY);
+
+ SResultInfo *pResInfo = GET_RES_INFO(pCtx);
+ memcpy(pResInfo->interResultBuf, pCtx->aInputElemBuf, (size_t)pCtx->inputBytes);
+ pResInfo->hasResult = ((SRateInfo*)pCtx->aInputElemBuf)->hasResult;
+
+ SRateInfo* pRateInfo = (SRateInfo*)pCtx->aInputElemBuf;
+ pTrace("%p rate_func_second_merge() firstKey:%" PRId64 " lastKey:%" PRId64 " firstValue:%" PRId64 " lastValue:%" PRId64 " CorrectionValue:%" PRId64 " hasResult:%d",
+ pCtx, pRateInfo->firstKey, pRateInfo->lastKey, pRateInfo->firstValue, pRateInfo->lastValue, pRateInfo->CorrectionValue, pRateInfo->hasResult);
+}
+
+
+
+static void rate_finalizer(SQLFunctionCtx *pCtx) {
+ SResultInfo *pResInfo = GET_RES_INFO(pCtx);
+ SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf;
+
+ pTrace("%p isIRate:%d firstKey:%" PRId64 " lastKey:%" PRId64 " firstValue:%" PRId64 " lastValue:%" PRId64 " CorrectionValue:%" PRId64 " hasResult:%d",
+ pCtx, pRateInfo->isIRate, pRateInfo->firstKey, pRateInfo->lastKey, pRateInfo->firstValue, pRateInfo->lastValue, pRateInfo->CorrectionValue, pRateInfo->hasResult);
+
+ if (pRateInfo->hasResult != DATA_SET_FLAG) {
+ setNull(pCtx->aOutputBuf, TSDB_DATA_TYPE_DOUBLE, sizeof(double));
+ return;
+ }
+
+ *(double*)pCtx->aOutputBuf = do_calc_rate(pRateInfo);
+
+ pTrace("rate_finalizer() output result:%f", *(double *)pCtx->aOutputBuf);
+
+ // cannot set the numOfIteratedElems again since it is set during previous iteration
+ pResInfo->numOfRes = 1;
+ pResInfo->hasResult = DATA_SET_FLAG;
+
+ doFinalizer(pCtx);
+}
+
+
+static void irate_function(SQLFunctionCtx *pCtx) {
+
+ assert(IS_DATA_BLOCK_LOADED(pCtx->blockStatus));
+
+ int32_t notNullElems = 0;
+ SResultInfo *pResInfo = GET_RES_INFO(pCtx);
+ SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf;
+ TSKEY *primaryKey = pCtx->ptsList;
+
+ pTrace("%p irate_function() size:%d, hasNull:%d", pCtx, pCtx->size, pCtx->hasNull);
+
+ if (pCtx->size < 1) {
+ return;
+ }
+
+ for (int32_t i = pCtx->size - 1; i >= 0; --i) {
+ char *pData = GET_INPUT_CHAR_INDEX(pCtx, i);
+ if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
+ pTrace("%p irate_function() index of null data:%d", pCtx, i);
+ continue;
+ }
+
+ notNullElems++;
+
+ int64_t v = 0;
+ switch (pCtx->inputType) {
+ case TSDB_DATA_TYPE_TINYINT:
+ v = (int64_t)GET_INT8_VAL(pData);
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ v = (int64_t)GET_INT16_VAL(pData);
+ break;
+ case TSDB_DATA_TYPE_INT:
+ v = (int64_t)GET_INT32_VAL(pData);
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ v = (int64_t)GET_INT64_VAL(pData);
+ break;
+ default:
+ assert(0);
+ }
+
+    // TODO: compute this only once if the function is invoked a single time
+ if ((INT64_MIN == pRateInfo->lastKey) || (INT64_MIN == pRateInfo->lastValue)) {
+ pRateInfo->lastValue = v;
+ pRateInfo->lastKey = primaryKey[i];
+
+ pTrace("%p irate_function() lastValue:%" PRId64 " lastKey:%" PRId64, pCtx, pRateInfo->lastValue, pRateInfo->lastKey);
+ continue;
+ }
+
+ if ((INT64_MIN == pRateInfo->firstKey) || (INT64_MIN == pRateInfo->firstValue)){
+ pRateInfo->firstValue = v;
+ pRateInfo->firstKey = primaryKey[i];
+
+ pTrace("%p irate_function() firstValue:%" PRId64 " firstKey:%" PRId64, pCtx, pRateInfo->firstValue, pRateInfo->firstKey);
+ break;
+ }
+ }
+
+ SET_VAL(pCtx, notNullElems, 1);
+
+ if (notNullElems > 0) {
+ pRateInfo->hasResult = DATA_SET_FLAG;
+ pResInfo->hasResult = DATA_SET_FLAG;
+ }
+
+  // copy the intermediate result into the final output buffer for the super table query, since this execution may be the last one
+ if (pResInfo->superTableQ) {
+ memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SRateInfo));
+ }
+}
+
+static void irate_function_f(SQLFunctionCtx *pCtx, int32_t index) {
+ void *pData = GET_INPUT_CHAR_INDEX(pCtx, index);
+ if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
+ return;
+ }
+
+  // NOTE: store the intermediate result in the interResultBuf
+ SResultInfo *pResInfo = GET_RES_INFO(pCtx);
+ SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf;
+ TSKEY *primaryKey = pCtx->ptsList;
+
+ int64_t v = 0;
+ switch (pCtx->inputType) {
+ case TSDB_DATA_TYPE_TINYINT:
+ v = (int64_t)GET_INT8_VAL(pData);
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ v = (int64_t)GET_INT16_VAL(pData);
+ break;
+ case TSDB_DATA_TYPE_INT:
+ v = (int64_t)GET_INT32_VAL(pData);
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ v = (int64_t)GET_INT64_VAL(pData);
+ break;
+ default:
+ assert(0);
+ }
+
+ pRateInfo->firstKey = pRateInfo->lastKey;
+ pRateInfo->firstValue = pRateInfo->lastValue;
+
+ pRateInfo->lastValue = v;
+ pRateInfo->lastKey = primaryKey[index];
+
+ pTrace("====%p irate_function_f() index:%d lastValue:%" PRId64 " lastKey:%" PRId64 " firstValue:%" PRId64 " firstKey:%" PRId64, pCtx, index, pRateInfo->lastValue, pRateInfo->lastKey, pRateInfo->firstValue , pRateInfo->firstKey);
+
+ SET_VAL(pCtx, 1, 1);
+
+ // set has result flag
+ pRateInfo->hasResult = DATA_SET_FLAG;
+ pResInfo->hasResult = DATA_SET_FLAG;
+
+ // keep the data into the final output buffer for super table query since this execution may be the last one
+ if (pResInfo->superTableQ) {
+ memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SRateInfo));
+ }
+}
+
+static void do_sumrate_merge(SQLFunctionCtx *pCtx) {
+ SResultInfo *pResInfo = GET_RES_INFO(pCtx);
+ assert(pResInfo->superTableQ);
+
+ SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf;
+ char * input = GET_INPUT_CHAR(pCtx);
+
+ for (int32_t i = 0; i < pCtx->size; ++i, input += pCtx->inputBytes) {
+ SRateInfo *pInput = (SRateInfo *)input;
+
+ pTrace("%p do_sumrate_merge() hasResult:%d input num:%" PRId64 " input sum:%f total num:%" PRId64 " total sum:%f", pCtx, pInput->hasResult, pInput->num, pInput->sum, pRateInfo->num, pRateInfo->sum);
+
+ if (pInput->hasResult != DATA_SET_FLAG) {
+ continue;
+ } else if (pInput->num == 0) {
+ pRateInfo->sum += do_calc_rate(pInput);
+ pRateInfo->num++;
+ } else {
+ pRateInfo->sum += pInput->sum;
+ pRateInfo->num += pInput->num;
+ }
+ pRateInfo->hasResult = DATA_SET_FLAG;
+ }
+
+ // if the data set hasResult is not set, the result is null
+ if (DATA_SET_FLAG == pRateInfo->hasResult) {
+ pResInfo->hasResult = DATA_SET_FLAG;
+ SET_VAL(pCtx, pRateInfo->num, 1);
+ memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(SRateInfo));
+ }
+}
+
+static void sumrate_func_merge(SQLFunctionCtx *pCtx) {
+ pTrace("%p sumrate_func_merge() process ...", pCtx);
+ do_sumrate_merge(pCtx);
+}
+
+static void sumrate_func_second_merge(SQLFunctionCtx *pCtx) {
+ pTrace("%p sumrate_func_second_merge() process ...", pCtx);
+ do_sumrate_merge(pCtx);
+}
+
+static void sumrate_finalizer(SQLFunctionCtx *pCtx) {
+ SResultInfo *pResInfo = GET_RES_INFO(pCtx);
+ SRateInfo *pRateInfo = (SRateInfo *)pResInfo->interResultBuf;
+
+ pTrace("%p sumrate_finalizer() superTableQ:%d num:%" PRId64 " sum:%f hasResult:%d", pCtx, pResInfo->superTableQ, pRateInfo->num, pRateInfo->sum, pRateInfo->hasResult);
+
+ if (pRateInfo->hasResult != DATA_SET_FLAG) {
+ setNull(pCtx->aOutputBuf, TSDB_DATA_TYPE_DOUBLE, sizeof(double));
+ return;
+ }
+
+ if (pRateInfo->num == 0) {
+ // from meter
+ *(double*)pCtx->aOutputBuf = do_calc_rate(pRateInfo);
+ } else if (pCtx->functionId == TSDB_FUNC_SUM_RATE || pCtx->functionId == TSDB_FUNC_SUM_IRATE) {
+ *(double*)pCtx->aOutputBuf = pRateInfo->sum;
+ } else {
+ *(double*)pCtx->aOutputBuf = pRateInfo->sum / pRateInfo->num;
+ }
+
+ pResInfo->numOfRes = 1;
+ pResInfo->hasResult = DATA_SET_FLAG;
+ doFinalizer(pCtx);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+
+
/*
* function compatible list.
* tag and ts are not involved in the compatibility check
@@ -4371,23 +4849,18 @@ static void ts_comp_finalize(SQLFunctionCtx *pCtx) {
* e.g., count/sum/avg/min/max/stddev/percentile/apercentile/first/last...
*
*/
-int32_t funcCompatDefList[28] = {
- /*
- * count, sum, avg, min, max, stddev, percentile, apercentile, first, last
- */
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+int32_t funcCompatDefList[] = {
+ // count, sum, avg, min, max, stddev, percentile, apercentile, first, last
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ // last_row, top, bottom, spread, twa, leastsqr, ts, ts_dummy, tag_dummy, ts_z
+ 4, -1, -1, 1, 1, 1, 1, 1, 1, -1,
+ // tag, colprj, tagprj, arithmetic, diff, first_dist, last_dist, interp rate irate
+ 1, 1, 1, 1, -1, 1, 1, 5, 1, 1,
+ // sum_rate, sum_irate, avg_rate, avg_irate
+ 1, 1, 1, 1,
+};
- /*
- * last_row, top, bottom, spread, twa, leastsqr, ts, ts_dummy, tag_dummy, ts_z, tag
- */
- 4, -1, -1, 1, 1, 1, 1, 1, 1, -1, 1,
-
- /*
- * colprj, tagprj, arithmetic, diff, first_dist, last_dist, interp
- */
- 1, 1, 1, -1, 1, 1, 5};
-
-SQLAggFuncElem aAggs[28] = {{
+SQLAggFuncElem aAggs[] = {{
// 0, count function does not invoke the finalize function
"count",
TSDB_FUNC_COUNT,
@@ -4810,4 +5283,94 @@ SQLAggFuncElem aAggs[28] = {{
noop1,
copy_function,
no_data_info,
+ },
+ {
+ // 28
+ "rate",
+ TSDB_FUNC_RATE,
+ TSDB_FUNC_RATE,
+ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
+ rate_function_setup,
+ rate_function,
+ rate_function_f,
+ no_next_step,
+ rate_finalizer,
+ rate_func_merge,
+ rate_func_copy,
+ data_req_load_info,
+ },
+ {
+ // 29
+ "irate",
+ TSDB_FUNC_IRATE,
+ TSDB_FUNC_IRATE,
+ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
+ rate_function_setup,
+ irate_function,
+ irate_function_f,
+ no_next_step,
+ rate_finalizer,
+ rate_func_merge,
+ rate_func_copy,
+ data_req_load_info,
+ },
+ {
+ // 30
+ "sum_rate",
+ TSDB_FUNC_SUM_RATE,
+ TSDB_FUNC_SUM_RATE,
+ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
+ rate_function_setup,
+ rate_function,
+ rate_function_f,
+ no_next_step,
+ sumrate_finalizer,
+ sumrate_func_merge,
+ sumrate_func_second_merge,
+ data_req_load_info,
+ },
+ {
+ // 31
+ "sum_irate",
+ TSDB_FUNC_SUM_IRATE,
+ TSDB_FUNC_SUM_IRATE,
+ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
+ rate_function_setup,
+ irate_function,
+ irate_function_f,
+ no_next_step,
+ sumrate_finalizer,
+ sumrate_func_merge,
+ sumrate_func_second_merge,
+ data_req_load_info,
+ },
+ {
+ // 32
+ "avg_rate",
+ TSDB_FUNC_AVG_RATE,
+ TSDB_FUNC_AVG_RATE,
+ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
+ rate_function_setup,
+ rate_function,
+ rate_function_f,
+ no_next_step,
+ sumrate_finalizer,
+ sumrate_func_merge,
+ sumrate_func_second_merge,
+ data_req_load_info,
+ },
+ {
+ // 33
+ "avg_irate",
+ TSDB_FUNC_AVG_IRATE,
+ TSDB_FUNC_AVG_IRATE,
+ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
+ rate_function_setup,
+ irate_function,
+ irate_function_f,
+ no_next_step,
+ sumrate_finalizer,
+ sumrate_func_merge,
+ sumrate_func_second_merge,
+ data_req_load_info,
}};
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index acb2aaba6b..889029e834 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -27,7 +27,6 @@
#include "tscUtil.h"
#include "tschemautil.h"
#include "tsclient.h"
-#include "tast.h"
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
@@ -908,7 +907,7 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
return false;
}
- if (pTagField->type < TSDB_DATA_TYPE_BOOL && pTagField->type > TSDB_DATA_TYPE_NCHAR) {
+ if ((pTagField->type < TSDB_DATA_TYPE_BOOL) || (pTagField->type > TSDB_DATA_TYPE_NCHAR)) {
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
return false;
}
@@ -1572,6 +1571,12 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIdx, tSQLExprIt
}
case TK_SUM:
case TK_AVG:
+ case TK_RATE:
+ case TK_IRATE:
+ case TK_SUM_RATE:
+ case TK_SUM_IRATE:
+ case TK_AVG_RATE:
+ case TK_AVG_IRATE:
case TK_TWA:
case TK_MIN:
case TK_MAX:
@@ -1724,7 +1729,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIdx, tSQLExprIt
SSchema* pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta);
// functions can not be applied to tags
- if (index.columnIndex >= pMeterMetaInfo->pMeterMeta->numOfColumns) {
+ if ((index.columnIndex >= pMeterMetaInfo->pMeterMeta->numOfColumns) || (index.columnIndex < 0)) {
return invalidSqlErrMsg(pQueryInfo->msg, msg6);
}
@@ -2037,6 +2042,24 @@ int32_t changeFunctionID(int32_t optr, int16_t* functionId) {
case TK_AVG:
*functionId = TSDB_FUNC_AVG;
break;
+ case TK_RATE:
+ *functionId = TSDB_FUNC_RATE;
+ break;
+ case TK_IRATE:
+ *functionId = TSDB_FUNC_IRATE;
+ break;
+ case TK_SUM_RATE:
+ *functionId = TSDB_FUNC_SUM_RATE;
+ break;
+ case TK_SUM_IRATE:
+ *functionId = TSDB_FUNC_SUM_IRATE;
+ break;
+ case TK_AVG_RATE:
+ *functionId = TSDB_FUNC_AVG_RATE;
+ break;
+ case TK_AVG_IRATE:
+ *functionId = TSDB_FUNC_AVG_IRATE;
+ break;
case TK_MIN:
*functionId = TSDB_FUNC_MIN;
break;
@@ -2231,7 +2254,8 @@ int32_t tscTansformSQLFunctionForSTableQuery(SQueryInfo* pQueryInfo) {
SSchema* pSrcSchema = tsGetColumnSchema(pMeterMetaInfo->pMeterMeta, colIndex);
if ((functionId >= TSDB_FUNC_SUM && functionId <= TSDB_FUNC_TWA) ||
- (functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_LAST_DST)) {
+ (functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_LAST_DST) ||
+ (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE)) {
if (getResultDataInfo(pSrcSchema->type, pSrcSchema->bytes, functionId, pExpr->param[0].i64Key, &type, &bytes,
&intermediateBytes, 0, true) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_INVALID_SQL;
@@ -2257,7 +2281,7 @@ void tscRestoreSQLFunctionForMetricQuery(SQueryInfo* pQueryInfo) {
for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
SSchema* pSchema = tsGetColumnSchema(pMeterMetaInfo->pMeterMeta, pExpr->colInfo.colIdx);
-
+
// if (/*(pExpr->functionId >= TSDB_FUNC_FIRST_DST && pExpr->functionId <= TSDB_FUNC_LAST_DST) ||
// (pExpr->functionId >= TSDB_FUNC_SUM && pExpr->functionId <= TSDB_FUNC_MAX) ||
// pExpr->functionId == TSDB_FUNC_LAST_ROW*/) {
@@ -3086,8 +3110,8 @@ static bool isValidExpr(tSQLExpr* pLeft, tSQLExpr* pRight, int32_t optr) {
*
* However, columnA < 4+12 is valid
*/
- if ((pLeft->nSQLOptr >= TK_COUNT && pLeft->nSQLOptr <= TK_LAST_ROW) ||
- (pRight->nSQLOptr >= TK_COUNT && pRight->nSQLOptr <= TK_LAST_ROW) ||
+ if ((pLeft->nSQLOptr >= TK_COUNT && pLeft->nSQLOptr <= TK_AVG_IRATE) ||
+ (pRight->nSQLOptr >= TK_COUNT && pRight->nSQLOptr <= TK_AVG_IRATE) ||
(pLeft->nSQLOptr >= TK_BOOL && pLeft->nSQLOptr <= TK_BINARY && pRight->nSQLOptr >= TK_BOOL &&
pRight->nSQLOptr <= TK_BINARY)) {
return false;
@@ -5832,4 +5856,4 @@ static int32_t tSQLBinaryExprCreateFromSqlExpr(tSQLSyntaxNode **pExpr, tSQLExpr*
}
return TSDB_CODE_SUCCESS;
-}
\ No newline at end of file
+}
diff --git a/src/connector/grafana/tdengine/package-lock.json b/src/connector/grafana/tdengine/package-lock.json
index 9401fbcd0a..f8d2df1150 100644
--- a/src/connector/grafana/tdengine/package-lock.json
+++ b/src/connector/grafana/tdengine/package-lock.json
@@ -3992,9 +3992,9 @@
}
},
"yarn": {
- "version": "1.21.1",
- "resolved": "https://registry.npmjs.org/yarn/-/yarn-1.21.1.tgz",
- "integrity": "sha512-dQgmJv676X/NQczpbiDtc2hsE/pppGDJAzwlRiADMTvFzYbdxPj2WO4PcNyriSt2c4jsCMpt8UFRKHUozt21GQ=="
+ "version": "1.22.0",
+ "resolved": "https://registry.npmjs.org/yarn/-/yarn-1.22.0.tgz",
+ "integrity": "sha512-KMHP/Jq53jZKTY9iTUt3dIVl/be6UPs2INo96+BnZHLKxYNTfwMmlgHTaMWyGZoO74RI4AIFvnWhYrXq2USJkg=="
}
}
}
diff --git a/src/connector/grafana/tdengine/package.json b/src/connector/grafana/tdengine/package.json
index 8e542bef26..0eb7a76be6 100644
--- a/src/connector/grafana/tdengine/package.json
+++ b/src/connector/grafana/tdengine/package.json
@@ -39,7 +39,7 @@
},
"dependencies": {
"lodash": "^4.17.13",
- "yarn": "^1.21.1"
+ "yarn": "^1.22.0"
},
"homepage": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine"
}
diff --git a/src/connector/grafana/tdengine/yarn.lock b/src/connector/grafana/tdengine/yarn.lock
index b2b869cb1a..fe7e8122ec 100644
--- a/src/connector/grafana/tdengine/yarn.lock
+++ b/src/connector/grafana/tdengine/yarn.lock
@@ -2957,7 +2957,7 @@ yargs@~3.10.0:
decamelize "^1.0.0"
window-size "0.1.0"
-yarn@^1.21.1:
- version "1.21.1"
- resolved "https://registry.yarnpkg.com/yarn/-/yarn-1.21.1.tgz#1d5da01a9a03492dc4a5957befc1fd12da83d89c"
- integrity sha512-dQgmJv676X/NQczpbiDtc2hsE/pppGDJAzwlRiADMTvFzYbdxPj2WO4PcNyriSt2c4jsCMpt8UFRKHUozt21GQ==
+yarn@^1.22.0:
+ version "1.22.0"
+ resolved "https://registry.yarnpkg.com/yarn/-/yarn-1.22.0.tgz#acf82906e36bcccd1ccab1cfb73b87509667c881"
+ integrity sha512-KMHP/Jq53jZKTY9iTUt3dIVl/be6UPs2INo96+BnZHLKxYNTfwMmlgHTaMWyGZoO74RI4AIFvnWhYrXq2USJkg==
diff --git a/src/inc/tsqlfunction.h b/src/inc/tsqlfunction.h
index 9b53e5d84b..8783285fbb 100644
--- a/src/inc/tsqlfunction.h
+++ b/src/inc/tsqlfunction.h
@@ -60,6 +60,13 @@ extern "C" {
#define TSDB_FUNC_LAST_DST 26
#define TSDB_FUNC_INTERP 27
+#define TSDB_FUNC_RATE 28
+#define TSDB_FUNC_IRATE 29
+#define TSDB_FUNC_SUM_RATE 30
+#define TSDB_FUNC_SUM_IRATE 31
+#define TSDB_FUNC_AVG_RATE 32
+#define TSDB_FUNC_AVG_IRATE 33
+
#define TSDB_FUNCSTATE_SO 0x1U // single output
#define TSDB_FUNCSTATE_MO 0x2U // dynamic number of output, not multinumber of output e.g., TOP/BOTTOM
#define TSDB_FUNCSTATE_STREAM 0x4U // function avail for stream
@@ -288,10 +295,10 @@ typedef struct STwaInfo {
} STwaInfo;
/* global sql function array */
-extern struct SQLAggFuncElem aAggs[28];
+extern struct SQLAggFuncElem aAggs[];
/* compatible check array list */
-extern int32_t funcCompatDefList[28];
+extern int32_t funcCompatDefList[];
void getStatistics(char *priData, char *data, int32_t size, int32_t numOfRow, int32_t type, int64_t *min, int64_t *max,
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int32_t *numOfNull);
diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt
index 244eafb44f..0f490c58b1 100644
--- a/src/kit/shell/CMakeLists.txt
+++ b/src/kit/shell/CMakeLists.txt
@@ -11,7 +11,13 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
LIST(REMOVE_ITEM SRC ./src/shellWindows.c)
LIST(REMOVE_ITEM SRC ./src/shellDarwin.c)
ADD_EXECUTABLE(shell ${SRC})
- TARGET_LINK_LIBRARIES(shell taos_static)
+
+ IF (TD_PAGMODE_LITE)
+ TARGET_LINK_LIBRARIES(shell taos)
+ ELSE ()
+ TARGET_LINK_LIBRARIES(shell taos_static)
+ ENDIF ()
+
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
ELSEIF (TD_WINDOWS_64)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/pthread)
diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt
index 599875fa60..d47e6a06df 100644
--- a/src/kit/taosdemo/CMakeLists.txt
+++ b/src/kit/taosdemo/CMakeLists.txt
@@ -9,5 +9,11 @@ INCLUDE_DIRECTORIES(inc)
IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
AUX_SOURCE_DIRECTORY(. SRC)
ADD_EXECUTABLE(taosdemo ${SRC})
- TARGET_LINK_LIBRARIES(taosdemo taos_static)
+
+ IF (TD_PAGMODE_LITE)
+ TARGET_LINK_LIBRARIES(taosdemo taos)
+ ELSE ()
+ TARGET_LINK_LIBRARIES(taosdemo taos_static)
+ ENDIF ()
+
ENDIF ()
diff --git a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt
index 76b40d1c2d..5b54540782 100644
--- a/src/kit/taosdump/CMakeLists.txt
+++ b/src/kit/taosdump/CMakeLists.txt
@@ -9,5 +9,12 @@ INCLUDE_DIRECTORIES(inc)
IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
AUX_SOURCE_DIRECTORY(. SRC)
ADD_EXECUTABLE(taosdump ${SRC})
- TARGET_LINK_LIBRARIES(taosdump taos_static)
+
+ IF (TD_PAGMODE_LITE)
+ TARGET_LINK_LIBRARIES(taosdump taos)
+ ELSE ()
+ TARGET_LINK_LIBRARIES(taosdump taos_static)
+ ENDIF ()
+
+
ENDIF ()
diff --git a/src/modules/http/inc/httpHandle.h b/src/modules/http/inc/httpHandle.h
index 1b746e1520..b10ced9c8d 100644
--- a/src/modules/http/inc/httpHandle.h
+++ b/src/modules/http/inc/httpHandle.h
@@ -37,7 +37,7 @@
#define HTTP_STEP_SIZE 1024 //http message get process step by step
#define HTTP_MAX_URL 5 //http url stack size
#define HTTP_METHOD_SCANNER_SIZE 7 //http method fp size
-#define HTTP_GC_TARGET_SIZE 128
+#define HTTP_GC_TARGET_SIZE 512
#define HTTP_VERSION_10 0
#define HTTP_VERSION_11 1
diff --git a/src/modules/http/src/gcJson.c b/src/modules/http/src/gcJson.c
index 1a86c5d24f..8f59633714 100644
--- a/src/modules/http/src/gcJson.c
+++ b/src/modules/http/src/gcJson.c
@@ -127,42 +127,43 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
// for group by
if (groupFields != -1) {
char target[HTTP_GC_TARGET_SIZE];
+ int len;
+ len = snprintf(target,HTTP_GC_TARGET_SIZE,"%s{",aliasBuffer);
+ for (int i = dataFields + 1; ispi = 0;
pHeader->tcp = 0;
pHeader->encrypt = 0;
- pHeader->tranId = atomic_add_fetch_32(&pConn->tranId, 1);
- if (pHeader->tranId == 0) pHeader->tranId = atomic_add_fetch_32(&pConn->tranId, 1);
+ pHeader->tranId = atomic_add_fetch_16(&pConn->tranId, 1);
+ if (pHeader->tranId == 0) pHeader->tranId = atomic_add_fetch_16(&pConn->tranId, 1);
pHeader->sourceId = pConn->ownId;
pHeader->destId = pConn->peerId;
diff --git a/src/system/CMakeLists.txt b/src/system/CMakeLists.txt
index 5c4ab62d24..516b9e85e2 100644
--- a/src/system/CMakeLists.txt
+++ b/src/system/CMakeLists.txt
@@ -3,6 +3,6 @@ PROJECT(TDengine)
ADD_SUBDIRECTORY(detail)
-IF (TD_LITE)
+IF (TD_EDGE)
ADD_SUBDIRECTORY(lite)
ENDIF ()
\ No newline at end of file
diff --git a/src/system/detail/CMakeLists.txt b/src/system/detail/CMakeLists.txt
index 95cce3dfe6..6268b97f91 100644
--- a/src/system/detail/CMakeLists.txt
+++ b/src/system/detail/CMakeLists.txt
@@ -16,10 +16,14 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
ADD_EXECUTABLE(taosd ${SRC})
- TARGET_LINK_LIBRARIES(taosd taos_static trpc tutil sdb monitor pthread http)
+ IF (TD_PAGMODE_LITE)
+ TARGET_LINK_LIBRARIES(taosd taos trpc tutil sdb monitor pthread http)
+ ELSE ()
+ TARGET_LINK_LIBRARIES(taosd taos_static trpc tutil sdb monitor pthread http)
+ ENDIF ()
- IF (TD_LITE)
- TARGET_LINK_LIBRARIES(taosd taosd_lite)
+ IF (TD_EDGE)
+ TARGET_LINK_LIBRARIES(taosd taosd_edge)
ELSE ()
TARGET_LINK_LIBRARIES(taosd taosd_cluster)
ENDIF ()
diff --git a/src/system/detail/inc/vnodeQueryImpl.h b/src/system/detail/inc/vnodeQueryImpl.h
index 6df52479c0..8da88d6781 100644
--- a/src/system/detail/inc/vnodeQueryImpl.h
+++ b/src/system/detail/inc/vnodeQueryImpl.h
@@ -125,6 +125,7 @@ static FORCE_INLINE SMeterObj* getMeterObj(void* hashHandle, int32_t sid) {
bool isQueryKilled(SQuery* pQuery);
bool isFixedOutputQuery(SQuery* pQuery);
bool isPointInterpoQuery(SQuery* pQuery);
+bool isSumAvgRateQuery(SQuery *pQuery);
bool isTopBottomQuery(SQuery* pQuery);
bool isFirstLastRowQuery(SQuery* pQuery);
bool isTSCompQuery(SQuery* pQuery);
diff --git a/src/system/detail/src/vnodeQueryImpl.c b/src/system/detail/src/vnodeQueryImpl.c
index dc2664ee55..56273d8891 100644
--- a/src/system/detail/src/vnodeQueryImpl.c
+++ b/src/system/detail/src/vnodeQueryImpl.c
@@ -2663,8 +2663,9 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, int64_t startQueryTimes
// store the first&last timestamp into the intermediate buffer [1], the true
// value may be null but timestamp will never be null
pCtx->ptsList = (int64_t *)(primaryColumnData + startOffset * TSDB_KEYSIZE);
- } else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_TWA ||
- functionId == TSDB_FUNC_DIFF) {
+ } else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM ||
+ functionId == TSDB_FUNC_TWA || functionId == TSDB_FUNC_DIFF ||
+ (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE)) {
/*
* leastsquares function needs two columns of input, currently, the x value of linear equation is set to
* timestamp column, and the y-value is the column specified in pQuery->pSelectExpr[i].colIdxInBuffer
@@ -2953,6 +2954,22 @@ bool isPointInterpoQuery(SQuery *pQuery) {
}
// TODO REFACTOR:MERGE WITH CLIENT-SIDE FUNCTION
+bool isSumAvgRateQuery(SQuery *pQuery) {
+ for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
+ int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId;
+ if (functionId == TSDB_FUNC_TS) {
+ continue;
+ }
+
+ if (functionId == TSDB_FUNC_SUM_RATE || functionId == TSDB_FUNC_SUM_IRATE ||
+ functionId == TSDB_FUNC_AVG_RATE || functionId == TSDB_FUNC_AVG_IRATE) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
bool isTopBottomQuery(SQuery *pQuery) {
for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId;
diff --git a/src/system/detail/src/vnodeQueryProcess.c b/src/system/detail/src/vnodeQueryProcess.c
index 213e837f0c..16c37e0580 100644
--- a/src/system/detail/src/vnodeQueryProcess.c
+++ b/src/system/detail/src/vnodeQueryProcess.c
@@ -963,7 +963,7 @@ static void vnodeMultiMeterQueryProcessor(SQInfo *pQInfo) {
return;
}
- if (pQuery->intervalTime > 0) {
+ if (pQuery->intervalTime > 0 || isSumAvgRateQuery(pQuery)) {
assert(pSupporter->subgroupIdx == 0 && pSupporter->numOfGroupResultPages == 0);
if (mergeMetersResultToOneGroups(pSupporter) == TSDB_CODE_SUCCESS) {
diff --git a/src/system/detail/src/vnodeShell.c b/src/system/detail/src/vnodeShell.c
index 4ae7ee2520..c473b4241d 100644
--- a/src/system/detail/src/vnodeShell.c
+++ b/src/system/detail/src/vnodeShell.c
@@ -485,7 +485,7 @@ void vnodeExecuteRetrieveReq(SSchedMsg *pSched) {
// write the progress information of each meter to response
// this is required by subscriptions
- if (numOfRows > 0 && code == TSDB_CODE_SUCCESS) {
+ if (pQInfo != NULL ) {
if (pQInfo->pTableQuerySupporter != NULL && pQInfo->pTableQuerySupporter->pMeterSidExtInfo != NULL) {
*((int32_t *)pMsg) = htonl(pQInfo->pTableQuerySupporter->numOfMeters);
pMsg += sizeof(int32_t);
diff --git a/src/system/lite/CMakeLists.txt b/src/system/lite/CMakeLists.txt
index a22ed60563..8c648747e7 100644
--- a/src/system/lite/CMakeLists.txt
+++ b/src/system/lite/CMakeLists.txt
@@ -13,5 +13,5 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(./src SRC)
- ADD_LIBRARY(taosd_lite ${SRC})
+ ADD_LIBRARY(taosd_edge ${SRC})
ENDIF ()
diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt
index 4202092947..d8f74f46f4 100644
--- a/src/util/CMakeLists.txt
+++ b/src/util/CMakeLists.txt
@@ -34,6 +34,7 @@ ELSEIF (TD_WINDOWS_64)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/iconv)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/regex)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc)
+ LIST(APPEND SRC ./src/hash.c)
LIST(APPEND SRC ./src/ihash.c)
LIST(APPEND SRC ./src/lz4.c)
LIST(APPEND SRC ./src/shash.c)
@@ -54,6 +55,7 @@ ELSEIF (TD_WINDOWS_64)
LIST(APPEND SRC ./src/tmempool.c)
LIST(APPEND SRC ./src/tmodule.c)
LIST(APPEND SRC ./src/tnote.c)
+ LIST(APPEND SRC ./src/tpercentile.c)
LIST(APPEND SRC ./src/tsched.c)
LIST(APPEND SRC ./src/tskiplist.c)
LIST(APPEND SRC ./src/tsocket.c)
@@ -68,6 +70,7 @@ ELSEIF (TD_WINDOWS_64)
TARGET_LINK_LIBRARIES(tutil iconv regex pthread os winmm IPHLPAPI ws2_32)
ELSEIF(TD_DARWIN_64)
ADD_DEFINITIONS(-DUSE_LIBICONV)
+ LIST(APPEND SRC ./src/hash.c)
LIST(APPEND SRC ./src/ihash.c)
LIST(APPEND SRC ./src/lz4.c)
LIST(APPEND SRC ./src/shash.c)
@@ -88,6 +91,7 @@ ELSEIF(TD_DARWIN_64)
LIST(APPEND SRC ./src/tmempool.c)
LIST(APPEND SRC ./src/tmodule.c)
LIST(APPEND SRC ./src/tnote.c)
+ LIST(APPEND SRC ./src/tpercentile.c)
LIST(APPEND SRC ./src/tsched.c)
LIST(APPEND SRC ./src/tskiplist.c)
LIST(APPEND SRC ./src/tsocket.c)
diff --git a/src/util/src/tglobalcfg.c b/src/util/src/tglobalcfg.c
index 034d293f05..45efcad563 100644
--- a/src/util/src/tglobalcfg.c
+++ b/src/util/src/tglobalcfg.c
@@ -80,7 +80,12 @@ short tsNumOfVnodesPerCore = 8;
short tsNumOfTotalVnodes = 0;
short tsCheckHeaderFile = 0;
+#ifdef _TD_ARM_32_
+int tsSessionsPerVnode = 100;
+#else
int tsSessionsPerVnode = 1000;
+#endif
+
int tsCacheBlockSize = 16384; // 256 columns
int tsAverageCacheBlocks = TSDB_DEFAULT_AVG_BLOCKS;
/**
diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c
index d4f3bd6879..7cbb4552b4 100644
--- a/src/util/src/ttokenizer.c
+++ b/src/util/src/ttokenizer.c
@@ -231,6 +231,7 @@ static SKeyword keywordTable[] = {
{"RATE", TK_RATE},
{"IRATE", TK_IRATE},
{"SUM_RATE", TK_SUM_RATE},
+ {"SUM_IRATE", TK_SUM_IRATE},
{"AVG_RATE", TK_AVG_RATE},
{"AVG_IRATE", TK_AVG_IRATE},
};