Merge remote-tracking branch 'origin/feature/query' into feature/query

Haojun Liao 2020-09-17 13:56:30 +08:00
commit 8e5a5d8ee1
67 changed files with 2303 additions and 1527 deletions

alert/cmd/alert/install_driver.sh (Normal file → Executable file)

@@ -9,9 +9,7 @@ set -e
script_dir=$(dirname $(readlink -f "$0"))
# Dynamic directory
lib_link_dir="/usr/lib"
#install main path
install_main_dir="/usr/local/taos"
lib64_link_dir="/usr/lib64"
# Color setting
RED='\033[0;31m'
@@ -25,24 +23,23 @@ if command -v sudo > /dev/null; then
csudo="sudo"
fi
function clean_driver() {
${csudo} rm -f /usr/lib/libtaos.so || :
}
function install_driver() {
echo -e "${GREEN}Start to install TDengine client driver ...${NC}"
#create install main dir and all sub dir
${csudo} mkdir -p ${install_main_dir}
${csudo} mkdir -p ${install_main_dir}/driver
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
echo
echo -e "\033[44;32;1mTDengine client driver is successfully installed!${NC}"
if [[ -d ${lib_link_dir} && ! -e ${lib_link_dir}/libtaos.so ]]; then
echo -e "${GREEN}Start to install TDengine client driver ...${NC}"
${csudo} ln -s ${script_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so || :
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
${csudo} ln -s ${script_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
echo
echo -e "${GREEN}TDengine client driver is successfully installed!${NC}"
else
echo -e "${GREEN}TDengine client driver already exists, Please confirm whether the alert version matches the client driver version!${NC}"
fi
}
install_driver


@@ -119,7 +119,7 @@ WantedBy=multi-user.target
return nil
}
const version = "TDengine alert v2.0.0.1"
var version = "2.0.0.1s"
func main() {
var (
@@ -133,7 +133,7 @@ func main() {
flag.Parse()
if showVersion {
fmt.Println(version)
fmt.Println("TDengine alert v" + version)
return
}


@@ -6,9 +6,9 @@ set -e
# set parameters by default value
cpuType=amd64 # [armv6l | arm64 | amd64 | 386]
osType=linux # [linux | darwin | windows]
version=""
declare -A archMap=(["armv6l"]="arm" ["arm64"]="arm64" ["amd64"]="x64" ["386"]="x86")
while getopts "h:c:o:" arg
while getopts "h:c:o:n:" arg
do
case $arg in
c)
@@ -19,6 +19,10 @@ do
#echo "osType=$OPTARG"
osType=$(echo $OPTARG)
;;
n)
#echo "version=$OPTARG"
version=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -c [armv6l | arm64 | amd64 | 386] -o [linux | darwin | windows]"
exit 0
@@ -30,18 +34,27 @@ do
esac
done
if [ "$version" == "" ]; then
echo "Please input the correct version!"
exit 1
fi
startdir=$(pwd)
scriptdir=$(dirname $(readlink -f $0))
cd ${scriptdir}/cmd/alert
version=$(grep 'const version =' main.go | awk '{print $NF}')
version=${version%\"}
version=${version:1}
echo "cpuType=${cpuType}"
echo "osType=${osType}"
echo "version=${version}"
GOOS=${osType} GOARCH=${cpuType} go build
GOOS=${osType} GOARCH=${cpuType} go build -ldflags '-X main.version='${version}
mkdir -p TDengine-alert/driver
cp alert alert.cfg install_driver.sh ./TDengine-alert/.
cp ../../../debug/build/lib/libtaos.so.${version} ./TDengine-alert/driver/.
chmod 777 ./TDengine-alert/install_driver.sh
tar -I 'gzip -9' -cf ${startdir}/TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}.tar.gz TDengine-alert/
rm -rf ./TDengine-alert
tar -I 'gzip -9' -cf ${startdir}/TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}.tar.gz alert alert.cfg install_driver.sh driver/


@@ -100,7 +100,7 @@ IF (TD_LINUX)
ENDIF ()
SET(DEBUG_FLAGS "-O0 -DDEBUG")
SET(RELEASE_FLAGS "-O0")
SET(RELEASE_FLAGS "-O0 -Wno-unused-variable -Wunused-but-set-variable")
IF (${COVER} MATCHES "true")
MESSAGE(STATUS "Test coverage mode, add extra flags")


@@ -11,7 +11,7 @@ TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/)
### Configuring Grafana
The TDengine Grafana plugin is in the /usr/local/taos/connector/grafana directory of the installation package.
The TDengine Grafana plugin is in the /usr/local/taos/connector/grafanaplugin directory of the installation package.
Taking CentOS 7.2 as an example, copy the tdengine directory into /var/lib/grafana/plugins and restart grafana.


@@ -78,6 +78,18 @@ taos> SELECT SUM(current) FROM meters INTERVAL(1s);
2018-10-03 14:38:16.000 | 36.000000000 |
Query OK, 5 row(s) in set (0.001538s)
```
Downsampling also supports a time offset. For example, to sum the current collected by all smart meters every second, but with each time window starting at the 500-millisecond mark:
```mysql
taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
ts | sum(current) |
======================================================
2018-10-03 14:38:04.500 | 11.189999809 |
2018-10-03 14:38:05.500 | 31.900000572 |
2018-10-03 14:38:06.500 | 11.600000000 |
2018-10-03 14:38:15.500 | 12.300000381 |
2018-10-03 14:38:16.500 | 35.000000000 |
Query OK, 5 row(s) in set (0.001521s)
```
In IoT scenarios it is hard to synchronize the collection times of different data collection points, yet many analysis algorithms (such as FFT) require the collected data to be strictly aligned at equal time intervals. In many systems the application has to handle this itself, while TDengine's downsampling solves it easily. If no data was collected in a time interval, TDengine also provides interpolation.
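As an illustrative sketch of that interpolation (the FILL clause is documented in the TAOS SQL section of this commit; the time range here is assumed for the example), a window with no samples can carry the previous window's sum forward:
```mysql
SELECT SUM(current) FROM meters
  WHERE ts >= '2018-10-03 14:38:00.000' AND ts <= '2018-10-03 14:38:20.000'
  INTERVAL(1s)
  FILL(PREV);
```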


@@ -148,7 +148,7 @@ INSERT INTO <tb1_name> USING <stb1_name> TAGS (<tag1_value1>, ...) VALUES (<fiel
SELECT function<field_name>,…
FROM <stable_name>
WHERE <tag_name> <[=|<=|>=|<>] values..> ([AND|OR] …)
INTERVAL (<time range>)
INTERVAL (<interval> [, offset])
GROUP BY <tag_name>, <tag_name>
ORDER BY <tag_name> <asc|desc>
SLIMIT <group_limit>
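For concreteness, a hedged instantiation of this template (meters and current come from the examples elsewhere in this commit; the location tag is an assumption for illustration):
```mysql
SELECT AVG(current)
FROM meters
WHERE location = 'Beijing'
INTERVAL (1m, 500a)
GROUP BY location
ORDER BY location ASC
SLIMIT 5;
```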


@@ -33,8 +33,7 @@ taos> DESCRIBE meters;
- The built-in function now returns the server's current time
- When a record is inserted with timestamp now, the server's current time is used
- Epoch time: the timestamp can also be a long integer, the number of milliseconds since 1970-01-01 08:00:00.000
- Times can be added and subtracted; e.g. now-2h moves the query time back 2 hours (the last 2 hours). The time units after the number are: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks), n (months), y (years). For example, select * from t1 where ts > now-2w and ts <= now-1w queries exactly one week of data from two weeks ago
- TDengine does not yet support splitting time windows by calendar year or calendar month. The time window units in a WHERE condition convert as follows: interval(1y) is equivalent to interval(365d), interval(1n) to interval(30d), and interval(1w) to interval(7d)
- Times can be added and subtracted; e.g. now-2h moves the query time back 2 hours (the last 2 hours). The time units after the number can be a (milliseconds), s (seconds), m (minutes), h (hours), d (days), or w (weeks). For example, select * from t1 where ts > now-2w and ts <= now-1w queries exactly one week of data from two weeks ago. When specifying the time window (interval) of a downsampling operation, the units n (calendar month) and y (calendar year) can also be used.
TDengine's default timestamp precision is milliseconds, but microseconds can be supported by changing the configuration parameter enableMicrosecond.
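A quick sketch of these rules in use (t1 is assumed to be an existing table whose only other column is a numeric metric):
```mysql
-- 'now' resolves to the server's current time at insert time
INSERT INTO t1 VALUES (now, 10.3);
-- exactly one week of data from two weeks ago
SELECT * FROM t1 WHERE ts > now-2w AND ts <= now-1w;
```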
@@ -299,7 +298,7 @@ TDengine's default timestamp precision is milliseconds, but via the configuration parameter enableMic…
SELECT select_expr [, select_expr ...]
FROM {tb_name_list}
[WHERE where_condition]
[INTERVAL [interval_offset,] interval_val]
[INTERVAL (interval_val [, interval_offset])]
[FILL fill_val]
[SLIDING fill_val]
[GROUP BY col_list]
@@ -972,17 +971,17 @@ TDengine supports aggregation by time window; table data can be aggregated by time…
```mysql
SELECT function_list FROM tb_name
[WHERE where_condition]
INTERVAL (interval)
INTERVAL (interval [, offset])
[FILL ({NONE | VALUE | PREV | NULL | LINEAR})]
SELECT function_list FROM stb_name
[WHERE where_condition]
INTERVAL (interval)
INTERVAL (interval [, offset])
[FILL ({ VALUE | PREV | NULL | LINEAR})]
[GROUP BY tags]
```
- The length of the aggregation window is specified by the keyword INTERVAL; the minimum interval is 10 milliseconds (10a). The aggregation and selection functions allowed in an aggregation query are limited to single-output functions: count, avg, sum, stddev, leastsquares, percentile, min, max, first, last; functions with multi-row output (e.g. top, bottom, diff) and arithmetic expressions cannot be used
- The length of the aggregation window is specified by the keyword INTERVAL; the minimum interval is 10 milliseconds (10a), and an offset is supported (the offset must be smaller than the interval; see the sketch after this list). The aggregation and selection functions allowed in an aggregation query are limited to single-output functions: count, avg, sum, stddev, leastsquares, percentile, min, max, first, last; functions with multi-row output (e.g. top, bottom, diff) and arithmetic expressions cannot be used
- The WHERE clause can specify the query's start and end times and other filter conditions
- The FILL clause specifies the fill mode used when data is missing in a time range. The fill modes include the following:
1. No fill: NONE (the default fill mode).
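A minimal sketch combining the interval, offset, and fill rules above (the table and time range are assumptions carried over from earlier examples; the 2s offset is deliberately smaller than the 10s interval):
```mysql
SELECT AVG(current) FROM meters
  WHERE ts >= '2018-10-03 14:38:00.000' AND ts <= '2018-10-03 14:40:00.000'
  INTERVAL(10s, 2s)
  FILL(NULL);
```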


@@ -1,7 +1,7 @@
# TDengine 2.0 error codes and their decimal equivalents
| Code | bit | error code | description | decimal error code |
| Status Code | Mode | Error Code (hex) | Description | Error Code (decimal) |
|-----------------------| :---: | :---------: | :------------------------ | ---------------- |
|TSDB_CODE_RPC_ACTION_IN_PROGRESS| 0 | 0x0001| "Action in progress"| -2147483647|
|TSDB_CODE_RPC_AUTH_REQUIRED| 0 | 0x0002 | "Authentication required"| -2147483646|
@@ -87,7 +87,7 @@
|TSDB_CODE_MND_INVALID_ACCT_OPTION| 0 | 0x0342 | "Invalid account options"| -2147482814|
|TSDB_CODE_MND_USER_ALREADY_EXIST| 0 | 0x0350 | "User already exists"| -2147482800|
|TSDB_CODE_MND_INVALID_USER |0 | 0x0351 | "Invalid user" |-2147482799|
|TSDB_CODE_MND_INVALID_USER_FORMAT| |0 |0x0352 |"Invalid user format" |-2147482798|
|TSDB_CODE_MND_INVALID_USER_FORMAT| 0 |0x0352 |"Invalid user format" |-2147482798|
|TSDB_CODE_MND_INVALID_PASS_FORMAT| 0| 0x0353 | "Invalid password format"| -2147482797|
|TSDB_CODE_MND_NO_USER_FROM_CONN| 0 | 0x0354 | "Can not get user from conn"| -2147482796|
|TSDB_CODE_MND_TOO_MANY_USERS| 0 | 0x0355| "Too many users"| -2147482795|
@@ -107,7 +107,7 @@
|TSDB_CODE_MND_DB_NOT_SELECTED| 0 | 0x0380 | "Database not specified or available"| -2147482752|
|TSDB_CODE_MND_DB_ALREADY_EXIST| 0 | 0x0381 | "Database already exists"| -2147482751|
|TSDB_CODE_MND_INVALID_DB_OPTION| 0 | 0x0382 | "Invalid database options"| -2147482750|
|TSDB_CODE_MND_INVALID_DB| |0 | 0x0383 | "Invalid database name"| -2147482749|
|TSDB_CODE_MND_INVALID_DB| 0 | 0x0383 | "Invalid database name"| -2147482749|
|TSDB_CODE_MND_MONITOR_DB_FORBIDDEN| 0 | 0x0384 | "Cannot delete monitor database"| -2147482748|
|TSDB_CODE_MND_TOO_MANY_DATABASES| 0| 0x0385 | "Too many databases for account"| -2147482747|
|TSDB_CODE_MND_DB_IN_DROPPING| 0 | 0x0386| "Database not available" |-2147482746|


@@ -1,48 +1,69 @@
# TDengine cluster installation and management
Multiple running instances of taosd can form a cluster, ensuring highly reliable operation of TDengine and providing horizontal scalability. Understanding TDengine 2.0 cluster management requires some familiarity with basic cluster concepts; please read the chapter on the overall TDengine 2.0 architecture. Also, before installing a cluster, install and try out a single node following the [Getting Started](https://www.taosdata.com/cn/getting-started20/) chapter.
Multiple TDengine servers, that is, multiple running instances of taosd, can form a cluster, ensuring highly reliable operation of TDengine and providing horizontal scalability. Understanding TDengine 2.0 cluster management requires some familiarity with basic cluster concepts; please read the chapter on the overall TDengine 2.0 architecture. Also, before installing a cluster, install and try out a single node following the [Getting Started](https://www.taosdata.com/cn/getting-started20/) chapter.
Each node of the cluster is uniquely identified by an End Point, which is the FQDN (Fully Qualified Domain Name) plus a port, e.g. h1.taosdata.com:6030. The FQDN is usually the server's hostname, obtainable via the Linux command `hostname -f`; for FQDN configuration see [this post explaining TDengine's FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html). The port is the node's service port, 6030 by default, configurable via the serverPort parameter in taos.cfg. A node may have several hostnames configured; TDengine automatically picks the first one, but it can also be set explicitly with the fqdn parameter in taos.cfg. If you prefer direct IP access, set fqdn to this node's IP address.
Each data node of the cluster is uniquely identified by an End Point, which is the FQDN (Fully Qualified Domain Name) plus a port, e.g. h1.taosdata.com:6030. The FQDN is usually the server's hostname, obtainable via the Linux command `hostname -f`; for FQDN configuration see [this post explaining TDengine's FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html). The port is the data node's service port, 6030 by default, configurable via the serverPort parameter in taos.cfg. A physical node may have several hostnames configured; TDengine automatically picks the first one, but it can also be set explicitly with the fqdn parameter in taos.cfg. If you prefer direct IP access, set fqdn to this node's IP address.
TDengine's cluster management is extremely simple: apart from adding and removing nodes, which require manual intervention, everything else is fully automatic, minimizing the operations workload. This chapter describes cluster management operations in detail.
## Preparation
**Step 1:** If any node used to build the cluster holds previous test data, or has had a 1.X release or any other TDengine version installed, delete it and clear all data first; for detailed steps see the blog post [Installing and uninstalling the various TDengine packages](https://www.taosdata.com/blog/2019/08/09/566.html);
**Step 0:** Plan the FQDNs of all physical nodes of the cluster; add each planned FQDN to the /etc/hostname of its physical node; edit the /etc/hosts of every physical node to map the IPs of all cluster physical nodes to their FQDNs. [If DNS is deployed, ask the network administrator to configure this on the DNS server instead;]
**Step 1:** If any physical node used to build the cluster holds previous test data, or has had a 1.X release or any other TDengine version installed, delete it and clear all data first; for detailed steps see the blog post [Installing and uninstalling the various TDengine packages](https://www.taosdata.com/blog/2019/08/09/566.html);
**Note 1:** Because FQDN information gets written into files, if the FQDN was not configured, or was changed, after TDengine had already been started, be sure to clean up the old data (rm -rf /var/lib/taos/) only after confirming the data is useless or has been backed up;
**Note 2:** The client must also be configured so that it can correctly resolve every node's FQDN, whether through a DNS service or a hosts file.
**Step 2:** It is recommended to turn off the firewall, or at least make sure TCP and UDP ports 6030-6042 are open. It is **strongly recommended** to turn the firewall off first and configure the ports only after the cluster has been set up;
**Step 2:** It is recommended to turn off the firewall on all physical nodes, or at least make sure TCP and UDP ports 6030-6042 are open. It is **strongly recommended** to turn the firewall off first and configure the ports only after the cluster has been set up;
**Step 3:** Install TDengine on all nodes, with exactly the same version everywhere, **but do not start taosd**;
**Step 3:** Install TDengine on all nodes, with exactly the same version everywhere, **but do not start taosd**. When the installer asks whether to join an existing TDengine cluster: on the first physical node simply press Enter to create a new cluster; on subsequent physical nodes enter the FQDN:port (default 6030) of any online physical node already in that cluster;
**Step 4:** Check and configure the FQDN of every node:
**Step 4:** Check the network settings of all data nodes and of the physical nodes where applications run:
1. Run the command `hostname -f` on every node and confirm that all nodes have distinct hostnames;
2. Run `ping host` on every node, where host is another node's hostname, to check whether the other nodes can be reached; if not, check the network settings, the /etc/hosts file, or the DNS configuration. Nodes that cannot ping each other cannot form a cluster;
3. Each node's FQDN is the hostname from the output plus the port, e.g. h1.taosdata.com:6030
1. Run the command `hostname -f` on every physical node and confirm that all nodes have distinct hostnames (nodes that only host the application driver need not be checked);
2. Run `ping host` on every physical node, where host is another physical node's hostname, to check whether the other physical nodes can be reached; if not, check the network settings, the /etc/hosts file (the default path on Windows is C:\Windows\system32\drivers\etc\hosts), or the DNS configuration. Nodes that cannot ping each other cannot form a cluster;
3. Each data node's End Point is the hostname from the output plus the port, e.g. h1.taosdata.com:6030
**Step 5:** Modify TDengine's configuration file (/etc/taos/taos.cfg on every node). Assume the first node to be started has the End Point h1.taosdata.com:6030; then the cluster-related parameters are:
**Step 5:** Modify TDengine's configuration file (/etc/taos/taos.cfg on every node). Assume the first data node to be started has the End Point h1.taosdata.com:6030; then its cluster-related configuration parameters are as follows:
```
// firstEp is identical in every node's configuration; after the first access to it, a node obtains the information of the entire cluster
// firstEp is the first data node that every data node connects to after its initial start
firstEp h1.taosdata.com:6030
// the FQDN of this node; if the host has only one hostname, it can be left unset
// the FQDN of this data node; if the host has only one hostname, it can be left unset
fqdn h1.taosdata.com
// the port of this node, default 6030
// the port of this data node, default 6030
serverPort 6030
// required when the number of server nodes is even; see the section "Using an Arbitrator"
arbitrator ha.taosdata.com:6042
```
The one parameter that must be changed is firstEp; the others can be left untouched unless you know exactly why you are changing them.
The parameters that must be changed are firstEp and fqdn; the others can be left untouched unless you know exactly why you are changing them.
## Starting the first node
**For a data node (dnode) to join a cluster, the 11 cluster-related parameters listed below must be completely identical; otherwise it cannot join the cluster.**
| **#** | **Configuration parameter** | **Meaning** |
| ----- | ------------------ | ---------------------------------------- |
| 1 | numOfMnodes | number of management nodes in the system |
| 2 | mnodeEqualVnodeNum | number of vnodes an mnode is counted as |
| 3 | offlineThreshold | dnode offline threshold; a dnode exceeding this duration is taken offline |
| 4 | statusInterval | how often a dnode reports its status to the mnode |
| 5 | arbitrator | End Point of the arbitrator in the system |
| 6 | timezone | time zone |
| 7 | locale | system locale and encoding |
| 8 | charset | character set encoding |
| 9 | balance | whether load balancing is enabled |
| 10 | maxTablesPerVnode | maximum number of tables that can be created in each vnode |
| 11 | maxVgroupsPerDb | maximum number of vnodes each DB can use |
## Starting the first data node
Following the instructions in [Getting Started](https://www.taosdata.com/cn/getting-started20/), start the first data node, e.g. h1.taosdata.com, then run taos to start the taos shell and execute the command "show dnodes;" as shown below:
Following the instructions in [Getting Started](https://www.taosdata.com/cn/getting-started20/), start the first node h1.taosdata.com, then run taos to start the taos shell and execute the command "show dnodes;" as shown below:
```
Welcome to the TDengine shell from Linux, Client Version:2.0.0.0
Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
@@ -55,74 +76,86 @@ Query OK, 1 row(s) in set (0.006385s)
taos>
```
The output above shows that the End Point of the node just started is h1.taos.com:6030
## Starting subsequent nodes
The output above shows that the End Point of the data node just started is h1.taos.com:6030, which is the firstEP of this new cluster.
To add subsequent nodes to the existing cluster, follow these steps:
## Starting subsequent data nodes
1. Start taosd on every node following the method in the [Getting Started](https://www.taosdata.com/cn/getting-started/) chapter.
To add subsequent data nodes to the existing cluster, follow these steps:
2. On the first node, use the CLI program taos to log in to TDengine and run:
1. Start taosd on every physical node following the method in the [Getting Started](https://www.taosdata.com/cn/getting-started/) chapter;
```
CREATE DNODE "h2.taos.com:6030"
```
2. On the first data node, use the CLI program taos to log in to TDengine and run:
This adds the new node's End Point (learned in Step 4 of the preparation) to the cluster's EP list. **The "fqdn:port" must be enclosed in double quotes**, or an error occurs. Be sure to replace the example "h2.taos.com:6030" with the End Point of the new node.
```
CREATE DNODE "h2.taos.com:6030"
```
This adds the new data node's End Point (learned in Step 4 of the preparation) to the cluster's EP list. **The "fqdn:port" must be enclosed in double quotes**, or an error occurs. Be sure to replace the example "h2.taos.com:6030" with the End Point of the new data node.
3. Then run the command:
```
SHOW DNODES
```
```
SHOW DNODES
```
to check whether the new node joined successfully. If the node being added is offline, make two checks:
- Check whether taosd on that node is running normally; if not, first find out why;
- Check the first few lines of that node's taosd log file taosdlog.0 (usually under /var/log/taos) and see whether the fqdn and port printed there match the End Point just added; if they differ, add the correct End Point.
to check whether the new node joined successfully. If the data node being added is offline, make two checks:
Following the steps above, new nodes can be added to the cluster one after another.
- Check whether taosd on that data node is running normally; if not, first find out why;
- Check the first few lines of that data node's taosd log file taosdlog.0 (usually under /var/log/taos) and see whether the fqdn and port printed there match the End Point just added; if they differ, add the correct End Point.
Following the steps above, new data nodes can be added to the cluster one after another.
**Tips:**
- The firstEp parameter only takes effect when a node joins the cluster for the first time; after joining, the node stores the latest mnode End Point list and no longer depends on these two parameters.
- Two dnodes started without the firstEp parameter will each run independently. At that point, one node can no longer be joined to the other to form a cluster. **Two independent clusters cannot be merged into a new cluster**.
- Any online data node already in the cluster can serve as the firstEP of nodes to be added later.
- The firstEp parameter only takes effect when a data node joins the cluster for the first time; after joining, the data node stores the latest mnode End Point list and no longer depends on this parameter.
- Two data nodes (dnodes) started without the firstEp parameter will each run independently. At that point, one data node can no longer be joined to the other to form a cluster. **Two independent clusters cannot be merged into a new cluster**.
## Node management
## Data node management
### Adding a data node
### Adding a node
Run the CLI program taos, log in to the system as root, and execute:
```
CREATE DNODE "fqdn:port"
```
This adds the new node's End Point to the cluster's EP list. **The "fqdn:port" must be enclosed in double quotes**, or an error occurs. The fqdn and port a node serves on can be configured in taos.cfg; by default they are obtained automatically.
### Removing a node
This adds the new data node's End Point to the cluster's EP list. **The "fqdn:port" must be enclosed in double quotes**, or an error occurs. The fqdn and port a data node serves on can be configured in taos.cfg; by default they are obtained automatically. [Configuring the FQDN by automatic acquisition is strongly discouraged; it may produce an End Point for the data node that is not what you expect.]
### Removing a data node
Run the CLI program taos, log in to TDengine as root, and execute:
```
DROP DNODE "fqdn:port";
```
where fqdn is the FQDN of the node being removed and port is the port it serves on;
### Listing nodes
### Listing data nodes
Run the CLI program taos, log in to TDengine as root, and execute:
```
SHOW DNODES;
```
It lists all dnodes in the cluster with each dnode's fqdn:port, status (ready, offline, etc.), vnode count, count of unused vnodes, and other information. Run this command after adding or removing a node.
It lists all dnodes in the cluster with each dnode's fqdn:port, status (ready, offline, etc.), vnode count, count of unused vnodes, and other information. Run this command after adding or removing a data node.
### Listing virtual node groups
To make full use of multi-core hardware and provide scalability, data has to be sharded. TDengine therefore splits the data of a DB into multiple parts stored in multiple vnodes. These vnodes may be distributed over multiple dnodes, achieving horizontal scale-out. A vnode belongs to exactly one DB, while one DB can have several vnodes. vnodes are allocated automatically by the mnode according to the current system resources, without any manual intervention.
To make full use of multi-core hardware and provide scalability, data has to be sharded. TDengine therefore splits the data of a DB into multiple parts stored in multiple vnodes. These vnodes may be distributed over multiple data nodes (dnodes), achieving horizontal scale-out. A vnode belongs to exactly one DB, while one DB can have several vnodes. vnodes are allocated automatically by the mnode according to the current system resources, without any manual intervention.
Run the CLI program taos, log in to TDengine as root, and execute:
```
SHOW VGROUPS;
```
## High availability of vnodes
TDengine provides high availability for the system, covering both vnodes and mnodes, through a multi-replica mechanism.
The number of vnode replicas is tied to a DB; a cluster can hold multiple DBs, and each DB can be configured with its own replica count as operations require. When creating a database, the replica parameter specifies the replica count (default 1). With a single replica, reliability cannot be guaranteed: as soon as the node holding the data goes down, service stops. The number of nodes in the cluster must be at least the replica count, otherwise creating a table fails with the error "more dnodes are needed". For example, the following command creates the database demo with 3 replicas:
@@ -130,21 +163,25 @@ The number of vnode replicas is tied to a DB; a cluster can hold multiple DBs, and…
```
CREATE DATABASE demo replica 3;
```
The data of a DB is partitioned over multiple vnode groups; the number of vnodes in a vnode group equals the DB's replica count, and the data of all vnodes in one vnode group is fully consistent. To guarantee high availability, the vnodes of a vnode group must be spread over different dnodes (in real deployments, over different physical machines); as long as more than half of the vnodes in a vgroup are working, the vgroup can serve requests normally.
A dnode may hold data of multiple DBs, so when a dnode goes offline multiple DBs may be affected. If half or more of the vnodes in a vnode group stop working, the vnode group can no longer serve requests (no inserts or reads), which affects reads and writes on some tables of the DB it belongs to
The data of a DB is partitioned over multiple vnode groups; the number of vnodes in a vnode group equals the DB's replica count, and the data of all vnodes in one vnode group is fully consistent. To guarantee high availability, the vnodes of a vnode group must be spread over different data nodes (dnodes) (in real deployments, over different physical machines); as long as more than half of the vnodes in a vgroup are working, the vgroup can serve requests normally.
Because of the vnode layer, one cannot simply conclude "the cluster works as long as more than half of the dnodes work". Still, for simple cases conclusions are easy: with 3 replicas and only three dnodes, the whole cluster still works when only one node is down, but it cannot work when two nodes are down.
A data node (dnode) may hold data of multiple DBs, so when a dnode goes offline multiple DBs may be affected. If half or more of the vnodes in a vnode group stop working, the vnode group can no longer serve requests (no inserts or reads), which affects reads and writes on some tables of the DB it belongs to.
Because of the vnode layer, one cannot simply conclude "the cluster works as long as more than half of the data nodes (dnodes) work". Still, for simple cases conclusions are easy: with 3 replicas and only three dnodes, the whole cluster still works when only one node is down, but it cannot work when two data nodes are down.
## High availability of mnodes
A TDengine cluster is managed by mnodes (an mnode is a module of taosd, a logical node). To keep mnodes highly available, multiple mnode replicas can be configured; the replica count is set by the system parameter numOfMnodes, with a valid range of 1-3. To guarantee strong consistency of the metadata, the mnode replicas replicate data synchronously.
A cluster has multiple dnodes, but a dnode runs at most one mnode instance. With several dnodes, which of them runs an mnode? That is decided entirely automatically by the system based on the overall resource situation. The user can run the following command in the TDengine console via the CLI program taos:
A TDengine cluster is managed by mnodes (an mnode is a module of taosd, the management node). To keep mnodes highly available, multiple mnode replicas can be configured; the replica count is set by the system parameter numOfMnodes, with a valid range of 1-3. To guarantee strong consistency of the metadata, the mnode replicas replicate data synchronously.
A cluster has multiple data nodes (dnodes), but a dnode runs at most one mnode instance. With several dnodes, which of them runs an mnode? That is decided entirely automatically by the system based on the overall resource situation. The user can run the following command in the TDengine console via the CLI program taos:
```
SHOW MNODES;
```
to view the list of mnodes; it shows the End Point of the dnode each mnode runs on and its role (master, slave, unsynced, or offline).
When the first node of the cluster starts, it always runs an mnode instance; otherwise that dnode cannot work, since a system must have at least one mnode. If numOfMnodes is set to 2, the second dnode to start will run an mnode instance as well.
When the first data node of the cluster starts, it always runs an mnode instance; otherwise that data node (dnode) cannot work, since a system must have at least one mnode. If numOfMnodes is set to 2, the second dnode to start will run an mnode instance as well.
To keep the mnode service highly available, numOfMnodes must be set to 2 or higher. Because the metadata kept by mnodes must be strongly consistent, when numOfMnodes is greater than 2 the replication parameter quorum is automatically set to 2; that is, a write is reported to the client application as successful only after at least two replicas have written the data successfully.
@@ -154,22 +191,25 @@ SHOW MNODES;
Three situations trigger load balancing, and none of them needs manual intervention.
- When a new node is added to the cluster, load balancing is triggered automatically and some data from existing nodes is moved to the new node, without any manual intervention.
- When a node is removed from the cluster, the system automatically moves that node's data to other nodes, without any manual intervention.
- If a node is overloaded (too much data), the system automatically rebalances by moving some of that node's vnodes to other nodes.
- When a new data node is added to the cluster, load balancing is triggered automatically and some data from existing nodes is moved to the new data node, without any manual intervention.
- When a data node is removed from the cluster, the system automatically moves that data node's data to other data nodes, without any manual intervention.
- If a data node is overloaded (too much data), the system automatically rebalances by moving some of that data node's vnodes to other nodes.
When any of the three situations above occurs, the system computes the load of each node to decide how to move data.
When any of the three situations above occurs, the system computes the load of each data node to decide how to move data.
## Handling offline nodes
If a node goes offline, the TDengine cluster detects it automatically. There are two cases:
- If the node stays offline beyond a certain duration (controlled by the taos.cfg parameter offlineThreshold), the system automatically removes it, raises a system alert, and triggers the load balancing process. If the removed node later comes back online, it cannot rejoin the cluster; the system administrator must add it to the cluster again before it starts working.
**[Tip] Load balancing is controlled by the parameter balance, which decides whether automatic load balancing is enabled.**
## Handling offline data nodes
If a data node goes offline, the TDengine cluster detects it automatically. There are two cases:
- If the data node stays offline beyond a certain duration (controlled by the taos.cfg parameter offlineThreshold), the system automatically removes it, raises a system alert, and triggers the load balancing process. If the removed data node later comes back online, it cannot rejoin the cluster; the system administrator must add it to the cluster again before it starts working.
- If the node comes back online within the offlineThreshold duration, the system automatically starts the data recovery process, and the node resumes normal work once the data has been fully recovered.
**Note:** if every node of a virtual node group (including the mnode group) is offline or unsynced, the Master of that virtual node group can only be elected after all nodes of the group are online and can exchange status information; only then can the group serve requests. For example, in a cluster of 3 nodes with 3 replicas, if all 3 nodes go down and then 2 restart, it cannot work; only after all 3 nodes have restarted successfully can it serve requests.
**Note:** if every data node that a virtual node group (including the mnode group) belongs to is offline or unsynced, the Master of that virtual node group can only be elected after all data nodes of the group are online and can exchange status information; only then can the group serve requests. For example, in a cluster of 3 data nodes with 3 replicas, if all 3 data nodes go down and then 2 data nodes restart, it cannot work; only after all 3 data nodes have restarted successfully can it serve requests.
## Using an Arbitrator
If the replica count is even, a master cannot be elected from a vnode group when half or more of its vnodes are not working; likewise, a master mnode cannot be elected when half or more of the mnodes are not working, because of the "split brain" problem. To solve this, TDengine introduces the concept of an arbitrator. The arbitrator simulates a working vnode or mnode but only maintains the network connection; it does not handle any data insertion or access. As long as more than half of the vnodes or mnodes, arbitrator included, are working, the vnode group or mnode group can serve inserts and queries normally. For example, with 2 replicas, if node A is offline but node B works normally and can connect to the arbitrator, node B works normally.
The TDengine package ships with an executable named tarbitrator; run it on any Linux server. It needs almost no system resources, only a network connection. Its command-line option `-p` sets the port it serves on, 6042 by default. When configuring each taosd instance, set the arbitrator parameter in taos.cfg to the arbitrator's End Point. Once this parameter is configured, the system automatically connects to the configured arbitrator whenever the replica count is even.


@@ -69,6 +69,7 @@ typedef struct SJoinSupporter {
SSubqueryState* pState;
SSqlObj* pObj; // parent SqlObj
int32_t subqueryIndex; // index of sub query
SInterval interval;
SLimitVal limit; // limit info
uint64_t uid; // query table uid
SArray* colList; // previous query information, no need to use this attribute, and the corresponding attribution


@@ -226,12 +226,8 @@ typedef struct SQueryInfo {
int16_t command; // the command may be different for each subclause, so keep it separately.
uint32_t type; // query/insert type
// TODO refactor
char intervalTimeUnit;
char slidingTimeUnit;
STimeWindow window; // query time window
int64_t intervalTime; // aggregation time window range
int64_t slidingTime; // sliding window in mseconds
int64_t intervalOffset;// start offset of each time window
SInterval interval;
int32_t tz; // query client timezone
SSqlGroupbyExpr groupbyExpr; // group by tags info
@@ -334,7 +330,7 @@ typedef struct STscObj {
struct SSqlStream *streamList;
void* pDnodeConn;
pthread_mutex_t mutex;
T_REF_DECLARE();
T_REF_DECLARE()
} STscObj;
typedef struct SSqlObj {
@@ -370,8 +366,6 @@ typedef struct SSqlStream {
uint32_t streamId;
char listed;
bool isProject;
char intervalTimeUnit;
char slidingTimeUnit;
int16_t precision;
int64_t num; // number of computing count
@@ -385,8 +379,7 @@ typedef struct SSqlStream {
int64_t ctime; // stream created time
int64_t stime; // stream next executed time
int64_t etime; // stream end query time, when time is larger then etime, the stream will be closed
int64_t intervalTime;
int64_t slidingTime;
SInterval interval;
void * pTimer;
void (*fp)();
@@ -470,7 +463,7 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
int32_t type = pInfo->pSqlExpr->resType;
int32_t bytes = pInfo->pSqlExpr->resBytes;
char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row;
char* pData = pRes->data + (int32_t)(pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row);
// user defined constant value output columns
if (TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {


@@ -2460,11 +2460,11 @@ static void percentile_function(SQLFunctionCtx *pCtx) {
if (pInfo->stage == 0) {
if (pCtx->preAggVals.isSet) {
if (pInfo->minval > pCtx->preAggVals.statis.min) {
pInfo->minval = pCtx->preAggVals.statis.min;
pInfo->minval = (double)pCtx->preAggVals.statis.min;
}
if (pInfo->maxval < pCtx->preAggVals.statis.max) {
pInfo->maxval = pCtx->preAggVals.statis.max;
pInfo->maxval = (double)pCtx->preAggVals.statis.max;
}
pInfo->numOfElems += (pCtx->size - pCtx->preAggVals.statis.numOfNull);


@@ -368,13 +368,12 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
TSKEY stime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey : pQueryInfo->window.ekey;
int64_t revisedSTime =
taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
int64_t revisedSTime = taosTimeTruncate(stime, &pQueryInfo->interval, tinfo.precision);
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
SFillColInfo* pFillCol = createFillColInfo(pQueryInfo);
pReducer->pFillInfo = taosInitFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols,
4096, (int32_t)numOfCols, pQueryInfo->slidingTime, pQueryInfo->slidingTimeUnit,
4096, (int32_t)numOfCols, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit,
tinfo.precision, pQueryInfo->fillType, pFillCol);
}
}
@@ -551,7 +550,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
}
// primary timestamp column is involved in final result
if (pQueryInfo->intervalTime != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
if (pQueryInfo->interval.interval != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
numOfGroupByCols++;
}
@@ -568,7 +567,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
orderIdx[i] = startCols++;
}
if (pQueryInfo->intervalTime != 0) {
if (pQueryInfo->interval.interval != 0) {
// the first column is the timestamp, handles queries like "interval(10m) group by tags"
orderIdx[numOfGroupByCols - 1] = PRIMARYKEY_TIMESTAMP_COL_INDEX;
}
@@ -612,12 +611,12 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage
* super table interval query
* if the order columns is the primary timestamp, all result data belongs to one group
*/
assert(pQueryInfo->intervalTime > 0);
assert(pQueryInfo->interval.interval > 0);
if (numOfCols == 1) {
return true;
}
} else { // simple group by query
assert(pQueryInfo->intervalTime == 0);
assert(pQueryInfo->interval.interval == 0);
}
// only one row exists
@@ -825,8 +824,7 @@ void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQ
if (pFillInfo != NULL) {
int64_t stime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.skey : pQueryInfo->window.ekey;
int64_t revisedSTime =
taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
int64_t revisedSTime = taosTimeTruncate(stime, &pQueryInfo->interval, tinfo.precision);
taosResetFillInfo(pFillInfo, revisedSTime);
}
@@ -839,7 +837,7 @@ void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQ
}
static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer, SQueryInfo* pQueryInfo) {
assert(pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
assert(pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
tFilePage * pBeforeFillData = pLocalReducer->pResultBuf;
@@ -1220,7 +1218,7 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
#endif
// no interval query, no fill operation
if (pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
if (pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
genFinalResWithoutFill(pRes, pLocalReducer, pQueryInfo);
} else {
SFillInfo* pFillInfo = pLocalReducer->pFillInfo;
@@ -1258,13 +1256,10 @@ static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
int8_t precision = tinfo.precision;
// for group result interpolation, do not return if no data is generated
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
TSKEY skey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey:pQueryInfo->window.ekey;//MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
int64_t newTime =
taosGetIntervalStartTimestamp(skey, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, precision);
int64_t newTime = taosTimeTruncate(skey, &pQueryInfo->interval, tinfo.precision);
taosResetFillInfo(pLocalReducer->pFillInfo, newTime);
}
}


@@ -142,7 +142,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
return tscInvalidSQLErrMsg(error, "value expected in timestamp", sToken.z);
}
if (getTimestampInUsFromStr(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}


@@ -259,11 +259,11 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
pSdesc->num = htobe64(pStream->num);
pSdesc->useconds = htobe64(pStream->useconds);
pSdesc->stime = htobe64(pStream->stime - pStream->intervalTime);
pSdesc->stime = htobe64(pStream->stime - pStream->interval.interval);
pSdesc->ctime = htobe64(pStream->ctime);
pSdesc->slidingTime = htobe64(pStream->slidingTime);
pSdesc->interval = htobe64(pStream->intervalTime);
pSdesc->slidingTime = htobe64(pStream->interval.sliding);
pSdesc->interval = htobe64(pStream->interval.interval);
pHeartbeat->numOfStreams++;
pSdesc++;


@@ -81,6 +81,7 @@ static void setColumnOffsetValueInResultset(SQueryInfo* pQueryInfo);
static int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* pCmd);
static int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
static int32_t parseOffsetClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
static int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExprItem* pItem);
@@ -595,24 +596,28 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
// interval is not null
SStrToken* t = &pQuerySql->interval;
if (parseDuration(t->z, t->n, &pQueryInfo->intervalTime, &pQueryInfo->intervalTimeUnit) != TSDB_CODE_SUCCESS) {
if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.interval, &pQueryInfo->interval.intervalUnit) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y') {
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
// if the unit of time window value is millisecond, change the value from microsecond
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
pQueryInfo->intervalTime = pQueryInfo->intervalTime / 1000;
pQueryInfo->interval.interval = pQueryInfo->interval.interval / 1000;
}
// interval cannot be less than 10 milliseconds
if (pQueryInfo->intervalTime < tsMinIntervalTime) {
if (pQueryInfo->interval.interval < tsMinIntervalTime) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
// for top/bottom + interval query, we do not add additional timestamp column in the front
if (isTopBottomQuery(pQueryInfo)) {
if (parseOffsetClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
if (parseSlidingClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -636,7 +641,7 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
* check invalid SQL:
* select tbname, tags_fields from super_table_name interval(1s)
*/
if (tscQueryTags(pQueryInfo) && pQueryInfo->intervalTime > 0) {
if (tscQueryTags(pQueryInfo) && pQueryInfo->interval.interval > 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@@ -662,6 +667,10 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
SColumnIndex index = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
tscAddSpecialColumnForSelect(pQueryInfo, 0, TSDB_FUNC_TS, &index, &s, TSDB_COL_NORMAL);
if (parseOffsetClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
if (parseSlidingClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -669,6 +678,57 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
return TSDB_CODE_SUCCESS;
}
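// Parse the optional offset in INTERVAL(interval, offset). An absent offset
// defaults to 0 with the interval's unit; a negative offset, an offset not
// shorter than the interval, or a 'year' offset on a 'month' interval is rejected.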
int32_t parseOffsetClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
const char* msg1 = "interval offset cannot be negative";
const char* msg2 = "interval offset should be shorter than interval";
const char* msg3 = "cannot use 'year' as offset when interval is 'month'";
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
SStrToken* t = &pQuerySql->offset;
if (t->n == 0) {
pQueryInfo->interval.offsetUnit = pQueryInfo->interval.intervalUnit;
pQueryInfo->interval.offset = 0;
return TSDB_CODE_SUCCESS;
}
if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.offset, &pQueryInfo->interval.offsetUnit) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
if (pQueryInfo->interval.offset < 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pQueryInfo->interval.offsetUnit != 'n' && pQueryInfo->interval.offsetUnit != 'y') {
// if the unit of time window value is millisecond, change the value from microsecond
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
pQueryInfo->interval.offset = pQueryInfo->interval.offset / 1000;
}
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
if (pQueryInfo->interval.offset >= pQueryInfo->interval.interval) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
} else if (pQueryInfo->interval.offsetUnit == pQueryInfo->interval.intervalUnit) {
if (pQueryInfo->interval.offset >= pQueryInfo->interval.interval) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else if (pQueryInfo->interval.intervalUnit == 'n' && pQueryInfo->interval.offsetUnit == 'y') {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
} else if (pQueryInfo->interval.intervalUnit == 'y' && pQueryInfo->interval.offsetUnit == 'n') {
if (pQueryInfo->interval.interval * 12 <= pQueryInfo->interval.offset) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else {
// TODO: offset should be shorter than interval, but how to check
// conflicts like 30days offset and 1 month interval
}
return TSDB_CODE_SUCCESS;
}
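// Parse the optional SLIDING clause. An absent sliding value defaults to the
// interval itself; natural units ('n'/'y') cannot slide, and the sliding value
// must lie within [tsMinSlidingTime, interval] with a bounded interval/sliding ratio.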
int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
const char* msg0 = "sliding value too small";
const char* msg1 = "sliding value no larger than the interval value";
@@ -682,29 +742,29 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
SStrToken* pSliding = &pQuerySql->sliding;
if (pSliding->n == 0) {
pQueryInfo->slidingTimeUnit = pQueryInfo->intervalTimeUnit;
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
pQueryInfo->interval.slidingUnit = pQueryInfo->interval.intervalUnit;
pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
return TSDB_CODE_SUCCESS;
}
if (pQueryInfo->intervalTimeUnit == 'n' || pQueryInfo->intervalTimeUnit == 'y') {
if (pQueryInfo->interval.intervalUnit == 'n' || pQueryInfo->interval.intervalUnit == 'y') {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
getTimestampInUsFromStr(pSliding->z, pSliding->n, &pQueryInfo->slidingTime);
parseAbsoluteDuration(pSliding->z, pSliding->n, &pQueryInfo->interval.sliding);
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
pQueryInfo->slidingTime /= 1000;
pQueryInfo->interval.sliding /= 1000;
}
if (pQueryInfo->slidingTime < tsMinSlidingTime) {
if (pQueryInfo->interval.sliding < tsMinSlidingTime) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
if (pQueryInfo->interval.sliding > pQueryInfo->interval.interval) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if ((pQueryInfo->intervalTime != 0) && (pQueryInfo->intervalTime/pQueryInfo->slidingTime > INTERVAL_SLIDING_FACTOR)) {
if ((pQueryInfo->interval.interval != 0) && (pQueryInfo->interval.interval/pQueryInfo->interval.sliding > INTERVAL_SLIDING_FACTOR)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@@ -4716,9 +4776,9 @@ int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg0 = "sample interval can not be less than 10ms.";
const char* msg1 = "functions not allowed in select clause";
if (pQueryInfo->intervalTime != 0 && pQueryInfo->intervalTime < 10 &&
pQueryInfo->intervalTimeUnit != 'n' &&
pQueryInfo->intervalTimeUnit != 'y') {
if (pQueryInfo->interval.interval != 0 && pQueryInfo->interval.interval < 10 &&
pQueryInfo->interval.intervalUnit != 'n' &&
pQueryInfo->interval.intervalUnit != 'y') {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
@@ -5503,7 +5563,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
insertResultField(pQueryInfo, (int32_t)size, &ids, bytes, (int8_t)type, name, pExpr);
} else {
// if this query is "group by" normal column, interval is not allowed
if (pQueryInfo->intervalTime > 0) {
if (pQueryInfo->interval.interval > 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@@ -5536,7 +5596,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
// only retrieve tags, group by is not supportted
if (tscQueryTags(pQueryInfo)) {
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0 || pQueryInfo->intervalTime > 0) {
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0 || pQueryInfo->interval.interval > 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
} else {
return TSDB_CODE_SUCCESS;
@@ -5988,7 +6048,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
if (parseIntervalClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
} else {
if ((pQueryInfo->intervalTime > 0) &&
if ((pQueryInfo->interval.interval > 0) &&
(validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -6018,7 +6078,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
* not here.
*/
if (pQuerySql->fillType != NULL) {
if (pQueryInfo->intervalTime == 0) {
if (pQueryInfo->interval.interval == 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -6186,7 +6246,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
if (parseIntervalClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
} else {
if ((pQueryInfo->intervalTime > 0) &&
if ((pQueryInfo->interval.interval > 0) &&
(validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -6237,12 +6297,11 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
* the columns may be increased due to group by operation
*/
if (pQuerySql->fillType != NULL) {
if (pQueryInfo->intervalTime == 0 && (!tscIsPointInterpQuery(pQueryInfo))) {
if (pQueryInfo->interval.interval == 0 && (!tscIsPointInterpQuery(pQueryInfo))) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
if (pQueryInfo->intervalTime > 0 && pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y') {
// to avoid data overflow
if (pQueryInfo->interval.interval > 0 && pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
bool initialWindows = TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER);
if (initialWindows) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
@@ -6250,7 +6309,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
int64_t timeRange = ABS(pQueryInfo->window.skey - pQueryInfo->window.ekey);
// number of result is not greater than 10,000,000
if ((timeRange == 0) || (timeRange / pQueryInfo->intervalTime) > MAX_INTERVAL_TIME_WINDOW) {
if ((timeRange == 0) || (timeRange / pQueryInfo->interval.interval) > MAX_INTERVAL_TIME_WINDOW) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
}
@@ -6393,4 +6452,4 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) {
}
return false;
}
}


@@ -647,8 +647,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_TSC_INVALID_SQL;
}
if (pQueryInfo->intervalTime < 0) {
tscError("%p illegal value of aggregation time interval in query msg: %ld", pSql, pQueryInfo->intervalTime);
if (pQueryInfo->interval.interval < 0) {
tscError("%p illegal value of aggregation time interval in query msg: %ld", pSql, pQueryInfo->interval.interval);
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -675,10 +675,12 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->limit = htobe64(pQueryInfo->limit.limit);
pQueryMsg->offset = htobe64(pQueryInfo->limit.offset);
pQueryMsg->numOfCols = htons((int16_t)taosArrayGetSize(pQueryInfo->colList));
pQueryMsg->intervalTime = htobe64(pQueryInfo->intervalTime);
pQueryMsg->slidingTime = htobe64(pQueryInfo->slidingTime);
pQueryMsg->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pQueryMsg->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pQueryMsg->interval.interval = htobe64(pQueryInfo->interval.interval);
pQueryMsg->interval.sliding = htobe64(pQueryInfo->interval.sliding);
pQueryMsg->interval.offset = htobe64(pQueryInfo->interval.offset);
pQueryMsg->interval.intervalUnit = pQueryInfo->interval.intervalUnit;
pQueryMsg->interval.slidingUnit = pQueryInfo->interval.slidingUnit;
pQueryMsg->interval.offsetUnit = pQueryInfo->interval.offsetUnit;
pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols);
pQueryMsg->numOfTags = htonl(numOfTags);
pQueryMsg->tagNameRelType = htons(pQueryInfo->tagCond.relType);


@@ -51,7 +51,7 @@ static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, in
int64_t retryDelta = (int64_t)(tsStreamCompRetryDelay * retryRangeFactor);
retryDelta = ((rand() % retryDelta) + tsStreamCompRetryDelay) * 1000L;
if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
// change to ms
if (prec == TSDB_TIME_PRECISION_MICRO) {
slidingTime = slidingTime / 1000;
@@ -87,7 +87,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
// failed to get meter/metric meta, retry in 10sec.
if (code != TSDB_CODE_SUCCESS) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
tscDebug("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime);
tscSetRetryTimer(pStream, pSql, retryDelayTime);
@@ -132,15 +132,16 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
}
if (etime > pStream->etime) {
etime = pStream->etime;
} else if (pStream->intervalTimeUnit != 'y' && pStream->intervalTimeUnit != 'n') {
etime = pStream->stime + (etime - pStream->stime) / pStream->intervalTime * pStream->intervalTime;
} else if (pStream->interval.intervalUnit != 'y' && pStream->interval.intervalUnit != 'n') {
etime = pStream->stime + (etime - pStream->stime) / pStream->interval.interval * pStream->interval.interval;
} else {
etime = taosGetIntervalStartTimestamp(etime, pStream->slidingTime, pStream->intervalTime, pStream->slidingTimeUnit, pStream->precision);
etime = taosTimeTruncate(etime, &pStream->interval, pStream->precision);
//etime = taosGetIntervalStartTimestamp(etime, pStream->interval.sliding, pStream->interval.sliding, pStream->interval.slidingUnit, pStream->precision);
}
pQueryInfo->window.ekey = etime;
if (pQueryInfo->window.skey >= pQueryInfo->window.ekey) {
int64_t timer = pStream->slidingTime;
if (pStream->intervalTimeUnit == 'y' || pStream->intervalTimeUnit == 'n') {
int64_t timer = pStream->interval.sliding;
if (pStream->interval.intervalUnit == 'y' || pStream->interval.intervalUnit == 'n') {
timer = 86400 * 1000l;
} else if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
timer /= 1000l;
@@ -162,7 +163,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOfRows) {
SSqlStream *pStream = (SSqlStream *)param;
if (tres == NULL || numOfRows < 0) {
int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
tscError("%p stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows,
retryDelay);
@@ -223,7 +224,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
SSqlObj * pSql = (SSqlObj *)res;
if (pSql == NULL || numOfRows < 0) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
tscError("%p stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime);
tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
@@ -246,11 +247,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
}
if (!pStream->isProject) {
if (pStream->intervalTimeUnit == 'y' || pStream->intervalTimeUnit == 'n') {
pStream->stime = taosAddNatualInterval(pStream->stime, pStream->slidingTime, pStream->slidingTimeUnit, pStream->precision);
} else {
pStream->stime += pStream->slidingTime;
}
pStream->stime = taosTimeAdd(pStream->stime, pStream->interval.sliding, pStream->interval.slidingUnit, pStream->precision);
}
// actually only one row is returned. this following is not necessary
taos_fetch_rows_a(res, tscProcessStreamRetrieveResult, pStream);
@@ -310,7 +307,7 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
now + timer, timer, delay, pStream->stime, etime);
} else {
tscDebug("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql, pStream,
pStream->stime, timer, delay, pStream->stime - pStream->intervalTime, pStream->stime - 1);
pStream->stime, timer, delay, pStream->stime - pStream->interval.interval, pStream->stime - 1);
}
pSql->cmd.command = TSDB_SQL_SELECT;
@@ -324,12 +321,12 @@ static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;
int64_t delayDelta = maxDelay;
if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
delayDelta = (int64_t)(pStream->slidingTime * tsStreamComputDelayRatio);
if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
delayDelta = (int64_t)(pStream->interval.sliding * tsStreamComputDelayRatio);
if (delayDelta > maxDelay) {
delayDelta = maxDelay;
}
int64_t remainTimeWindow = pStream->slidingTime - delayDelta;
int64_t remainTimeWindow = pStream->interval.sliding - delayDelta;
if (maxDelay > remainTimeWindow) {
maxDelay = (int64_t)(remainTimeWindow / 1.5f);
}
@@ -337,8 +334,8 @@ static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
int64_t currentDelay = (rand() % maxDelay); // a random number
currentDelay += delayDelta;
if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
assert(currentDelay < pStream->slidingTime);
if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
assert(currentDelay < pStream->interval.sliding);
}
return currentDelay;
@@ -353,7 +350,7 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
* for project query, no matter whether data is fetched successfully or not, the next launch will issue
* more than the sliding time window
*/
timer = pStream->slidingTime;
timer = pStream->interval.sliding;
if (pStream->stime > pStream->etime) {
tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
pStream->stime, pStream->etime);
@@ -366,7 +363,8 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
return;
}
} else {
int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
int64_t stime = taosTimeTruncate(pStream->stime - 1, &pStream->interval, pStream->precision);
//int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
if (stime >= pStream->etime) {
tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
pStream->stime, pStream->etime);
@@ -400,43 +398,43 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y' && pQueryInfo->intervalTime < minIntervalTime) {
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.interval < minIntervalTime) {
tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64, pSql, pStream,
pQueryInfo->intervalTime, minIntervalTime);
pQueryInfo->intervalTime = minIntervalTime;
pQueryInfo->interval.interval, minIntervalTime);
pQueryInfo->interval.interval = minIntervalTime;
}
pStream->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pStream->intervalTime = pQueryInfo->intervalTime; // it shall be derived from sql string
pStream->interval.intervalUnit = pQueryInfo->interval.intervalUnit;
pStream->interval.interval = pQueryInfo->interval.interval; // it shall be derived from sql string
if (pQueryInfo->slidingTime <= 0) {
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
pQueryInfo->slidingTimeUnit = pQueryInfo->intervalTimeUnit;
if (pQueryInfo->interval.sliding <= 0) {
pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
pQueryInfo->interval.slidingUnit = pQueryInfo->interval.intervalUnit;
}
int64_t minSlidingTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y' && pQueryInfo->slidingTime < minSlidingTime) {
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.sliding < minSlidingTime) {
tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
pQueryInfo->slidingTime, minSlidingTime);
pQueryInfo->interval.sliding, minSlidingTime);
pQueryInfo->slidingTime = minSlidingTime;
pQueryInfo->interval.sliding = minSlidingTime;
}
if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
if (pQueryInfo->interval.sliding > pQueryInfo->interval.interval) {
tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64, pSql, pStream,
pQueryInfo->slidingTime, pQueryInfo->intervalTime);
pQueryInfo->interval.sliding, pQueryInfo->interval.interval);
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
}
pStream->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pStream->slidingTime = pQueryInfo->slidingTime;
pStream->interval.slidingUnit = pQueryInfo->interval.slidingUnit;
pStream->interval.sliding = pQueryInfo->interval.sliding;
if (pStream->isProject) {
pQueryInfo->intervalTime = 0; // clear the interval value to avoid the force time window split by query processor
pQueryInfo->slidingTime = 0;
pQueryInfo->interval.interval = 0; // clear the interval value to avoid the force time window split by query processor
pQueryInfo->interval.sliding = 0;
}
}
@@ -445,8 +443,8 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
if (pStream->isProject) {
// no data in table, flush all data till now to destination meter, 10sec delay
pStream->intervalTime = tsProjectExecInterval;
pStream->slidingTime = tsProjectExecInterval;
pStream->interval.interval = tsProjectExecInterval;
pStream->interval.sliding = tsProjectExecInterval;
if (stime != 0) { // first projection start from the latest event timestamp
assert(stime >= pQueryInfo->window.skey);
@@ -459,12 +457,15 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
stime = pQueryInfo->window.skey;
if (stime == INT64_MIN) {
stime = (int64_t)taosGetTimestamp(pStream->precision);
stime = taosGetIntervalStartTimestamp(stime, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
stime = taosGetIntervalStartTimestamp(stime - 1, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
stime = taosTimeTruncate(stime, &pStream->interval, pStream->precision);
stime = taosTimeTruncate(stime - 1, &pStream->interval, pStream->precision);
//stime = taosGetIntervalStartTimestamp(stime, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
//stime = taosGetIntervalStartTimestamp(stime - 1, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64, pSql, pStream, stime);
}
} else {
int64_t newStime = taosGetIntervalStartTimestamp(stime, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
//int64_t newStime = taosGetIntervalStartTimestamp(stime, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
int64_t newStime = taosTimeTruncate(stime, &pStream->interval, pStream->precision);
if (newStime != stime) {
tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql, pStream, stime, newStime);
stime = newStime;
@@ -534,7 +535,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
pStream, pTableMetaInfo->name, pStream->intervalTime, pStream->slidingTime, starttime, pSql->sqlstr);
pStream, pTableMetaInfo->name, pStream->interval.interval, pStream->interval.sliding, starttime, pSql->sqlstr);
}
TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),


@@ -113,7 +113,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
* in case of stable query, limit/offset is not applied here. the limit/offset is applied to the
* final results which are acquired after the secondary merge in the client.
*/
if (pLimit->offset == 0 || pQueryInfo->intervalTime > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) {
if (pLimit->offset == 0 || pQueryInfo->interval.interval > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) {
if (win->skey > elem1.ts) {
win->skey = elem1.ts;
}
@@ -178,6 +178,7 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, in
pSupporter->subqueryIndex = index;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
memcpy(&pSupporter->interval, &pQueryInfo->interval, sizeof(pSupporter->interval));
pSupporter->limit = pQueryInfo->limit;
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, index);
@@ -297,18 +298,20 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
success = false;
break;
}
tscClearSubqueryInfo(&pNew->cmd);
pSql->pSubs[i] = pNew;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
pQueryInfo->tsBuf = pTSBuf; // transfer the ownership of timestamp comp-z data to the new created object
// set the second stage sub query for join process
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE);
memcpy(&pQueryInfo->interval, &pSupporter->interval, sizeof(pQueryInfo->interval));
tscTagCondCopy(&pQueryInfo->tagCond, &pSupporter->tagCond);
pQueryInfo->colList = pSupporter->colList;
pQueryInfo->exprList = pSupporter->exprList;
pQueryInfo->fieldsInfo = pSupporter->fieldsInfo;
@@ -1204,7 +1207,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
}
pNew->cmd.numOfCols = 0;
pNewQueryInfo->intervalTime = 0;
pNewQueryInfo->interval.interval = 0;
pSupporter->limit = pNewQueryInfo->limit;
pNewQueryInfo->limit.limit = -1;
@@ -1950,7 +1953,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
pSql->numOfSubs = taosArrayGetSize(pCmd->pDataBlocks);
pSql->numOfSubs = (uint16_t)taosArrayGetSize(pCmd->pDataBlocks);
assert(pSql->numOfSubs > 0);
pRes->code = TSDB_CODE_SUCCESS;
@@ -2185,7 +2188,7 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
}
// primary key column cannot be null in interval query, no need to check
if (i == 0 && pQueryInfo->intervalTime > 0) {
if (i == 0 && pQueryInfo->interval.interval > 0) {
continue;
}


@@ -1849,6 +1849,7 @@ static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pQueryInfo, SQueryInfo* p
}
assert(matched);
(void)matched;
}
tscFieldInfoUpdateOffset(pNewQueryInfo);
@@ -1899,10 +1900,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pNewQueryInfo->command = pQueryInfo->command;
pNewQueryInfo->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pNewQueryInfo->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pNewQueryInfo->intervalTime = pQueryInfo->intervalTime;
pNewQueryInfo->slidingTime = pQueryInfo->slidingTime;
memcpy(&pNewQueryInfo->interval, &pQueryInfo->interval, sizeof(pNewQueryInfo->interval));
pNewQueryInfo->type = pQueryInfo->type;
pNewQueryInfo->window = pQueryInfo->window;
pNewQueryInfo->limit = pQueryInfo->limit;

View File

@ -35,8 +35,6 @@ bool tscValidateTableNameLength(size_t len);
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision);
int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision);
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
// int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
#endif // TDENGINE_NAME_H

View File

@ -957,17 +957,6 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
// http configs
cfg.option = "httpCacheSessions";
cfg.ptr = &tsHttpCacheSessions;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 1;
cfg.maxValue = 100000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "httpEnableRecordSql";
cfg.ptr = &tsHttpEnableRecordSql;
cfg.valType = TAOS_CFG_VTYPE_INT32;

View File

@ -99,62 +99,7 @@ SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numO
return pFilter;
}
int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision) {
key /= 1000;
if (precision == TSDB_TIME_PRECISION_MICRO) {
key /= 1000;
}
struct tm tm;
time_t t = (time_t)key;
localtime_r(&t, &tm);
if (timeUnit == 'y') {
intervalTime *= 12;
}
int mon = (int)(tm.tm_year * 12 + tm.tm_mon + intervalTime);
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
key = mktime(&tm) * 1000L;
if (precision == TSDB_TIME_PRECISION_MICRO) {
key *= 1000L;
}
return key;
}
int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision) {
skey /= 1000;
ekey /= 1000;
if (precision == TSDB_TIME_PRECISION_MICRO) {
skey /= 1000;
ekey /= 1000;
}
if (ekey < skey) {
int64_t tmp = ekey;
ekey = skey;
skey = tmp;
}
struct tm tm;
time_t t = (time_t)skey;
localtime_r(&t, &tm);
int smon = tm.tm_year * 12 + tm.tm_mon;
t = (time_t)ekey;
localtime_r(&t, &tm);
int emon = tm.tm_year * 12 + tm.tm_mon;
if (timeUnit == 'y') {
intervalTime *= 12;
}
return (emon - smon) / (int32_t)intervalTime;
}
#if 0
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision) {
if (slidingTime == 0) {
return startTime;
@ -219,6 +164,8 @@ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, in
return start;
}
#endif
/*
* tablePrefix.columnName
* extract table name and save it in pTable, with only column name in pToken

View File

@ -101,6 +101,7 @@ extern const int32_t TYPE_BYTES[11];
#define TSDB_TIME_PRECISION_MILLI 0
#define TSDB_TIME_PRECISION_MICRO 1
#define TSDB_TIME_PRECISION_NANO 2
#define TSDB_TICK_PER_SECOND(precision) ((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L))
#define TSDB_TIME_PRECISION_MILLI_STR "ms"
#define TSDB_TIME_PRECISION_MICRO_STR "us"

View File

@ -460,11 +460,7 @@ typedef struct {
int16_t order;
int16_t orderColId;
int16_t numOfCols; // the number of columns will be load from vnode
int64_t intervalTime; // time interval for aggregation, in million second
int64_t intervalOffset; // start offset for interval query
int64_t slidingTime; // value for sliding window
char intervalTimeUnit;
char slidingTimeUnit; // time interval type, for revisement of interval(1d)
SInterval interval;
uint16_t tagCondLen; // tag length in current query
int16_t numOfGroupCols; // num of group by columns
int16_t orderByIdx;

View File

@ -765,7 +765,9 @@ void read_history() {
FILE *f = fopen(f_history, "r");
if (f == NULL) {
#ifndef WINDOWS
fprintf(stderr, "Failed to open file %s\n", f_history);
if (errno != ENOENT) {
fprintf(stderr, "Failed to open file %s, reason:%s\n", f_history, strerror(errno));
}
#endif
return;
}
@ -792,7 +794,7 @@ void write_history() {
FILE *f = fopen(f_history, "w");
if (f == NULL) {
#ifndef WINDOWS
fprintf(stderr, "Failed to open file %s for write\n", f_history);
fprintf(stderr, "Failed to open file %s for write, reason:%s\n", f_history, strerror(errno));
#endif
return;
}

View File

@ -30,8 +30,6 @@ extern "C" {
#define MILLISECOND_PER_HOUR (MILLISECOND_PER_MINUTE * 60)
#define MILLISECOND_PER_DAY (MILLISECOND_PER_HOUR * 24)
#define MILLISECOND_PER_WEEK (MILLISECOND_PER_DAY * 7)
#define MILLISECOND_PER_MONTH (MILLISECOND_PER_DAY * 30)
#define MILLISECOND_PER_YEAR (MILLISECOND_PER_DAY * 365)
//@return timestamp in seconds
int32_t taosGetTimestampSec();
@ -63,8 +61,22 @@ static FORCE_INLINE int64_t taosGetTimestamp(int32_t precision) {
}
}
int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts);
int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit);
typedef struct SInterval {
char intervalUnit;
char slidingUnit;
char offsetUnit;
int64_t interval;
int64_t sliding;
int64_t offset;
} SInterval;
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);
int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision);
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision);
int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* ts);
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit);
int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth);
void deltaToUtcInitOnce();
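A minimal caller-side sketch of the SInterval API declared above (an illustration, not part of the commit; it assumes these declarations are visible through the surrounding headers and that TSDB_TIME_PRECISION_MILLI == 0 as defined in taosdef.h):

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  // compute the window containing ts for INTERVAL(1n, 10d) at ms precision
  static void showWindow(int64_t ts) {
    SInterval iv = {0};
    iv.intervalUnit = 'n';                      // one natural month
    iv.interval     = 1;
    iv.slidingUnit  = 'n';                      // tumbling window: sliding == interval
    iv.sliding      = 1;
    iv.offsetUnit   = 'd';
    iv.offset       = 10LL * 24 * 3600 * 1000;  // 10 days, in millisecond ticks

    int64_t skey = taosTimeTruncate(ts, &iv, TSDB_TIME_PRECISION_MILLI);
    int64_t ekey = taosTimeAdd(skey, iv.interval, iv.intervalUnit, TSDB_TIME_PRECISION_MILLI) - 1;
    printf("window: [%" PRId64 ", %" PRId64 "]\n", skey, ekey);
  }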

View File

@ -321,7 +321,7 @@ int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec) {
}
static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* result) {
static int32_t getDurationInUs(int64_t val, char unit, int64_t* result) {
*result = val;
int64_t factor = 1000L;
@ -342,19 +342,12 @@ static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* resu
case 'w':
(*result) *= MILLISECOND_PER_WEEK*factor;
break;
case 'n':
(*result) *= MILLISECOND_PER_MONTH*factor;
break;
case 'y':
(*result) *= MILLISECOND_PER_YEAR*factor;
break;
case 'a':
(*result) *= factor;
break;
case 'u':
break;
default: {
;
return -1;
}
}
@ -373,7 +366,7 @@ static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* resu
* n - Months (30 days)
* y - Years (365 days)
*/
int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts) {
int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* duration) {
errno = 0;
char* endPtr = NULL;
@ -383,10 +376,16 @@ int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts) {
return -1;
}
return getTimestampInUsFromStrImpl(timestamp, token[tokenlen - 1], ts);
/* natural month/year are not allowed in an absolute duration */
char unit = token[tokenlen - 1];
if (unit == 'n' || unit == 'y') {
return -1;
}
return getDurationInUs(timestamp, unit, duration);
}
int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit) {
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit) {
errno = 0;
/* get the basic numeric value */
@ -400,7 +399,121 @@ int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, ch
return 0;
}
return getTimestampInUsFromStrImpl(*duration, *unit, duration);
return getDurationInUs(*duration, *unit, duration);
}
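A short sketch of how the two renamed parsers now divide the work (illustrative values only; assumes the declarations above): absolute durations are normalized to microseconds immediately, while natural month/year durations keep their raw value and unit for later calendar-aware application:

  static void durationSketch(void) {
    int64_t dur  = 0;
    char    unit = 0;
    char    h[] = "1h", n[] = "1n";
    parseAbsoluteDuration(h, 2, &dur);       // ok: dur == 3600000000 (microseconds)
    // parseAbsoluteDuration(n, 2, &dur)     // would fail: a month has no fixed length
    parseNatualDuration(n, 2, &dur, &unit);  // ok: dur == 1, unit == 'n'; resolved
                                             // per-timestamp via taosTimeAdd()
  }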
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) {
if (duration == 0) {
return t;
}
if (unit == 'y') {
duration *= 12;
} else if (unit != 'n') {
return t + duration;
}
struct tm tm;
time_t tt = (time_t)(t / TSDB_TICK_PER_SECOND(precision));
localtime_r(&tt, &tm);
int mon = tm.tm_year * 12 + tm.tm_mon + (int)duration;
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
return (int64_t)(mktime(&tm) * TSDB_TICK_PER_SECOND(precision));
}
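taosTimeAdd() walks the calendar for natural units rather than adding a fixed tick count; a worked illustration (millisecond precision, local time zone dependent):

  // 2020-01-15 + 1n -> 2020-02-15     (tm_mon advances by one)
  // 2020-01-31 + 1n -> 2020-03-02     (Feb 31 is normalized by mktime();
  //                                    2020 is a leap year, Feb has 29 days)
  // t + 1y          == t + 12n        (a year is twelve natural months)
  // fixed units bypass the calendar: taosTimeAdd(t, 3600000, 'h', precision)
  //                                   simply returns t + 3600000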
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision) {
if (ekey < skey) {
int64_t tmp = ekey;
ekey = skey;
skey = tmp;
}
if (unit != 'n' && unit != 'y') {
return (int32_t)((ekey - skey) / interval);
}
skey /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
ekey /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
struct tm tm;
time_t t = (time_t)skey;
localtime_r(&t, &tm);
int smon = tm.tm_year * 12 + tm.tm_mon;
t = (time_t)ekey;
localtime_r(&t, &tm);
int emon = tm.tm_year * 12 + tm.tm_mon;
if (unit == 'y') {
interval *= 12;
}
return (emon - smon) / (int32_t)interval;
}
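taosTimeCountInterval() counts whole intervals between two keys and is insensitive to argument order (the keys are swapped when ekey < skey); an illustration:

  // fixed unit:     skey = 0, ekey = 150000, interval = 60000 ('m', ms precision)
  //                 -> (150000 - 0) / 60000 == 2
  // natural month:  skey in 2020-01, ekey in 2020-04, interval = 1, unit 'n'
  //                 -> the month indices differ by 3, so the result is 3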
int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision) {
if (pInterval->sliding == 0) {
assert(pInterval->interval == 0);
return t;
}
int64_t start = t;
if (pInterval->slidingUnit == 'n' || pInterval->slidingUnit == 'y') {
start /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
struct tm tm;
time_t tt = (time_t)start;
localtime_r(&tt, &tm);
tm.tm_sec = 0;
tm.tm_min = 0;
tm.tm_hour = 0;
tm.tm_mday = 1;
if (pInterval->slidingUnit == 'y') {
tm.tm_mon = 0;
tm.tm_year = (int)(tm.tm_year / pInterval->sliding * pInterval->sliding);
} else {
int mon = tm.tm_year * 12 + tm.tm_mon;
mon = (int)(mon / pInterval->sliding * pInterval->sliding);
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
}
start = (int64_t)(mktime(&tm) * TSDB_TICK_PER_SECOND(precision));
} else {
int64_t delta = t - pInterval->interval;
int32_t factor = delta > 0 ? 1 : -1;
start = (delta / pInterval->sliding + factor) * pInterval->sliding;
if (pInterval->intervalUnit == 'd' || pInterval->intervalUnit == 'w') {
/*
* here we revise the start time of day according to the local time zone,
* but in case of DST, the start time of one day needs to be decided dynamically.
*/
// todo refactor to extract function that is available for Linux/Windows/Mac platform
#if defined(WINDOWS) && _MSC_VER >= 1900
// see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
int64_t timezone = _timezone;
int32_t daylight = _daylight;
char** tzname = _tzname;
#endif
start += (int64_t)(timezone * TSDB_TICK_PER_SECOND(precision));
}
int64_t end = start + pInterval->interval - 1;
if (end < t) {
start += pInterval->sliding;
}
}
if (pInterval->offset > 0) {
start = taosTimeAdd(start, pInterval->offset, pInterval->offsetUnit, precision);
if (start > t) {
start = taosTimeAdd(start, -pInterval->interval, pInterval->intervalUnit, precision);
}
}
return start;
}
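How taosTimeTruncate() places the start of the window containing t; a worked illustration of the fixed-unit branch (millisecond precision, UTC assumed for simplicity):

  // INTERVAL(1m): t = 90000 (00:01:30), interval = sliding = 60000
  //   delta = 90000 - 60000 = 30000; start = (30000/60000 + 1) * 60000 = 60000
  //   end = 60000 + 60000 - 1 = 119999 >= t, so t falls in [60000, 119999]
  // INTERVAL(1m, 1s): start = taosTimeAdd(60000, 1000, 's', ...) = 61000, still
  //   <= t, so the window becomes [61000, 120999]; had the shift overshot t,
  //   one whole interval would be subtracted to keep t inside the window
  // natural units ('n'/'y') instead truncate via localtime_r()/mktime() to a
  //   month or year boundary, and daily/weekly windows are corrected by the
  //   local time zone offset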
// internal function, when program is paused in debugger,
@ -411,24 +524,38 @@ int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, ch
// 2020-07-03 17:48:42
// and the parameter can also be a variable.
const char* fmtts(int64_t ts) {
static char buf[32];
static char buf[96];
size_t pos = 0;
struct tm tm;
time_t tt;
if (ts > -62135625943 && ts < 32503651200) {
tt = ts;
} else if (ts > -62135625943000 && ts < 32503651200000) {
tt = ts / 1000;
} else {
tt = ts / 1000000;
time_t t = (time_t)ts;
localtime_r(&t, &tm);
pos += strftime(buf + pos, sizeof(buf), "s=%Y-%m-%d %H:%M:%S", &tm);
}
struct tm* ptm = localtime(&tt);
size_t pos = strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", ptm);
if (ts > -62135625943000 && ts < 32503651200000) {
time_t t = (time_t)(ts / 1000);
localtime_r(&t, &tm);
if (pos > 0) {
buf[pos++] = ' ';
buf[pos++] = '|';
buf[pos++] = ' ';
}
pos += strftime(buf + pos, sizeof(buf), "ms=%Y-%m-%d %H:%M:%S", &tm);
pos += sprintf(buf + pos, ".%03d", (int)(ts % 1000));
}
if (ts <= -62135625943000 || ts >= 32503651200000) {
sprintf(buf + pos, ".%06d", (int)(ts % 1000000));
} else if (ts <= -62135625943 || ts >= 32503651200) {
sprintf(buf + pos, ".%03d", (int)(ts % 1000));
{
time_t t = (time_t)(ts / 1000000);
localtime_r(&t, &tm);
if (pos > 0) {
buf[pos++] = ' ';
buf[pos++] = '|';
buf[pos++] = ' ';
}
pos += strftime(buf + pos, sizeof(buf), "us=%Y-%m-%d %H:%M:%S", &tm);
pos += sprintf(buf + pos, ".%06d", (int)(ts % 1000000));
}
return buf;
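fmtts() is a debugger aid; a value of ambiguous magnitude is now rendered once per plausible precision instead of guessing a single one. Usage sketch (output illustrative and time zone dependent):

  // (gdb) p fmtts(1599999999)
  // "s=2020-09-13 12:26:39 | ms=1970-01-19 12:26:39.999 | us=1970-01-01 00:26:39.999999"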

View File

@ -131,8 +131,6 @@ HttpContext *httpCreateContext(int32_t fd) {
HttpContext *httpGetContext(void *ptr) {
uint64_t handleVal = (uint64_t)ptr;
HttpContext **ppContext = taosCacheAcquireByKey(tsHttpServer.contextCache, &handleVal, sizeof(HttpContext *));
ASSERT(ppContext);
ASSERT(*ppContext);
if (ppContext) {
HttpContext *pContext = *ppContext;

View File

@ -87,15 +87,12 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
JsonBuf *jsonBuf = httpMallocJsonBuf(pContext);
if (jsonBuf == NULL) return false;
cmd->numOfRows += numOfRows;
int32_t num_fields = taos_num_fields(result);
TAOS_FIELD *fields = taos_fetch_fields(result);
for (int32_t k = 0; k < numOfRows; ++k) {
TAOS_ROW row = taos_fetch_row(result);
if (row == NULL) {
cmd->numOfRows--;
continue;
}
int32_t* length = taos_fetch_lengths(result);
@ -151,24 +148,23 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
}
// data row array end
httpJsonToken(jsonBuf, JsonArrEnd);
}
httpJsonToken(jsonBuf, JsonArrEnd);
cmd->numOfRows ++;
if (cmd->numOfRows >= tsRestRowLimit) {
httpDebug("context:%p, fd:%d, user:%s, retrieve rows:%d larger than limit:%d, abort retrieve", pContext,
pContext->fd, pContext->user, cmd->numOfRows, tsRestRowLimit);
return false;
} else {
if (pContext->fd <= 0) {
httpError("context:%p, fd:%d, user:%s, connection is closed, abort retrieve", pContext, pContext->fd,
pContext->user);
httpError("context:%p, fd:%d, user:%s, conn closed, abort retrieve", pContext, pContext->fd, pContext->user);
return false;
}
if (cmd->numOfRows >= tsRestRowLimit) {
httpDebug("context:%p, fd:%d, user:%s, retrieve rows:%d larger than limit:%d, abort retrieve", pContext,
pContext->fd, pContext->user, cmd->numOfRows, tsRestRowLimit);
return false;
} else {
httpDebug("context:%p, fd:%d, user:%s, total rows:%d retrieved", pContext, pContext->fd, pContext->user,
cmd->numOfRows);
return true;
}
}
httpDebug("context:%p, fd:%d, user:%s, retrieved row:%d", pContext, pContext->fd, pContext->user, cmd->numOfRows);
return true;
}
bool restBuildSqlTimestampJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int32_t numOfRows) {

View File

@ -315,45 +315,48 @@ static bool httpReadData(HttpContext *pContext) {
pContext->accessTimes++;
pContext->lastAccessTime = taosGetTimestampSec();
char buf[HTTP_STEP_SIZE + 1] = {0};
char buf[HTTP_STEP_SIZE + 1] = {0};
int32_t nread = (int32_t)taosReadSocket(pContext->fd, buf, HTTP_STEP_SIZE);
if (nread > 0) {
buf[nread] = '\0';
httpTraceL("context:%p, fd:%d, nread:%d content:%s", pContext, pContext->fd, nread, buf);
int32_t ok = httpParseBuf(pParser, buf, nread);
while (1) {
int32_t nread = (int32_t)taosReadSocket(pContext->fd, buf, HTTP_STEP_SIZE);
if (nread > 0) {
buf[nread] = '\0';
httpTraceL("context:%p, fd:%d, nread:%d content:%s", pContext, pContext->fd, nread, buf);
int32_t ok = httpParseBuf(pParser, buf, nread);
if (ok) {
httpError("context:%p, fd:%d, parse failed, ret:%d code:%d close connect", pContext, pContext->fd, ok, pParser->parseCode);
httpSendErrorResp(pContext, pParser->parseCode);
httpNotifyContextClose(pContext);
return false;
}
if (ok) {
httpError("context:%p, fd:%d, parse failed, ret:%d code:%d close connect", pContext, pContext->fd, ok,
pParser->parseCode);
httpSendErrorResp(pContext, pParser->parseCode);
httpNotifyContextClose(pContext);
return false;
}
if (pParser->parseCode) {
httpError("context:%p, fd:%d, parse failed, code:%d close connect", pContext, pContext->fd, pParser->parseCode);
httpSendErrorResp(pContext, pParser->parseCode);
httpNotifyContextClose(pContext);
return false;
}
if (pParser->parseCode) {
httpError("context:%p, fd:%d, parse failed, code:%d close connect", pContext, pContext->fd, pParser->parseCode);
httpSendErrorResp(pContext, pParser->parseCode);
httpNotifyContextClose(pContext);
return false;
}
if (!pParser->parsed) {
httpTrace("context:%p, fd:%d, read not over yet, len:%d", pContext, pContext->fd, pParser->body.pos);
return false;
if (!pParser->parsed) {
httpTrace("context:%p, fd:%d, read not finished", pContext, pContext->fd);
continue;
} else {
httpDebug("context:%p, fd:%d, bodyLen:%d", pContext, pContext->fd, pParser->body.pos);
return true;
}
} else if (nread < 0) {
if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) {
httpDebug("context:%p, fd:%d, read from socket error:%d, wait another event", pContext, pContext->fd, errno);
return false; // later again
} else {
httpError("context:%p, fd:%d, read from socket error:%d, close connect", pContext, pContext->fd, errno);
return false;
}
} else {
httpDebug("context:%p, fd:%d, totalLen:%d", pContext, pContext->fd, pParser->body.pos);
return true;
}
} else if (nread < 0) {
if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) {
httpDebug("context:%p, fd:%d, read from socket error:%d, wait another event", pContext, pContext->fd, errno);
return false; // later again
} else {
httpError("context:%p, fd:%d, read from socket error:%d, close connect", pContext, pContext->fd, errno);
httpError("context:%p, fd:%d, nread:%d, wait another event", pContext, pContext->fd, nread);
return false;
}
} else {
httpError("context:%p, fd:%d, nread:%d, wait another event", pContext, pContext->fd, nread);
return false;
}
}

View File

@ -132,12 +132,9 @@ typedef struct SQueryCostInfo {
typedef struct SQuery {
int16_t numOfCols;
int16_t numOfTags;
char intervalTimeUnit;
char slidingTimeUnit; // interval data type, used for daytime revise
SOrderVal order;
STimeWindow window;
int64_t intervalTime;
int64_t slidingTime; // sliding time for sliding window query
SInterval interval;
int16_t precision;
int16_t numOfOutput;
int16_t fillType;

View File

@ -51,12 +51,11 @@ typedef struct SFillInfo {
int32_t rowSize; // size of each row
// char ** pTags; // tags value for current interpolation
SFillTagColInfo* pTags; // tags value for filling gap
int64_t slidingTime; // sliding value to determine the number of result for a given time window
SInterval interval;
char * prevValues; // previous row of data, to generate the interpolation results
char * nextValues; // next row of data
char** pData; // original result data block involved in filling data
int32_t capacityInRows; // data buffer size in rows
int8_t slidingUnit; // sliding time unit
int8_t precision; // time resolution
SFillColInfo* pFillCol; // column info for fill operations
} SFillInfo;

View File

@ -65,6 +65,11 @@ typedef struct tVariantList {
tVariantListItem *a; /* One entry for each expression */
} tVariantList;
typedef struct SIntervalVal {
SStrToken interval;
SStrToken offset;
} SIntervalVal;
typedef struct SQuerySQL {
struct tSQLExprList *pSelection; // select clause
tVariantList * from; // from clause
@ -72,6 +77,7 @@ typedef struct SQuerySQL {
tVariantList * pGroupby; // groupby clause, only for tags[optional]
tVariantList * pSortOrder; // orderby [optional]
SStrToken interval; // interval [optional]
SStrToken offset; // offset window [optional]
SStrToken sliding; // sliding window [optional]
SLimitVal limit; // limit offset [optional]
SLimitVal slimit; // group limit offset [optional]
@ -259,7 +265,7 @@ tSQLExprList *tSQLExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SStrToken
void tSQLExprListDestroy(tSQLExprList *pList);
SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, tVariantList *pFrom, tSQLExpr *pWhere,
tVariantList *pGroupby, tVariantList *pSortOrder, SStrToken *pInterval,
tVariantList *pGroupby, tVariantList *pSortOrder, SIntervalVal *pInterval,
SStrToken *pSliding, tVariantList *pFill, SLimitVal *pLimit, SLimitVal *pGLimit);
SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SStrToken *pMetricName,

View File

@ -458,9 +458,10 @@ tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z) ids(F). {
%type tmvar {SStrToken}
tmvar(A) ::= VARIABLE(X). {A = X;}
%type interval_opt {SStrToken}
interval_opt(N) ::= INTERVAL LP tmvar(E) RP. {N = E; }
interval_opt(N) ::= . {N.n = 0; N.z = NULL; N.type = 0; }
%type interval_opt {SIntervalVal}
interval_opt(N) ::= INTERVAL LP tmvar(E) RP. {N.interval = E; N.offset.n = 0; N.offset.z = NULL; N.offset.type = 0;}
interval_opt(N) ::= INTERVAL LP tmvar(E) COMMA tmvar(O) RP. {N.interval = E; N.offset = O;}
interval_opt(N) ::= . {memset(&N, 0, sizeof(N));}
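// Illustration (assumed semantics, consistent with the tests added below): the
// two-token form
//   SELECT count(*) FROM meters INTERVAL(1m, 1s)
// stores the second token in N.offset, shifting each 1-minute window to start
// one second past the minute; the offset token is carried into SQuerySQL.offset
// for later validation.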
%type fill_opt {tVariantList*}
%destructor fill_opt {tVariantListDestroy($$);}

View File

@ -131,21 +131,21 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
static void setQueryStatus(SQuery *pQuery, int8_t status);
static void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv);
#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->intervalTime > 0)
#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0)
static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) {
int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
if (pQuery->intervalTimeUnit != 'n' && pQuery->intervalTimeUnit != 'y') {
tw->skey += pQuery->slidingTime * factor;
tw->ekey = tw->skey + pQuery->intervalTime - 1;
if (pQuery->interval.intervalUnit != 'n' && pQuery->interval.intervalUnit != 'y') {
tw->skey += pQuery->interval.sliding * factor;
tw->ekey = tw->skey + pQuery->interval.interval - 1;
return;
}
int64_t key = tw->skey / 1000, interval = pQuery->intervalTime;
int64_t key = tw->skey / 1000, interval = pQuery->interval.interval;
if (pQuery->precision == TSDB_TIME_PRECISION_MICRO) {
key /= 1000;
}
if (pQuery->intervalTimeUnit == 'y') {
if (pQuery->interval.intervalUnit == 'y') {
interval *= 12;
}
@ -510,10 +510,10 @@ static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t t
if (pWindowResInfo->curIndex == -1) { // the first window, from the previous stored value
w.skey = pWindowResInfo->prevSKey;
if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
w.ekey = taosAddNatualInterval(w.skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') {
w.ekey = taosTimeAdd(w.skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision);
} else {
w.ekey = w.skey + pQuery->intervalTime - 1;
w.ekey = w.skey + pQuery->interval.interval - 1;
}
} else {
int32_t slot = curTimeWindowIndex(pWindowResInfo);
@ -522,23 +522,23 @@ static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t t
}
if (w.skey > ts || w.ekey < ts) {
if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
w.skey = taosGetIntervalStartTimestamp(ts, pQuery->slidingTime, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision);
w.ekey = taosAddNatualInterval(w.skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') {
w.skey = taosTimeTruncate(ts, &pQuery->interval, pQuery->precision);
w.ekey = taosTimeAdd(w.skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1;
} else {
int64_t st = w.skey;
if (st > ts) {
st -= ((st - ts + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
st -= ((st - ts + pQuery->interval.sliding - 1) / pQuery->interval.sliding) * pQuery->interval.sliding;
}
int64_t et = st + pQuery->intervalTime - 1;
int64_t et = st + pQuery->interval.interval - 1;
if (et < ts) {
st += ((ts - et + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
st += ((ts - et + pQuery->interval.sliding - 1) / pQuery->interval.sliding) * pQuery->interval.sliding;
}
w.skey = st;
w.ekey = w.skey + pQuery->intervalTime - 1;
w.ekey = w.skey + pQuery->interval.interval - 1;
}
}
@ -550,8 +550,6 @@ static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t t
w.ekey = pQuery->window.ekey;
}
assert(ts >= w.skey && ts <= w.ekey);
return w;
}
@ -856,7 +854,7 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow
int32_t startPos = 0;
// tumbling time window query, a special case of sliding time window query
if (pQuery->slidingTime == pQuery->intervalTime && prevPosition != -1) {
if (pQuery->interval.sliding == pQuery->interval.interval && prevPosition != -1) {
int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
startPos = prevPosition + factor;
} else {
@ -869,21 +867,21 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow
*/
if (QUERY_IS_ASC_QUERY(pQuery) && primaryKeys[startPos] > pNext->ekey) {
TSKEY next = primaryKeys[startPos];
if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
pNext->skey = taosGetIntervalStartTimestamp(next, pQuery->slidingTime, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision);
pNext->ekey = taosAddNatualInterval(pNext->skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') {
pNext->skey = taosTimeTruncate(next, &pQuery->interval, pQuery->precision);
pNext->ekey = taosTimeAdd(pNext->skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1;
} else {
pNext->ekey += ((next - pNext->ekey + pQuery->slidingTime - 1)/pQuery->slidingTime) * pQuery->slidingTime;
pNext->skey = pNext->ekey - pQuery->intervalTime + 1;
pNext->ekey += ((next - pNext->ekey + pQuery->interval.sliding - 1)/pQuery->interval.sliding) * pQuery->interval.sliding;
pNext->skey = pNext->ekey - pQuery->interval.interval + 1;
}
} else if ((!QUERY_IS_ASC_QUERY(pQuery)) && primaryKeys[startPos] < pNext->skey) {
TSKEY next = primaryKeys[startPos];
if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
pNext->skey = taosGetIntervalStartTimestamp(next, pQuery->slidingTime, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision);
pNext->ekey = taosAddNatualInterval(pNext->skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') {
pNext->skey = taosTimeTruncate(next, &pQuery->interval, pQuery->precision);
pNext->ekey = taosTimeAdd(pNext->skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1;
} else {
pNext->skey -= ((pNext->skey - next + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
pNext->ekey = pNext->skey + pQuery->intervalTime - 1;
pNext->skey -= ((pNext->skey - next + pQuery->interval.sliding - 1) / pQuery->interval.sliding) * pQuery->interval.sliding;
pNext->ekey = pNext->skey + pQuery->interval.interval - 1;
}
}
@ -1879,20 +1877,20 @@ static bool onlyQueryTags(SQuery* pQuery) {
/////////////////////////////////////////////////////////////////////////////////////////////
void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *win) {
assert(key >= keyFirst && key <= keyLast && pQuery->slidingTime <= pQuery->intervalTime);
win->skey = taosGetIntervalStartTimestamp(key, pQuery->slidingTime, pQuery->intervalTime, pQuery->slidingTimeUnit, pQuery->precision);
assert(key >= keyFirst && key <= keyLast && pQuery->interval.sliding <= pQuery->interval.interval);
win->skey = taosTimeTruncate(key, &pQuery->interval, pQuery->precision);
/*
* if the realSkey > INT64_MAX - pQuery->intervalTime, the query duration between
* if the realSkey > INT64_MAX - pQuery->interval.interval, the query duration between
* realSkey and realEkey must be less than one interval. Therefore, no need to adjust the query ranges.
*/
if (keyFirst > (INT64_MAX - pQuery->intervalTime)) {
assert(keyLast - keyFirst < pQuery->intervalTime);
if (keyFirst > (INT64_MAX - pQuery->interval.interval)) {
assert(keyLast - keyFirst < pQuery->interval.interval);
win->ekey = INT64_MAX;
} else if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
win->ekey = taosAddNatualInterval(win->skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
} else if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') {
win->ekey = taosTimeAdd(win->skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1;
} else {
win->ekey = win->skey + pQuery->intervalTime - 1;
win->ekey = win->skey + pQuery->interval.interval - 1;
}
}
@ -2006,7 +2004,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo
return;
}
if (isPointInterpoQuery(pQuery) && pQuery->intervalTime == 0) {
if (isPointInterpoQuery(pQuery) && pQuery->interval.interval == 0) {
if (!QUERY_IS_ASC_QUERY(pQuery)) {
qDebug(msg, GET_QINFO_ADDR(pQuery), "interp", pQuery->order.order, TSDB_ORDER_ASC, pQuery->window.skey,
pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
@ -2017,7 +2015,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo
return;
}
if (pQuery->intervalTime == 0) {
if (pQuery->interval.interval == 0) {
if (onlyFirstQuery(pQuery)) {
if (!QUERY_IS_ASC_QUERY(pQuery)) {
qDebug(msg, GET_QINFO_ADDR(pQuery), "only-first", pQuery->order.order, TSDB_ORDER_ASC, pQuery->window.skey,
@ -4277,8 +4275,8 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) {
}
/*
* 1. for interval without interpolation query we forward pQuery->intervalTime at a time for
* pQuery->limit.offset times. Since hole exists, pQuery->intervalTime*pQuery->limit.offset value is
* 1. for interval without interpolation query we forward pQuery->interval.interval at a time for
* pQuery->limit.offset times. Since hole exists, pQuery->interval.interval*pQuery->limit.offset value is
* not valid. otherwise, we only forward pQuery->limit.offset number of points
*/
assert(pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL);
@ -4577,7 +4575,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
getAlignQueryTimeWindow(pQuery, pQuery->window.skey, sk, ek, &w);
pRuntimeEnv->pFillInfo = taosInitFillInfo(pQuery->order.order, w.skey, 0, (int32_t)pQuery->rec.capacity, pQuery->numOfOutput,
pQuery->slidingTime, pQuery->slidingTimeUnit, (int8_t)pQuery->precision,
pQuery->interval.sliding, pQuery->interval.slidingUnit, (int8_t)pQuery->precision,
pQuery->fillType, pColInfo);
}
@ -5438,7 +5436,7 @@ static void stableQueryImpl(SQInfo *pQInfo) {
(isFixedOutputQuery(pRuntimeEnv) && (!isPointInterpoQuery(pQuery)) && (!pRuntimeEnv->groupbyNormalCol))) {
multiTableQueryProcess(pQInfo);
} else {
assert((pQuery->checkBuffer == 1 && pQuery->intervalTime == 0) || isPointInterpoQuery(pQuery) ||
assert((pQuery->checkBuffer == 1 && pQuery->interval.interval == 0) || isPointInterpoQuery(pQuery) ||
isFirstLastRowQuery(pQuery) || pRuntimeEnv->groupbyNormalCol);
sequentialTableProcess(pQInfo);
@ -5476,6 +5474,7 @@ static int32_t getColumnIndexInSource(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pE
}
}
assert(0);
return -1;
}
bool validateExprColumnInfo(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pExprMsg, SColumnInfo* pTagCols) {
@ -5484,8 +5483,8 @@ bool validateExprColumnInfo(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pExprMsg, SC
}
static bool validateQueryMsg(SQueryTableMsg *pQueryMsg) {
if (pQueryMsg->intervalTime < 0) {
qError("qmsg:%p illegal value of interval time %" PRId64, pQueryMsg, pQueryMsg->intervalTime);
if (pQueryMsg->interval.interval < 0) {
qError("qmsg:%p illegal value of interval time %" PRId64, pQueryMsg, pQueryMsg->interval.interval);
return false;
}
@ -5564,8 +5563,12 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
pQueryMsg->window.skey = htobe64(pQueryMsg->window.skey);
pQueryMsg->window.ekey = htobe64(pQueryMsg->window.ekey);
pQueryMsg->intervalTime = htobe64(pQueryMsg->intervalTime);
pQueryMsg->slidingTime = htobe64(pQueryMsg->slidingTime);
pQueryMsg->interval.interval = htobe64(pQueryMsg->interval.interval);
pQueryMsg->interval.sliding = htobe64(pQueryMsg->interval.sliding);
pQueryMsg->interval.offset = htobe64(pQueryMsg->interval.offset);
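// note: the three unit fields below are single bytes, so they need no
// byte-order conversion; the self-assignments that follow are effectively
// no-ops, kept for symmetry with the 64-bit fields above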
pQueryMsg->interval.intervalUnit = pQueryMsg->interval.intervalUnit;
pQueryMsg->interval.slidingUnit = pQueryMsg->interval.slidingUnit;
pQueryMsg->interval.offsetUnit = pQueryMsg->interval.offsetUnit;
pQueryMsg->limit = htobe64(pQueryMsg->limit);
pQueryMsg->offset = htobe64(pQueryMsg->offset);
@ -5778,7 +5781,7 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
qDebug("qmsg:%p query %d tables, type:%d, qrange:%" PRId64 "-%" PRId64 ", numOfGroupbyTagCols:%d, order:%d, "
"outputCols:%d, numOfCols:%d, interval:%" PRId64 ", fillType:%d, comptsLen:%d, compNumOfBlocks:%d, limit:%" PRId64 ", offset:%" PRId64,
pQueryMsg, pQueryMsg->numOfTables, pQueryMsg->queryType, pQueryMsg->window.skey, pQueryMsg->window.ekey, pQueryMsg->numOfGroupCols,
pQueryMsg->order, pQueryMsg->numOfOutput, pQueryMsg->numOfCols, pQueryMsg->intervalTime,
pQueryMsg->order, pQueryMsg->numOfOutput, pQueryMsg->numOfCols, pQueryMsg->interval.interval,
pQueryMsg->fillType, pQueryMsg->tsLen, pQueryMsg->tsNumOfBlocks, pQueryMsg->limit, pQueryMsg->offset);
return TSDB_CODE_SUCCESS;
@ -6117,10 +6120,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou
pQuery->order.orderColId = pQueryMsg->orderColId;
pQuery->pSelectExpr = pExprs;
pQuery->pGroupbyExpr = pGroupbyExpr;
pQuery->intervalTime = pQueryMsg->intervalTime;
pQuery->slidingTime = pQueryMsg->slidingTime;
pQuery->intervalTimeUnit = pQueryMsg->intervalTimeUnit;
pQuery->slidingTimeUnit = pQueryMsg->slidingTimeUnit;
memcpy(&pQuery->interval, &pQueryMsg->interval, sizeof(pQuery->interval));
pQuery->fillType = pQueryMsg->fillType;
pQuery->numOfTags = pQueryMsg->numOfTags;
pQuery->tagColList = pTagCols;

View File

@ -38,8 +38,8 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_
pFillInfo->numOfTags = numOfTags;
pFillInfo->numOfCols = numOfCols;
pFillInfo->precision = precision;
pFillInfo->slidingTime = slidingTime;
pFillInfo->slidingUnit = slidingUnit;
pFillInfo->interval.sliding = slidingTime;
pFillInfo->interval.slidingUnit = slidingUnit;
pFillInfo->pData = malloc(POINTER_BYTES * numOfCols);
if (numOfTags > 0) {
@ -108,21 +108,15 @@ void* taosDestoryFillInfo(SFillInfo* pFillInfo) {
return NULL;
}
static TSKEY taosGetRevisedEndKey(TSKEY ekey, int32_t order, int64_t timeInterval, int8_t slidingTimeUnit, int8_t precision) {
if (order == TSDB_ORDER_ASC) {
return ekey;
} else {
return taosGetIntervalStartTimestamp(ekey, timeInterval, timeInterval, slidingTimeUnit, precision);
}
}
void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey) {
if (pFillInfo->fillType == TSDB_FILL_NONE) {
return;
}
pFillInfo->endKey = taosGetRevisedEndKey(endKey, pFillInfo->order, pFillInfo->slidingTime, pFillInfo->slidingUnit,
pFillInfo->precision);
pFillInfo->endKey = endKey;
if (pFillInfo->order != TSDB_ORDER_ASC) {
pFillInfo->endKey = taosTimeTruncate(endKey, &pFillInfo->interval, pFillInfo->precision);
}
pFillInfo->rowIdx = 0;
pFillInfo->numOfRows = numOfRows;
@ -172,30 +166,34 @@ int64_t getFilledNumOfRes(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows
int32_t numOfRows = taosNumOfRemainRows(pFillInfo);
TSKEY ekey1 = taosGetRevisedEndKey(ekey, pFillInfo->order, pFillInfo->slidingTime, pFillInfo->slidingUnit,
pFillInfo->precision);
TSKEY ekey1 = ekey;
if (pFillInfo->order != TSDB_ORDER_ASC) {
pFillInfo->endKey = taosTimeTruncate(ekey, &pFillInfo->interval, pFillInfo->precision);
}
int64_t numOfRes = -1;
if (numOfRows > 0) { // still fill gap within current data block, not generating data after the result set.
TSKEY lastKey = tsList[pFillInfo->numOfRows - 1];
if (pFillInfo->slidingUnit != 'y' && pFillInfo->slidingUnit != 'n') {
numOfRes = (int64_t)(ABS(lastKey - pFillInfo->start) / pFillInfo->slidingTime) + 1;
} else {
numOfRes = taosCountNatualInterval(lastKey, pFillInfo->start, pFillInfo->slidingTime, pFillInfo->slidingUnit, pFillInfo->precision) + 1;
}
numOfRes = taosTimeCountInterval(
lastKey,
pFillInfo->start,
pFillInfo->interval.sliding,
pFillInfo->interval.slidingUnit,
pFillInfo->precision);
numOfRes += 1;
assert(numOfRes >= numOfRows);
} else { // reach the end of data
if ((ekey1 < pFillInfo->start && FILL_IS_ASC_FILL(pFillInfo)) ||
(ekey1 > pFillInfo->start && !FILL_IS_ASC_FILL(pFillInfo))) {
return 0;
}
// the numOfRes rows are all filled with specified policy
if (pFillInfo->slidingUnit != 'y' && pFillInfo->slidingUnit != 'n') {
numOfRes = (ABS(ekey1 - pFillInfo->start) / pFillInfo->slidingTime) + 1;
} else {
numOfRes = taosCountNatualInterval(ekey1, pFillInfo->start, pFillInfo->slidingTime, pFillInfo->slidingUnit, pFillInfo->precision) + 1;
}
numOfRes = taosTimeCountInterval(
ekey1,
pFillInfo->start,
pFillInfo->interval.sliding,
pFillInfo->interval.slidingUnit,
pFillInfo->precision);
numOfRes += 1;
}
return (numOfRes > maxNumOfRows) ? maxNumOfRows : numOfRes;
@ -374,12 +372,7 @@ static void doFillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* nu
setTagsValue(pFillInfo, data, *num);
}
// TODO: natural sliding time
if (pFillInfo->slidingUnit != 'n' && pFillInfo->slidingUnit != 'y') {
pFillInfo->start += (pFillInfo->slidingTime * step);
} else {
pFillInfo->start = taosAddNatualInterval(pFillInfo->start, pFillInfo->slidingTime*step, pFillInfo->slidingUnit, pFillInfo->precision);
}
pFillInfo->start = taosTimeAdd(pFillInfo->start, pFillInfo->interval.sliding * step, pFillInfo->interval.slidingUnit, pFillInfo->precision);
pFillInfo->numOfCurrent++;
(*num) += 1;
@ -486,12 +479,7 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu
// set the tag value for final result
setTagsValue(pFillInfo, data, num);
// TODO: natural sliding time
if (pFillInfo->slidingUnit != 'n' && pFillInfo->slidingUnit != 'y') {
pFillInfo->start += (pFillInfo->slidingTime * step);
} else {
pFillInfo->start = taosAddNatualInterval(pFillInfo->start, pFillInfo->slidingTime*step, pFillInfo->slidingUnit, pFillInfo->precision);
}
pFillInfo->start = taosTimeAdd(pFillInfo->start, pFillInfo->interval.sliding*step, pFillInfo->interval.slidingUnit, pFillInfo->precision);
pFillInfo->rowIdx += 1;
pFillInfo->numOfCurrent +=1;

View File

@ -135,7 +135,7 @@ tSQLExpr *tSQLExprIdValueCreate(SStrToken *pToken, int32_t optrType) {
pSQLExpr->val.nType = TSDB_DATA_TYPE_BIGINT;
pSQLExpr->nSQLOptr = TK_TIMESTAMP; // TK_TIMESTAMP is used to denote that the time value is in microseconds
} else if (optrType == TK_VARIABLE) {
int32_t ret = getTimestampInUsFromStr(pToken->z, pToken->n, &pSQLExpr->val.i64Key);
int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSQLExpr->val.i64Key);
UNUSED(ret);
pSQLExpr->val.nType = TSDB_DATA_TYPE_BIGINT;
@ -443,44 +443,6 @@ void setDBName(SStrToken *pCpxName, SStrToken *pDB) {
pCpxName->n = pDB->n;
}
int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t *result) {
*result = val;
switch (unit) {
case 's':
(*result) *= MILLISECOND_PER_SECOND;
break;
case 'm':
(*result) *= MILLISECOND_PER_MINUTE;
break;
case 'h':
(*result) *= MILLISECOND_PER_HOUR;
break;
case 'd':
(*result) *= MILLISECOND_PER_DAY;
break;
case 'w':
(*result) *= MILLISECOND_PER_WEEK;
break;
case 'n':
(*result) *= MILLISECOND_PER_MONTH;
break;
case 'y':
(*result) *= MILLISECOND_PER_YEAR;
break;
case 'a':
break;
default: {
;
return -1;
}
}
/* get the value in microsecond */
(*result) *= 1000L;
return 0;
}
void tSQLSetColumnInfo(TAOS_FIELD *pField, SStrToken *pName, TAOS_FIELD *pType) {
int32_t maxLen = sizeof(pField->name) / sizeof(pField->name[0]);
@ -535,7 +497,7 @@ void tSQLSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
* extract the select info out of sql string
*/
SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, tVariantList *pFrom, tSQLExpr *pWhere,
tVariantList *pGroupby, tVariantList *pSortOrder, SStrToken *pInterval,
tVariantList *pGroupby, tVariantList *pSortOrder, SIntervalVal *pInterval,
SStrToken *pSliding, tVariantList *pFill, SLimitVal *pLimit, SLimitVal *pGLimit) {
assert(pSelection != NULL);
@ -558,7 +520,8 @@ SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection,
}
if (pInterval != NULL) {
pQuery->interval = *pInterval;
pQuery->interval = pInterval->interval;
pQuery->offset = pInterval->offset;
}
if (pSliding != NULL) {

View File

@ -54,7 +54,7 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
pWindowResInfo->interval = pRuntimeEnv->pQuery->intervalTime;
pWindowResInfo->interval = pRuntimeEnv->pQuery->interval.interval;
pSummary->internalSupSize += sizeof(SWindowResult) * threshold;
pSummary->internalSupSize += (pRuntimeEnv->pQuery->numOfOutput * sizeof(SResultInfo) + pRuntimeEnv->interBufSize) * pWindowResInfo->capacity;

File diff suppressed because it is too large

0
src/util/src/tarray.c Executable file → Normal file
View File

View File

@ -433,7 +433,7 @@ void taosPrintLongString(const char *flags, int32_t dflag, const char *format, .
va_list argpointer;
char buffer[MAX_LOGLINE_DUMP_BUFFER_SIZE];
int32_t len;
int32_t len;
struct tm Tm, *ptm;
struct timeval timeSecs;
time_t curTime;

View File

@ -186,6 +186,7 @@ python3 ./test.py -f functions/function_sum.py
python3 ./test.py -f functions/function_top.py
#python3 ./test.py -f functions/function_twa.py
python3 queryCount.py
python3 ./test.py -f query/queryGroupbyWithInterval.py
# tools
python3 test.py -f tools/taosdemo.py

View File

@ -0,0 +1,225 @@
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def general(self):
tdSql.execute("create table meters(ts timestamp, s int) tags(id int)")
tdSql.execute("create table t0 using meters tags(0)")
tdSql.execute("create table t1 using meters tags(1)")
tdSql.execute("create table t2 using meters tags(2)")
tdSql.execute("create table t3 using meters tags(3)")
tdSql.execute("create table t4 using meters tags(4)")
tdSql.execute("insert into t0 values('2019-01-01 00:00:00', 1)")
tdSql.execute("insert into t1 values('2019-01-01 00:00:01', 1)")
tdSql.execute("insert into t2 values('2019-01-01 00:01:00', 1)")
tdSql.execute("insert into t1 values('2019-01-01 00:01:01', 1)")
tdSql.execute("insert into t1 values('2019-01-01 00:01:02', 1)")
tdSql.execute("insert into t1 values('2019-01-01 00:01:03', 1)")
tdSql.execute("insert into t1 values('2019-01-01 00:01:30', 1)")
tdSql.execute("insert into t1 values('2019-01-01 00:01:50', 1)")
tdSql.execute("insert into t2 values('2019-01-01 00:02:00', 1)")
tdSql.execute("insert into t3 values('2019-01-01 00:02:02', 1)")
tdSql.execute("insert into t3 values('2019-01-01 00:02:59', 1)")
tdSql.execute("insert into t4 values('2019-01-01 00:02:59', 1)")
tdSql.execute("insert into t1 values('2019-01-01 00:03:10', 1)")
tdSql.execute("insert into t2 values('2019-01-01 00:08:00', 1)")
tdSql.execute("insert into t1 values('2019-01-01 00:08:00', 1)")
tdSql.query("select count(*) from meters interval(1m, 1s)")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 2)
tdSql.checkData(2, 1, 6)
tdSql.checkData(3, 1, 3)
tdSql.checkData(4, 1, 1)
tdSql.checkData(5, 1, 2)
tdSql.query("select count(*) from meters interval(1m, 2s)")
tdSql.checkData(0, 1, 2)
tdSql.checkData(1, 1, 2)
tdSql.checkData(2, 1, 5)
tdSql.checkData(3, 1, 3)
tdSql.checkData(4, 1, 1)
tdSql.checkData(5, 1, 2)
tdSql.query("select count(*) from meters interval(90s, 1500a)")
tdSql.checkData(0, 1, 2)
tdSql.checkData(1, 1, 5)
tdSql.checkData(2, 1, 5)
tdSql.checkData(3, 1, 1)
tdSql.checkData(4, 1, 2)
def singleTable(self):
tdSql.execute("create table car(ts timestamp, s int)")
tdSql.execute("insert into car values('2019-01-01 00:00:00', 1)")
tdSql.execute("insert into car values('2019-05-13 12:00:00', 1)")
tdSql.execute("insert into car values('2019-12-31 23:59:59', 1)")
tdSql.execute("insert into car values('2020-01-01 12:00:00', 1)")
tdSql.execute("insert into car values('2020-01-02 12:00:00', 1)")
tdSql.execute("insert into car values('2020-01-03 12:00:00', 1)")
tdSql.execute("insert into car values('2020-01-04 12:00:00', 1)")
tdSql.execute("insert into car values('2020-01-05 12:00:00', 1)")
tdSql.execute("insert into car values('2020-01-31 12:00:00', 1)")
tdSql.execute("insert into car values('2020-02-01 12:00:00', 1)")
tdSql.execute("insert into car values('2020-02-02 12:00:00', 1)")
tdSql.execute("insert into car values('2020-02-29 12:00:00', 1)")
tdSql.execute("insert into car values('2020-03-01 12:00:00', 1)")
tdSql.execute("insert into car values('2020-03-02 12:00:00', 1)")
tdSql.execute("insert into car values('2020-03-15 12:00:00', 1)")
tdSql.execute("insert into car values('2020-03-31 12:00:00', 1)")
tdSql.execute("insert into car values('2020-05-01 12:00:00', 1)")
tdSql.query("select count(*) from car interval(1n, 10d)")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 1)
tdSql.checkData(2, 1, 6)
tdSql.checkData(3, 1, 3)
tdSql.checkData(4, 1, 3)
tdSql.checkData(5, 1, 2)
tdSql.checkData(6, 1, 1)
tdSql.query("select count(*) from car interval(1n, 10d) order by ts desc")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 2)
tdSql.checkData(2, 1, 3)
tdSql.checkData(3, 1, 3)
tdSql.checkData(4, 1, 6)
tdSql.checkData(5, 1, 1)
tdSql.checkData(6, 1, 1)
tdSql.query("select count(*) from car interval(2n, 5d)")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 1)
tdSql.checkData(2, 1, 6)
tdSql.checkData(3, 1, 6)
tdSql.checkData(4, 1, 3)
tdSql.query("select count(*) from car interval(2n) order by ts desc")
tdSql.checkData(0, 1, 3)
tdSql.checkData(1, 1, 6)
tdSql.checkData(2, 1, 6)
tdSql.checkData(3, 1, 1)
tdSql.checkData(4, 1, 1)
tdSql.query("select count(*) from car interval(1y, 1n)")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 8)
tdSql.checkData(2, 1, 8)
tdSql.query("select count(*) from car interval(1y, 2n)")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 11)
tdSql.checkData(2, 1, 5)
tdSql.query("select count(*) from car where ts > '2019-05-14 00:00:00' interval(1y, 5d)")
tdSql.checkData(0, 1, 6)
tdSql.checkData(1, 1, 9)
def superTable(self):
tdSql.execute("create table cars(ts timestamp, s int) tags(id int)")
tdSql.execute("create table car0 using cars tags(0)")
tdSql.execute("create table car1 using cars tags(1)")
tdSql.execute("create table car2 using cars tags(2)")
tdSql.execute("create table car3 using cars tags(3)")
tdSql.execute("create table car4 using cars tags(4)")
tdSql.execute("insert into car0 values('2019-01-01 00:00:00', 1)")
tdSql.execute("insert into car1 values('2019-05-13 12:00:00', 1)")
tdSql.execute("insert into car2 values('2019-12-31 23:59:59', 1)")
tdSql.execute("insert into car1 values('2020-01-01 12:00:00', 1)")
tdSql.execute("insert into car1 values('2020-01-02 12:00:00', 1)")
tdSql.execute("insert into car1 values('2020-01-03 12:00:00', 1)")
tdSql.execute("insert into car1 values('2020-01-04 12:00:00', 1)")
tdSql.execute("insert into car1 values('2020-01-05 12:00:00', 1)")
tdSql.execute("insert into car1 values('2020-01-31 12:00:00', 1)")
tdSql.execute("insert into car1 values('2020-02-01 12:00:00', 1)")
tdSql.execute("insert into car2 values('2020-02-02 12:00:00', 1)")
tdSql.execute("insert into car2 values('2020-02-29 12:00:00', 1)")
tdSql.execute("insert into car3 values('2020-03-01 12:00:00', 1)")
tdSql.execute("insert into car3 values('2020-03-02 12:00:00', 1)")
tdSql.execute("insert into car3 values('2020-03-15 12:00:00', 1)")
tdSql.execute("insert into car4 values('2020-03-31 12:00:00', 1)")
tdSql.execute("insert into car3 values('2020-05-01 12:00:00', 1)")
tdSql.query("select count(*) from cars interval(1n, 10d)")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 1)
tdSql.checkData(2, 1, 6)
tdSql.checkData(3, 1, 3)
tdSql.checkData(4, 1, 3)
tdSql.checkData(5, 1, 2)
tdSql.checkData(6, 1, 1)
tdSql.query("select count(*) from cars interval(1n, 10d) order by ts desc")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 2)
tdSql.checkData(2, 1, 3)
tdSql.checkData(3, 1, 3)
tdSql.checkData(4, 1, 6)
tdSql.checkData(5, 1, 1)
tdSql.checkData(6, 1, 1)
tdSql.query("select count(*) from cars interval(2n, 5d)")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 1)
tdSql.checkData(2, 1, 6)
tdSql.checkData(3, 1, 6)
tdSql.checkData(4, 1, 3)
tdSql.query("select count(*) from cars interval(2n) order by ts desc")
tdSql.checkData(0, 1, 3)
tdSql.checkData(1, 1, 6)
tdSql.checkData(2, 1, 6)
tdSql.checkData(3, 1, 1)
tdSql.checkData(4, 1, 1)
tdSql.query("select count(*) from cars interval(1y, 1n)")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 8)
tdSql.checkData(2, 1, 8)
tdSql.query("select count(*) from cars interval(1y, 2n)")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 11)
tdSql.checkData(2, 1, 5)
tdSql.query("select count(*) from cars where ts > '2019-05-14 00:00:00' interval(1y, 5d)")
tdSql.checkData(0, 1, 6)
tdSql.checkData(1, 1, 9)
def run(self):
tdSql.prepare()
self.general()
self.singleTable()
self.superTable()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -89,10 +89,10 @@ class TDTestCase:
def superTable(self):
tdSql.execute("create table cars(ts timestamp, s int) tags(id int)")
tdSql.execute("create table car0 using cars tags(0)")
tdSql.execute("create table car1 using cars tags(0)")
tdSql.execute("create table car2 using cars tags(0)")
tdSql.execute("create table car3 using cars tags(0)")
tdSql.execute("create table car4 using cars tags(0)")
tdSql.execute("create table car1 using cars tags(1)")
tdSql.execute("create table car2 using cars tags(2)")
tdSql.execute("create table car3 using cars tags(3)")
tdSql.execute("create table car4 using cars tags(4)")
tdSql.execute("insert into car0 values('2019-01-01 00:00:00', 1)")
tdSql.execute("insert into car1 values('2019-05-13 12:00:00', 1)")

View File

@ -0,0 +1,53 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
tdSql.execute(
"create table stest(ts timestamp,size INT,filenum INT) tags (appname binary(500),tenant binary(500))")
tdSql.execute(
"insert into test1 using stest tags('test1','aaa') values ('2020-09-04 16:53:54.003',210,3)")
tdSql.execute(
"insert into test2 using stest tags('test1','aaa') values ('2020-09-04 16:53:56.003',210,3)")
tdSql.execute(
"insert into test11 using stest tags('test11','bbb') values ('2020-09-04 16:53:57.003',210,3)")
tdSql.execute(
"insert into test12 using stest tags('test11','bbb') values ('2020-09-04 16:53:58.003',210,3)")
tdSql.execute(
"insert into test21 using stest tags('test21','ccc') values ('2020-09-04 16:53:59.003',210,3)")
tdSql.execute(
"insert into test22 using stest tags('test21','ccc') values ('2020-09-04 16:54:54.003',210,3)")
tdSql.query("select sum(size) from stest interval(1d) group by appname")
tdSql.checkRows(3)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -1,5 +1,6 @@
system sh/stop_dnodes.sh
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/deploy.sh -n dnode1 -i 1
print ========= start dnodes

View File

@ -1,5 +1,6 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
print ========= start dnodes
system sh/exec.sh -n dnode1 -s start

View File

@ -4,6 +4,8 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
print ========== prepare data
system sh/exec.sh -n dnode1 -s start

View File

@ -1,7 +1,7 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 1
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c ctime -v 30
system sh/exec.sh -n dnode1 -s start
sleep 3000

View File

@ -86,8 +86,6 @@ print ========== insert data by multi-format
sql create table abc.tk_mt (ts timestamp, a int, b binary(16), c bool, d float, e double, f nchar(16)) tags (t1 int, t2 binary(16))
sql create table abc.tk_subt001 using tk_mt tags(1, 'subt001')
sql insert into abc.tk_subt001 values (now-1y, 1, 'binary_1', true, 1.001, 2.001, 'nchar_1')
sql insert into abc.tk_subt001 values (now-1n, 2, 'binary_2', true, 1.002, 2.002, 'nchar_2')
sql insert into abc.tk_subt001 values (now-1w, 3, 'binary_3', true, 1.003, 2.003, 'nchar_3')
sql insert into abc.tk_subt001 (ts, a, c, e, f) values (now-1d, 4, false, 2.004, 'nchar_4')
sql insert into abc.tk_subt001 (ts, a, c, e, f) values (now-1h, 5, false, 2.005, 'nchar_5')
@ -95,35 +93,29 @@ sql insert into abc.tk_subt001 (ts, b, d) values (now-1m, 'binary_6',
sql insert into abc.tk_subt001 (ts, b, d) values (now-1s, 'binary_7', 1.007)
sql insert into abc.tk_subt001 (ts, b, d) values (now-1a, 'binary_8', 1.008)
sql select * from tk_subt001
if $rows != 8 then
print ==== expect rows is 8, but actually is $rows
if $rows != 6 then
print ==== expect rows is 6, but actually is $rows
return -1
endi
sql insert into abc.tk_subt002 using tk_mt tags (22,'subt002x') values (now-2y, 2008, 'binary_2008', false, 2008.001, 2008.001, 'nchar_2008')
sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now-1y, 2007, 'binary_2007', false, 2007.001, 2007.001, 'nchar_2007')
sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now-1n, 2006, 'binary_2006', true, 2006.001, 2006.001, 'nchar_2006')
sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1s, 2001, 'binary_2001', true, 2001.001, 2001.001, 'nchar_2001')
sql insert into abc.tk_subt002 using tk_mt tags (22, 'subt002x') values (now+1s, 2001, 'binary_2001', true, 2001.001, 2001.001, 'nchar_2001')
sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1m, 2002, 'binary_2002', false, 2002.001, 2002.001, 'nchar_2002')
sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1h, 2003, 'binary_2003', false, 2003.001, 2003.001, 'nchar_2003')
sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1d, 2004, 'binary_2004', true, 2004.001, 2004.001, 'nchar_2004')
sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1w, 2005, 'binary_2005', false, 2005.001, 2005.001, 'nchar_2005')
sql select * from tk_subt002
if $rows != 8 then
print ==== expect rows is 8, but actually is $rows
if $rows != 5 then
print ==== expect rows is 5, but actually is $rows
return -1
endi
sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-5y, 3001, false, 3001.001, 'nchar_3001')
sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-4y, 3002, false, 3002.001, 'nchar_3002')
sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-3y, 3003, true , 3003.001, 'nchar_3003')
sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-2y, 3004, false, 3004.001, 'nchar_3004')
sql insert into abc.tk_subt003 values (now-37d, 3005, 'binary_3005', false, 3005.001, 3005.001, 'nchar_3005')
sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-38d, 3004, false, 3004.001, 'nchar_3004')
sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-37d, 3005, false, 3005.001, 'nchar_3005')
sql insert into abc.tk_subt003 values (now-36d, 3006, 'binary_3006', true, 3006.001, 3006.001, 'nchar_3006')
sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (33, 'subt003x') values (now-35d, 3007, false, 3007.001, 'nchar_3007')
sql select * from tk_subt003
if $rows != 7 then
print ==== expect rows is 7, but actually is $rows
if $rows != 4 then
print ==== expect rows is 4, but actually is $rows
return -1
endi
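
The multi-format cases above lean on TDengine's auto-create-table insert: a write names the super table and its tag values inline, optionally with an explicit column list, and the subtable is created on first use. A minimal sketch in the same style (database, table, and tag names are illustrative):

sql create table demo.mt (ts timestamp, a int, b binary(16)) tags (t1 int, t2 binary(16))
sql insert into demo.sub1 using mt tags (1, 'sub1') values (now, 1, 'binary_1')
sql insert into demo.sub2 (ts, a) using mt tags (2, 'sub2') values (now, 2)

The tightened expectations above (8 to 6, 8 to 5, 7 to 4) track the insert statements that the same hunks removed or rewrote.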

@ -4,6 +4,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/exec.sh -n dnode1 -s start
sleep 3000

@ -3,6 +3,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/exec.sh -n dnode1 -s start
sleep 3000

@ -2,6 +2,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 4
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/exec.sh -n dnode1 -s start
sleep 3000

@ -35,6 +35,16 @@ system sh/cfg.sh -n dnode6 -c wallevel -v 1
system sh/cfg.sh -n dnode7 -c wallevel -v 1
system sh/cfg.sh -n dnode8 -c wallevel -v 1
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode6 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode7 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode8 -c maxTablesPerVnode -v 4
print ============== step1
print ========= start dnode1
system sh/exec.sh -n dnode1 -s start
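
This file repeats the same cfg.sh line for dnode1 through dnode8, which is the convention these tests follow. If the harness expands $i inside system arguments — an assumption, since these scripts spell each line out — the sim DSL's while loop could express the same fan-out:

$i = 1
while $i < 9
system sh/cfg.sh -n dnode$i -c maxTablesPerVnode -v 4
$i = $i + 1
endw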

@ -35,6 +35,16 @@ system sh/cfg.sh -n dnode6 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode7 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode8 -c mnodeEqualVnodeNum -v 0
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode6 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode7 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode8 -c maxTablesPerVnode -v 4
print ============== step1
print ========= start dnode1
system sh/exec.sh -n dnode1 -s start

@ -16,6 +16,11 @@ system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
print ========== step1
system sh/exec.sh -n dnode1 -s start
sleep 3000

@ -25,6 +25,11 @@ system sh/cfg.sh -n dnode2 -c wallevel -v 2
system sh/cfg.sh -n dnode3 -c wallevel -v 2
system sh/cfg.sh -n dnode4 -c wallevel -v 2
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
print ========== step1
system sh/exec.sh -n dnode1 -s start
sql connect

@ -12,6 +12,12 @@ system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode5 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c wallevel -v 1
system sh/cfg.sh -n dnode2 -c wallevel -v 1
system sh/cfg.sh -n dnode3 -c wallevel -v 1

@ -21,6 +21,13 @@ system sh/cfg.sh -n dnode4 -c wallevel -v 1
system sh/cfg.sh -n dnode5 -c wallevel -v 1
system sh/cfg.sh -n dnode6 -c wallevel -v 1
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode6 -c maxTablesPerVnode -v 4
print ========== step1
system sh/exec.sh -n dnode1 -s start
sql connect

@ -15,6 +15,11 @@ system sh/cfg.sh -n dnode2 -c wallevel -v 1
system sh/cfg.sh -n dnode3 -c wallevel -v 1
system sh/cfg.sh -n dnode4 -c wallevel -v 1
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
print ========== step1
system sh/exec.sh -n dnode1 -s start
sql connect

@ -16,6 +16,13 @@ system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c wallevel -v 1
system sh/cfg.sh -n dnode2 -c wallevel -v 1
system sh/cfg.sh -n dnode3 -c wallevel -v 1

@ -15,6 +15,11 @@ system sh/cfg.sh -n dnode2 -c wallevel -v 1
system sh/cfg.sh -n dnode3 -c wallevel -v 1
system sh/cfg.sh -n dnode4 -c wallevel -v 1
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
print ========== step1
system sh/exec.sh -n dnode1 -s start
sleep 3000

@ -15,6 +15,11 @@ system sh/cfg.sh -n dnode2 -c wallevel -v 2
system sh/cfg.sh -n dnode3 -c wallevel -v 2
system sh/cfg.sh -n dnode4 -c wallevel -v 2
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
print ========== step1
system sh/exec.sh -n dnode1 -s start
sleep 3000

@ -15,6 +15,11 @@ system sh/cfg.sh -n dnode2 -c wallevel -v 1
system sh/cfg.sh -n dnode3 -c wallevel -v 1
system sh/cfg.sh -n dnode4 -c wallevel -v 1
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
print ========== step1
system sh/exec.sh -n dnode1 -s start
sql connect

@ -30,6 +30,11 @@ system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3