Merge branch 'develop' into coverity_scan

commit efdc657c79
@@ -2,7 +2,6 @@
 [](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
 [](https://coveralls.io/github/taosdata/TDengine?branch=develop)
 [](https://bestpractices.coreinfrastructure.org/projects/4201)
-[](https://hub.docker.com/repository/docker/tdengine/tdengine)
 [](https://snapcraft.io/tdengine)

 [](https://www.taosdata.com)
@@ -9,9 +9,7 @@ set -e
 script_dir=$(dirname $(readlink -f "$0"))

 # Dynamic directory
 lib_link_dir="/usr/lib"
-
-#install main path
-install_main_dir="/usr/local/taos"
+lib64_link_dir="/usr/lib64"

 # Color setting
 RED='\033[0;31m'
@@ -25,24 +23,23 @@ if command -v sudo > /dev/null; then
     csudo="sudo"
 fi

-function clean_driver() {
-    ${csudo} rm -f /usr/lib/libtaos.so || :
-}
-
 function install_driver() {
+    echo
+    if [[ -d ${lib_link_dir} && ! -e ${lib_link_dir}/libtaos.so ]]; then
     echo -e "${GREEN}Start to install TDengine client driver ...${NC}"
+    ${csudo} ln -s ${script_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 || :
+    ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so || :

-    #create install main dir and all sub dir
-    ${csudo} mkdir -p ${install_main_dir}
-    ${csudo} mkdir -p ${install_main_dir}/driver
-
-    ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
-    ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
-    ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
-    ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
+    if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
+        ${csudo} ln -s ${script_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
+        ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
+    fi

     echo
-    echo -e "\033[44;32;1mTDengine client driver is successfully installed!${NC}"
+    echo -e "${GREEN}TDengine client driver is successfully installed!${NC}"
+    else
+        echo -e "${GREEN}TDengine client driver already exists, Please confirm whether the alert version matches the client driver version!${NC}"
+    fi
 }

 install_driver
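The rewritten install_driver.sh only creates the libtaos symlinks when they are not already present. A quick way to confirm the result on a target machine is to inspect the link chain; a minimal sketch, assuming the default /usr/lib and /usr/lib64 locations used by the script above:

```sh
# check that libtaos.so resolves to the packaged driver (paths taken from the script above)
ls -l /usr/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null
ls -l /usr/lib64/libtaos.so /usr/lib64/libtaos.so.1 2>/dev/null

# if a stale link is left over from an older install, remove it before re-running the script
# sudo rm -f /usr/lib/libtaos.so /usr/lib/libtaos.so.1
```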
@@ -119,7 +119,7 @@ WantedBy=multi-user.target
 	return nil
 }

-const version = "TDengine alert v2.0.0.1"
+var version = "2.0.0.1s"

 func main() {
 	var (
@@ -133,7 +133,7 @@ func main() {
 	flag.Parse()

 	if showVersion {
-		fmt.Println(version)
+		fmt.Println("TDengine alert v" + version)
 		return
 	}
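The switch from `const version` to `var version` matters because the Go linker's `-X` flag can only override package-level string variables, not constants. A hedged sketch of how the value can be injected at build time (the variable name `main.version` matches the declaration above; the version string and target platform are only examples):

```sh
# inject the release version into the alert binary at link time
# (the real value is passed to release.sh with -n, as shown further below)
GOOS=linux GOARCH=amd64 go build -ldflags "-X main.version=2.0.0.1s" -o alert
```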
@@ -6,9 +6,9 @@ set -e
 # set parameters by default value
 cpuType=amd64 # [armv6l | arm64 | amd64 | 386]
 osType=linux  # [linux | darwin | windows]
+version=""
 declare -A archMap=(["armv6l"]="arm" ["arm64"]="arm64" ["amd64"]="x64" ["386"]="x86")
-while getopts "h:c:o:" arg
+while getopts "h:c:o:n:" arg
 do
   case $arg in
     c)
@@ -19,6 +19,10 @@ do
       #echo "osType=$OPTARG"
       osType=$(echo $OPTARG)
       ;;
+    n)
+      #echo "version=$OPTARG"
+      version=$(echo $OPTARG)
+      ;;
     h)
       echo "Usage: `basename $0` -c [armv6l | arm64 | amd64 | 386] -o [linux | darwin | windows]"
       exit 0
@@ -30,18 +34,27 @@ do
   esac
 done

+if [ "$version" == "" ]; then
+  echo "Please input the correct version!"
+  exit 1
+fi

 startdir=$(pwd)
 scriptdir=$(dirname $(readlink -f $0))
 cd ${scriptdir}/cmd/alert
-version=$(grep 'const version =' main.go | awk '{print $NF}')
-version=${version%\"}
-version=${version:1}

 echo "cpuType=${cpuType}"
 echo "osType=${osType}"
 echo "version=${version}"

-GOOS=${osType} GOARCH=${cpuType} go build
+GOOS=${osType} GOARCH=${cpuType} go build -ldflags '-X main.version='${version}

+mkdir -p TDengine-alert/driver
+
+cp alert alert.cfg install_driver.sh ./TDengine-alert/.
+cp ../../../debug/build/lib/libtaos.so.${version} ./TDengine-alert/driver/.
+chmod 777 ./TDengine-alert/install_driver.sh
+
+tar -I 'gzip -9' -cf ${startdir}/TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}.tar.gz TDengine-alert/
+rm -rf ./TDengine-alert
-tar -I 'gzip -9' -cf ${startdir}/TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}.tar.gz alert alert.cfg install_driver.sh driver/
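With the version now passed on the command line, a release build and a client-side install would look roughly like this; the exact tarball name depends on the -n/-c/-o values used, so treat the paths as examples rather than the canonical procedure:

```sh
# build and package the alert module for linux/amd64, version 2.0.0.1 (example values)
./release.sh -c amd64 -o linux -n 2.0.0.1

# on the target host: unpack and install the bundled client driver
tar xzf TDengine-alert-2.0.0.1-Linux-x64.tar.gz
cd TDengine-alert && sudo ./install_driver.sh
```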
@@ -100,7 +100,7 @@ IF (TD_LINUX)
   ENDIF ()

   SET(DEBUG_FLAGS "-O0 -DDEBUG")
-  SET(RELEASE_FLAGS "-O0")
+  SET(RELEASE_FLAGS "-O0 -Wno-unused-variable -Wunused-but-set-variable")

   IF (${COVER} MATCHES "true")
     MESSAGE(STATUS "Test coverage mode, add extra flags")
@@ -13,7 +13,7 @@ ELSEIF (TD_WINDOWS)
   IF (NOT TD_GODLL)
     #INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector)
     #INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector)
-    #INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector)
+    INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector)
     INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .)
     INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .)
     INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include)
@@ -11,7 +11,7 @@ TDengine can be integrated with the open-source data visualization system [Grafana](https://www.grafana.com/)

 ### Configuring Grafana

-The TDengine Grafana plugin is located in the /usr/local/taos/connector/grafana directory of the installation package.
+The TDengine Grafana plugin is located in the /usr/local/taos/connector/grafanaplugin directory of the installation package.

 Taking CentOS 7.2 as an example, copy the tdengine directory into /var/lib/grafana/plugins and restart Grafana.
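A minimal sketch of the copy-and-restart step described above, assuming a systemd-managed Grafana service (the service name `grafana-server` and the target directory name `tdengine` are assumptions, not part of the original text):

```sh
# copy the plugin shipped with the TDengine package into Grafana's plugin directory
sudo cp -r /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine

# restart Grafana so it picks up the new plugin (service name assumed)
sudo systemctl restart grafana-server
```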
@@ -78,6 +78,18 @@ taos> SELECT SUM(current) FROM meters INTERVAL(1s);
  2018-10-03 14:38:16.000 |             36.000000000 |
 Query OK, 5 row(s) in set (0.001538s)
 ```
+
+Down sampling also supports a time offset. For example, sum the current collected by all smart meters every second, but with each time window starting at the 500 millisecond mark:
+
+```mysql
+taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
+           ts            |       sum(current)       |
+======================================================
+ 2018-10-03 14:38:04.500 |             11.189999809 |
+ 2018-10-03 14:38:05.500 |             31.900000572 |
+ 2018-10-03 14:38:06.500 |             11.600000000 |
+ 2018-10-03 14:38:15.500 |             12.300000381 |
+ 2018-10-03 14:38:16.500 |             35.000000000 |
+Query OK, 5 row(s) in set (0.001521s)
+```
+
 In IoT scenarios the collection timestamps of different data collection points are hard to synchronize, yet many analysis algorithms (FFT, for example) need the data aligned on strictly equal time intervals. In many systems the application has to write its own code for this, whereas TDengine's down sampling handles it easily. If a time interval contains no collected data, TDengine can also fill it in by interpolation.
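For completeness, the same offset query can be run non-interactively; this sketch assumes the `taos` CLI accepts a single statement via `-s` and that the `meters` demo table exists:

```sh
# run the offset-aligned downsampling query without entering the interactive shell
taos -s "SELECT SUM(current) FROM meters INTERVAL(1s, 500a);"
```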
@@ -148,7 +148,7 @@ INSERT INTO <tb1_name> USING <stb1_name> TAGS (<tag1_value1>, ...) VALUES (<fiel
 SELECT function<field_name>,…
  FROM <stable_name>
  WHERE <tag_name> <[=|<=|>=|<>] values..> ([AND|OR] …)
- INTERVAL (<time range>)
+ INTERVAL (<interval> [, offset])
  GROUP BY <tag_name>, <tag_name>…
  ORDER BY <tag_name> <asc|desc>
  SLIMIT <group_limit>
@@ -33,8 +33,7 @@ taos> DESCRIBE meters;
 - The built-in function now returns the current time of the server
 - When inserting a record, a timestamp of now means the server's current time is used
 - Epoch Time: a timestamp can also be a long integer, the number of milliseconds since 1970-01-01 08:00:00.000
-- Time values can be added and subtracted, e.g. now-2h means 2 hours before the query time (the last 2 hours). Time units after the number: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks), n (months), y (years). For example, select * from t1 where ts > now-2w and ts <= now-1w queries exactly one week of data from two weeks ago
-- TDengine does not yet support splitting time windows by calendar year or calendar month. Time-window units in a WHERE condition convert as follows: interval(1y) is equivalent to interval(365d), interval(1n) to interval(30d), and interval(1w) to interval(7d)
+- Time values can be added and subtracted, e.g. now-2h means 2 hours before the query time (the last 2 hours). The time unit after the number can be a (milliseconds), s (seconds), m (minutes), h (hours), d (days) or w (weeks). For example, select * from t1 where ts > now-2w and ts <= now-1w queries exactly one week of data from two weeks ago. When specifying the time window (interval) of a down sampling operation, the units n (calendar month) and y (calendar year) can also be used.

 By default TDengine timestamps have millisecond precision; microsecond precision can be enabled through the configuration parameter enableMicrosecond.

@@ -299,7 +298,7 @@ By default TDengine timestamps have millisecond precision
 SELECT select_expr [, select_expr ...]
     FROM {tb_name_list}
     [WHERE where_condition]
-    [INTERVAL [interval_offset,] interval_val]
+    [INTERVAL (interval_val [, interval_offset])]
     [FILL fill_val]
     [SLIDING fill_val]
     [GROUP BY col_list]
@@ -972,17 +971,17 @@ TDengine supports aggregation over time windows; table data can be aggregated by time segment
 ```mysql
 SELECT function_list FROM tb_name
   [WHERE where_condition]
-  INTERVAL (interval)
+  INTERVAL (interval [, offset])
   [FILL ({NONE | VALUE | PREV | NULL | LINEAR})]

 SELECT function_list FROM stb_name
   [WHERE where_condition]
-  INTERVAL (interval)
+  INTERVAL (interval [, offset])
   [FILL ({ VALUE | PREV | NULL | LINEAR})]
   [GROUP BY tags]
 ```

-- The length of the aggregation time window is set with the INTERVAL keyword; the minimum interval is 10 milliseconds (10a). Within an aggregation query, the aggregation and selection functions that can be used together are limited to single-output functions: count, avg, sum, stddev, leastsquares, percentile, min, max, first, last; functions that produce multiple rows of output (e.g. top, bottom, diff and arithmetic expressions) cannot be used.
+- The length of the aggregation time window is set with the INTERVAL keyword; the minimum interval is 10 milliseconds (10a), and an offset is supported (the offset must be smaller than the interval). Within an aggregation query, the aggregation and selection functions that can be used together are limited to single-output functions: count, avg, sum, stddev, leastsquares, percentile, min, max, first, last; functions that produce multiple rows of output (e.g. top, bottom, diff and arithmetic expressions) cannot be used.
 - The WHERE clause can specify the start and end time of the query and other filter conditions
 - The FILL clause specifies how to fill a time range in which data is missing. The fill modes are:
   1. No fill: NONE (the default mode).
@@ -1020,5 +1019,5 @@ SELECT AVG(current),MAX(current),LEASTSQUARES(current, start_val, step_val), PER
 - A table name can be at most 193 characters long; a row of data can be at most 16k characters
 - A column name can be at most 65 characters long; at most 1024 and at least 2 columns are allowed, and the first column must be the timestamp
 - At most 128 tags are allowed (0 is acceptable); the total length of tag values cannot exceed 16k characters
-- A SQL statement can be at most 65480 characters long, but this can be changed with the system configuration parameter maxSQLLength, up to 8M
+- A SQL statement can be at most 65480 characters long, but this can be changed with the system configuration parameter maxSQLLength, up to 1M
 - The number of databases, super tables and tables is not limited by the system, only by system resources
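If longer statements are needed (for large batched inserts, for example), the limit above is raised through taos.cfg. A hedged sketch — the parameter name comes from the text above, but the exact unit of the value and the need to restart taosd are assumptions:

```sh
# raise the maximum SQL statement length (value assumed to be in bytes, here ~1M)
echo "maxSQLLength 1048576" | sudo tee -a /etc/taos/taos.cfg

# restart the server so the new limit takes effect (assumed to be required)
sudo systemctl restart taosd
```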
@@ -1,7 +1,7 @@
 # TDengine 2.0 error codes and their decimal values


-| Code | bit | error code | Description | Decimal error code |
+| Status code | bit | Error code (hex) | Description | Error code (decimal) |
 |-----------------------| :---: | :---------: | :------------------------ | ---------------- |
 |TSDB_CODE_RPC_ACTION_IN_PROGRESS| 0 | 0x0001| "Action in progress"| -2147483647|
 |TSDB_CODE_RPC_AUTH_REQUIRED| 0 | 0x0002 | "Authentication required"| -2147483646|
@@ -87,7 +87,7 @@
 |TSDB_CODE_MND_INVALID_ACCT_OPTION| 0 | 0x0342 | "Invalid account options"| -2147482814|
 |TSDB_CODE_MND_USER_ALREADY_EXIST| 0 | 0x0350 | "User already exists"| -2147482800|
 |TSDB_CODE_MND_INVALID_USER |0 | 0x0351 | "Invalid user" |-2147482799|
-|TSDB_CODE_MND_INVALID_USER_FORMAT| |0 |0x0352 |"Invalid user format" |-2147482798|
+|TSDB_CODE_MND_INVALID_USER_FORMAT| 0 |0x0352 |"Invalid user format" |-2147482798|
 |TSDB_CODE_MND_INVALID_PASS_FORMAT| 0| 0x0353 | "Invalid password format"| -2147482797|
 |TSDB_CODE_MND_NO_USER_FROM_CONN| 0 | 0x0354 | "Can not get user from conn"| -2147482796|
 |TSDB_CODE_MND_TOO_MANY_USERS| 0 | 0x0355| "Too many users"| -2147482795|
@@ -107,7 +107,7 @@
 |TSDB_CODE_MND_DB_NOT_SELECTED| 0 | 0x0380 | "Database not specified or available"| -2147482752|
 |TSDB_CODE_MND_DB_ALREADY_EXIST| 0 | 0x0381 | "Database already exists"| -2147482751|
 |TSDB_CODE_MND_INVALID_DB_OPTION| 0 | 0x0382 | "Invalid database options"| -2147482750|
-|TSDB_CODE_MND_INVALID_DB| |0 | 0x0383 | "Invalid database name"| -2147482749|
+|TSDB_CODE_MND_INVALID_DB| 0 | 0x0383 | "Invalid database name"| -2147482749|
 |TSDB_CODE_MND_MONITOR_DB_FORBIDDEN| 0 | 0x0384 | "Cannot delete monitor database"| -2147482748|
 |TSDB_CODE_MND_TOO_MANY_DATABASES| 0| 0x0385 | "Too many databases for account"| -2147482747|
 |TSDB_CODE_MND_DB_IN_DROPPING| 0 | 0x0386| "Database not available" |-2147482746|
@@ -67,7 +67,7 @@ The logical structure of the TDengine distributed architecture is shown below:
 <center> Figure 1: TDengine architecture diagram </center>

 A complete TDengine system runs on one or more physical nodes. Logically, it consists of data nodes (dnode), the TDengine client (taosc) and applications (app). The system contains one or more data nodes, which form a cluster. Applications interact with the TDengine cluster through the taosc API. Each of these logical units is briefly introduced below.

-**Physical node (pnode):** a pnode is an independently running computer with its own compute, storage and network resources; it can be a physical machine with an operating system, a virtual machine or a container. A physical node is identified by its configured FQDN (Fully Qualified Domain Name).
+**Physical node (pnode):** a pnode is an independently running computer with its own compute, storage and network resources; it can be a physical machine with an operating system, a virtual machine or a container. A physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on the FQDN for network communication; if you are not familiar with FQDN, please read the blog post [一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html).

 **Data node (dnode):** a dnode is a running instance of the TDengine server-side program taosd on a physical node; a working system must have at least one data node. A dnode contains zero or more logical virtual nodes (VNODE) and zero or at most one logical management node (mnode). A dnode is uniquely identified in the system by its End Point (EP), the combination of the FQDN (Fully Qualified Domain Name) of the physical node the dnode runs on and the network port it is configured with. By configuring different ports, a single physical node (a physical machine, virtual machine or container) can run multiple instances, i.e. host multiple data nodes.
@@ -1,48 +1,70 @@
 # TDengine cluster installation and management

 Multiple TDengine servers, that is, multiple running instances of taosd, can form a cluster to guarantee highly reliable operation of TDengine and to provide horizontal scalability. To understand cluster management in TDengine 2.0 you need to be familiar with the basic cluster concepts, so please read the chapter on the overall TDengine 2.0 architecture. Before installing a cluster, also install and try out the single-node functionality following the chapter [《立即开始》](https://www.taosdata.com/cn/getting-started20/).

 Each data node of the cluster is uniquely identified by its End Point, which is the FQDN (Fully Qualified Domain Name) plus a port, for example h1.taosdata.com:6030. The FQDN is usually the server's hostname and can be obtained with the Linux command `hostname -f` (for how to configure the FQDN, see [一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)). The port is the port this data node uses for external service, 6030 by default, and can be changed with the serverPort parameter in taos.cfg. A physical node may have multiple hostnames configured; TDengine automatically picks the first one, but it can also be set explicitly with the fqdn parameter in taos.cfg. If you prefer direct IP access, set fqdn to the IP address of the node.

 Cluster management in TDengine is extremely simple: apart from adding and removing nodes, which require manual intervention, everything is fully automatic, which minimizes operation and maintenance work. This chapter describes the cluster management operations in detail.

 ## Preparation

+**Step 0**: If no DNS service is deployed, plan the FQDNs of all physical nodes of the cluster, then follow the steps in [一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html) to add the IP-to-FQDN mappings for all physical nodes of the cluster.
+
 **Step 1**: If any physical node used for the cluster holds earlier test data, had a 1.X version installed, or had another version of TDengine installed, delete it first and wipe all data; for the detailed steps see the blog post [《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html )
 **Note 1:** Because FQDN information is written into files, if the FQDN was not configured or was changed while TDengine was already started, make sure the data is useless or backed up and then clean the old data (rm -rf /var/lib/taos/);
 **Note 2:** Clients must also be configured so that they can correctly resolve the FQDN of every node, whether through a DNS service or the hosts file.

-**Step 2**: It is recommended to turn off the firewall, or at least keep TCP and UDP ports 6030 - 6042 open. It is **strongly recommended** to turn the firewall off first and configure the ports only after the cluster is up;
+**Step 2**: It is recommended to turn off the firewall on all physical nodes, or at least keep TCP and UDP ports 6030 - 6042 open. It is **strongly recommended** to turn the firewall off first and configure the ports only after the cluster is up;

-**Step 3**: Install TDengine on all nodes; the versions must be identical, **but do not start taosd**;
+**Step 3**: Install TDengine on all nodes; the versions must be identical, **but do not start taosd**. During installation, when prompted whether to join an existing TDengine cluster, just press Enter on the first physical node to create a new cluster; on subsequent physical nodes, enter the FQDN:port (default 6030) of any online physical node of that cluster;

-**Step 4**: Check and configure the FQDN of every node:
+**Step 4**: Check the network settings of all data nodes and of the physical nodes where the applications run:

-1. Run `hostname -f` on every node and confirm that all hostnames are distinct;
-2. Run `ping host` on every node, where host is the hostname of another node, to check whether the other nodes can be reached; if not, check the network settings, the /etc/hosts file, or the DNS configuration. If the nodes cannot ping each other, the cluster cannot be formed.
-3. The FQDN of each node is the hostname printed above plus the port, for example h1.taosdata.com:6030
+1. Run `hostname -f` on every physical node and confirm that all hostnames are distinct (nodes that only host the application driver do not need this check);
+2. Run `ping host` on every physical node, where host is the hostname of another physical node, to check whether the other physical nodes can be reached; if not, check the network settings, the /etc/hosts file (on Windows the default path is C:\Windows\system32\drivers\etc\hosts), or the DNS configuration. If the nodes cannot ping each other, the cluster cannot be formed;
+3. From the physical node where the application runs, ping the data node running taosd; if it cannot be reached, the application will not be able to connect to taosd, so check the DNS settings or hosts file of the physical node where the application runs;
+4. The End Point of each data node is the hostname printed above plus the port, for example h1.taosdata.com:6030 (a sketch of these checks follows this hunk)

-**Step 5**: Edit the TDengine configuration file (/etc/taos/taos.cfg must be edited on every node). Assuming the first node to be started has End Point h1.taosdata.com:6030, the cluster-related parameters are as follows:
+**Step 5**: Edit the TDengine configuration file (/etc/taos/taos.cfg must be edited on every node). Assuming the first data node to be started has End Point h1.taosdata.com:6030, the cluster-related parameters are as follows:

 ```
-// firstEp is identical in the configuration of every node of the cluster; after the first access to it, the whole cluster's information is obtained
+// firstEp is the first data node that each data node connects to after its initial start
 firstEp               h1.taosdata.com:6030

-// the FQDN of this node; if the machine has only one hostname, this can be left unset
+// the FQDN of this data node; if the machine has only one hostname, this can be left unset
 fqdn                  h1.taosdata.com

-// the port of this node, 6030 by default
+// the port of this data node, 6030 by default
 serverPort            6030

 // required when the number of server nodes is even; see the section on using the Arbitrator
 arbitrator            ha.taosdata.com:6042
 ```

-The only parameter that must be changed is firstEp; the others can be left untouched unless you clearly know why you are changing them.
+The only parameters that must be changed are firstEp and fqdn; the others can be left untouched unless you clearly know why you are changing them.

-## Starting the first node
+**For a data node (dnode) to join the cluster, the 11 cluster-related parameters listed below must be identical on all nodes; otherwise it cannot join the cluster.**
+
+| **#** | **Configuration parameter** | **Meaning**                                                              |
+| ----- | --------------------------- | ------------------------------------------------------------------------ |
+| 1     | numOfMnodes                 | number of management nodes in the system                                  |
+| 2     | mnodeEqualVnodeNum          | how many vnodes an mnode is counted as                                    |
+| 3     | offlineThreshold            | dnode offline threshold; beyond this time the dnode is considered offline |
+| 4     | statusInterval              | interval at which a dnode reports its status to the mnode                 |
+| 5     | arbitrator                  | End Point of the arbitrator in the system                                 |
+| 6     | timezone                    | time zone                                                                 |
+| 7     | locale                      | system locale and encoding                                                |
+| 8     | charset                     | character set encoding                                                    |
+| 9     | balance                     | whether load balancing is enabled                                         |
+| 10    | maxTablesPerVnode           | maximum number of tables that can be created in a vnode                   |
+| 11    | maxVgroupsPerDb             | maximum number of vnodes that a DB can use                                |
+
+## Starting the first data node

-Following the instructions in [《立即开始》](https://www.taosdata.com/cn/getting-started20/), start the first node h1.taosdata.com, then run taos to start the taos shell and execute the command "show dnodes;", as shown below:
+Following the instructions in [《立即开始》](https://www.taosdata.com/cn/getting-started20/), start the first data node, for example h1.taosdata.com, then run taos to start the taos shell and execute the command "show dnodes;", as shown below:

 ```
 Welcome to the TDengine shell from Linux, Client Version:2.0.0.0
 Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
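A condensed sketch of the checks in Step 4, run from a shell on each node; the hostnames and the hosts-file addresses are examples only:

```sh
# confirm this node's FQDN and make sure it differs from the other nodes'
hostname -f

# if no DNS is available, map every cluster node in /etc/hosts (example addresses)
echo "192.168.0.1 h1.taosdata.com" | sudo tee -a /etc/hosts
echo "192.168.0.2 h2.taosdata.com" | sudo tee -a /etc/hosts

# verify that every other node (and, from the application host, every data node) is reachable
ping -c 1 h2.taosdata.com
```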
@@ -55,21 +77,22 @@ Query OK, 1 row(s) in set (0.006385s)

 taos>
 ```
-From the output of this command you can see that the End Point of the node that was just started is h1.taos.com:6030
+From the output of this command you can see that the End Point of the data node that was just started is h1.taos.com:6030, which is the firstEP of this new cluster.

-## Starting subsequent nodes
+## Starting subsequent data nodes

-To add subsequent nodes to the existing cluster, follow these steps:
+To add subsequent data nodes to the existing cluster, follow these steps:

-1. Start taosd on every node following the method in the chapter ["立即开始"](https://www.taosdata.com/cn/getting-started/).
+1. Start taosd on every physical node following the method in the chapter ["立即开始"](https://www.taosdata.com/cn/getting-started/);

-2. On the first node, use the CLI program taos to log in to TDengine and run the command:
+2. On the first data node, use the CLI program taos to log in to TDengine and run the command:

 ```
 CREATE DNODE "h2.taos.com:6030";
 ```

-This adds the End Point of the new node (obtained in step 4 of the preparation) to the cluster's EP list. **"fqdn:port" must be enclosed in double quotes**, otherwise an error occurs. Be sure to replace the example "h2.taos.com:6030" with the End Point of this new node.
+This adds the End Point of the new data node (obtained in step 4 of the preparation) to the cluster's EP list. **"fqdn:port" must be enclosed in double quotes**, otherwise an error occurs. Be sure to replace the example "h2.taos.com:6030" with the End Point of this new data node.

 3. Then run the command
|
||||||
SHOW DNODES;
|
SHOW DNODES;
|
||||||
```
|
```
|
||||||
|
|
||||||
查看新节点是否被成功加入。如果该被加入的节点处于离线状态,请做两个检查
|
查看新节点是否被成功加入。如果该被加入的数据节点处于离线状态,请做两个检查
|
||||||
|
|
||||||
- 查看该节点的taosd是否正常工作,如果没有正常运行,需要先检查为什么
|
- 查看该数据节点的taosd是否正常工作,如果没有正常运行,需要先检查为什么
|
||||||
- 查看该节点taosd日志文件taosdlog.0里前面几行日志(一般在/var/log/taos目录),看日志里输出的该节点fqdn以及端口号是否为刚添加的End Point。如果不一致,需要将正确的End Point添加进去。
|
- 查看该数据节点taosd日志文件taosdlog.0里前面几行日志(一般在/var/log/taos目录),看日志里输出的该数据节点fqdn以及端口号是否为刚添加的End Point。如果不一致,需要将正确的End Point添加进去。
|
||||||
|
|
||||||
按照上述步骤可以源源不断的将新的节点加入到集群。
|
按照上述步骤可以源源不断的将新的数据节点加入到集群。
|
||||||
|
|
||||||
**提示:**
|
**提示:**
|
||||||
|
|
||||||
- firstEp这个参数仅仅在该节点第一次加入集群时有作用,加入集群后,该节点会保存最新的mnode的End Point列表,不再依赖这两个参数。
|
- 任何已经加入集群在线的数据节点,都可以作为后续待加入节点的firstEP。
|
||||||
- 两个没有配置firstEp参数的dnode启动后,会独立运行起来。这个时候,无法将其中一个节点加入到另外一个节点,形成集群。**无法将两个独立的集群合并成为新的集群**。
|
- firstEp这个参数仅仅在该数据节点首次加入集群时有作用,加入集群后,该数据节点会保存最新的mnode的End Point列表,不再依赖这个参数。
|
||||||
|
- 两个没有配置firstEp参数的数据节点dnode启动后,会独立运行起来。这个时候,无法将其中一个数据节点加入到另外一个数据节点,形成集群。**无法将两个独立的集群合并成为新的集群**。
|
||||||
|
|
||||||
## 节点管理
|
## 数据节点管理
|
||||||
|
|
||||||
|
上面已经介绍如何从零开始搭建集群。集群组建完后,还可以随时添加新的数据节点进行扩容,或删除数据节点,并检查集群当前状态。
|
||||||
|
|
||||||
|
### 添加数据节点
|
||||||
|
|
||||||
### 添加节点
|
|
||||||
执行CLI程序taos, 使用root账号登录进系统, 执行:
|
执行CLI程序taos, 使用root账号登录进系统, 执行:
|
||||||
|
|
||||||
```
|
```
|
||||||
CREATE DNODE "fqdn:port";
|
CREATE DNODE "fqdn:port";
|
||||||
```
|
```
|
||||||
将新节点的End Point添加进集群的EP列表。**"fqdn:port"需要用双引号引起来**,否则出错。一个节点对外服务的fqdn和port可以通过配置文件taos.cfg进行配置,缺省是自动获取。
|
|
||||||
|
|
||||||
### 删除节点
|
将新数据节点的End Point添加进集群的EP列表。**"fqdn:port"需要用双引号引起来**,否则出错。一个数据节点对外服务的fqdn和port可以通过配置文件taos.cfg进行配置,缺省是自动获取。【强烈不建议用自动获取方式来配置FQDN,可能导致生成的数据节点的End Point不是所期望的】
|
||||||
|
|
||||||
|
### 删除数据节点
|
||||||
|
|
||||||
执行CLI程序taos, 使用root账号登录进TDengine系统,执行:
|
执行CLI程序taos, 使用root账号登录进TDengine系统,执行:
|
||||||
|
|
||||||
```
|
```
|
||||||
DROP DNODE "fqdn:port";
|
DROP DNODE "fqdn:port";
|
||||||
```
|
```
|
||||||
|
|
||||||
其中fqdn是被删除的节点的FQDN,port是其对外服务器的端口号
|
其中fqdn是被删除的节点的FQDN,port是其对外服务器的端口号
|
||||||
|
|
||||||
### 查看节点
|
### 查看数据节点
|
||||||
|
|
||||||
执行CLI程序taos,使用root账号登录进TDengine系统,执行:
|
执行CLI程序taos,使用root账号登录进TDengine系统,执行:
|
||||||
|
|
||||||
```
|
```
|
||||||
SHOW DNODES;
|
SHOW DNODES;
|
||||||
```
|
```
|
||||||
它将列出集群中所有的dnode,每个dnode的fqdn:port, 状态(ready, offline等),vnode数目,还未使用的vnode数目等信息。在添加或删除一个节点后,可以使用该命令查看。
|
|
||||||
|
它将列出集群中所有的dnode,每个dnode的fqdn:port, 状态(ready, offline等),vnode数目,还未使用的vnode数目等信息。在添加或删除一个数据节点后,可以使用该命令查看。
|
||||||
|
|
||||||
### 查看虚拟节点组
|
### 查看虚拟节点组
|
||||||
|
|
||||||
为充分利用多核技术,并提供scalability,数据需要分片处理。因此TDengine会将一个DB的数据切分成多份,存放在多个vnode里。这些vnode可能分布在多个dnode里,这样就实现了水平扩展。一个vnode仅仅属于一个DB,但一个DB可以有多个vnode。vnode的是mnode根据当前系统资源的情况,自动进行分配的,无需任何人工干预。
|
为充分利用多核技术,并提供scalability,数据需要分片处理。因此TDengine会将一个DB的数据切分成多份,存放在多个vnode里。这些vnode可能分布在多个数据节点dnode里,这样就实现了水平扩展。一个vnode仅仅属于一个DB,但一个DB可以有多个vnode。vnode的是mnode根据当前系统资源的情况,自动进行分配的,无需任何人工干预。
|
||||||
|
|
||||||
执行CLI程序taos,使用root账号登录进TDengine系统,执行:
|
执行CLI程序taos,使用root账号登录进TDengine系统,执行:
|
||||||
|
|
||||||
```
|
```
|
||||||
SHOW VGROUPS;
|
SHOW VGROUPS;
|
||||||
```
|
```
|
||||||
|
|
||||||
## vnode的高可用性
|
## vnode的高可用性
|
||||||
|
|
||||||
TDengine通过多副本的机制来提供系统的高可用性,包括vnode和mnode的高可用性。
|
TDengine通过多副本的机制来提供系统的高可用性,包括vnode和mnode的高可用性。
|
||||||
|
|
||||||
vnode的副本数是与DB关联的,一个集群里可以有多个DB,根据运营的需求,每个DB可以配置不同的副本数。创建数据库时,通过参数replica 指定副本数(缺省为1)。如果副本数为1,系统的可靠性无法保证,只要数据所在的节点宕机,就将无法提供服务。集群的节点数必须大于等于副本数,否则创建表时将返回错误“more dnodes are needed"。比如下面的命令将创建副本数为3的数据库demo:
|
vnode的副本数是与DB关联的,一个集群里可以有多个DB,根据运营的需求,每个DB可以配置不同的副本数。创建数据库时,通过参数replica 指定副本数(缺省为1)。如果副本数为1,系统的可靠性无法保证,只要数据所在的节点宕机,就将无法提供服务。集群的节点数必须大于等于副本数,否则创建表时将返回错误“more dnodes are needed"。比如下面的命令将创建副本数为3的数据库demo:
|
||||||
|
@ -130,21 +166,25 @@ vnode的副本数是与DB关联的,一个集群里可以有多个DB,根据
|
||||||
```
|
```
|
||||||
CREATE DATABASE demo replica 3;
|
CREATE DATABASE demo replica 3;
|
||||||
```
|
```
|
||||||
一个DB里的数据会被切片分到多个vnode group,vnode group里的vnode数目就是DB的副本数,同一个vnode group里各vnode的数据是完全一致的。为保证高可用性,vnode group里的vnode一定要分布在不同的dnode里(实际部署时,需要在不同的物理机上),只要一个vgroup里超过半数的vnode处于工作状态,这个vgroup就能正常的对外服务。
|
|
||||||
|
|
||||||
一个dnode里可能有多个DB的数据,因此一个dnode离线时,可能会影响到多个DB。如果一个vnode group里的一半或一半以上的vnode不工作,那么该vnode group就无法对外服务,无法插入或读取数据,这样会影响到它所属的DB的一部分表的读写操作。
|
一个DB里的数据会被切片分到多个vnode group,vnode group里的vnode数目就是DB的副本数,同一个vnode group里各vnode的数据是完全一致的。为保证高可用性,vnode group里的vnode一定要分布在不同的数据节点dnode里(实际部署时,需要在不同的物理机上),只要一个vgroup里超过半数的vnode处于工作状态,这个vgroup就能正常的对外服务。
|
||||||
|
|
||||||
因为vnode的引入,无法简单的给出结论:“集群中过半dnode工作,集群就应该工作”。但是对于简单的情形,很好下结论。比如副本数为3,只有三个dnode,那如果仅有一个节点不工作,整个集群还是可以正常工作的,但如果有两个节点不工作,那整个集群就无法正常工作了。
|
一个数据节点dnode里可能有多个DB的数据,因此一个dnode离线时,可能会影响到多个DB。如果一个vnode group里的一半或一半以上的vnode不工作,那么该vnode group就无法对外服务,无法插入或读取数据,这样会影响到它所属的DB的一部分表的读写操作。
|
||||||
|
|
||||||
|
因为vnode的引入,无法简单的给出结论:“集群中过半数据节点dnode工作,集群就应该工作”。但是对于简单的情形,很好下结论。比如副本数为3,只有三个dnode,那如果仅有一个节点不工作,整个集群还是可以正常工作的,但如果有两个数据节点不工作,那整个集群就无法正常工作了。
|
||||||
|
|
||||||
## Mnode的高可用性
|
## Mnode的高可用性
|
||||||
TDengine集群是由mnode (taosd的一个模块,逻辑节点) 负责管理的,为保证mnode的高可用,可以配置多个mnode副本,副本数由系统配置参数numOfMnodes决定,有效范围为1-3。为保证元数据的强一致性,mnode副本之间是通过同步的方式进行数据复制的。
|
|
||||||
|
|
||||||
一个集群有多个dnode, 但一个dnode至多运行一个mnode实例。多个dnode情况下,哪个dnode可以作为mnode呢?这是完全由系统根据整个系统资源情况,自动指定的。用户可通过CLI程序taos,在TDengine的console里,执行如下命令:
|
TDengine集群是由mnode (taosd的一个模块,管理节点) 负责管理的,为保证mnode的高可用,可以配置多个mnode副本,副本数由系统配置参数numOfMnodes决定,有效范围为1-3。为保证元数据的强一致性,mnode副本之间是通过同步的方式进行数据复制的。
|
||||||
|
|
||||||
|
一个集群有多个数据节点dnode, 但一个dnode至多运行一个mnode实例。多个dnode情况下,哪个dnode可以作为mnode呢?这是完全由系统根据整个系统资源情况,自动指定的。用户可通过CLI程序taos,在TDengine的console里,执行如下命令:
|
||||||
|
|
||||||
```
|
```
|
||||||
SHOW MNODES;
|
SHOW MNODES;
|
||||||
```
|
```
|
||||||
|
|
||||||
来查看mnode列表,该列表将列出mnode所处的dnode的End Point和角色(master, slave, unsynced 或offline)。
|
来查看mnode列表,该列表将列出mnode所处的dnode的End Point和角色(master, slave, unsynced 或offline)。
|
||||||
当集群中第一个节点启动时,该节点一定会运行一个mnode实例,否则该dnode无法正常工作,因为一个系统是必须有至少一个mnode的。如果numOfMnodes配置为2,启动第二个dnode时,该dnode也将运行一个mnode实例。
|
当集群中第一个数据节点启动时,该数据节点一定会运行一个mnode实例,否则该数据节点dnode无法正常工作,因为一个系统是必须有至少一个mnode的。如果numOfMnodes配置为2,启动第二个dnode时,该dnode也将运行一个mnode实例。
|
||||||
|
|
||||||
为保证mnode服务的高可用性,numOfMnodes必须设置为2或更大。因为mnode保存的元数据必须是强一致的,如果numOfMnodes大于2,复制参数quorum自动设为2,也就是说,至少要保证有两个副本写入数据成功,才通知客户端应用写入成功。
|
为保证mnode服务的高可用性,numOfMnodes必须设置为2或更大。因为mnode保存的元数据必须是强一致的,如果numOfMnodes大于2,复制参数quorum自动设为2,也就是说,至少要保证有两个副本写入数据成功,才通知客户端应用写入成功。
|
||||||
|
|
||||||
|
@ -154,22 +194,25 @@ SHOW MNODES;
|
||||||
|
|
||||||
有三种情况,将触发负载均衡,而且都无需人工干预。
|
有三种情况,将触发负载均衡,而且都无需人工干预。
|
||||||
|
|
||||||
- 当一个新节点添加进集群时,系统将自动触发负载均衡,一些节点上的数据将被自动转移到新节点上,无需任何人工干预。
|
- 当一个新数据节点添加进集群时,系统将自动触发负载均衡,一些节点上的数据将被自动转移到新数据节点上,无需任何人工干预。
|
||||||
- 当一个节点从集群中移除时,系统将自动把该节点上的数据转移到其他节点,无需任何人工干预。
|
- 当一个数据节点从集群中移除时,系统将自动把该数据节点上的数据转移到其他数据节点,无需任何人工干预。
|
||||||
- 如果一个节点过热(数据量过大),系统将自动进行负载均衡,将该节点的一些vnode自动挪到其他节点。
|
- 如果一个数据节点过热(数据量过大),系统将自动进行负载均衡,将该数据节点的一些vnode自动挪到其他节点。
|
||||||
|
|
||||||
当上述三种情况发生时,系统将启动一各个节点的负载计算,从而决定如何挪动。
|
当上述三种情况发生时,系统将启动一各个数据节点的负载计算,从而决定如何挪动。
|
||||||
|
|
||||||
##节点离线处理
|
**【提示】负载均衡由参数balance控制,它决定是否启动自动负载均衡。**
|
||||||
如果一个节点离线,TDengine集群将自动检测到。有如下两种情况:
|
|
||||||
- 改节点离线超过一定时间(taos.cfg里配置参数offlineThreshold控制时长),系统将自动把该节点删除,产生系统报警信息,触发负载均衡流程。如果该被删除的节点重现上线时,它将无法加入集群,需要系统管理员重新将其添加进集群才会开始工作。
|
## 数据节点离线处理
|
||||||
|
|
||||||
|
如果一个数据节点离线,TDengine集群将自动检测到。有如下两种情况:
|
||||||
|
|
||||||
|
- 该数据节点离线超过一定时间(taos.cfg里配置参数offlineThreshold控制时长),系统将自动把该数据节点删除,产生系统报警信息,触发负载均衡流程。如果该被删除的数据节点重现上线时,它将无法加入集群,需要系统管理员重新将其添加进集群才会开始工作。
|
||||||
- 离线后,在offlineThreshold的时长内重新上线,系统将自动启动数据恢复流程,等数据完全恢复后,该节点将开始正常工作。
|
- 离线后,在offlineThreshold的时长内重新上线,系统将自动启动数据恢复流程,等数据完全恢复后,该节点将开始正常工作。
|
||||||
|
|
||||||
**注意:**如果一个虚拟节点组(包括mnode组)里每个节点都处于离线或unsynced状态,必须等该虚拟节点组里的所有节点都上线、都能交换状态信息后,才能选出Master,该虚拟节点组才能对外提供服务。比如整个集群有3个节点,副本数为3,如果3个节点都宕机,然后2个节点重启,是无法工作的,只有等3个节点都重启成功,才能对外服务。
|
**注意:**如果一个虚拟节点组(包括mnode组)里所归属的每个数据节点都处于离线或unsynced状态,必须等该虚拟节点组里的所有数据节点都上线、都能交换状态信息后,才能选出Master,该虚拟节点组才能对外提供服务。比如整个集群有3个数据节点,副本数为3,如果3个数据节点都宕机,然后2个数据节点重启,是无法工作的,只有等3个数据节点都重启成功,才能对外服务。
|
||||||
|
|
||||||
## Arbitrator的使用
|
## Arbitrator的使用
|
||||||
|
|
||||||
如果副本数为偶数,当一个vnode group里一半或超过一半的vnode不工作时,是无法从中选出master的。同理,一半或超过一半的mnode不工作时,是无法选出mnode的master的,因为存在“split brain”问题。为解决这个问题,TDengine引入了arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含arbitrator在内,超过半数的vnode或mnode工作,那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形,如果一个节点A离线,但另外一个节点B正常,而且能连接到arbitrator, 那么节点B就能正常工作。
|
如果副本数为偶数,当一个vnode group里一半或超过一半的vnode不工作时,是无法从中选出master的。同理,一半或超过一半的mnode不工作时,是无法选出mnode的master的,因为存在“split brain”问题。为解决这个问题,TDengine引入了arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含arbitrator在内,超过半数的vnode或mnode工作,那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形,如果一个节点A离线,但另外一个节点B正常,而且能连接到arbitrator, 那么节点B就能正常工作。
|
||||||
|
|
||||||
TDengine安装包里带有一个执行程序tarbitrator, 找任何一台Linux服务器运行它即可。该程序对系统资源几乎没有要求,只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号,缺省是6042。配置每个taosd实例时,可以在配置文件taos.cfg里将参数arbitrator设置为arbitrator的End Point。如果该参数配置了,当副本数为偶数数,系统将自动连接配置的arbitrator。
|
TDengine安装包里带有一个执行程序tarbitrator, 找任何一台Linux服务器运行它即可。该程序对系统资源几乎没有要求,只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号,缺省是6042。配置每个taosd实例时,可以在配置文件taos.cfg里将参数arbitrator设置为arbitrator的End Point。如果该参数配置了,当副本数为偶数数,系统将自动连接配置的arbitrator。
|
||||||
|
|
||||||
|
|
|
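Putting the Arbitrator section into practice amounts to two steps; a minimal sketch, with the host name as a placeholder and the default port 6042 taken from the text above:

```sh
# on any Linux server: start the arbitrator on its default port (6042)
nohup tarbitrator -p 6042 > /dev/null 2>&1 &

# on every data node: point taos.cfg at the arbitrator's End Point
echo "arbitrator ha.taosdata.com:6042" | sudo tee -a /etc/taos/taos.cfg
```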
@ -69,17 +69,13 @@ typedef struct SJoinSupporter {
|
||||||
SSubqueryState* pState;
|
SSubqueryState* pState;
|
||||||
SSqlObj* pObj; // parent SqlObj
|
SSqlObj* pObj; // parent SqlObj
|
||||||
int32_t subqueryIndex; // index of sub query
|
int32_t subqueryIndex; // index of sub query
|
||||||
char intervalTimeUnit;
|
SInterval interval;
|
||||||
char slidingTimeUnit;
|
|
||||||
int64_t intervalTime; // interval time
|
|
||||||
int64_t slidingTime; // sliding time
|
|
||||||
SLimitVal limit; // limit info
|
SLimitVal limit; // limit info
|
||||||
uint64_t uid; // query meter uid
|
uint64_t uid; // query table uid
|
||||||
SArray* colList; // previous query information, no need to use this attribute, and the corresponding attribution
|
SArray* colList; // previous query information, no need to use this attribute, and the corresponding attribution
|
||||||
SArray* exprList;
|
SArray* exprList;
|
||||||
SFieldInfo fieldsInfo;
|
SFieldInfo fieldsInfo;
|
||||||
STagCond tagCond;
|
STagCond tagCond;
|
||||||
SSqlGroupbyExpr groupbyExpr;
|
|
||||||
struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array
|
struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array
|
||||||
FILE* f; // temporary file in order to create TSBuf
|
FILE* f; // temporary file in order to create TSBuf
|
||||||
char path[PATH_MAX]; // temporary file path, todo dynamic allocate memory
|
char path[PATH_MAX]; // temporary file path, todo dynamic allocate memory
|
||||||
|
|
|
@ -29,6 +29,7 @@ extern "C" {
|
||||||
#include "tglobal.h"
|
#include "tglobal.h"
|
||||||
#include "tsqlfunction.h"
|
#include "tsqlfunction.h"
|
||||||
#include "tutil.h"
|
#include "tutil.h"
|
||||||
|
#include "tcache.h"
|
||||||
|
|
||||||
#include "qExecutor.h"
|
#include "qExecutor.h"
|
||||||
#include "qSqlparser.h"
|
#include "qSqlparser.h"
|
||||||
|
@ -225,12 +226,8 @@ typedef struct SQueryInfo {
|
||||||
int16_t command; // the command may be different for each subclause, so keep it seperately.
|
int16_t command; // the command may be different for each subclause, so keep it seperately.
|
||||||
uint32_t type; // query/insert type
|
uint32_t type; // query/insert type
|
||||||
// TODO refactor
|
// TODO refactor
|
||||||
char intervalTimeUnit;
|
|
||||||
char slidingTimeUnit;
|
|
||||||
STimeWindow window; // query time window
|
STimeWindow window; // query time window
|
||||||
int64_t intervalTime; // aggregation time window range
|
SInterval interval;
|
||||||
int64_t slidingTime; // sliding window in mseconds
|
|
||||||
int64_t intervalOffset;// start offset of each time window
|
|
||||||
int32_t tz; // query client timezone
|
int32_t tz; // query client timezone
|
||||||
|
|
||||||
SSqlGroupbyExpr groupbyExpr; // group by tags info
|
SSqlGroupbyExpr groupbyExpr; // group by tags info
|
||||||
|
@ -333,6 +330,7 @@ typedef struct STscObj {
|
||||||
struct SSqlStream *streamList;
|
struct SSqlStream *streamList;
|
||||||
void* pDnodeConn;
|
void* pDnodeConn;
|
||||||
pthread_mutex_t mutex;
|
pthread_mutex_t mutex;
|
||||||
|
T_REF_DECLARE()
|
||||||
} STscObj;
|
} STscObj;
|
||||||
|
|
||||||
typedef struct SSqlObj {
|
typedef struct SSqlObj {
|
||||||
|
@ -359,6 +357,8 @@ typedef struct SSqlObj {
|
||||||
uint16_t numOfSubs;
|
uint16_t numOfSubs;
|
||||||
struct SSqlObj **pSubs;
|
struct SSqlObj **pSubs;
|
||||||
struct SSqlObj * prev, *next;
|
struct SSqlObj * prev, *next;
|
||||||
|
|
||||||
|
struct SSqlObj **self;
|
||||||
} SSqlObj;
|
} SSqlObj;
|
||||||
|
|
||||||
typedef struct SSqlStream {
|
typedef struct SSqlStream {
|
||||||
|
@ -366,8 +366,6 @@ typedef struct SSqlStream {
|
||||||
uint32_t streamId;
|
uint32_t streamId;
|
||||||
char listed;
|
char listed;
|
||||||
bool isProject;
|
bool isProject;
|
||||||
char intervalTimeUnit;
|
|
||||||
char slidingTimeUnit;
|
|
||||||
int16_t precision;
|
int16_t precision;
|
||||||
int64_t num; // number of computing count
|
int64_t num; // number of computing count
|
||||||
|
|
||||||
|
@ -381,8 +379,7 @@ typedef struct SSqlStream {
|
||||||
int64_t ctime; // stream created time
|
int64_t ctime; // stream created time
|
||||||
int64_t stime; // stream next executed time
|
int64_t stime; // stream next executed time
|
||||||
int64_t etime; // stream end query time, when time is larger then etime, the stream will be closed
|
int64_t etime; // stream end query time, when time is larger then etime, the stream will be closed
|
||||||
int64_t intervalTime;
|
SInterval interval;
|
||||||
int64_t slidingTime;
|
|
||||||
void * pTimer;
|
void * pTimer;
|
||||||
|
|
||||||
void (*fp)();
|
void (*fp)();
|
||||||
|
@ -413,7 +410,6 @@ int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
|
||||||
void tscRestoreSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
|
void tscRestoreSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
|
||||||
|
|
||||||
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
|
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
|
||||||
void tscDestroyResPointerInfo(SSqlRes *pRes);
|
|
||||||
|
|
||||||
void tscResetSqlCmdObj(SSqlCmd *pCmd, bool removeFromCache);
|
void tscResetSqlCmdObj(SSqlCmd *pCmd, bool removeFromCache);
|
||||||
|
|
||||||
|
@ -425,17 +421,19 @@ void tscFreeSqlResult(SSqlObj *pSql);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* only free part of resources allocated during query.
|
* only free part of resources allocated during query.
|
||||||
|
* TODO remove it later
|
||||||
* Note: this function is multi-thread safe.
|
* Note: this function is multi-thread safe.
|
||||||
* @param pObj
|
* @param pObj
|
||||||
*/
|
*/
|
||||||
void tscPartiallyFreeSqlObj(SSqlObj *pObj);
|
void tscPartiallyFreeSqlObj(SSqlObj *pSql);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* free sql object, release allocated resource
|
* free sql object, release allocated resource
|
||||||
* @param pObj Free metric/meta information, dynamically allocated payload, and
|
* @param pObj
|
||||||
* response buffer, object itself
|
|
||||||
*/
|
*/
|
||||||
void tscFreeSqlObj(SSqlObj *pObj);
|
void tscFreeSqlObj(SSqlObj *pSql);
|
||||||
|
|
||||||
|
void tscFreeSqlObjInCache(void *pSql);
|
||||||
|
|
||||||
void tscCloseTscObj(STscObj *pObj);
|
void tscCloseTscObj(STscObj *pObj);
|
||||||
|
|
||||||
|
@ -451,9 +449,6 @@ void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen)
|
||||||
bool tscIsUpdateQuery(SSqlObj* pSql);
|
bool tscIsUpdateQuery(SSqlObj* pSql);
|
||||||
bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
|
bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
|
||||||
|
|
||||||
// todo remove this function.
|
|
||||||
bool tscResultsetFetchCompleted(TAOS_RES *result);
|
|
||||||
|
|
||||||
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
|
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
|
||||||
|
|
||||||
int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql);
|
int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql);
|
||||||
|
@ -468,7 +463,7 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
|
||||||
int32_t type = pInfo->pSqlExpr->resType;
|
int32_t type = pInfo->pSqlExpr->resType;
|
||||||
int32_t bytes = pInfo->pSqlExpr->resBytes;
|
int32_t bytes = pInfo->pSqlExpr->resBytes;
|
||||||
|
|
||||||
char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row;
|
char* pData = pRes->data + (int32_t)(pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row);
|
||||||
|
|
||||||
// user defined constant value output columns
|
// user defined constant value output columns
|
||||||
if (TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
|
if (TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
|
||||||
|
@ -502,7 +497,8 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
extern void * tscCacheHandle;
|
extern SCacheObj* tscMetaCache;
|
||||||
|
extern SCacheObj* tscObjCache;
|
||||||
extern void * tscTmr;
|
extern void * tscTmr;
|
||||||
extern void * tscQhandle;
|
extern void * tscQhandle;
|
||||||
extern int tscKeepConn[];
|
extern int tscKeepConn[];
|
||||||
|
|
|
@ -18,6 +18,7 @@
|
||||||
|
|
||||||
#include "tnote.h"
|
#include "tnote.h"
|
||||||
#include "trpc.h"
|
#include "trpc.h"
|
||||||
|
#include "tcache.h"
|
||||||
#include "tscLog.h"
|
#include "tscLog.h"
|
||||||
#include "tscSubquery.h"
|
#include "tscSubquery.h"
|
||||||
#include "tscLocalMerge.h"
|
#include "tscLocalMerge.h"
|
||||||
|
@ -40,6 +41,8 @@ static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows);
|
||||||
static void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows);
|
static void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows);
|
||||||
|
|
||||||
void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const char* sqlstr, size_t sqlLen) {
|
void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const char* sqlstr, size_t sqlLen) {
|
||||||
|
SSqlCmd* pCmd = &pSql->cmd;
|
||||||
|
|
||||||
pSql->signature = pSql;
|
pSql->signature = pSql;
|
||||||
pSql->param = param;
|
pSql->param = param;
|
||||||
pSql->pTscObj = pObj;
|
pSql->pTscObj = pObj;
|
||||||
|
@ -48,6 +51,11 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
|
||||||
pSql->fp = fp;
|
pSql->fp = fp;
|
||||||
pSql->fetchFp = fp;
|
pSql->fetchFp = fp;
|
||||||
|
|
||||||
|
uint64_t handle = (uint64_t) pSql;
|
||||||
|
pSql->self = taosCachePut(tscObjCache, &handle, sizeof(uint64_t), &pSql, sizeof(uint64_t), 2*3600*1000);
|
||||||
|
|
||||||
|
T_REF_INC(pSql->pTscObj);
|
||||||
|
|
||||||
pSql->sqlstr = calloc(1, sqlLen + 1);
|
pSql->sqlstr = calloc(1, sqlLen + 1);
|
||||||
if (pSql->sqlstr == NULL) {
|
if (pSql->sqlstr == NULL) {
|
||||||
tscError("%p failed to malloc sql string buffer", pSql);
|
tscError("%p failed to malloc sql string buffer", pSql);
|
||||||
|
@ -59,7 +67,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
|
||||||
strntolower(pSql->sqlstr, sqlstr, (int32_t)sqlLen);
|
strntolower(pSql->sqlstr, sqlstr, (int32_t)sqlLen);
|
||||||
|
|
||||||
tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
|
tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
|
||||||
pSql->cmd.curSql = pSql->sqlstr;
|
pCmd->curSql = pSql->sqlstr;
|
||||||
|
|
||||||
int32_t code = tsParseSql(pSql, true);
|
int32_t code = tsParseSql(pSql, true);
|
||||||
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) return;
|
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) return;
|
||||||
|
|
|
@ -117,6 +117,10 @@ typedef struct SFirstLastInfo {
|
||||||
typedef struct SFirstLastInfo SLastrowInfo;
|
typedef struct SFirstLastInfo SLastrowInfo;
|
||||||
typedef struct SPercentileInfo {
|
typedef struct SPercentileInfo {
|
||||||
tMemBucket *pMemBucket;
|
tMemBucket *pMemBucket;
|
||||||
|
int32_t stage;
|
||||||
|
double minval;
|
||||||
|
double maxval;
|
||||||
|
int64_t numOfElems;
|
||||||
} SPercentileInfo;
|
} SPercentileInfo;
|
||||||
|
|
||||||
typedef struct STopBotInfo {
|
typedef struct STopBotInfo {
|
||||||
|
@ -302,7 +306,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
|
||||||
} else if (functionId == TSDB_FUNC_PERCT) {
|
} else if (functionId == TSDB_FUNC_PERCT) {
|
||||||
*type = (int16_t)TSDB_DATA_TYPE_DOUBLE;
|
*type = (int16_t)TSDB_DATA_TYPE_DOUBLE;
|
||||||
*bytes = (int16_t)sizeof(double);
|
*bytes = (int16_t)sizeof(double);
|
||||||
*interBytes = (int16_t)sizeof(double);
|
*interBytes = (int16_t)sizeof(SPercentileInfo);
|
||||||
} else if (functionId == TSDB_FUNC_LEASTSQR) {
|
} else if (functionId == TSDB_FUNC_LEASTSQR) {
|
||||||
*type = TSDB_DATA_TYPE_BINARY;
|
*type = TSDB_DATA_TYPE_BINARY;
|
||||||
*bytes = TSDB_AVG_FUNCTION_INTER_BUFFER_SIZE; // string
|
*bytes = TSDB_AVG_FUNCTION_INTER_BUFFER_SIZE; // string
|
||||||
|
@ -708,12 +712,15 @@ static int32_t firstDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY en
|
||||||
return BLK_DATA_ALL_NEEDED;
|
return BLK_DATA_ALL_NEEDED;
|
||||||
}
|
}
|
||||||
|
|
||||||
SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
|
|
||||||
if (pInfo->hasResult != DATA_SET_FLAG) {
|
|
||||||
return BLK_DATA_ALL_NEEDED;
|
return BLK_DATA_ALL_NEEDED;
|
||||||
} else { // data in current block is not earlier than current result
|
// TODO pCtx->aOutputBuf is the previous windowRes output buffer, not current unloaded block. so the following filter
|
||||||
return (pInfo->ts <= start) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
|
// is invalid
|
||||||
}
|
// SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
|
||||||
|
// if (pInfo->hasResult != DATA_SET_FLAG) {
|
||||||
|
// return BLK_DATA_ALL_NEEDED;
|
||||||
|
// } else { // data in current block is not earlier than current result
|
||||||
|
// return (pInfo->ts <= start) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
|
||||||
|
// }
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
|
static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
|
||||||
|
@ -726,12 +733,16 @@ static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end
|
||||||
return BLK_DATA_ALL_NEEDED;
|
return BLK_DATA_ALL_NEEDED;
|
||||||
}
|
}
|
||||||
|
|
||||||
SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
|
|
||||||
if (pInfo->hasResult != DATA_SET_FLAG) {
|
|
||||||
return BLK_DATA_ALL_NEEDED;
|
return BLK_DATA_ALL_NEEDED;
|
||||||
} else {
|
// TODO pCtx->aOutputBuf is the previous windowRes output buffer, not current unloaded block. so the following filter
|
||||||
return (pInfo->ts > end) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
|
// is invalid
|
||||||
}
|
|
||||||
|
// SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
|
||||||
|
// if (pInfo->hasResult != DATA_SET_FLAG) {
|
||||||
|
// return BLK_DATA_ALL_NEEDED;
|
||||||
|
// } else {
|
||||||
|
// return (pInfo->ts > end) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
|
||||||
|
// }
|
||||||
}
|
}
|
||||||
|
|
||||||
//////////////////////////////////////////////////////////////////////////////////////////////
|
//////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
@ -2429,10 +2440,12 @@ static bool percentile_function_setup(SQLFunctionCtx *pCtx) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// in the first round, get the min-max value of all involved data
|
||||||
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
|
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||||
|
SPercentileInfo *pInfo = pResInfo->interResultBuf;
|
||||||
((SPercentileInfo *)(pResInfo->interResultBuf))->pMemBucket =
|
pInfo->minval = DBL_MAX;
|
||||||
tMemBucketCreate(pCtx->inputBytes, pCtx->inputType);
|
pInfo->maxval = -DBL_MAX;
|
||||||
|
pInfo->numOfElems = 0;
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -2443,6 +2456,64 @@ static void percentile_function(SQLFunctionCtx *pCtx) {
|
||||||
SResultInfo * pResInfo = GET_RES_INFO(pCtx);
|
SResultInfo * pResInfo = GET_RES_INFO(pCtx);
|
||||||
SPercentileInfo *pInfo = pResInfo->interResultBuf;
|
SPercentileInfo *pInfo = pResInfo->interResultBuf;
|
||||||
|
|
||||||
|
// the first stage, only acquire the min/max value
|
||||||
|
if (pInfo->stage == 0) {
|
||||||
|
if (pCtx->preAggVals.isSet) {
|
||||||
|
if (pInfo->minval > pCtx->preAggVals.statis.min) {
|
||||||
|
pInfo->minval = (double)pCtx->preAggVals.statis.min;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pInfo->maxval < pCtx->preAggVals.statis.max) {
|
||||||
|
pInfo->maxval = (double)pCtx->preAggVals.statis.max;
|
||||||
|
}
|
||||||
|
|
||||||
|
pInfo->numOfElems += (pCtx->size - pCtx->preAggVals.statis.numOfNull);
|
||||||
|
} else {
|
||||||
|
for (int32_t i = 0; i < pCtx->size; ++i) {
|
||||||
|
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
|
||||||
|
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO extract functions
|
||||||
|
double v = 0;
|
||||||
|
switch (pCtx->inputType) {
|
||||||
|
case TSDB_DATA_TYPE_TINYINT:
|
||||||
|
v = GET_INT8_VAL(data);
|
||||||
|
break;
|
||||||
|
case TSDB_DATA_TYPE_SMALLINT:
|
||||||
|
v = GET_INT16_VAL(data);
|
||||||
|
break;
|
||||||
|
case TSDB_DATA_TYPE_BIGINT:
|
||||||
|
v = (double)(GET_INT64_VAL(data));
|
||||||
|
break;
|
||||||
|
case TSDB_DATA_TYPE_FLOAT:
|
||||||
|
v = GET_FLOAT_VAL(data);
|
||||||
|
break;
|
||||||
|
case TSDB_DATA_TYPE_DOUBLE:
|
||||||
|
v = GET_DOUBLE_VAL(data);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
v = GET_INT32_VAL(data);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (v < pInfo->minval) {
|
||||||
|
pInfo->minval = v;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (v > pInfo->maxval) {
|
||||||
|
pInfo->maxval = v;
|
||||||
|
}
|
||||||
|
|
||||||
|
pInfo->numOfElems += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// the second stage, calculate the true percentile value
|
||||||
for (int32_t i = 0; i < pCtx->size; ++i) {
|
for (int32_t i = 0; i < pCtx->size; ++i) {
|
||||||
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
|
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
|
||||||
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
|
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
|
||||||
|
@ -2466,6 +2537,43 @@ static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) {
|
||||||
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
|
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||||
|
|
||||||
SPercentileInfo *pInfo = (SPercentileInfo *)pResInfo->interResultBuf;
|
SPercentileInfo *pInfo = (SPercentileInfo *)pResInfo->interResultBuf;
|
||||||
|
|
||||||
|
if (pInfo->stage == 0) {
|
||||||
|
// TODO extract functions
|
||||||
|
double v = 0;
|
||||||
|
switch (pCtx->inputType) {
|
||||||
|
case TSDB_DATA_TYPE_TINYINT:
|
||||||
|
v = GET_INT8_VAL(pData);
|
||||||
|
break;
|
||||||
|
case TSDB_DATA_TYPE_SMALLINT:
|
||||||
|
v = GET_INT16_VAL(pData);
|
||||||
|
break;
|
||||||
|
case TSDB_DATA_TYPE_BIGINT:
|
||||||
|
v = (double)(GET_INT64_VAL(pData));
|
||||||
|
break;
|
||||||
|
case TSDB_DATA_TYPE_FLOAT:
|
||||||
|
v = GET_FLOAT_VAL(pData);
|
||||||
|
break;
|
||||||
|
case TSDB_DATA_TYPE_DOUBLE:
|
||||||
|
v = GET_DOUBLE_VAL(pData);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
v = GET_INT32_VAL(pData);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (v < pInfo->minval) {
|
||||||
|
pInfo->minval = v;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (v > pInfo->maxval) {
|
||||||
|
pInfo->maxval = v;
|
||||||
|
}
|
||||||
|
|
||||||
|
pInfo->numOfElems += 1;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
tMemBucketPut(pInfo->pMemBucket, pData, 1);
|
tMemBucketPut(pInfo->pMemBucket, pData, 1);
|
||||||
|
|
||||||
SET_VAL(pCtx, 1, 1);
|
SET_VAL(pCtx, 1, 1);
|
||||||
|
@ -2488,6 +2596,23 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) {
|
||||||
doFinalizer(pCtx);
|
doFinalizer(pCtx);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void percentile_next_step(SQLFunctionCtx *pCtx) {
|
||||||
|
SResultInfo * pResInfo = GET_RES_INFO(pCtx);
|
||||||
|
SPercentileInfo *pInfo = pResInfo->interResultBuf;
|
||||||
|
|
||||||
|
if (pInfo->stage == 0) {
|
||||||
|
// all data are null, set it completed
|
||||||
|
if (pInfo->numOfElems == 0) {
|
||||||
|
pResInfo->complete = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
pInfo->stage += 1;
|
||||||
|
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
|
||||||
|
} else {
|
||||||
|
pResInfo->complete = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
//////////////////////////////////////////////////////////////////////////////////
|
//////////////////////////////////////////////////////////////////////////////////
|
||||||
static SAPercentileInfo *getAPerctInfo(SQLFunctionCtx *pCtx) {
|
static SAPercentileInfo *getAPerctInfo(SQLFunctionCtx *pCtx) {
|
||||||
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
|
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||||
|
@ -4513,7 +4638,7 @@ SQLAggFuncElem aAggs[] = {{
|
||||||
percentile_function_setup,
|
percentile_function_setup,
|
||||||
percentile_function,
|
percentile_function,
|
||||||
percentile_function_f,
|
percentile_function_f,
|
||||||
no_next_step,
|
percentile_next_step,
|
||||||
percentile_finalizer,
|
percentile_finalizer,
|
||||||
noop1,
|
noop1,
|
||||||
noop1,
|
noop1,
|
||||||
|
|
|
@ -16,7 +16,6 @@
|
||||||
#include "os.h"
|
#include "os.h"
|
||||||
#include "taosmsg.h"
|
#include "taosmsg.h"
|
||||||
|
|
||||||
#include "qExtbuffer.h"
|
|
||||||
#include "taosdef.h"
|
#include "taosdef.h"
|
||||||
#include "tcache.h"
|
#include "tcache.h"
|
||||||
#include "tname.h"
|
#include "tname.h"
|
||||||
|
@ -430,7 +429,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
|
||||||
pRes->qhandle = 0x1;
|
pRes->qhandle = 0x1;
|
||||||
pRes->numOfRows = 0;
|
pRes->numOfRows = 0;
|
||||||
} else if (pCmd->command == TSDB_SQL_RESET_CACHE) {
|
} else if (pCmd->command == TSDB_SQL_RESET_CACHE) {
|
||||||
taosCacheEmpty(tscCacheHandle);
|
taosCacheEmpty(tscMetaCache);
|
||||||
pRes->code = TSDB_CODE_SUCCESS;
|
pRes->code = TSDB_CODE_SUCCESS;
|
||||||
} else if (pCmd->command == TSDB_SQL_SERV_VERSION) {
|
} else if (pCmd->command == TSDB_SQL_SERV_VERSION) {
|
||||||
pRes->code = tscProcessServerVer(pSql);
|
pRes->code = tscProcessServerVer(pSql);
|
||||||
|
|
|
@@ -368,13 +368,12 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
     STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
 
     TSKEY stime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey : pQueryInfo->window.ekey;
-    int64_t revisedSTime =
-        taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
+    int64_t revisedSTime = taosTimeTruncate(stime, &pQueryInfo->interval, tinfo.precision);
 
     if (pQueryInfo->fillType != TSDB_FILL_NONE) {
       SFillColInfo* pFillCol = createFillColInfo(pQueryInfo);
       pReducer->pFillInfo = taosInitFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols,
-                                             4096, (int32_t)numOfCols, pQueryInfo->slidingTime, pQueryInfo->slidingTimeUnit,
+                                             4096, (int32_t)numOfCols, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit,
                                              tinfo.precision, pQueryInfo->fillType, pFillCol);
     }
   }
@@ -472,10 +471,8 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
     return;
   }
 
-  tscDebug("%p start to free local reducer", pSql);
   SSqlRes *pRes = &(pSql->res);
   if (pRes->pLocalReducer == NULL) {
-    tscDebug("%p local reducer has been freed, abort", pSql);
     return;
   }
 
@@ -553,7 +550,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
   }
 
   // primary timestamp column is involved in final result
-  if (pQueryInfo->intervalTime != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
+  if (pQueryInfo->interval.interval != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
     numOfGroupByCols++;
   }
 
@@ -570,7 +567,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
       orderIdx[i] = startCols++;
     }
 
-    if (pQueryInfo->intervalTime != 0) {
+    if (pQueryInfo->interval.interval != 0) {
       // the first column is the timestamp, handles queries like "interval(10m) group by tags"
       orderIdx[numOfGroupByCols - 1] = PRIMARYKEY_TIMESTAMP_COL_INDEX;
     }
@@ -614,12 +611,12 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage
      * super table interval query
      * if the order columns is the primary timestamp, all result data belongs to one group
      */
-    assert(pQueryInfo->intervalTime > 0);
+    assert(pQueryInfo->interval.interval > 0);
     if (numOfCols == 1) {
       return true;
     }
   } else { // simple group by query
-    assert(pQueryInfo->intervalTime == 0);
+    assert(pQueryInfo->interval.interval == 0);
   }
 
   // only one row exists
@@ -827,8 +824,7 @@ void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQ
 
   if (pFillInfo != NULL) {
     int64_t stime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.skey : pQueryInfo->window.ekey;
-    int64_t revisedSTime =
-        taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
+    int64_t revisedSTime = taosTimeTruncate(stime, &pQueryInfo->interval, tinfo.precision);
 
     taosResetFillInfo(pFillInfo, revisedSTime);
   }
@@ -841,7 +837,7 @@ void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQ
 }
 
 static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer, SQueryInfo* pQueryInfo) {
-  assert(pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
+  assert(pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
 
   tFilePage * pBeforeFillData = pLocalReducer->pResultBuf;
 
@@ -1222,7 +1218,7 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
 #endif
 
   // no interval query, no fill operation
-  if (pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
+  if (pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
     genFinalResWithoutFill(pRes, pLocalReducer, pQueryInfo);
   } else {
     SFillInfo* pFillInfo = pLocalReducer->pFillInfo;
@@ -1260,13 +1256,10 @@ static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer
   STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
   STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
 
-  int8_t precision = tinfo.precision;
-
   // for group result interpolation, do not return if not data is generated
   if (pQueryInfo->fillType != TSDB_FILL_NONE) {
     TSKEY skey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey:pQueryInfo->window.ekey;//MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
-    int64_t newTime =
-        taosGetIntervalStartTimestamp(skey, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, precision);
+    int64_t newTime = taosTimeTruncate(skey, &pQueryInfo->interval, tinfo.precision);
     taosResetFillInfo(pLocalReducer->pFillInfo, newTime);
   }
 }
@@ -142,7 +142,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
       return tscInvalidSQLErrMsg(error, "value expected in timestamp", sToken.z);
     }
 
-    if (getTimestampInUsFromStr(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
+    if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
       return TSDB_CODE_TSC_INVALID_SQL;
     }
 
@@ -526,7 +526,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMe
   int32_t index = 0;
   SStrToken sToken;
 
-  int16_t numOfRows = 0;
+  int32_t numOfRows = 0;
 
   SSchema *pSchema = tscGetTableSchema(pTableMeta);
   STableComInfo tinfo = tscGetTableInfo(pTableMeta);
@@ -259,11 +259,11 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
     pSdesc->num = htobe64(pStream->num);
 
     pSdesc->useconds = htobe64(pStream->useconds);
-    pSdesc->stime = htobe64(pStream->stime - pStream->intervalTime);
+    pSdesc->stime = htobe64(pStream->stime - pStream->interval.interval);
     pSdesc->ctime = htobe64(pStream->ctime);
 
-    pSdesc->slidingTime = htobe64(pStream->slidingTime);
-    pSdesc->interval = htobe64(pStream->intervalTime);
+    pSdesc->slidingTime = htobe64(pStream->interval.sliding);
+    pSdesc->interval = htobe64(pStream->interval.interval);
 
     pHeartbeat->numOfStreams++;
     pSdesc++;
@@ -81,6 +81,7 @@ static void setColumnOffsetValueInResultset(SQueryInfo* pQueryInfo);
 static int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* pCmd);
 
 static int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
+static int32_t parseOffsetClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
 static int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
 
 static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExprItem* pItem);
@@ -92,7 +93,6 @@ static int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQueryS
 static int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
 static int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo);
 static int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
-static int32_t arithmeticExprToString(tSQLExpr* pExpr, char** exprString);
 static int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
 static int32_t validateArithmeticSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type);
 static int32_t validateEp(char* ep);
@@ -103,6 +103,7 @@ static int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killTy
 
 static bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField);
 static bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo);
+static bool hasNormalColumnFilter(SQueryInfo* pQueryInfo);
 
 static int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t index, SQuerySQL* pQuerySql, SSqlObj* pSql);
 static int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql);
@@ -350,7 +351,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
     case TSDB_SQL_DESCRIBE_TABLE: {
       SStrToken* pToken = &pInfo->pDCLInfo->a[0];
       const char* msg1 = "invalid table name";
-      const char* msg2 = "table name is too long";
+      const char* msg2 = "table name too long";
 
       if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
         return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
@@ -360,8 +361,9 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
         return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
       }
 
+      // additional msg has been attached already
       if (tscSetTableFullName(pTableMetaInfo, pToken, pSql) != TSDB_CODE_SUCCESS) {
-        return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+        return TSDB_CODE_TSC_INVALID_SQL;
       }
 
       return tscGetTableMeta(pSql, pTableMetaInfo);
@@ -408,7 +410,6 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
       const char* msg3 = "name too long";
 
       pCmd->command = pInfo->type;
-      // tDCLSQL* pDCL = pInfo->pDCLInfo;
 
       SUserInfo* pUser = &pInfo->pDCLInfo->user;
       SStrToken* pName = &pUser->user;
@@ -594,24 +595,28 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
 
   // interval is not null
   SStrToken* t = &pQuerySql->interval;
-  if (parseDuration(t->z, t->n, &pQueryInfo->intervalTime, &pQueryInfo->intervalTimeUnit) != TSDB_CODE_SUCCESS) {
+  if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.interval, &pQueryInfo->interval.intervalUnit) != TSDB_CODE_SUCCESS) {
     return TSDB_CODE_TSC_INVALID_SQL;
   }
 
-  if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y') {
+  if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
     // if the unit of time window value is millisecond, change the value from microsecond
     if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
-      pQueryInfo->intervalTime = pQueryInfo->intervalTime / 1000;
+      pQueryInfo->interval.interval = pQueryInfo->interval.interval / 1000;
     }
 
     // interval cannot be less than 10 milliseconds
-    if (pQueryInfo->intervalTime < tsMinIntervalTime) {
+    if (pQueryInfo->interval.interval < tsMinIntervalTime) {
      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
     }
   }
 
   // for top/bottom + interval query, we do not add additional timestamp column in the front
   if (isTopBottomQuery(pQueryInfo)) {
+    if (parseOffsetClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
+      return TSDB_CODE_TSC_INVALID_SQL;
+    }
+
     if (parseSlidingClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
       return TSDB_CODE_TSC_INVALID_SQL;
     }
@@ -635,7 +640,7 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
    * check invalid SQL:
    * select tbname, tags_fields from super_table_name interval(1s)
    */
-  if (tscQueryTags(pQueryInfo) && pQueryInfo->intervalTime > 0) {
+  if (tscQueryTags(pQueryInfo) && pQueryInfo->interval.interval > 0) {
     return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
   }
 
@@ -661,6 +666,10 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
   SColumnIndex index = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
   tscAddSpecialColumnForSelect(pQueryInfo, 0, TSDB_FUNC_TS, &index, &s, TSDB_COL_NORMAL);
 
+  if (parseOffsetClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
+    return TSDB_CODE_TSC_INVALID_SQL;
+  }
+
   if (parseSlidingClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
     return TSDB_CODE_TSC_INVALID_SQL;
   }
@@ -668,6 +677,57 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
   return TSDB_CODE_SUCCESS;
 }
 
+int32_t parseOffsetClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
+  const char* msg1 = "interval offset cannot be negative";
+  const char* msg2 = "interval offset should be shorter than interval";
+  const char* msg3 = "cannot use 'year' as offset when interval is 'month'";
+
+  STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+  STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
+
+  SStrToken* t = &pQuerySql->offset;
+  if (t->n == 0) {
+    pQueryInfo->interval.offsetUnit = pQueryInfo->interval.intervalUnit;
+    pQueryInfo->interval.offset = 0;
+    return TSDB_CODE_SUCCESS;
+  }
+
+  if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.offset, &pQueryInfo->interval.offsetUnit) != TSDB_CODE_SUCCESS) {
+    return TSDB_CODE_TSC_INVALID_SQL;
+  }
+
+  if (pQueryInfo->interval.offset < 0) {
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+  }
+
+  if (pQueryInfo->interval.offsetUnit != 'n' && pQueryInfo->interval.offsetUnit != 'y') {
+    // if the unit of time window value is millisecond, change the value from microsecond
+    if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
+      pQueryInfo->interval.offset = pQueryInfo->interval.offset / 1000;
+    }
+    if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
+      if (pQueryInfo->interval.offset >= pQueryInfo->interval.interval) {
+        return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+      }
+    }
+  } else if (pQueryInfo->interval.offsetUnit == pQueryInfo->interval.intervalUnit) {
+    if (pQueryInfo->interval.offset >= pQueryInfo->interval.interval) {
+      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+    }
+  } else if (pQueryInfo->interval.intervalUnit == 'n' && pQueryInfo->interval.offsetUnit == 'y') {
+    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+  } else if (pQueryInfo->interval.intervalUnit == 'y' && pQueryInfo->interval.offsetUnit == 'n') {
+    if (pQueryInfo->interval.interval * 12 <= pQueryInfo->interval.offset) {
+      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+    }
+  } else {
+    // TODO: offset should be shorter than interval, but how to check
+    // conflicts like 30days offset and 1 month interval
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
 int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
   const char* msg0 = "sliding value too small";
   const char* msg1 = "sliding value no larger than the interval value";
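Note: the new parseOffsetClause above enforces three rules: the offset must not be negative, it must be shorter than the interval, and a 'year' offset cannot be combined with a 'month' interval. The snippet below is a simplified, standalone sketch of those checks under assumed names (IntervalSketch, offsetIsValid); it is illustrative only and not TDengine's own types.

```c
// Standalone sketch of the offset-vs-interval checks done by parseOffsetClause.
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  long interval, offset;         // durations, assumed already normalized to one time base
  char intervalUnit, offsetUnit; // 'n' = month, 'y' = year, others treated as absolute units
} IntervalSketch;

static bool offsetIsValid(const IntervalSketch *w) {
  if (w->offset < 0) return false;                                  // offset cannot be negative
  if (w->offsetUnit == w->intervalUnit) return w->offset < w->interval;
  if (w->intervalUnit == 'n' && w->offsetUnit == 'y') return false; // year offset on month interval
  if (w->intervalUnit == 'y' && w->offsetUnit == 'n') return w->offset < w->interval * 12;
  return true;  // mixed absolute/natural units: left unchecked, as in the TODO above
}

int main(void) {
  IntervalSketch w = {.interval = 1, .offset = 2, .intervalUnit = 'y', .offsetUnit = 'n'};
  printf("offset valid: %d\n", offsetIsValid(&w));  // 1: a 2-month offset fits a 1-year interval
  return 0;
}
```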
|
@ -681,29 +741,29 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
|
||||||
|
|
||||||
SStrToken* pSliding = &pQuerySql->sliding;
|
SStrToken* pSliding = &pQuerySql->sliding;
|
||||||
if (pSliding->n == 0) {
|
if (pSliding->n == 0) {
|
||||||
pQueryInfo->slidingTimeUnit = pQueryInfo->intervalTimeUnit;
|
pQueryInfo->interval.slidingUnit = pQueryInfo->interval.intervalUnit;
|
||||||
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
|
pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pQueryInfo->intervalTimeUnit == 'n' || pQueryInfo->intervalTimeUnit == 'y') {
|
if (pQueryInfo->interval.intervalUnit == 'n' || pQueryInfo->interval.intervalUnit == 'y') {
|
||||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||||
}
|
}
|
||||||
|
|
||||||
getTimestampInUsFromStr(pSliding->z, pSliding->n, &pQueryInfo->slidingTime);
|
parseAbsoluteDuration(pSliding->z, pSliding->n, &pQueryInfo->interval.sliding);
|
||||||
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
|
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
|
||||||
pQueryInfo->slidingTime /= 1000;
|
pQueryInfo->interval.sliding /= 1000;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pQueryInfo->slidingTime < tsMinSlidingTime) {
|
if (pQueryInfo->interval.sliding < tsMinSlidingTime) {
|
||||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
|
if (pQueryInfo->interval.sliding > pQueryInfo->interval.interval) {
|
||||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((pQueryInfo->intervalTime != 0) && (pQueryInfo->intervalTime/pQueryInfo->slidingTime > INTERVAL_SLIDING_FACTOR)) {
|
if ((pQueryInfo->interval.interval != 0) && (pQueryInfo->interval.interval/pQueryInfo->interval.sliding > INTERVAL_SLIDING_FACTOR)) {
|
||||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -712,8 +772,7 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
 
 int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql) {
   const char* msg1 = "name too long";
-  const char* msg2 = "invalid db name";
-  const char *msg = msg1;
+  const char* msg2 = "current database or database name invalid";
 
   SSqlCmd* pCmd = &pSql->cmd;
   int32_t code = TSDB_CODE_SUCCESS;
@@ -728,17 +787,24 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableNa
   if (hasSpecifyDB(pzTableName)) {
     // db has been specified in sql string so we ignore current db path
     code = setObjFullName(pTableMetaInfo->name, getAccountId(pSql), NULL, pzTableName, NULL);
+    if (code != 0) {
+      invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+    }
   } else { // get current DB name first, then set it into path
     SStrToken t = {0};
     getCurrentDBName(pSql, &t);
     if (t.n == 0) {
-      msg = msg2;
+      invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
     }
 
     code = setObjFullName(pTableMetaInfo->name, NULL, &t, pzTableName, NULL);
+    if (code != 0) {
+      invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
     }
+  }
 
   if (code != TSDB_CODE_SUCCESS) {
-    invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
-    free(oldName);
+    taosTFree(oldName);
     return code;
   }
 
|
@ -1181,13 +1247,14 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
|
||||||
} END_TRY
|
} END_TRY
|
||||||
|
|
||||||
len = tbufTell(&bw);
|
len = tbufTell(&bw);
|
||||||
char* c = tbufGetData(&bw, true);
|
char* c = tbufGetData(&bw, false);
|
||||||
|
|
||||||
// set the serialized binary string as the parameter of arithmetic expression
|
// set the serialized binary string as the parameter of arithmetic expression
|
||||||
addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, (int32_t)len, index.tableIndex);
|
addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, (int32_t)len, index.tableIndex);
|
||||||
|
|
||||||
insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName, pExpr);
|
insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName, pExpr);
|
||||||
|
|
||||||
|
tbufCloseWriter(&bw);
|
||||||
taosArrayDestroy(colList);
|
taosArrayDestroy(colList);
|
||||||
tExprTreeDestroy(&pNode, NULL);
|
tExprTreeDestroy(&pNode, NULL);
|
||||||
} else {
|
} else {
|
||||||
|
@ -1331,7 +1398,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* transfer sql functions that need secondary merge into another format
|
* transfer sql functions that need secondary merge into another format
|
||||||
* in dealing with metric queries such as: count/first/last
|
* in dealing with super table queries such as: count/first/last
|
||||||
*/
|
*/
|
||||||
if (isSTable) {
|
if (isSTable) {
|
||||||
tscTansformSQLFuncForSTableQuery(pQueryInfo);
|
tscTansformSQLFuncForSTableQuery(pQueryInfo);
|
||||||
|
@@ -1516,16 +1583,16 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
 
 static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSchema* pSchema, SConvertFunc cvtFunc, char* aliasName,
                                        int32_t resColIdx, SColumnIndex* pColIndex) {
+  const char* msg1 = "not support column types";
+
   int16_t type = 0;
   int16_t bytes = 0;
   char columnName[TSDB_COL_NAME_LEN] = {0};
-  const char* msg1 = "not support column types";
   int32_t functionID = cvtFunc.execFuncId;
 
   if (functionID == TSDB_FUNC_SPREAD) {
-    if (pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_BINARY ||
-        pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_NCHAR ||
-        pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_BOOL) {
+    int32_t t1 = pSchema[pColIndex->columnIndex].type;
+    if (t1 == TSDB_DATA_TYPE_BINARY || t1 == TSDB_DATA_TYPE_NCHAR || t1 == TSDB_DATA_TYPE_BOOL) {
       invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
       return -1;
     } else {
@@ -1543,7 +1610,6 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS
     getRevisedName(columnName, cvtFunc.originFuncId, sizeof(columnName) - 1, pSchema[pColIndex->columnIndex].name);
   }
 
-
   SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, bytes, false);
   tstrncpy(pExpr->aliasName, columnName, sizeof(pExpr->aliasName));
 
@@ -1800,10 +1866,13 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
       if (changeFunctionID(optr, &functionID) != TSDB_CODE_SUCCESS) {
         return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
       }
 
+      // NOTE: has time range condition or normal column filter condition, the last_row query will be transferred to last query
       SConvertFunc cvtFunc = {.originFuncId = functionID, .execFuncId = functionID};
-      if (functionID == TSDB_FUNC_LAST_ROW && TSWINDOW_IS_EQUAL(pQueryInfo->window,TSWINDOW_INITIALIZER)) {
+      if (functionID == TSDB_FUNC_LAST_ROW && ((!TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER)) || (hasNormalColumnFilter(pQueryInfo)))) {
        cvtFunc.execFuncId = TSDB_FUNC_LAST;
       }
 
      if (!requireAllFields) {
        if (pItem->pNode->pParam->nExpr < 1) {
          return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
@@ -2591,7 +2660,7 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) {
       continue;
     }
 
-    if (functionId == TSDB_FUNC_PRJ && pExpr1->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+    if (functionId == TSDB_FUNC_PRJ && (pExpr1->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX || TSDB_COL_IS_UD_COL(pExpr1->colInfo.flag))) {
       continue;
     }
 
@@ -2803,6 +2872,12 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
     case TK_LIKE:
       pColumnFilter->lowerRelOptr = TSDB_RELATION_LIKE;
       break;
+    case TK_ISNULL:
+      pColumnFilter->lowerRelOptr = TSDB_RELATION_ISNULL;
+      break;
+    case TK_NOTNULL:
+      pColumnFilter->lowerRelOptr = TSDB_RELATION_NOTNULL;
+      break;
     default:
       return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
   }
@@ -2848,19 +2923,19 @@ static int32_t tSQLExprNodeToString(tSQLExpr* pExpr, char** str) {
   return TSDB_CODE_SUCCESS;
 }
 
+// pExpr->nSQLOptr == 0 while handling "is null" query
 static bool isExprLeafNode(tSQLExpr* pExpr) {
   return (pExpr->pRight == NULL && pExpr->pLeft == NULL) &&
-         (pExpr->nSQLOptr == TK_ID || (pExpr->nSQLOptr >= TK_BOOL && pExpr->nSQLOptr <= TK_NCHAR) ||
-          pExpr->nSQLOptr == TK_SET);
+         (pExpr->nSQLOptr == 0 || pExpr->nSQLOptr == TK_ID || (pExpr->nSQLOptr >= TK_BOOL && pExpr->nSQLOptr <= TK_NCHAR) || pExpr->nSQLOptr == TK_SET);
 }
 
-static bool isExprDirectParentOfLeaftNode(tSQLExpr* pExpr) {
+static bool isExprDirectParentOfLeafNode(tSQLExpr* pExpr) {
   return (pExpr->pLeft != NULL && pExpr->pRight != NULL) &&
          (isExprLeafNode(pExpr->pLeft) && isExprLeafNode(pExpr->pRight));
 }
 
 static int32_t tSQLExprLeafToString(tSQLExpr* pExpr, bool addParentheses, char** output) {
-  if (!isExprDirectParentOfLeaftNode(pExpr)) {
+  if (!isExprDirectParentOfLeafNode(pExpr)) {
     return TSDB_CODE_TSC_INVALID_SQL;
   }
 
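Note: the TK_ISNULL/TK_NOTNULL cases and the relaxed isExprLeafNode check above are what allow "col IS NULL" / "col IS NOT NULL" filters to reach the column-filter path instead of being rejected. The snippet below is a small standalone sketch of that token-to-relation mapping; the enum values are illustrative stand-ins, not the real tokenizer or relation constants.

```c
// Minimal sketch of mapping filter tokens to relation operators,
// extended with IS NULL / IS NOT NULL as in the hunk above.
#include <stdio.h>

enum { TOK_LIKE, TOK_ISNULL, TOK_NOTNULL, TOK_OTHER };
enum { REL_LIKE, REL_ISNULL, REL_NOTNULL, REL_INVALID };

static int tokenToRelation(int tok) {
  switch (tok) {
    case TOK_LIKE:    return REL_LIKE;
    case TOK_ISNULL:  return REL_ISNULL;   // "col IS NULL" filter
    case TOK_NOTNULL: return REL_NOTNULL;  // "col IS NOT NULL" filter
    default:          return REL_INVALID;  // rejected with an "invalid expression" error
  }
}

int main(void) {
  printf("%d %d\n", tokenToRelation(TOK_ISNULL), tokenToRelation(TOK_OTHER));
  return 0;
}
```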
@@ -3071,7 +3146,7 @@ static int32_t getTagCondString(tSQLExpr* pExpr, char** str) {
     return TSDB_CODE_SUCCESS;
   }
 
-  if (!isExprDirectParentOfLeaftNode(pExpr)) {
+  if (!isExprDirectParentOfLeafNode(pExpr)) {
     *(*str) = '(';
     *str += 1;
 
@@ -3127,7 +3202,7 @@ static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQ
     return TSDB_CODE_SUCCESS;
   }
 
-  if (!isExprDirectParentOfLeaftNode(pExpr)) { // internal node
+  if (!isExprDirectParentOfLeafNode(pExpr)) { // internal node
     int32_t ret = getColumnQueryCondInfo(pCmd, pQueryInfo, pExpr->pLeft, pExpr->nSQLOptr);
     if (ret != TSDB_CODE_SUCCESS) {
       return ret;
@@ -3153,7 +3228,7 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr*
     return TSDB_CODE_SUCCESS;
   }
 
-  if (!isExprDirectParentOfLeaftNode(pExpr)) {
+  if (!isExprDirectParentOfLeafNode(pExpr)) {
     return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
   }
 
@@ -3225,19 +3300,6 @@ int32_t doArithmeticExprToString(tSQLExpr* pExpr, char** exprString) {
   return TSDB_CODE_SUCCESS;
 }
 
-static UNUSED_FUNC int32_t arithmeticExprToString(tSQLExpr* pExpr, char** str) {
-  char* start = *str;
-
-  int32_t code = doArithmeticExprToString(pExpr, str);
-  if (code == TSDB_CODE_SUCCESS) { // remove out the parenthesis
-    int32_t len = (int32_t)strlen(start);
-    memmove(start, start + 1, len - 2);
-    start[len - 2] = 0;
-  }
-
-  return code;
-}
-
 static int32_t validateSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type) {
   if (pExpr->nSQLOptr == TK_ID) {
     if (*type == NON_ARITHMEIC_EXPR) {
@@ -3485,7 +3547,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQL
     return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
   }
 
-  assert(isExprDirectParentOfLeaftNode(*pExpr));
+  assert(isExprDirectParentOfLeafNode(*pExpr));
 
   STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
   STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
@@ -3531,7 +3593,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQL
     }
   }
 
-  // in case of in operator, keep it in a seperate attribute
+  // in case of in operator, keep it in a separate attribute
   if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
     if (!validTableNameOptr(*pExpr)) {
       return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
@@ -3552,7 +3614,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQL
       *type = TSQL_EXPR_TBNAME;
       *pExpr = NULL;
     } else {
-      if (pRight->nSQLOptr == TK_ID) { // join on tag columns for stable query
+      if (pRight != NULL && pRight->nSQLOptr == TK_ID) { // join on tag columns for stable query
         if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) {
           return TSDB_CODE_TSC_INVALID_SQL;
         }
@@ -3605,7 +3667,7 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr** pExpr
   int32_t leftType = -1;
   int32_t rightType = -1;
 
-  if (!isExprDirectParentOfLeaftNode(*pExpr)) {
+  if (!isExprDirectParentOfLeafNode(*pExpr)) {
     int32_t ret = getQueryCondExpr(pCmd, pQueryInfo, &(*pExpr)->pLeft, pCondExpr, &leftType, (*pExpr)->nSQLOptr);
     if (ret != TSDB_CODE_SUCCESS) {
       return ret;
@@ -3636,7 +3698,7 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr** pExpr
 }
 
 static void doCompactQueryExpr(tSQLExpr** pExpr) {
-  if (*pExpr == NULL || isExprDirectParentOfLeaftNode(*pExpr)) {
+  if (*pExpr == NULL || isExprDirectParentOfLeafNode(*pExpr)) {
     return;
   }
 
@@ -3667,7 +3729,7 @@ static void doCompactQueryExpr(tSQLExpr** pExpr) {
 }
 
 static void doExtractExprForSTable(SSqlCmd* pCmd, tSQLExpr** pExpr, SQueryInfo* pQueryInfo, tSQLExpr** pOut, int32_t tableIndex) {
-  if (isExprDirectParentOfLeaftNode(*pExpr)) {
+  if (isExprDirectParentOfLeafNode(*pExpr)) {
     tSQLExpr* pLeft = (*pExpr)->pLeft;
 
     SColumnIndex index = COLUMN_INDEX_INITIALIZER;
@@ -3826,7 +3888,7 @@ static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLE
     return TSDB_CODE_SUCCESS;
   }
 
-  if (!isExprDirectParentOfLeaftNode(pExpr)) {
+  if (!isExprDirectParentOfLeafNode(pExpr)) {
     if (pExpr->nSQLOptr == TK_OR) {
       return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
     }
@@ -3996,7 +4058,6 @@ int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SSqlObj* pSql
   const char* msg2 = "invalid filter expression";
 
   int32_t ret = TSDB_CODE_SUCCESS;
-  pQueryInfo->window = TSWINDOW_INITIALIZER;
 
   // tags query condition may be larger than 512bytes, therefore, we need to prepare enough large space
   SStringBuilder sb; memset(&sb, 0, sizeof(sb));
@@ -4714,9 +4775,9 @@ int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
   const char* msg0 = "sample interval can not be less than 10ms.";
   const char* msg1 = "functions not allowed in select clause";
 
-  if (pQueryInfo->intervalTime != 0 && pQueryInfo->intervalTime < 10 &&
-     pQueryInfo->intervalTimeUnit != 'n' &&
-     pQueryInfo->intervalTimeUnit != 'y') {
+  if (pQueryInfo->interval.interval != 0 && pQueryInfo->interval.interval < 10 &&
+     pQueryInfo->interval.intervalUnit != 'n' &&
+     pQueryInfo->interval.intervalUnit != 'y') {
     return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
   }
 
@@ -5244,7 +5305,8 @@ static void doUpdateSqlFunctionForTagPrj(SQueryInfo* pQueryInfo) {
 
   for (int32_t i = 0; i < size; ++i) {
     SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
-    if (pExpr->functionId != TSDB_FUNC_TAG_DUMMY && pExpr->functionId != TSDB_FUNC_TS_DUMMY) {
+    if ((pExpr->functionId != TSDB_FUNC_TAG_DUMMY && pExpr->functionId != TSDB_FUNC_TS_DUMMY) &&
+        !(pExpr->functionId == TSDB_FUNC_PRJ && TSDB_COL_IS_UD_COL(pExpr->colInfo.flag))) {
       SSchema* pColSchema = &pSchema[pExpr->colInfo.colIndex];
       getResultDataInfo(pColSchema->type, pColSchema->bytes, pExpr->functionId, (int32_t)pExpr->param[0].i64Key, &pExpr->resType,
                         &pExpr->resBytes, &pExpr->interBytes, tagLength, true);
@@ -5252,12 +5314,13 @@ static void doUpdateSqlFunctionForTagPrj(SQueryInfo* pQueryInfo) {
     }
   }
 }
 
-static void doUpdateSqlFunctionForColPrj(SQueryInfo* pQueryInfo) {
+static int32_t doUpdateSqlFunctionForColPrj(SQueryInfo* pQueryInfo) {
   size_t size = taosArrayGetSize(pQueryInfo->exprList);
 
   for (int32_t i = 0; i < size; ++i) {
     SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
-    if (pExpr->functionId == TSDB_FUNC_PRJ) {
+
+    if (pExpr->functionId == TSDB_FUNC_PRJ && (!TSDB_COL_IS_UD_COL(pExpr->colInfo.flag))) {
       bool qualifiedCol = false;
       for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) {
         SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, j);
@@ -5270,9 +5333,14 @@ static void doUpdateSqlFunctionForColPrj(SQueryInfo* pQueryInfo) {
       }
 
-      assert(qualifiedCol);
+      // it is not a tag column/tbname column/user-defined column, return error
+      if (!qualifiedCol) {
+        return TSDB_CODE_TSC_INVALID_SQL;
+      }
     }
   }
+
+  return TSDB_CODE_SUCCESS;
 }
 
 static bool tagColumnInGroupby(SSqlGroupbyExpr* pGroupbyExpr, int16_t columnId) {
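Note: the change to doUpdateSqlFunctionForColPrj above replaces assert(qualifiedCol) with a returned error code, so a query projecting an unqualified column is reported to the caller instead of aborting the client. The snippet below is a hedged, standalone sketch of that assert-to-error-code pattern; the names and the error constant are illustrative stand-ins, not TDengine's own.

```c
// Sketch of the assert-to-error-code change made to doUpdateSqlFunctionForColPrj:
// a user-triggerable condition should surface as an error, not abort the process.
#include <stdio.h>

#define SKETCH_CODE_SUCCESS     0
#define SKETCH_CODE_INVALID_SQL (-1)   // stand-in for TSDB_CODE_TSC_INVALID_SQL

static int checkProjectionColumn(int qualifiedCol) {
  if (!qualifiedCol) {
    // previously: assert(qualifiedCol); which killed the client on bad input
    return SKETCH_CODE_INVALID_SQL;
  }
  return SKETCH_CODE_SUCCESS;
}

int main(void) {
  printf("%d\n", checkProjectionColumn(0));  // -1: reported to the caller, not a crash
  return 0;
}
```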
@@ -5346,16 +5414,23 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)
   const char* msg1 = "only one selectivity function allowed in presence of tags function";
   const char* msg3 = "aggregation function should not be mixed up with projection";
 
-  bool tagColExists = false;
+  bool tagTsColExists = false;
   int16_t numOfSelectivity = 0;
   int16_t numOfAggregation = 0;
 
+  // todo is 0??
+  STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+  bool isSTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
+  if (!isSTable) {
+    return TSDB_CODE_SUCCESS;
+  }
+
   size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList);
   for (int32_t i = 0; i < numOfExprs; ++i) {
     SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, i);
     if (pExpr->functionId == TSDB_FUNC_TAGPRJ ||
         (pExpr->functionId == TSDB_FUNC_PRJ && pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX)) {
-      tagColExists = true;
+      tagTsColExists = true;  // selectivity + ts/tag column
       break;
     }
   }
@@ -5376,7 +5451,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)
     }
   }
 
-  if (tagColExists) { // check if the selectivity function exists
+  if (tagTsColExists) { // check if the selectivity function exists
     // When the tag projection function on tag column that is not in the group by clause, aggregation function and
     // selectivity function exist in select clause is not allowed.
     if (numOfAggregation > 0) {
@@ -5388,26 +5463,36 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)
      */
     if (numOfSelectivity == 1) {
       doUpdateSqlFunctionForTagPrj(pQueryInfo);
-      doUpdateSqlFunctionForColPrj(pQueryInfo);
+      int32_t code = doUpdateSqlFunctionForColPrj(pQueryInfo);
+      if (code != TSDB_CODE_SUCCESS) {
+        return code;
+      }
+
     } else if (numOfSelectivity > 1) {
       /*
        * If more than one selectivity functions exist, all the selectivity functions must be last_row.
        * Otherwise, return with error code.
       */
       for (int32_t i = 0; i < numOfExprs; ++i) {
-        int16_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId;
-        if (functionId == TSDB_FUNC_TAGPRJ) {
+        SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
+        int16_t functionId = pExpr->functionId;
+        if (functionId == TSDB_FUNC_TAGPRJ || (aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) == 0) {
          continue;
         }
 
-        if (((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) && (functionId != TSDB_FUNC_LAST_ROW)) {
+        if ((functionId == TSDB_FUNC_LAST_ROW) ||
+            (functionId == TSDB_FUNC_LAST_DST && (pExpr->colInfo.flag & TSDB_COL_NULL) != 0)) {
+          // do nothing
+        } else {
           return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
         }
       }
 
       doUpdateSqlFunctionForTagPrj(pQueryInfo);
-      doUpdateSqlFunctionForColPrj(pQueryInfo);
+      int32_t code = doUpdateSqlFunctionForColPrj(pQueryInfo);
+      if (code != TSDB_CODE_SUCCESS) {
+        return code;
+      }
     }
   } else {
     if ((pQueryInfo->type & TSDB_QUERY_TYPE_PROJECTION_QUERY) != 0) {
@@ -5418,7 +5503,10 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)
       if (numOfAggregation > 0 || numOfSelectivity > 0) {
         // clear the projection type flag
         pQueryInfo->type &= (~TSDB_QUERY_TYPE_PROJECTION_QUERY);
-        doUpdateSqlFunctionForColPrj(pQueryInfo);
+        int32_t code = doUpdateSqlFunctionForColPrj(pQueryInfo);
+        if (code != TSDB_CODE_SUCCESS) {
+          return code;
+        }
       }
     }
   }
@@ -5474,7 +5562,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
       insertResultField(pQueryInfo, (int32_t)size, &ids, bytes, (int8_t)type, name, pExpr);
     } else {
       // if this query is "group by" normal column, interval is not allowed
-      if (pQueryInfo->intervalTime > 0) {
+      if (pQueryInfo->interval.interval > 0) {
         return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
       }
 
@@ -5507,7 +5595,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
 
   // only retrieve tags, group by is not supportted
   if (tscQueryTags(pQueryInfo)) {
-    if (pQueryInfo->groupbyExpr.numOfGroupCols > 0 || pQueryInfo->intervalTime > 0) {
+    if (pQueryInfo->groupbyExpr.numOfGroupCols > 0 || pQueryInfo->interval.interval > 0) {
       return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
     } else {
       return TSDB_CODE_SUCCESS;
@@ -5959,7 +6047,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
   if (parseIntervalClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
     return TSDB_CODE_TSC_INVALID_SQL;
   } else {
-    if ((pQueryInfo->intervalTime > 0) &&
+    if ((pQueryInfo->interval.interval > 0) &&
        (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
      return TSDB_CODE_TSC_INVALID_SQL;
    }
@@ -5989,7 +6077,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
    * not here.
    */
   if (pQuerySql->fillType != NULL) {
-    if (pQueryInfo->intervalTime == 0) {
+    if (pQueryInfo->interval.interval == 0) {
       return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
     }
 
@@ -6128,30 +6216,6 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
     return TSDB_CODE_TSC_INVALID_SQL;
   }
 
-  int32_t joinQuery = (pQuerySql->from != NULL && pQuerySql->from->nExpr > 2);
-
-  if (pQuerySql->pWhere) {
-    pQueryInfo->window = TSWINDOW_INITIALIZER;
-  }
-  if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable, joinQuery) != TSDB_CODE_SUCCESS) {
-    return TSDB_CODE_TSC_INVALID_SQL;
-  }
-
-  // set interval value
-  if (parseIntervalClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
-    return TSDB_CODE_TSC_INVALID_SQL;
-  } else {
-    if ((pQueryInfo->intervalTime > 0) &&
-        (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
-      return TSDB_CODE_TSC_INVALID_SQL;
-    }
-  }
-
-  // set order by info
-  if (parseOrderbyClause(pCmd, pQueryInfo, pQuerySql, tscGetTableSchema(pTableMetaInfo->pTableMeta)) != TSDB_CODE_SUCCESS) {
-    return TSDB_CODE_TSC_INVALID_SQL;
-  }
-
   // set where info
   STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
 
@@ -6166,12 +6230,32 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
       pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000;
     }
   } else { // set the time rang
-    pQueryInfo->window = TSWINDOW_INITIALIZER;
     if (pQuerySql->from->nExpr > 2) { // it is a join query, no wher clause is not allowed.
       return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "condition missing for join query ");
     }
   }
 
+  int32_t joinQuery = (pQuerySql->from != NULL && pQuerySql->from->nExpr > 2);
+
+  if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable, joinQuery) != TSDB_CODE_SUCCESS) {
+    return TSDB_CODE_TSC_INVALID_SQL;
+  }
+
+  // set interval value
+  if (parseIntervalClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
+    return TSDB_CODE_TSC_INVALID_SQL;
+  } else {
+    if ((pQueryInfo->interval.interval > 0) &&
+        (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
+      return TSDB_CODE_TSC_INVALID_SQL;
+    }
+  }
+
+  // set order by info
+  if (parseOrderbyClause(pCmd, pQueryInfo, pQuerySql, tscGetTableSchema(pTableMetaInfo->pTableMeta)) != TSDB_CODE_SUCCESS) {
+    return TSDB_CODE_TSC_INVALID_SQL;
+  }
+
   // user does not specified the query time window, twa is not allowed in such case.
   if ((pQueryInfo->window.skey == INT64_MIN || pQueryInfo->window.ekey == INT64_MAX ||
        (pQueryInfo->window.ekey == INT64_MAX / 1000 && tinfo.precision == TSDB_TIME_PRECISION_MILLI)) && tscIsTWAQuery(pQueryInfo)) {
@@ -6212,14 +6296,19 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
    * the columns may be increased due to group by operation
    */
   if (pQuerySql->fillType != NULL) {
-    if (pQueryInfo->intervalTime == 0 && (!tscIsPointInterpQuery(pQueryInfo))) {
+    if (pQueryInfo->interval.interval == 0 && (!tscIsPointInterpQuery(pQueryInfo))) {
      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
     }
 
-    if (pQueryInfo->intervalTime > 0 && pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y') {
+    if (pQueryInfo->interval.interval > 0 && pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
+      bool initialWindows = TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER);
+      if (initialWindows) {
+        return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
+      }
+
       int64_t timeRange = ABS(pQueryInfo->window.skey - pQueryInfo->window.ekey);
       // number of result is not greater than 10,000,000
-      if ((timeRange == 0) || (timeRange / pQueryInfo->intervalTime) > MAX_INTERVAL_TIME_WINDOW) {
+      if ((timeRange == 0) || (timeRange / pQueryInfo->interval.interval) > MAX_INTERVAL_TIME_WINDOW) {
         return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
       }
     }
@ -6251,6 +6340,11 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pS
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (pSqlExpr->pLeft == NULL && pSqlExpr->pRight == NULL && pSqlExpr->nSQLOptr == 0) {
|
||||||
|
*pExpr = calloc(1, sizeof(tExprNode));
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
if (pSqlExpr->pLeft == NULL) {
|
if (pSqlExpr->pLeft == NULL) {
|
||||||
if (pSqlExpr->nSQLOptr >= TK_BOOL && pSqlExpr->nSQLOptr <= TK_STRING) {
|
if (pSqlExpr->nSQLOptr >= TK_BOOL && pSqlExpr->nSQLOptr <= TK_STRING) {
|
||||||
*pExpr = calloc(1, sizeof(tExprNode));
|
*pExpr = calloc(1, sizeof(tExprNode));
|
||||||
|
@ -6323,6 +6417,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pS
|
||||||
|
|
||||||
assert((*pExpr)->_node.optr != 0);
|
assert((*pExpr)->_node.optr != 0);
|
||||||
|
|
||||||
|
// check for dividing by 0
|
||||||
if ((*pExpr)->_node.optr == TSDB_BINARY_OP_DIVIDE) {
|
if ((*pExpr)->_node.optr == TSDB_BINARY_OP_DIVIDE) {
|
||||||
if (pRight->nodeType == TSQL_NODE_VALUE) {
|
if (pRight->nodeType == TSQL_NODE_VALUE) {
|
||||||
if (pRight->pVal->nType == TSDB_DATA_TYPE_INT && pRight->pVal->i64Key == 0) {
|
if (pRight->pVal->nType == TSDB_DATA_TYPE_INT && pRight->pVal->i64Key == 0) {
|
||||||
|
@ -6335,7 +6430,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pS
|
||||||
|
|
||||||
// NOTE: binary|nchar data allows the >|< type filter
|
// NOTE: binary|nchar data allows the >|< type filter
|
||||||
if ((*pExpr)->_node.optr != TSDB_RELATION_EQUAL && (*pExpr)->_node.optr != TSDB_RELATION_NOT_EQUAL) {
|
if ((*pExpr)->_node.optr != TSDB_RELATION_EQUAL && (*pExpr)->_node.optr != TSDB_RELATION_NOT_EQUAL) {
|
||||||
if (pRight->nodeType == TSQL_NODE_VALUE) {
|
if (pRight != NULL && pRight->nodeType == TSQL_NODE_VALUE) {
|
||||||
if (pRight->pVal->nType == TSDB_DATA_TYPE_BOOL) {
|
if (pRight->pVal->nType == TSDB_DATA_TYPE_BOOL) {
|
||||||
return TSDB_CODE_TSC_INVALID_SQL;
|
return TSDB_CODE_TSC_INVALID_SQL;
|
||||||
}
|
}
|
||||||
|
@ -6345,3 +6440,15 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pS
|
||||||
|
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) {
|
||||||
|
size_t numOfCols = taosArrayGetSize(pQueryInfo->colList);
|
||||||
|
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||||
|
SColumn* pCol = taosArrayGetP(pQueryInfo->colList, i);
|
||||||
|
if (pCol->numOfFilters > 0) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
|
@@ -27,10 +27,7 @@
 #include "tutil.h"
 #include "tlockfree.h"

-#define TSC_MGMT_VNODE 999
-
 SRpcCorEpSet tscMgmtEpSet;
-SRpcEpSet tscDnodeEpSet;

 int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0};

@@ -236,20 +233,27 @@ int tscSendMsgToServer(SSqlObj *pSql) {
 }

 void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
-  SSqlObj *pSql = (SSqlObj *)rpcMsg->ahandle;
-  if (pSql == NULL || pSql->signature != pSql) {
-    tscError("%p sql is already released", pSql);
+  uint64_t handle = (uint64_t) rpcMsg->ahandle;
+
+  void** p = taosCacheAcquireByKey(tscObjCache, &handle, sizeof(uint64_t));
+  if (p == NULL) {
+    rpcFreeCont(rpcMsg->pCont);
     return;
   }

+  SSqlObj* pSql = *p;
+  assert(pSql != NULL);
+
   STscObj *pObj = pSql->pTscObj;
   SSqlRes *pRes = &pSql->res;
   SSqlCmd *pCmd = &pSql->cmd;

+  assert(*pSql->self == pSql);
+
   if (pObj->signature != pObj) {
     tscDebug("%p DB connection is closed, cmd:%d pObj:%p signature:%p", pSql, pCmd->command, pObj, pObj->signature);

-    tscFreeSqlObj(pSql);
+    taosCacheRelease(tscObjCache, (void**) &p, true);
     rpcFreeCont(rpcMsg->pCont);
     return;
   }
@@ -261,14 +265,17 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
     tscDebug("%p sqlObj needs to be released or DB connection is closed, cmd:%d type:%d, pObj:%p signature:%p",
         pSql, pCmd->command, pQueryInfo->type, pObj, pObj->signature);

-    tscFreeSqlObj(pSql);
+    void** p1 = p;
+    taosCacheRelease(tscObjCache, (void**) &p1, false);
+
+    taosCacheRelease(tscObjCache, (void**) &p, true);
     rpcFreeCont(rpcMsg->pCont);
     return;
   }

   if (pEpSet) {
     if (!tscEpSetIsEqual(&pSql->epSet, pEpSet)) {
-      if(pCmd->command < TSDB_SQL_MGMT) {
+      if (pCmd->command < TSDB_SQL_MGMT) {
         tscUpdateVgroupInfo(pSql, pEpSet);
       } else {
         tscUpdateMgmtEpSet(pEpSet);
@@ -294,7 +301,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
     if (pSql->retry > pSql->maxRetry) {
       tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry);
     } else {
-      // wait for a little bit moment and then retry
+      // wait for a little bit moment and then retry, todo do not sleep in rpc callback thread
       if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
         int32_t duration = getWaitingTimeInterval(pSql->retry);
         taosMsleep(duration);
@@ -304,6 +311,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {

   // if there is an error occurring, proceed to the following error handling procedure.
   if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+    taosCacheRelease(tscObjCache, (void**) &p, false);
     rpcFreeCont(rpcMsg->pCont);
     return;
   }
@@ -365,16 +373,18 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
     rpcMsg->code = (*tscProcessMsgRsp[pCmd->command])(pSql);
   }

+  bool shouldFree = tscShouldBeFreed(pSql);
   if (rpcMsg->code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
     rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS) ? (int32_t)pRes->numOfRows : pRes->code;

-    bool shouldFree = tscShouldBeFreed(pSql);
     (*pSql->fp)(pSql->param, pSql, rpcMsg->code);
-
-    if (shouldFree) {
-      tscDebug("%p sqlObj is automatically freed", pSql);
-      tscFreeSqlObj(pSql);
-    }
   }

+  void** p1 = p;
+  taosCacheRelease(tscObjCache, (void**) &p1, false);
+
+  if (shouldFree) { // in case of table-meta/vgrouplist query, automatically free it
+    taosCacheRelease(tscObjCache, (void **)&p, true);
+    tscDebug("%p sqlObj is automatically freed", pSql);
+  }
+
   rpcFreeCont(rpcMsg->pCont);
@@ -637,8 +647,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
     return TSDB_CODE_TSC_INVALID_SQL;
   }

-  if (pQueryInfo->intervalTime < 0) {
-    tscError("%p illegal value of aggregation time interval in query msg: %ld", pSql, pQueryInfo->intervalTime);
+  if (pQueryInfo->interval.interval < 0) {
+    tscError("%p illegal value of aggregation time interval in query msg: %ld", pSql, pQueryInfo->interval.interval);
     return TSDB_CODE_TSC_INVALID_SQL;
   }

@@ -665,10 +675,12 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   pQueryMsg->limit = htobe64(pQueryInfo->limit.limit);
   pQueryMsg->offset = htobe64(pQueryInfo->limit.offset);
   pQueryMsg->numOfCols = htons((int16_t)taosArrayGetSize(pQueryInfo->colList));
-  pQueryMsg->intervalTime = htobe64(pQueryInfo->intervalTime);
-  pQueryMsg->slidingTime = htobe64(pQueryInfo->slidingTime);
-  pQueryMsg->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
-  pQueryMsg->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
+  pQueryMsg->interval.interval = htobe64(pQueryInfo->interval.interval);
+  pQueryMsg->interval.sliding = htobe64(pQueryInfo->interval.sliding);
+  pQueryMsg->interval.offset = htobe64(pQueryInfo->interval.offset);
+  pQueryMsg->interval.intervalUnit = pQueryInfo->interval.intervalUnit;
+  pQueryMsg->interval.slidingUnit = pQueryInfo->interval.slidingUnit;
+  pQueryMsg->interval.offsetUnit = pQueryInfo->interval.offsetUnit;
   pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols);
   pQueryMsg->numOfTags = htonl(numOfTags);
   pQueryMsg->tagNameRelType = htons(pQueryInfo->tagCond.relType);
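
The hunk above folds the client's separate intervalTime, slidingTime, intervalTimeUnit and slidingTimeUnit fields into a single nested interval value that carries interval, sliding, offset and their unit characters together. Below is a minimal, self-contained sketch of that consolidation; SIntervalSketch and the sample values are illustrative stand-ins for the real SInterval definition in the TDengine headers, not a copy of it.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the consolidated interval descriptor; the real
 * struct lives in the TDengine headers and may differ in layout and naming. */
typedef struct SIntervalSketch {
  char    intervalUnit;
  char    slidingUnit;
  char    offsetUnit;
  int64_t interval;
  int64_t sliding;
  int64_t offset;
} SIntervalSketch;

int main(void) {
  /* Old style: four independent fields that had to be kept in sync by hand. */
  int64_t intervalTime = 10000, slidingTime = 5000;
  char    intervalTimeUnit = 'a', slidingTimeUnit = 'a';

  /* New style: one descriptor that can be copied or serialized as a unit. */
  SIntervalSketch interval = {
    .intervalUnit = intervalTimeUnit,
    .slidingUnit  = slidingTimeUnit,
    .offsetUnit   = intervalTimeUnit,
    .interval     = intervalTime,
    .sliding      = slidingTime,
    .offset       = 0,
  };

  printf("interval=%lld sliding=%lld unit=%c\n",
         (long long)interval.interval, (long long)interval.sliding, interval.intervalUnit);
  return 0;
}

Grouping the fields this way is what lets the message-building code above copy the whole window specification with a handful of assignments instead of tracking each scalar separately.
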
@@ -1667,8 +1679,10 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
   pMetaMsg->contLen = htons(pMetaMsg->contLen);
   pMetaMsg->numOfColumns = htons(pMetaMsg->numOfColumns);

-  if (pMetaMsg->sid < 0 || pMetaMsg->vgroup.numOfEps < 0) {
-    tscError("invalid meter vgId:%d, sid%d", pMetaMsg->vgroup.numOfEps, pMetaMsg->sid);
+  if ((pMetaMsg->tableType != TSDB_SUPER_TABLE) &&
+      (pMetaMsg->sid <= 0 || pMetaMsg->vgroup.vgId < 2 || pMetaMsg->vgroup.numOfEps <= 0)) {
+    tscError("invalid value in table numOfEps:%d, vgId:%d tid:%d, name:%s", pMetaMsg->vgroup.numOfEps, pMetaMsg->vgroup.vgId,
+             pMetaMsg->sid, pMetaMsg->tableId);
     return TSDB_CODE_TSC_INVALID_VALUE;
   }

@@ -1708,7 +1722,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
   STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
   assert(pTableMetaInfo->pTableMeta == NULL);

-  pTableMetaInfo->pTableMeta = (STableMeta *) taosCachePut(tscCacheHandle, pTableMetaInfo->name,
+  pTableMetaInfo->pTableMeta = (STableMeta *) taosCachePut(tscMetaCache, pTableMetaInfo->name,
       strlen(pTableMetaInfo->name), pTableMeta, size, tsTableMetaKeepTimer * 1000);

   // todo handle out of memory case
@@ -1820,7 +1834,7 @@ int tscProcessMultiMeterMetaRsp(SSqlObj *pSql) {
     //    int32_t size = (int32_t)(rsp - ((char *)pMeta)); // Consistent with STableMeta in cache
     //
     //    pMeta->index = 0;
-    //    (void)taosCachePut(tscCacheHandle, pMeta->tableId, (char *)pMeta, size, tsTableMetaKeepTimer);
+    //    (void)taosCachePut(tscMetaCache, pMeta->tableId, (char *)pMeta, size, tsTableMetaKeepTimer);
     //  }
   }

@@ -1907,12 +1921,14 @@ int tscProcessShowRsp(SSqlObj *pSql) {
   key[0] = pCmd->msgType + 'a';
   strcpy(key + 1, "showlist");

-  taosCacheRelease(tscCacheHandle, (void *)&(pTableMetaInfo->pTableMeta), false);
+  if (pTableMetaInfo->pTableMeta != NULL) {
+    taosCacheRelease(tscMetaCache, (void *)&(pTableMetaInfo->pTableMeta), false);
+  }

   size_t size = 0;
   STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg, &size);

-  pTableMetaInfo->pTableMeta = taosCachePut(tscCacheHandle, key, strlen(key), (char *)pTableMeta, size,
+  pTableMetaInfo->pTableMeta = taosCachePut(tscMetaCache, key, strlen(key), (char *)pTableMeta, size,
                                             tsTableMetaKeepTimer * 1000);
   SSchema *pTableSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);

@@ -1971,6 +1987,8 @@ static void createHBObj(STscObj* pObj) {
   pSql->pTscObj = pObj;
   pSql->signature = pSql;
   pObj->pHb = pSql;
+  T_REF_INC(pObj);
+
   tscAddSubqueryInfo(&pObj->pHb->cmd);

   tscDebug("%p HB is allocated, pObj:%p", pObj->pHb, pObj);
@@ -2000,7 +2018,7 @@ int tscProcessConnectRsp(SSqlObj *pSql) {

   createHBObj(pObj);

-  taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, pObj, tscTmr, &pObj->pTimer);
+//  taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, pObj, tscTmr, &pObj->pTimer);

   return 0;
 }
@@ -2015,14 +2033,14 @@ int tscProcessUseDbRsp(SSqlObj *pSql) {

 int tscProcessDropDbRsp(SSqlObj *pSql) {
   pSql->pTscObj->db[0] = 0;
-  taosCacheEmpty(tscCacheHandle);
+  taosCacheEmpty(tscMetaCache);
   return 0;
 }

 int tscProcessDropTableRsp(SSqlObj *pSql) {
   STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);

-  STableMeta *pTableMeta = taosCacheAcquireByKey(tscCacheHandle, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
+  STableMeta *pTableMeta = taosCacheAcquireByKey(tscMetaCache, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
   if (pTableMeta == NULL) { /* not in cache, abort */
     return 0;
   }
@@ -2035,10 +2053,10 @@ int tscProcessDropTableRsp(SSqlObj *pSql) {
    * instead.
    */
   tscDebug("%p force release table meta after drop table:%s", pSql, pTableMetaInfo->name);
-  taosCacheRelease(tscCacheHandle, (void **)&pTableMeta, true);
+  taosCacheRelease(tscMetaCache, (void **)&pTableMeta, true);

   if (pTableMetaInfo->pTableMeta) {
-    taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), true);
+    taosCacheRelease(tscMetaCache, (void **)&(pTableMetaInfo->pTableMeta), true);
   }

   return 0;
@@ -2047,21 +2065,21 @@ int tscProcessDropTableRsp(SSqlObj *pSql) {
 int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
   STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);

-  STableMeta *pTableMeta = taosCacheAcquireByKey(tscCacheHandle, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
+  STableMeta *pTableMeta = taosCacheAcquireByKey(tscMetaCache, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
   if (pTableMeta == NULL) { /* not in cache, abort */
     return 0;
   }

   tscDebug("%p force release metermeta in cache after alter-table: %s", pSql, pTableMetaInfo->name);
-  taosCacheRelease(tscCacheHandle, (void **)&pTableMeta, true);
+  taosCacheRelease(tscMetaCache, (void **)&pTableMeta, true);

   if (pTableMetaInfo->pTableMeta) {
     bool isSuperTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
-    taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), true);
+    taosCacheRelease(tscMetaCache, (void **)&(pTableMetaInfo->pTableMeta), true);

     if (isSuperTable) { // if it is a super table, reset whole query cache
       tscDebug("%p reset query cache since table:%s is stable", pSql, pTableMetaInfo->name);
-      taosCacheEmpty(tscCacheHandle);
+      taosCacheEmpty(tscMetaCache);
     }
   }

@@ -2146,6 +2164,12 @@ static int32_t getTableMetaFromMgmt(SSqlObj *pSql, STableMetaInfo *pTableMetaInf
   pNew->signature = pNew;
   pNew->cmd.command = TSDB_SQL_META;

+  T_REF_INC(pNew->pTscObj);
+
+  // TODO add test case on x86 platform
+  uint64_t adr = (uint64_t) pNew;
+  pNew->self = taosCachePut(tscObjCache, &adr, sizeof(uint64_t), &pNew, sizeof(uint64_t), 2*60*1000);
+
   tscAddSubqueryInfo(&pNew->cmd);

   SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetailSafely(&pNew->cmd, 0);
@@ -2182,10 +2206,10 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {

   // If this STableMetaInfo owns a table meta, release it first
   if (pTableMetaInfo->pTableMeta != NULL) {
-    taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), false);
+    taosCacheRelease(tscMetaCache, (void **)&(pTableMetaInfo->pTableMeta), false);
   }

-  pTableMetaInfo->pTableMeta = (STableMeta *)taosCacheAcquireByKey(tscCacheHandle, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
+  pTableMetaInfo->pTableMeta = (STableMeta *)taosCacheAcquireByKey(tscMetaCache, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
   if (pTableMetaInfo->pTableMeta != NULL) {
     STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
     tscDebug("%p retrieve table Meta from cache, the number of columns:%d, numOfTags:%d, %p", pSql, tinfo.numOfColumns,
@@ -2220,7 +2244,7 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
             tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid, pTableMeta);
   }

-  taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), true);
+  taosCacheRelease(tscMetaCache, (void **)&(pTableMetaInfo->pTableMeta), true);
   return getTableMetaFromMgmt(pSql, pTableMetaInfo);
 }

@@ -2251,6 +2275,7 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {

   pNew->cmd.command = TSDB_SQL_STABLEVGROUP;

+  // TODO TEST IT
   SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetailSafely(&pNew->cmd, 0);
   if (pNewQueryInfo == NULL) {
     tscFreeSqlObj(pNew);
@@ -2260,7 +2285,7 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
   SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex);
   for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
     STableMetaInfo *pMInfo = tscGetMetaInfo(pQueryInfo, i);
-    STableMeta *pTableMeta = taosCacheAcquireByData(tscCacheHandle, pMInfo->pTableMeta);
+    STableMeta *pTableMeta = taosCacheAcquireByData(tscMetaCache, pMInfo->pTableMeta);
     tscAddTableMetaInfo(pNewQueryInfo, pMInfo->name, pTableMeta, NULL, pMInfo->tagColList);
   }

@@ -2270,6 +2295,10 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
   }

   pNewQueryInfo->numOfTables = pQueryInfo->numOfTables;
+  T_REF_INC(pNew->pTscObj);
+
+  uint64_t p = (uint64_t) pNew;
+  pNew->self = taosCachePut(tscObjCache, &p, sizeof(uint64_t), &pNew, sizeof(uint64_t), 2 * 600 * 1000);
   tscDebug("%p new sqlObj:%p to get vgroupInfo, numOfTables:%d", pSql, pNew, pNewQueryInfo->numOfTables);

   pNew->fp = tscTableMetaCallBack;
@@ -26,6 +26,7 @@
 #include "tsclient.h"
 #include "ttokendef.h"
 #include "tutil.h"
+#include "tscProfile.h"

 static bool validImpl(const char* str, size_t maxsize) {
   if (str == NULL) {
@@ -100,6 +101,8 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
   }

   pObj->signature = pObj;
+  pObj->pDnodeConn = pDnodeConn;
+  T_REF_INIT_VAL(pObj, 1);

   tstrncpy(pObj->user, user, sizeof(pObj->user));
   secretEncryptLen = MIN(secretEncryptLen, sizeof(pObj->pass));
@@ -135,17 +138,12 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
   pSql->pTscObj = pObj;
   pSql->signature = pSql;
   pSql->maxRetry = TSDB_MAX_REPLICA;
-  tsem_init(&pSql->rspSem, 0, 0);
-
-  pObj->pDnodeConn = pDnodeConn;
-
   pSql->fp = fp;
   pSql->param = param;
-  if (taos != NULL) {
-    *taos = pObj;
-  }
   pSql->cmd.command = TSDB_SQL_CONNECT;

+  tsem_init(&pSql->rspSem, 0, 0);
+
   if (TSDB_CODE_SUCCESS != tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) {
     terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
     rpcClose(pDnodeConn);
@@ -154,7 +152,16 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
     return NULL;
   }

+  if (taos != NULL) {
+    *taos = pObj;
+  }
+
+  T_REF_INC(pSql->pTscObj);
+
+  uint64_t key = (uint64_t) pSql;
+  pSql->self = taosCachePut(tscObjCache, &key, sizeof(uint64_t), &pSql, sizeof(uint64_t), 2*3600*1000);
+
   tsInsertHeadSize = sizeof(SMsgDesc) + sizeof(SSubmitMsg);

   return pSql;
 }
@@ -257,6 +264,33 @@ void taos_close(TAOS *taos) {
     tscFreeSqlObj(pObj->pHb);
   }

+  // free all sqlObjs created by using this connect before free the STscObj
+//  while(1) {
+//    pthread_mutex_lock(&pObj->mutex);
+//    void* p = pObj->sqlList;
+//    pthread_mutex_unlock(&pObj->mutex);
+//
+//    if (p == NULL) {
+//      break;
+//    }
+//
+//    tscDebug("%p waiting for sqlObj to be freed, %p", pObj, p);
+//    taosMsleep(100);
+//
+//    // todo fix me!! two threads call taos_free_result will cause problem.
+//    tscDebug("%p free :%p", pObj, p);
+//    taos_free_result(p);
+//  }
+
+  int32_t ref = T_REF_DEC(pObj);
+  assert(ref >= 0);
+
+  if (ref > 0) {
+    tscDebug("%p %d remain sqlObjs, not free tscObj and dnodeConn:%p", pObj, ref, pObj->pDnodeConn);
+    return;
+  }
+
+  tscDebug("%p all sqlObj are freed, free tscObj and close dnodeConn:%p", pObj, pObj->pDnodeConn);
   tscCloseTscObj(pObj);
 }

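
The taos_close() change above moves the connection teardown to a reference-counted scheme: the connection starts with one reference, every sqlObj created on it takes another one via T_REF_INC(), and the object is only really closed once T_REF_DEC() drops the count to zero. The sketch below is a minimal, self-contained illustration of that pattern only; conn_t, conn_open(), conn_retain() and conn_close() are hypothetical stand-ins, not the TDengine API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a connection object guarded by a reference count. */
typedef struct conn_t {
  atomic_int ref;
} conn_t;

static conn_t *conn_open(void) {
  conn_t *c = malloc(sizeof(*c));
  atomic_init(&c->ref, 1);            /* the connection itself holds one reference */
  return c;
}

/* Taken once per in-flight request that still needs the connection. */
static void conn_retain(conn_t *c) { atomic_fetch_add(&c->ref, 1); }

/* Drops one reference; the object is freed only by the last holder. */
static void conn_close(conn_t *c) {
  int remain = atomic_fetch_sub(&c->ref, 1) - 1;
  if (remain > 0) {
    printf("%d holder(s) remain, defer the real close\n", remain);
    return;
  }
  printf("last reference dropped, free the connection\n");
  free(c);
}

int main(void) {
  conn_t *c = conn_open();
  conn_retain(c);   /* an outstanding request */
  conn_close(c);    /* user closes: deferred because the request still holds it */
  conn_close(c);    /* request completes and releases: actually freed */
  return 0;
}

This is why the hunk can simply log "remain sqlObjs" and return instead of waiting in a loop for every outstanding result to be freed.
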
@@ -533,57 +567,51 @@ int taos_select_db(TAOS *taos, const char *db) {
 }

 // send free message to vnode to free qhandle and corresponding resources in vnode
-static bool tscKillQueryInVnode(SSqlObj* pSql) {
+static UNUSED_FUNC bool tscKillQueryInDnode(SSqlObj* pSql) {
   SSqlCmd* pCmd = &pSql->cmd;
   SSqlRes* pRes = &pSql->res;

-  SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
-  STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
-
-  if (pRes->code == TSDB_CODE_SUCCESS && pRes->completed == false && !tscIsTwoStageSTableQuery(pQueryInfo, 0) &&
-      (pCmd->command == TSDB_SQL_SELECT ||
-       pCmd->command == TSDB_SQL_SHOW ||
-       pCmd->command == TSDB_SQL_RETRIEVE ||
-       pCmd->command == TSDB_SQL_FETCH) &&
-      (pSql->pStream == NULL && pTableMetaInfo->pTableMeta != NULL)) {
-
-    pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
-    tscDebug("%p send msg to dnode to free qhandle ASAP, command:%s, ", pSql, sqlCmd[pCmd->command]);
-    tscProcessSql(pSql);
+  if (pRes == NULL || pRes->qhandle == 0) {
     return true;
   }

+  SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+  if ((pQueryInfo == NULL) || tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
+    return true;
+  }
+
+  STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+  tscRemoveFromSqlList(pSql);
+
+  int32_t cmd = pCmd->command;
+  if (pRes->code == TSDB_CODE_SUCCESS && pRes->completed == false && pSql->pStream == NULL && (pTableMetaInfo->pTableMeta != NULL) &&
+      (cmd == TSDB_SQL_SELECT ||
+       cmd == TSDB_SQL_SHOW ||
+       cmd == TSDB_SQL_RETRIEVE ||
+       cmd == TSDB_SQL_FETCH)) {
+    pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE;
+    pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
+    tscDebug("%p send msg to dnode to free qhandle ASAP before free sqlObj, command:%s", pSql, sqlCmd[pCmd->command]);
+
+    tscProcessSql(pSql);
     return false;
+  }
+
+  return true;
 }

 void taos_free_result(TAOS_RES *res) {
-  SSqlObj *pSql = (SSqlObj *)res;
+  SSqlObj* pSql = (SSqlObj*) res;

   if (pSql == NULL || pSql->signature != pSql) {
-    tscDebug("%p sqlObj has been freed", pSql);
+    tscError("%p already released sqlObj", res);
     return;
   }

-  // The semaphore can not be changed while freeing async sub query objects.
-  SSqlRes *pRes = &pSql->res;
-  if (pRes == NULL || pRes->qhandle == 0) {
-    tscFreeSqlObj(pSql);
-    tscDebug("%p SqlObj is freed by app, qhandle is null", pSql);
-    return;
-  }
-
-  // set freeFlag to 1 in retrieve message if there are un-retrieved results data in node
-  SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
-  if (pQueryInfo == NULL) {
-    tscFreeSqlObj(pSql);
-    tscDebug("%p SqlObj is freed by app", pSql);
-    return;
-  }
-
-  pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE;
-  if (!tscKillQueryInVnode(pSql)) {
-    tscFreeSqlObj(pSql);
-    tscDebug("%p sqlObj is freed by app", pSql);
+  bool freeNow = tscKillQueryInDnode(pSql);
+  if (freeNow) {
+    tscDebug("%p free sqlObj in cache", pSql);
+    SSqlObj** p = pSql->self;
+    taosCacheRelease(tscObjCache, (void**) &p, true);
   }
 }

@@ -51,7 +51,7 @@ static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, in
   int64_t retryDelta = (int64_t)(tsStreamCompRetryDelay * retryRangeFactor);
   retryDelta = ((rand() % retryDelta) + tsStreamCompRetryDelay) * 1000L;

-  if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
+  if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
     // change to ms
     if (prec == TSDB_TIME_PRECISION_MICRO) {
       slidingTime = slidingTime / 1000;
@@ -87,7 +87,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {

   // failed to get meter/metric meta, retry in 10sec.
   if (code != TSDB_CODE_SUCCESS) {
-    int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
+    int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
     tscDebug("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime);
     tscSetRetryTimer(pStream, pSql, retryDelayTime);

@@ -132,15 +132,16 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
     }
     if (etime > pStream->etime) {
       etime = pStream->etime;
-    } else if (pStream->intervalTimeUnit != 'y' && pStream->intervalTimeUnit != 'n') {
-      etime = pStream->stime + (etime - pStream->stime) / pStream->intervalTime * pStream->intervalTime;
+    } else if (pStream->interval.intervalUnit != 'y' && pStream->interval.intervalUnit != 'n') {
+      etime = pStream->stime + (etime - pStream->stime) / pStream->interval.interval * pStream->interval.interval;
     } else {
-      etime = taosGetIntervalStartTimestamp(etime, pStream->slidingTime, pStream->intervalTime, pStream->slidingTimeUnit, pStream->precision);
+      etime = taosTimeTruncate(etime, &pStream->interval, pStream->precision);
+      //etime = taosGetIntervalStartTimestamp(etime, pStream->interval.sliding, pStream->interval.sliding, pStream->interval.slidingUnit, pStream->precision);
     }
     pQueryInfo->window.ekey = etime;
     if (pQueryInfo->window.skey >= pQueryInfo->window.ekey) {
-      int64_t timer = pStream->slidingTime;
-      if (pStream->intervalTimeUnit == 'y' || pStream->intervalTimeUnit == 'n') {
+      int64_t timer = pStream->interval.sliding;
+      if (pStream->interval.intervalUnit == 'y' || pStream->interval.intervalUnit == 'n') {
         timer = 86400 * 1000l;
       } else if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
         timer /= 1000l;
@@ -162,12 +163,12 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
 static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOfRows) {
   SSqlStream *pStream = (SSqlStream *)param;
   if (tres == NULL || numOfRows < 0) {
-    int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
+    int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
     tscError("%p stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows,
              retryDelay);

     STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0);
-    taosCacheRelease(tscCacheHandle, (void**)&(pTableMetaInfo->pTableMeta), true);
+    taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), true);
     taosTFree(pTableMetaInfo->vgroupList);

     tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
@@ -223,7 +224,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
   SSqlObj * pSql = (SSqlObj *)res;

   if (pSql == NULL || numOfRows < 0) {
-    int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
+    int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
     tscError("%p stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime);

     tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
@@ -246,11 +247,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
   }

   if (!pStream->isProject) {
-    if (pStream->intervalTimeUnit == 'y' || pStream->intervalTimeUnit == 'n') {
-      pStream->stime = taosAddNatualInterval(pStream->stime, pStream->slidingTime, pStream->slidingTimeUnit, pStream->precision);
-    } else {
-      pStream->stime += pStream->slidingTime;
-    }
+    pStream->stime = taosTimeAdd(pStream->stime, pStream->interval.sliding, pStream->interval.slidingUnit, pStream->precision);
   }
   // actually only one row is returned. this following is not necessary
   taos_fetch_rows_a(res, tscProcessStreamRetrieveResult, pStream);
@@ -275,7 +272,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf

     // release the metric/meter meta information reference, so data in cache can be updated

-    taosCacheRelease(tscCacheHandle, (void**)&(pTableMetaInfo->pTableMeta), false);
+    taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), false);
     tscFreeSqlResult(pSql);
     taosTFree(pSql->pSubs);
     pSql->numOfSubs = 0;
@@ -310,7 +307,7 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
              now + timer, timer, delay, pStream->stime, etime);
   } else {
     tscDebug("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql, pStream,
-             pStream->stime, timer, delay, pStream->stime - pStream->intervalTime, pStream->stime - 1);
+             pStream->stime, timer, delay, pStream->stime - pStream->interval.interval, pStream->stime - 1);
   }

   pSql->cmd.command = TSDB_SQL_SELECT;
@@ -324,12 +321,12 @@ static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
       (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;

   int64_t delayDelta = maxDelay;
-  if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
-    delayDelta = (int64_t)(pStream->slidingTime * tsStreamComputDelayRatio);
+  if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
+    delayDelta = (int64_t)(pStream->interval.sliding * tsStreamComputDelayRatio);
     if (delayDelta > maxDelay) {
       delayDelta = maxDelay;
     }
-    int64_t remainTimeWindow = pStream->slidingTime - delayDelta;
+    int64_t remainTimeWindow = pStream->interval.sliding - delayDelta;
     if (maxDelay > remainTimeWindow) {
       maxDelay = (int64_t)(remainTimeWindow / 1.5f);
     }
@@ -337,8 +334,8 @@ static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {

   int64_t currentDelay = (rand() % maxDelay); // a random number
   currentDelay += delayDelta;
-  if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
-    assert(currentDelay < pStream->slidingTime);
+  if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
+    assert(currentDelay < pStream->interval.sliding);
   }

   return currentDelay;
@@ -353,7 +350,7 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
      * for project query, no mater fetch data successfully or not, next launch will issue
      * more than the sliding time window
      */
-    timer = pStream->slidingTime;
+    timer = pStream->interval.sliding;
     if (pStream->stime > pStream->etime) {
       tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
                pStream->stime, pStream->etime);
@@ -366,7 +363,8 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
       return;
     }
   } else {
-    int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
+    int64_t stime = taosTimeTruncate(pStream->stime - 1, &pStream->interval, pStream->precision);
+    //int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
     if (stime >= pStream->etime) {
       tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
                pStream->stime, pStream->etime);
@@ -400,43 +398,43 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {

   SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);

-  if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y' && pQueryInfo->intervalTime < minIntervalTime) {
+  if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.interval < minIntervalTime) {
     tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64, pSql, pStream,
-            pQueryInfo->intervalTime, minIntervalTime);
-    pQueryInfo->intervalTime = minIntervalTime;
+            pQueryInfo->interval.interval, minIntervalTime);
+    pQueryInfo->interval.interval = minIntervalTime;
   }

-  pStream->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
-  pStream->intervalTime = pQueryInfo->intervalTime; // it shall be derived from sql string
+  pStream->interval.intervalUnit = pQueryInfo->interval.intervalUnit;
+  pStream->interval.interval = pQueryInfo->interval.interval; // it shall be derived from sql string

-  if (pQueryInfo->slidingTime <= 0) {
-    pQueryInfo->slidingTime = pQueryInfo->intervalTime;
-    pQueryInfo->slidingTimeUnit = pQueryInfo->intervalTimeUnit;
+  if (pQueryInfo->interval.sliding <= 0) {
+    pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
+    pQueryInfo->interval.slidingUnit = pQueryInfo->interval.intervalUnit;
   }

   int64_t minSlidingTime =
       (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;

-  if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y' && pQueryInfo->slidingTime < minSlidingTime) {
+  if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.sliding < minSlidingTime) {
     tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
-            pQueryInfo->slidingTime, minSlidingTime);
+            pQueryInfo->interval.sliding, minSlidingTime);

-    pQueryInfo->slidingTime = minSlidingTime;
+    pQueryInfo->interval.sliding = minSlidingTime;
   }

-  if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
+  if (pQueryInfo->interval.sliding > pQueryInfo->interval.interval) {
     tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64, pSql, pStream,
-            pQueryInfo->slidingTime, pQueryInfo->intervalTime);
+            pQueryInfo->interval.sliding, pQueryInfo->interval.interval);

-    pQueryInfo->slidingTime = pQueryInfo->intervalTime;
+    pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
   }

-  pStream->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
-  pStream->slidingTime = pQueryInfo->slidingTime;
+  pStream->interval.slidingUnit = pQueryInfo->interval.slidingUnit;
+  pStream->interval.sliding = pQueryInfo->interval.sliding;

   if (pStream->isProject) {
-    pQueryInfo->intervalTime = 0; // clear the interval value to avoid the force time window split by query processor
-    pQueryInfo->slidingTime = 0;
+    pQueryInfo->interval.interval = 0; // clear the interval value to avoid the force time window split by query processor
+    pQueryInfo->interval.sliding = 0;
   }
 }

@@ -445,8 +443,8 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in

   if (pStream->isProject) {
     // no data in table, flush all data till now to destination meter, 10sec delay
-    pStream->intervalTime = tsProjectExecInterval;
-    pStream->slidingTime = tsProjectExecInterval;
+    pStream->interval.interval = tsProjectExecInterval;
+    pStream->interval.sliding = tsProjectExecInterval;

     if (stime != 0) { // first projection start from the latest event timestamp
       assert(stime >= pQueryInfo->window.skey);
@@ -459,12 +457,15 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
     stime = pQueryInfo->window.skey;
     if (stime == INT64_MIN) {
       stime = (int64_t)taosGetTimestamp(pStream->precision);
-      stime = taosGetIntervalStartTimestamp(stime, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
-      stime = taosGetIntervalStartTimestamp(stime - 1, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
+      stime = taosTimeTruncate(stime, &pStream->interval, pStream->precision);
+      stime = taosTimeTruncate(stime - 1, &pStream->interval, pStream->precision);
+      //stime = taosGetIntervalStartTimestamp(stime, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
+      //stime = taosGetIntervalStartTimestamp(stime - 1, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
       tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64, pSql, pStream, stime);
     }
   } else {
-    int64_t newStime = taosGetIntervalStartTimestamp(stime, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
+    //int64_t newStime = taosGetIntervalStartTimestamp(stime, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
+    int64_t newStime = taosTimeTruncate(stime, &pStream->interval, pStream->precision);
     if (newStime != stime) {
       tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql, pStream, stime, newStime);
       stime = newStime;
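
Above, the stream scheduler now calls taosTimeTruncate() to align a timestamp to the start of the interval window that contains it. For fixed-width units (anything other than the natural-month 'n' and year 'y' cases, which need calendar-aware handling) that alignment is plain modular arithmetic; the sketch below is a self-contained illustration of the idea under that assumption, not the library routine itself.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Align ts (ms) to the start of the interval window that contains it.
 * This mirrors the common fixed-unit case only; month/year intervals need
 * calendar-aware handling, which is why the library call is used above. */
static int64_t truncate_to_window(int64_t ts, int64_t interval_ms) {
  int64_t start = ts - (ts % interval_ms);
  if (ts < 0 && start > ts) {   /* keep the window start at or before ts */
    start -= interval_ms;
  }
  return start;
}

int main(void) {
  int64_t interval = 10 * 1000;   /* a 10s tumbling window */
  int64_t ts = 1456789012345;
  printf("ts=%" PRId64 " window start=%" PRId64 "\n", ts, truncate_to_window(ts, interval));
  return 0;
}
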
|
@ -534,7 +535,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
|
||||||
taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
|
taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
|
||||||
|
|
||||||
tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
|
tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
|
||||||
pStream, pTableMetaInfo->name, pStream->intervalTime, pStream->slidingTime, starttime, pSql->sqlstr);
|
pStream, pTableMetaInfo->name, pStream->interval.interval, pStream->interval.sliding, starttime, pSql->sqlstr);
|
||||||
}
|
}
|
||||||
|
|
||||||
TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
|
TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
|
||||||
|
|
|
@@ -92,7 +92,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
STSElem elem2 = tsBufGetElem(pSupporter2->pTSBuf);

#ifdef _DEBUG_VIEW
- tscInfo("%" PRId64 ", tags:%d \t %" PRId64 ", tags:%d", elem1.ts, elem1.tag, elem2.ts, elem2.tag);
+ tscInfo("%" PRId64 ", tags:%"PRId64" \t %" PRId64 ", tags:%"PRId64, elem1.ts, elem1.tag.i64Key, elem2.ts, elem2.tag.i64Key);
#endif

int32_t res = tVariantCompare(&elem1.tag, &elem2.tag);
@@ -113,7 +113,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
* in case of stable query, limit/offset is not applied here. the limit/offset is applied to the
* final results which is acquired after the secondry merge of in the client.
*/
- if (pLimit->offset == 0 || pQueryInfo->intervalTime > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) {
+ if (pLimit->offset == 0 || pQueryInfo->interval.interval > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) {
if (win->skey > elem1.ts) {
win->skey = elem1.ts;
}
@@ -178,10 +178,7 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, in
pSupporter->subqueryIndex = index;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);

- pSupporter->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
+ memcpy(&pSupporter->interval, &pQueryInfo->interval, sizeof(pSupporter->interval));
- pSupporter->slidingTime = pQueryInfo->slidingTimeUnit;
- pSupporter->intervalTime = pQueryInfo->intervalTime;
- pSupporter->slidingTime = pQueryInfo->slidingTime;
pSupporter->limit = pQueryInfo->limit;

STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, index);
@@ -311,11 +308,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
// set the second stage sub query for join process
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE);

- pQueryInfo->intervalTimeUnit = pSupporter->intervalTimeUnit;
+ memcpy(&pQueryInfo->interval, &pSupporter->interval, sizeof(pQueryInfo->interval));
- pQueryInfo->slidingTimeUnit = pSupporter->slidingTimeUnit;
- pQueryInfo->intervalTime = pSupporter->intervalTime;
- pQueryInfo->slidingTime = pSupporter->slidingTime;
- pQueryInfo->groupbyExpr = pSupporter->groupbyExpr;

tscTagCondCopy(&pQueryInfo->tagCond, &pSupporter->tagCond);

@@ -1214,14 +1207,13 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
}

pNew->cmd.numOfCols = 0;
- pNewQueryInfo->intervalTime = 0;
+ pNewQueryInfo->interval.interval = 0;
pSupporter->limit = pNewQueryInfo->limit;

pNewQueryInfo->limit.limit = -1;
pNewQueryInfo->limit.offset = 0;

// backup the data and clear it in the sqlcmd object
- pSupporter->groupbyExpr = pNewQueryInfo->groupbyExpr;
memset(&pNewQueryInfo->groupbyExpr, 0, sizeof(SSqlGroupbyExpr));

tscInitQueryInfo(pNewQueryInfo);
@@ -1523,9 +1515,9 @@ static void tscFreeSubSqlObj(SRetrieveSupport *trsupport, SSqlObj *pSql) {
SSqlObj *pParentSql = trsupport->pParentSql;

assert(pSql == pParentSql->pSubs[index]);
- pParentSql->pSubs[index] = NULL;
+ // pParentSql->pSubs[index] = NULL;
+ //
- taos_free_result(pSql);
+ // taos_free_result(pSql);
taosTFree(trsupport->localBuffer);
taosTFree(trsupport);
}
@@ -1739,10 +1731,6 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR

assert(tres != NULL);
SSqlObj *pSql = (SSqlObj *)tres;
- // if (pSql == NULL) { // sql object has been released in error process, return immediately
- // tscDebug("%p subquery has been released, idx:%d, abort", pParentSql, idx);
- // return;
- // }

SSubqueryState* pState = trsupport->pState;
assert(pState->numOfRemain <= pState->numOfTotal && pState->numOfRemain >= 0 && pParentSql->numOfSubs == pState->numOfTotal);
@@ -1918,9 +1906,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
pParentObj->res.code = pSql->res.code;
}

- taos_free_result(tres);
taosTFree(pSupporter);

if (atomic_sub_fetch_32(&pState->numOfRemain, 1) > 0) {
return;
}
@@ -1964,28 +1950,27 @@ int32_t tscHandleInsertRetry(SSqlObj* pSql) {
}

int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
- SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
+ SSqlRes *pRes = &pSql->res;

- size_t size = taosArrayGetSize(pCmd->pDataBlocks);
+ pSql->numOfSubs = (uint16_t)taosArrayGetSize(pCmd->pDataBlocks);
- assert(size > 0);
+ assert(pSql->numOfSubs > 0);

+ pRes->code = TSDB_CODE_SUCCESS;

// the number of already initialized subqueries
int32_t numOfSub = 0;

- pSql->numOfSubs = (uint16_t)size;
- pSql->pSubs = calloc(size, POINTER_BYTES);
- if (pSql->pSubs == NULL) {
- goto _error;
- }

- tscDebug("%p submit data to %" PRIzu " vnode(s)", pSql, size);

SSubqueryState *pState = calloc(1, sizeof(SSubqueryState));
pState->numOfTotal = pSql->numOfSubs;
pState->numOfRemain = pSql->numOfSubs;

- pRes->code = TSDB_CODE_SUCCESS;
+ pSql->pSubs = calloc(pSql->numOfSubs, POINTER_BYTES);
+ if (pSql->pSubs == NULL) {
+ goto _error;
+ }

+ tscDebug("%p submit data to %d vnode(s)", pSql, pSql->numOfSubs);

while(numOfSub < pSql->numOfSubs) {
SInsertSupporter* pSupporter = calloc(1, sizeof(SInsertSupporter));
@@ -2016,8 +2001,8 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
tscDebug("%p sub:%p create subObj success. orderOfSub:%d", pSql, pNew, numOfSub);
numOfSub++;
} else {
- tscDebug("%p prepare submit data block failed in async insertion, vnodeIdx:%d, total:%" PRIzu ", code:%s", pSql, numOfSub,
+ tscDebug("%p prepare submit data block failed in async insertion, vnodeIdx:%d, total:%d, code:%s", pSql, numOfSub,
- size, tstrerror(pRes->code));
+ pSql->numOfSubs, tstrerror(pRes->code));
goto _error;
}
}
@@ -2040,11 +2025,6 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;

_error:
- for(int32_t j = 0; j < numOfSub; ++j) {
- taosTFree(pSql->pSubs[j]->param);
- taos_free_result(pSql->pSubs[j]);
- }

taosTFree(pState);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -2208,7 +2188,7 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
}

// primary key column cannot be null in interval query, no need to check
- if (i == 0 && pQueryInfo->intervalTime > 0) {
+ if (i == 0 && pQueryInfo->interval.interval > 0) {
continue;
}

@@ -2220,16 +2200,15 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
// calculate the result from several other columns
if (pSup->pArithExprInfo != NULL) {
if (pRes->pArithSup == NULL) {
- SArithmeticSupport *sas = (SArithmeticSupport *) calloc(1, sizeof(SArithmeticSupport));
+ pRes->pArithSup = (SArithmeticSupport*)calloc(1, sizeof(SArithmeticSupport));
- sas->offset = 0;
- sas->pArithExpr = pSup->pArithExprInfo;
- sas->numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
- sas->exprList = pQueryInfo->exprList;
- sas->data = calloc(sas->numOfCols, POINTER_BYTES);

- pRes->pArithSup = sas;
}

+ pRes->pArithSup->offset = 0;
+ pRes->pArithSup->pArithExpr = pSup->pArithExprInfo;
+ pRes->pArithSup->numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
+ pRes->pArithSup->exprList = pQueryInfo->exprList;
+ pRes->pArithSup->data = calloc(pRes->pArithSup->numOfCols, POINTER_BYTES);

if (pRes->buffer[i] == NULL) {
TAOS_FIELD* field = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
pRes->buffer[i] = malloc(field->bytes);

@@ -30,7 +30,8 @@
#include "tlocale.h"

// global, not configurable
- void * tscCacheHandle;
+ SCacheObj* tscMetaCache;
+ SCacheObj* tscObjCache;
void * tscTmr;
void * tscQhandle;
void * tscCheckDiskUsageTmr;
@@ -144,8 +145,9 @@ void taos_init_imp(void) {
refreshTime = refreshTime > 10 ? 10 : refreshTime;
refreshTime = refreshTime < 10 ? 10 : refreshTime;

- if (tscCacheHandle == NULL) {
+ if (tscMetaCache == NULL) {
- tscCacheHandle = taosCacheInit(TSDB_DATA_TYPE_BINARY, refreshTime, false, NULL, "tableMeta");
+ tscMetaCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, refreshTime, false, NULL, "tableMeta");
+ tscObjCache = taosCacheInit(TSDB_DATA_TYPE_BIGINT, refreshTime/2, false, tscFreeSqlObjInCache, "sqlObj");
}

tscDebug("client is initialized successfully");
@@ -154,9 +156,12 @@ void taos_init_imp(void) {
void taos_init() { pthread_once(&tscinit, taos_init_imp); }

void taos_cleanup() {
- if (tscCacheHandle != NULL) {
+ if (tscMetaCache != NULL) {
- taosCacheCleanup(tscCacheHandle);
+ taosCacheCleanup(tscMetaCache);
- tscCacheHandle = NULL;
+ tscMetaCache = NULL;

+ taosCacheCleanup(tscObjCache);
+ tscObjCache = NULL;
}

if (tscQhandle != NULL) {

@@ -268,7 +268,7 @@ int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
}

- void tscDestroyResPointerInfo(SSqlRes* pRes) {
+ static void tscDestroyResPointerInfo(SSqlRes* pRes) {
if (pRes->buffer != NULL) { // free all buffers containing the multibyte string
for (int i = 0; i < pRes->numOfCols; i++) {
taosTFree(pRes->buffer[i]);
@@ -344,8 +344,6 @@ void tscPartiallyFreeSqlObj(SSqlObj* pSql) {
}

SSqlCmd* pCmd = &pSql->cmd;
- STscObj* pObj = pSql->pTscObj;

int32_t cmd = pCmd->command;
if (cmd < TSDB_SQL_INSERT || cmd == TSDB_SQL_RETRIEVE_LOCALMERGE || cmd == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
cmd == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
@@ -353,26 +351,61 @@ void tscPartiallyFreeSqlObj(SSqlObj* pSql) {
}

// pSql->sqlstr will be used by tscBuildQueryStreamDesc
- if (pObj->signature == pObj) {
+ // if (pObj->signature == pObj) {
//pthread_mutex_lock(&pObj->mutex);
taosTFree(pSql->sqlstr);
//pthread_mutex_unlock(&pObj->mutex);
- }
+ // }

tscFreeSqlResult(pSql);

taosTFree(pSql->pSubs);
pSql->numOfSubs = 0;
+ pSql->self = 0;

tscResetSqlCmdObj(pCmd, false);
}

+ static UNUSED_FUNC void tscFreeSubobj(SSqlObj* pSql) {
+ if (pSql->numOfSubs == 0) {
+ return;
+ }

+ tscDebug("%p start to free sub SqlObj, numOfSub:%d", pSql, pSql->numOfSubs);

+ for(int32_t i = 0; i < pSql->numOfSubs; ++i) {
+ tscDebug("%p free sub SqlObj:%p, index:%d", pSql, pSql->pSubs[i], i);
+ taos_free_result(pSql->pSubs[i]);
+ pSql->pSubs[i] = NULL;
+ }

+ pSql->numOfSubs = 0;
+ }

+ /**
+ * The free operation will cause the pSql to be removed from hash table and free it in
+ * the function of processmsgfromserver is impossible in this case, since it will fail
+ * to retrieve pSqlObj in hashtable.
+ *
+ * @param pSql
+ */
+ void tscFreeSqlObjInCache(void *pSql) {
+ assert(pSql != NULL);
+ SSqlObj** p = (SSqlObj**)pSql;

+ assert((*p)->self != 0 && (*p)->self == (p));
+ tscFreeSqlObj(*p);
+ }

void tscFreeSqlObj(SSqlObj* pSql) {
if (pSql == NULL || pSql->signature != pSql) {
return;
}

- tscDebug("%p start to free sql object", pSql);
+ tscDebug("%p start to free sqlObj", pSql);
+ STscObj* pTscObj = pSql->pTscObj;

+ tscFreeSubobj(pSql);
tscPartiallyFreeSqlObj(pSql);

pSql->signature = NULL;
@@ -388,6 +421,14 @@ void tscFreeSqlObj(SSqlObj* pSql) {
tsem_destroy(&pSql->rspSem);

free(pSql);
+ tscDebug("%p free sqlObj completed", pSql);

+ int32_t ref = T_REF_DEC(pTscObj);
+ assert(ref >= 0);

+ if (ref == 0) {
+ tscCloseTscObj(pTscObj);
+ }
}

void tscDestroyDataBlock(STableDataBlocks* pDataBlock) {
@@ -399,7 +440,10 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock) {
taosTFree(pDataBlock->params);

// free the refcount for metermeta
- taosCacheRelease(tscCacheHandle, (void**)&(pDataBlock->pTableMeta), false);
+ if (pDataBlock->pTableMeta != NULL) {
+ taosCacheRelease(tscMetaCache, (void**)&(pDataBlock->pTableMeta), false);
+ }

taosTFree(pDataBlock);
}

@@ -454,9 +498,12 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
// set the correct table meta object, the table meta has been locked in pDataBlocks, so it must be in the cache
if (pTableMetaInfo->pTableMeta != pDataBlock->pTableMeta) {
tstrncpy(pTableMetaInfo->name, pDataBlock->tableId, sizeof(pTableMetaInfo->name));
- taosCacheRelease(tscCacheHandle, (void**)&(pTableMetaInfo->pTableMeta), false);

- pTableMetaInfo->pTableMeta = taosCacheTransfer(tscCacheHandle, (void**)&pDataBlock->pTableMeta);
+ if (pTableMetaInfo->pTableMeta != NULL) {
+ taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), false);
+ }

+ pTableMetaInfo->pTableMeta = taosCacheTransfer(tscMetaCache, (void**)&pDataBlock->pTableMeta);
} else {
assert(strncmp(pTableMetaInfo->name, pDataBlock->tableId, tListLen(pDataBlock->tableId)) == 0);
}
@@ -527,7 +574,7 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
* due to operation such as drop database. So here we add the reference count directly instead of invoke
* taosGetDataFromCache, which may return NULL value.
*/
- dataBuf->pTableMeta = taosCacheAcquireByData(tscCacheHandle, pTableMeta);
+ dataBuf->pTableMeta = taosCacheAcquireByData(tscMetaCache, pTableMeta);
assert(initialSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);

*dataBlocks = dataBuf;
@@ -724,14 +771,16 @@ void tscCloseTscObj(STscObj* pObj) {

pObj->signature = NULL;
taosTmrStopA(&(pObj->pTimer));
- pthread_mutex_destroy(&pObj->mutex);

+ void* p = pObj->pDnodeConn;
if (pObj->pDnodeConn != NULL) {
rpcClose(pObj->pDnodeConn);
pObj->pDnodeConn = NULL;
}

- tscDebug("%p DB connection is closed, dnodeConn:%p", pObj, pObj->pDnodeConn);
+ pthread_mutex_destroy(&pObj->mutex);

+ tscDebug("%p DB connection is closed, dnodeConn:%p", pObj, p);
taosTFree(pObj);
}

@@ -1554,6 +1603,8 @@ int32_t tscAddSubqueryInfo(SSqlCmd* pCmd) {
}

tscInitQueryInfo(pQueryInfo);

+ pQueryInfo->window = TSWINDOW_INITIALIZER;
pQueryInfo->msg = pCmd->payload; // pointer to the parent error message buffer

pCmd->pQueryInfo[pCmd->numOfClause++] = pQueryInfo;
@@ -1665,7 +1716,10 @@ void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache)
return;
}

- taosCacheRelease(tscCacheHandle, (void**)&(pTableMetaInfo->pTableMeta), removeFromCache);
+ if (pTableMetaInfo->pTableMeta != NULL) {
+ taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), removeFromCache);
+ }

taosTFree(pTableMetaInfo->vgroupList);

tscColumnListDestroy(pTableMetaInfo->tagColList);
@@ -1689,6 +1743,8 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm
}

pNew->pTscObj = pSql->pTscObj;
+ T_REF_INC(pNew->pTscObj);

pNew->signature = pNew;

SSqlCmd* pCmd = &pNew->cmd;
@@ -1719,6 +1775,11 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0);

tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL);

+ T_REF_INC(pNew->pTscObj);

+ uint64_t p = (uint64_t) pNew;
+ pNew->self = taosCachePut(tscObjCache, &p, sizeof(uint64_t), &pNew, sizeof(uint64_t), 2 * 600 * 1000);
return pNew;
}

@@ -1788,6 +1849,7 @@ static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pQueryInfo, SQueryInfo* p
}

assert(matched);
+ (void)matched;
}

tscFieldInfoUpdateOffset(pNewQueryInfo);
@@ -1807,6 +1869,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void

pNew->pTscObj = pSql->pTscObj;
pNew->signature = pNew;
+ T_REF_INC(pNew->pTscObj);

pNew->sqlstr = strdup(pSql->sqlstr);
if (pNew->sqlstr == NULL) {
@@ -1837,10 +1900,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);

pNewQueryInfo->command = pQueryInfo->command;
- pNewQueryInfo->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
+ memcpy(&pNewQueryInfo->interval, &pQueryInfo->interval, sizeof(pNewQueryInfo->interval));
- pNewQueryInfo->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
- pNewQueryInfo->intervalTime = pQueryInfo->intervalTime;
- pNewQueryInfo->slidingTime = pQueryInfo->slidingTime;
pNewQueryInfo->type = pQueryInfo->type;
pNewQueryInfo->window = pQueryInfo->window;
pNewQueryInfo->limit = pQueryInfo->limit;
@@ -1911,14 +1971,14 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
STableMetaInfo* pFinalInfo = NULL;

if (pPrevSql == NULL) {
- STableMeta* pTableMeta = taosCacheAcquireByData(tscCacheHandle, pTableMetaInfo->pTableMeta); // get by name may failed due to the cache cleanup
+ STableMeta* pTableMeta = taosCacheAcquireByData(tscMetaCache, pTableMetaInfo->pTableMeta); // get by name may failed due to the cache cleanup
assert(pTableMeta != NULL);

pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList, pTableMetaInfo->tagColList);
} else { // transfer the ownership of pTableMeta to the newly create sql object.
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);

- STableMeta* pPrevTableMeta = taosCacheTransfer(tscCacheHandle, (void**)&pPrevInfo->pTableMeta);
+ STableMeta* pPrevTableMeta = taosCacheTransfer(tscMetaCache, (void**)&pPrevInfo->pTableMeta);

SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList;
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList);
@@ -1958,6 +2018,10 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
tscDebug("%p new sub insertion: %p, vnodeIdx:%d", pSql, pNew, pTableMetaInfo->vgroupIndex);
}

+ T_REF_INC(pNew->pTscObj);

+ uint64_t p = (uint64_t) pNew;
+ pNew->self = taosCachePut(tscObjCache, &p, sizeof(uint64_t), &pNew, sizeof(uint64_t), 2 * 600 * 10);
return pNew;

_error:
@@ -2069,6 +2133,7 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;

}

int32_t tscInvalidSQLErrMsg(char* msg, const char* additionalInfo, const char* sql) {
const char* msgFormat1 = "invalid SQL: %s";
const char* msgFormat2 = "invalid SQL: \'%s\' (%s)";
@@ -2099,11 +2164,6 @@ bool tscHasReachLimitation(SQueryInfo* pQueryInfo, SSqlRes* pRes) {
return (pQueryInfo->clauseLimit > 0 && pRes->numOfClauseTotal >= pQueryInfo->clauseLimit);
}

- bool tscResultsetFetchCompleted(TAOS_RES *result) {
- SSqlRes* pRes = result;
- return pRes->completed;
- }

char* tscGetErrorMsgPayload(SSqlCmd* pCmd) { return pCmd->payload; }

/**

@@ -35,8 +35,6 @@ bool tscValidateTableNameLength(size_t len);

SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);

- int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision);
+ // int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
- int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision);
- int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);

#endif // TDENGINE_NAME_H

@@ -957,17 +957,6 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);

- // http configs
- cfg.option = "httpCacheSessions";
- cfg.ptr = &tsHttpCacheSessions;
- cfg.valType = TAOS_CFG_VTYPE_INT32;
- cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
- cfg.minValue = 1;
- cfg.maxValue = 100000;
- cfg.ptrLength = 0;
- cfg.unitType = TAOS_CFG_UTYPE_NONE;
- taosInitConfigOption(cfg);

cfg.option = "httpEnableRecordSql";
cfg.ptr = &tsHttpEnableRecordSql;
cfg.valType = TAOS_CFG_VTYPE_INT32;

@@ -62,10 +62,9 @@ SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const
if (name != NULL) {
tstrncpy(s.name, name, sizeof(s.name));
} else {
- size_t len = strdequote(exprStr->z);
+ size_t tlen = MIN(sizeof(s.name), exprStr->n + 1);
- size_t tlen = MIN(sizeof(s.name), len + 1);

tstrncpy(s.name, exprStr->z, tlen);
+ strdequote(s.name);
}

return s;
@@ -100,62 +99,7 @@ SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numO
return pFilter;
}

- int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision) {
+ #if 0
- key /= 1000;
- if (precision == TSDB_TIME_PRECISION_MICRO) {
- key /= 1000;
- }

- struct tm tm;
- time_t t = (time_t)key;
- localtime_r(&t, &tm);

- if (timeUnit == 'y') {
- intervalTime *= 12;
- }

- int mon = (int)(tm.tm_year * 12 + tm.tm_mon + intervalTime);
- tm.tm_year = mon / 12;
- tm.tm_mon = mon % 12;

- key = mktime(&tm) * 1000L;

- if (precision == TSDB_TIME_PRECISION_MICRO) {
- key *= 1000L;
- }

- return key;
- }

- int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision) {
- skey /= 1000;
- ekey /= 1000;
- if (precision == TSDB_TIME_PRECISION_MICRO) {
- skey /= 1000;
- ekey /= 1000;
- }
- if (ekey < skey) {
- int64_t tmp = ekey;
- ekey = skey;
- skey = tmp;
- }

- struct tm tm;
- time_t t = (time_t)skey;
- localtime_r(&t, &tm);
- int smon = tm.tm_year * 12 + tm.tm_mon;

- t = (time_t)ekey;
- localtime_r(&t, &tm);
- int emon = tm.tm_year * 12 + tm.tm_mon;

- if (timeUnit == 'y') {
- intervalTime *= 12;
- }

- return (emon - smon) / (int32_t)intervalTime;
- }

int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision) {
if (slidingTime == 0) {
return startTime;
@@ -220,6 +164,8 @@ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, in
return start;
}

+ #endif

/*
* tablePrefix.columnName
* extract table name and save it in pTable, with only column name in pToken

@@ -540,9 +540,7 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) {
}
}

- void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size) {
+ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf) {
- char tmpBuf[4096] = {0};

switch (type) {
case TSDB_DATA_TYPE_INT: {
SWAP(*(int32_t *)(pLeft), *(int32_t *)(pRight), int32_t);
@@ -575,10 +573,9 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size) {
}

default: {
- assert(size <= 4096);
+ memcpy(buf, pLeft, size);
- memcpy(tmpBuf, pLeft, size);
memcpy(pLeft, pRight, size);
- memcpy(pRight, tmpBuf, size);
+ memcpy(pRight, buf, size);
break;
}
}

@@ -7,6 +7,7 @@
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>

<description>TDengine JDBC Driver</description>
<licenses>
<license>
@@ -48,11 +49,6 @@
</exclusion>
</exclusions>
</dependency>
- <dependency>
- <groupId>org.apache.commons</groupId>
- <artifactId>commons-lang3</artifactId>
- <version>${commons-lang3.version}</version>
- </dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>

@@ -53,66 +53,12 @@ public class TSDBConnection implements Connection {
public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException {
this.dbMetaData = meta;

- //load taos.cfg start
- File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR));
- File cfgFile = cfgDir.listFiles((dir, name) -> "taos.cfg".equalsIgnoreCase(name))[0];
- List<String> endpoints = loadConfigEndpoints(cfgFile);
- if (!endpoints.isEmpty()) {
- info.setProperty(TSDBDriver.PROPERTY_KEY_HOST, endpoints.get(0).split(":")[0]);
- info.setProperty(TSDBDriver.PROPERTY_KEY_PORT, endpoints.get(0).split(":")[1]);
- }
- //load taos.cfg end

connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST),
Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")),
info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), info.getProperty(TSDBDriver.PROPERTY_KEY_USER),
info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
}

- private List<String> loadConfigEndpoints(File cfgFile) {
- List<String> endpoints = new ArrayList<>();
- try (BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) {
- String line = null;
- while ((line = reader.readLine()) != null) {
- if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")) {
- endpoints.add(line.substring(line.indexOf('p') + 1).trim());
- }
- if (endpoints.size() > 1)
- break;
- }
- } catch (FileNotFoundException e) {
- e.printStackTrace();
- } catch (IOException e) {
- e.printStackTrace();
- }
- return endpoints;
- }

- /**
- * @param cfgDirPath
- * @return return the config dir
- **/
- private File loadConfigDir(String cfgDirPath) {
- if (cfgDirPath == null)
- return loadDefaultConfigDir();
- File cfgDir = new File(cfgDirPath);
- if (!cfgDir.exists())
- return loadDefaultConfigDir();
- return cfgDir;
- }

- /**
- * @return search the default config dir, if the config dir is not exist will return null
- */
- private File loadDefaultConfigDir() {
- File cfgDir;
- File cfgDir_linux = new File("/etc/taos");
- cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null;
- File cfgDir_windows = new File("C:\\TDengine\\cfg");
- cfgDir = (cfgDir == null && cfgDir_windows.exists()) ? cfgDir_windows : cfgDir;
- return cfgDir;
- }

private void connect(String host, int port, String dbName, String user, String password) throws SQLException {
this.connector = new TSDBJNIConnector();
this.connector.connect(host, port, dbName, user, password);

@@ -68,15 +68,15 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
}

public boolean nullsAreSortedLow() throws SQLException {
- return false;
+ return !nullsAreSortedHigh();
}

public boolean nullsAreSortedAtStart() throws SQLException {
- return false;
+ return true;
}

public boolean nullsAreSortedAtEnd() throws SQLException {
- return false;
+ return !nullsAreSortedAtStart();
}

public String getDatabaseProductName() throws SQLException {

@@ -14,9 +14,12 @@
*****************************************************************************/
package com.taosdata.jdbc;

- import org.apache.commons.lang3.StringUtils;
+ import java.io.*;

import java.sql.*;
+ import java.util.ArrayList;
+ import java.util.List;
import java.util.Properties;
import java.util.logging.Logger;

@@ -41,6 +44,7 @@ import java.util.logging.Logger;
*/
public class TSDBDriver implements java.sql.Driver {


@Deprecated
private static final String URL_PREFIX1 = "jdbc:TSDB://";

@@ -75,6 +79,7 @@ public class TSDBDriver implements java.sql.Driver {
*/
public static final String PROPERTY_KEY_USER = "user";


/**
* Key for the configuration file directory of TSDB client in properties instance
*/
@@ -98,6 +103,7 @@ public class TSDBDriver implements java.sql.Driver {

public static final String PROPERTY_KEY_PROTOCOL = "protocol";


/**
* Index for port coming out of parseHostPortPair().
*/
@@ -118,17 +124,71 @@ public class TSDBDriver implements java.sql.Driver {
}
}

+ private List<String> loadConfigEndpoints(File cfgFile) {
+ List<String> endpoints = new ArrayList<>();
+ try (BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) {
+ String line = null;
+ while ((line = reader.readLine()) != null) {
+ if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")) {
+ endpoints.add(line.substring(line.indexOf('p') + 1).trim());
+ }
+ if (endpoints.size() > 1)
+ break;
+ }
+ } catch (FileNotFoundException e) {
+ e.printStackTrace();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ return endpoints;
+ }

+ /**
+ * @param cfgDirPath
+ * @return return the config dir
+ **/
+ private File loadConfigDir(String cfgDirPath) {
+ if (cfgDirPath == null)
+ return loadDefaultConfigDir();
+ File cfgDir = new File(cfgDirPath);
+ if (!cfgDir.exists())
+ return loadDefaultConfigDir();
+ return cfgDir;
+ }

+ /**
+ * @return search the default config dir, if the config dir is not exist will return null
+ */
+ private File loadDefaultConfigDir() {
+ File cfgDir;
+ File cfgDir_linux = new File("/etc/taos");
+ cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null;
+ File cfgDir_windows = new File("C:\\TDengine\\cfg");
+ cfgDir = (cfgDir == null && cfgDir_windows.exists()) ? cfgDir_windows : cfgDir;
+ return cfgDir;
+ }

public Connection connect(String url, Properties info) throws SQLException {
if (url == null) {
throw new SQLException(TSDBConstants.WrapErrMsg("url is not set!"));
}

Properties props = null;

if ((props = parseURL(url, info)) == null) {
return null;
}

+ //load taos.cfg start
+ if (info.getProperty(TSDBDriver.PROPERTY_KEY_HOST) == null && info.getProperty(TSDBDriver.PROPERTY_KEY_PORT) == null){
+ File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR));
+ File cfgFile = cfgDir.listFiles((dir, name) -> "taos.cfg".equalsIgnoreCase(name))[0];
+ List<String> endpoints = loadConfigEndpoints(cfgFile);
+ if (!endpoints.isEmpty()) {
+ info.setProperty(TSDBDriver.PROPERTY_KEY_HOST, endpoints.get(0).split(":")[0]);
+ info.setProperty(TSDBDriver.PROPERTY_KEY_PORT, endpoints.get(0).split(":")[1]);
+ }
+ }

try {
TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE), (String) props.get(PROPERTY_KEY_CHARSET),
(String) props.get(PROPERTY_KEY_TIME_ZONE));
@@ -153,13 +213,9 @@ public class TSDBDriver implements java.sql.Driver {
* and the element of index PORT_NUMBER_INDEX being the port (or null if not
* specified).
*
- * @param hostPortPair
+ * @param hostPortPair host and port in form of of [host][:port]
- * host and port in form of of [host][:port]
- *
* @return array containing host and port as Strings
- *
+ * @throws SQLException if a parse error occurs
- * @throws SQLException
- * if a parse error occurs
*/
protected static String[] parseHostPortPair(String hostPortPair) throws SQLException {
String[] splitValues = new String[2];
@@ -188,7 +244,7 @@ public class TSDBDriver implements java.sql.Driver {
}

public boolean acceptsURL(String url) throws SQLException {
- return StringUtils.isNotBlank(url) && url.startsWith(URL_PREFIX);
+ return (url != null && url.length() > 0 && url.trim().length() > 0) && url.toLowerCase().startsWith(URL_PREFIX);
}

public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException {
@@ -203,8 +259,7 @@ public class TSDBDriver implements java.sql.Driver {
DriverPropertyInfo hostProp = new DriverPropertyInfo(PROPERTY_KEY_HOST, info.getProperty(PROPERTY_KEY_HOST));
hostProp.required = true;

- DriverPropertyInfo portProp = new DriverPropertyInfo(PROPERTY_KEY_PORT,
+ DriverPropertyInfo portProp = new DriverPropertyInfo(PROPERTY_KEY_PORT, info.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT));
- info.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT));
portProp.required = false;

DriverPropertyInfo dbProp = new DriverPropertyInfo(PROPERTY_KEY_DBNAME, info.getProperty(PROPERTY_KEY_DBNAME));
@@ -214,8 +269,7 @@ public class TSDBDriver implements java.sql.Driver {
DriverPropertyInfo userProp = new DriverPropertyInfo(PROPERTY_KEY_USER, info.getProperty(PROPERTY_KEY_USER));
userProp.required = true;

- DriverPropertyInfo passwordProp = new DriverPropertyInfo(PROPERTY_KEY_PASSWORD,
+ DriverPropertyInfo passwordProp = new DriverPropertyInfo(PROPERTY_KEY_PASSWORD, info.getProperty(PROPERTY_KEY_PASSWORD));
- info.getProperty(PROPERTY_KEY_PASSWORD));
passwordProp.required = true;

DriverPropertyInfo[] propertyInfo = new DriverPropertyInfo[5];
@@ -231,14 +285,14 @@ public class TSDBDriver implements java.sql.Driver {
/**
* example: jdbc:TSDB://127.0.0.1:0/db?user=root&password=your_password
*/

public Properties parseURL(String url, Properties defaults) throws java.sql.SQLException {
Properties urlProps = (defaults != null) ? defaults : new Properties();
if (url == null) {
return null;
}

- if (!StringUtils.startsWithIgnoreCase(url, URL_PREFIX) && !StringUtils.startsWithIgnoreCase(url, URL_PREFIX1)) {
+ String lowerUrl = url.toLowerCase();
+ if (!lowerUrl.startsWith(URL_PREFIX) && !lowerUrl.startsWith(URL_PREFIX1)) {
return null;
}

@@ -263,7 +317,7 @@ public class TSDBDriver implements java.sql.Driver {
url = url.trim().substring(url.indexOf("?") + 1);
} else {
// without user & password so return
- if(!url.trim().isEmpty()) {
+ if (!url.trim().isEmpty()) {
String dbName = url.trim();
urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName);
}
@@ -284,7 +338,7 @@ public class TSDBDriver implements java.sql.Driver {
String[] queryStrings = url.trim().split("&");
for (String queryStr : queryStrings) {
String[] kvPair = queryStr.trim().split("=");
- if (kvPair.length < 2){
+ if (kvPair.length < 2) {
continue;
}
setPropertyValue(urlProps, kvPair);
@@ -339,9 +393,7 @@ public class TSDBDriver implements java.sql.Driver {
/**
* Returns the host property
*
- * @param props
+ * @param props the java.util.Properties instance to retrieve the hostname from.
- * the java.util.Properties instance to retrieve the hostname from.
- *
* @return the host
*/
public String host(Properties props) {
@@ -351,9 +403,7 @@ public class TSDBDriver implements java.sql.Driver {
/**
* Returns the port number property
*
- * @param props
+ * @param props the properties to get the port number from
- * the properties to get the port number from
- *
* @return the port number
*/
public int port(Properties props) {
@@ -363,9 +413,7 @@ public class TSDBDriver implements java.sql.Driver {
/**
* Returns the database property from <code>props</code>
*
- * @param props
+ * @param props the Properties to look for the database property.
- * the Properties to look for the database property.
- *
* @return the database name.
*/
public String database(Properties props) {

@@ -242,7 +242,7 @@ public class TSDBStatement implements Statement {

public void addBatch(String sql) throws SQLException {
if (batchedArgs == null) {
- batchedArgs = new ArrayList<String>();
+ batchedArgs = new ArrayList<>();
}
batchedArgs.add(sql);
}

@@ -30,10 +30,12 @@
#include "tlog.h"
#include "twal.h"

- #define cError(...) { if (cqDebugFlag & DEBUG_ERROR) { taosPrintLog("ERROR CQ ", cqDebugFlag, __VA_ARGS__); }}
+ #define cFatal(...) { if (cqDebugFlag & DEBUG_FATAL) { taosPrintLog("CQ FATAL ", 255, __VA_ARGS__); }}
- #define cWarn(...) { if (cqDebugFlag & DEBUG_WARN) { taosPrintLog("WARN CQ ", cqDebugFlag, __VA_ARGS__); }}
+ #define cError(...) { if (cqDebugFlag & DEBUG_ERROR) { taosPrintLog("CQ ERROR ", 255, __VA_ARGS__); }}
+ #define cWarn(...) { if (cqDebugFlag & DEBUG_WARN) { taosPrintLog("CQ WARN ", 255, __VA_ARGS__); }}
+ #define cInfo(...) { if (cqDebugFlag & DEBUG_INFO) { taosPrintLog("CQ ", 255, __VA_ARGS__); }}
+ #define cDebug(...) { if (cqDebugFlag & DEBUG_DEBUG) { taosPrintLog("CQ ", cqDebugFlag, __VA_ARGS__); }}
#define cTrace(...) { if (cqDebugFlag & DEBUG_TRACE) { taosPrintLog("CQ ", cqDebugFlag, __VA_ARGS__); }}
- #define cPrint(...) { taosPrintLog("CQ ", 255, __VA_ARGS__); }

typedef struct {
int vgId;
@@ -94,7 +96,7 @@ void *cqOpen(void *ahandle, const SCqCfg *pCfg) {

pthread_mutex_init(&pContext->mutex, NULL);

- cTrace("vgId:%d, CQ is opened", pContext->vgId);
+ cInfo("vgId:%d, CQ is opened", pContext->vgId);

return pContext;
}
@@ -125,7 +127,7 @@ void cqClose(void *handle) {
taosTmrCleanUp(pContext->tmrCtrl);
pContext->tmrCtrl = NULL;

- cTrace("vgId:%d, CQ is closed", pContext->vgId);
+ cInfo("vgId:%d, CQ is closed", pContext->vgId);
free(pContext);
}

@@ -133,7 +135,7 @@ void cqStart(void *handle) {
SCqContext *pContext = handle;
if (pContext->dbConn || pContext->master) return;

- cTrace("vgId:%d, start all CQs", pContext->vgId);
+ cInfo("vgId:%d, start all CQs", pContext->vgId);
pthread_mutex_lock(&pContext->mutex);

pContext->master = 1;
@@ -149,7 +151,7 @@ void cqStart(void *handle) {

void cqStop(void *handle) {
SCqContext *pContext = handle;
- cTrace("vgId:%d, stop all CQs", pContext->vgId);
+ cInfo("vgId:%d, stop all CQs", pContext->vgId);
if (pContext->dbConn == NULL || pContext->master == 0) return;

pthread_mutex_lock(&pContext->mutex);
@@ -160,7 +162,7 @@ void cqStop(void *handle) {
if (pObj->pStream) {
taos_close_stream(pObj->pStream);
pObj->pStream = NULL;
- cTrace("vgId:%d, id:%d CQ:%s is closed", pContext->vgId, pObj->tid, pObj->sqlStr);
+ cInfo("vgId:%d, id:%d CQ:%s is closed", pContext->vgId, pObj->tid, pObj->sqlStr);
} else {
taosTmrStop(pObj->tmrId);
pObj->tmrId = 0;
@@ -188,7 +190,7 @@ void *cqCreate(void *handle, uint64_t uid, int tid, char *sqlStr, STSchema *pSch
pObj->pSchema = tdDupSchema(pSchema);
pObj->rowSize = schemaTLen(pSchema);

- cTrace("vgId:%d, id:%d CQ:%s is created", pContext->vgId, pObj->tid, pObj->sqlStr);
+ cInfo("vgId:%d, id:%d CQ:%s is created", pContext->vgId, pObj->tid, pObj->sqlStr);

pthread_mutex_lock(&pContext->mutex);

@@ -228,7 +230,7 @@ void cqDrop(void *handle) {
pObj->tmrId = 0;
}

- cTrace("vgId:%d, id:%d CQ:%s is dropped", pContext->vgId, pObj->tid, pObj->sqlStr);
+ cInfo("vgId:%d, id:%d CQ:%s is dropped", pContext->vgId, pObj->tid, pObj->sqlStr);
tdFreeSchema(pObj->pSchema);
free(pObj->sqlStr);
free(pObj);
@@ -262,7 +264,7 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
pObj->pStream = taos_open_stream(pContext->dbConn, pObj->sqlStr, cqProcessStreamRes, 0, pObj, NULL);
if (pObj->pStream) {
pContext->num++;
- cTrace("vgId:%d, id:%d CQ:%s is openned", pContext->vgId, pObj->tid, pObj->sqlStr);
+ cInfo("vgId:%d, id:%d CQ:%s is openned", pContext->vgId, pObj->tid, pObj->sqlStr);
} else {
cError("vgId:%d, id:%d CQ:%s, failed to open", pContext->vgId, pObj->tid, pObj->sqlStr);
}
@@ -278,7 +280,7 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) {
STSchema *pSchema = pObj->pSchema;
if (pObj->pStream == NULL) return;

- cTrace("vgId:%d, id:%d CQ:%s stream result is ready", pContext->vgId, pObj->tid, pObj->sqlStr);
+ cDebug("vgId:%d, id:%d CQ:%s stream result is ready", pContext->vgId, pObj->tid, pObj->sqlStr);

int size = sizeof(SWalHead) + sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + TD_DATA_ROW_HEAD_SIZE + pObj->rowSize;
char *buffer = calloc(size, 1);

@ -101,6 +101,7 @@ extern const int32_t TYPE_BYTES[11];
|
||||||
#define TSDB_TIME_PRECISION_MILLI 0
|
#define TSDB_TIME_PRECISION_MILLI 0
|
||||||
#define TSDB_TIME_PRECISION_MICRO 1
|
#define TSDB_TIME_PRECISION_MICRO 1
|
||||||
#define TSDB_TIME_PRECISION_NANO 2
|
#define TSDB_TIME_PRECISION_NANO 2
|
||||||
|
#define TSDB_TICK_PER_SECOND(precision) ((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L))
|
||||||
|
|
||||||
#define TSDB_TIME_PRECISION_MILLI_STR "ms"
|
#define TSDB_TIME_PRECISION_MILLI_STR "ms"
|
||||||
#define TSDB_TIME_PRECISION_MICRO_STR "us"
|
#define TSDB_TIME_PRECISION_MICRO_STR "us"
|
||||||
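The new TSDB_TICK_PER_SECOND macro maps a precision flag to the number of ticks in one second, and the time helpers added later in this change build on it. A quick illustration of the three cases, following directly from the macro above:

    /* 1e3L ticks per second for millisecond precision, 1e6L for microsecond, 1e9L otherwise */
    int64_t msTicks = (int64_t)TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI);  // 1000
    int64_t usTicks = (int64_t)TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO);  // 1000000
    int64_t nsTicks = (int64_t)TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_NANO);   // 1000000000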
@@ -198,7 +199,7 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
 void* getNullValue(int32_t type);

 void assignVal(char *val, const char *src, int32_t len, int32_t type);
-void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
+void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf);

 // TODO: check if below is necessary
 #define TSDB_RELATION_INVALID 0
@@ -209,21 +210,24 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
 #define TSDB_RELATION_GREATER_EQUAL 5
 #define TSDB_RELATION_NOT_EQUAL 6
 #define TSDB_RELATION_LIKE 7
-#define TSDB_RELATION_IN 8
+#define TSDB_RELATION_ISNULL 8
+#define TSDB_RELATION_NOTNULL 9
+#define TSDB_RELATION_IN 10

-#define TSDB_RELATION_AND 9
-#define TSDB_RELATION_OR 10
-#define TSDB_RELATION_NOT 11
+#define TSDB_RELATION_AND 11
+#define TSDB_RELATION_OR 12
+#define TSDB_RELATION_NOT 13

-#define TSDB_BINARY_OP_ADD 12
-#define TSDB_BINARY_OP_SUBTRACT 13
-#define TSDB_BINARY_OP_MULTIPLY 14
-#define TSDB_BINARY_OP_DIVIDE 15
-#define TSDB_BINARY_OP_REMAINDER 16
+#define TSDB_BINARY_OP_ADD 30
+#define TSDB_BINARY_OP_SUBTRACT 31
+#define TSDB_BINARY_OP_MULTIPLY 32
+#define TSDB_BINARY_OP_DIVIDE 33
+#define TSDB_BINARY_OP_REMAINDER 34
 #define TS_PATH_DELIMITER_LEN 1

 #define TSDB_UNI_LEN 24
 #define TSDB_USER_LEN TSDB_UNI_LEN

 // ACCOUNT is a 32 bit positive integer
 // this is the length of its string representation
 // including the terminator zero
@@ -241,7 +245,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
 #define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 64
 #define TSDB_MAX_SQL_LEN TSDB_PAYLOAD_SIZE
 #define TSDB_MAX_SQL_SHOW_LEN 256
-#define TSDB_MAX_ALLOWED_SQL_LEN (8*1024*1024U) // sql length should be less than 8mb
+#define TSDB_MAX_ALLOWED_SQL_LEN (1*1024*1024U) // sql length should be less than 8mb

 #define TSDB_MAX_BYTES_PER_ROW 16384
 #define TSDB_MAX_TAGS_LEN 16384

@@ -460,11 +460,7 @@ typedef struct {
   int16_t order;
   int16_t orderColId;
   int16_t numOfCols; // the number of columns will be load from vnode
-  int64_t intervalTime; // time interval for aggregation, in million second
-  int64_t intervalOffset; // start offset for interval query
-  int64_t slidingTime; // value for sliding window
-  char intervalTimeUnit;
-  char slidingTimeUnit; // time interval type, for revisement of interval(1d)
+  SInterval interval;
   uint16_t tagCondLen; // tag length in current query
   int16_t numOfGroupCols; // num of group by columns
   int16_t orderByIdx;

@@ -765,7 +765,9 @@ void read_history() {
   FILE *f = fopen(f_history, "r");
   if (f == NULL) {
 #ifndef WINDOWS
-    fprintf(stderr, "Failed to open file %s\n", f_history);
+    if (errno != ENOENT) {
+      fprintf(stderr, "Failed to open file %s, reason:%s\n", f_history, strerror(errno));
+    }
 #endif
     return;
   }
@@ -792,7 +794,7 @@ void write_history() {
   FILE *f = fopen(f_history, "w");
   if (f == NULL) {
 #ifndef WINDOWS
-    fprintf(stderr, "Failed to open file %s for write\n", f_history);
+    fprintf(stderr, "Failed to open file %s for write, reason:%s\n", f_history, strerror(errno));
 #endif
     return;
   }

@@ -30,8 +30,6 @@ extern "C" {
 #define MILLISECOND_PER_HOUR (MILLISECOND_PER_MINUTE * 60)
 #define MILLISECOND_PER_DAY (MILLISECOND_PER_HOUR * 24)
 #define MILLISECOND_PER_WEEK (MILLISECOND_PER_DAY * 7)
-#define MILLISECOND_PER_MONTH (MILLISECOND_PER_DAY * 30)
-#define MILLISECOND_PER_YEAR (MILLISECOND_PER_DAY * 365)

 //@return timestamp in second
 int32_t taosGetTimestampSec();
@@ -63,8 +61,22 @@ static FORCE_INLINE int64_t taosGetTimestamp(int32_t precision) {
   }
 }

-int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts);
-int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit);
+typedef struct SInterval {
+  char intervalUnit;
+  char slidingUnit;
+  char offsetUnit;
+  int64_t interval;
+  int64_t sliding;
+  int64_t offset;
+} SInterval;
+
+int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);
+int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision);
+int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision);
+
+int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* ts);
+int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit);

 int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth);
 void deltaToUtcInitOnce();

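A minimal sketch of how the SInterval fields and the helpers declared above fit together, assuming millisecond precision and the 'a' (millisecond) duration unit used elsewhere in this change; the concrete values are illustrative only:

    SInterval itv = {0};
    itv.intervalUnit = 'a';          // fixed-length units pass straight through taosTimeAdd
    itv.slidingUnit  = 'a';
    itv.offsetUnit   = 'a';
    itv.interval     = 3600 * 1000;  // 1h window, in millisecond ticks
    itv.sliding      = 3600 * 1000;  // tumbling (non-overlapping) windows
    itv.offset       = 0;

    int64_t ts   = taosGetTimestamp(TSDB_TIME_PRECISION_MILLI);
    int64_t skey = taosTimeTruncate(ts, &itv, TSDB_TIME_PRECISION_MILLI);  // start of the window containing ts
    int64_t ekey = skey + itv.interval - 1;                                // closed end of that window
    int64_t next = taosTimeAdd(skey, itv.sliding, itv.slidingUnit, TSDB_TIME_PRECISION_MILLI);  // next window start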
@@ -321,7 +321,7 @@ int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec) {
 }


-static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* result) {
+static int32_t getDurationInUs(int64_t val, char unit, int64_t* result) {
   *result = val;

   int64_t factor = 1000L;
@@ -342,19 +342,12 @@ static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* resu
     case 'w':
       (*result) *= MILLISECOND_PER_WEEK*factor;
       break;
-    case 'n':
-      (*result) *= MILLISECOND_PER_MONTH*factor;
-      break;
-    case 'y':
-      (*result) *= MILLISECOND_PER_YEAR*factor;
-      break;
     case 'a':
       (*result) *= factor;
       break;
     case 'u':
       break;
     default: {
-      ;
       return -1;
     }
   }
@@ -373,7 +366,7 @@ static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* resu
 * n - Months (30 days)
 * y - Years (365 days)
 */
-int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts) {
+int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* duration) {
   errno = 0;
   char* endPtr = NULL;

@@ -383,10 +376,16 @@ int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts) {
     return -1;
   }

-  return getTimestampInUsFromStrImpl(timestamp, token[tokenlen - 1], ts);
+  /* natual month/year are not allowed in absolute duration */
+  char unit = token[tokenlen - 1];
+  if (unit == 'n' || unit == 'y') {
+    return -1;
+  }
+
+  return getDurationInUs(timestamp, unit, duration);
 }

-int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit) {
+int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit) {
   errno = 0;

   /* get the basic numeric value */
@@ -400,7 +399,121 @@ int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, ch
     return 0;
   }

-  return getTimestampInUsFromStrImpl(*duration, *unit, duration);
+  return getDurationInUs(*duration, *unit, duration);
+}
+
+int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) {
+  if (duration == 0) {
+    return t;
+  }
+  if (unit == 'y') {
+    duration *= 12;
+  } else if (unit != 'n') {
+    return t + duration;
+  }
+
+  struct tm tm;
+  time_t tt = (time_t)(t / TSDB_TICK_PER_SECOND(precision));
+  localtime_r(&tt, &tm);
+  int mon = tm.tm_year * 12 + tm.tm_mon + (int)duration;
+  tm.tm_year = mon / 12;
+  tm.tm_mon = mon % 12;
+
+  return (int64_t)(mktime(&tm) * TSDB_TICK_PER_SECOND(precision));
+}
+
+int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision) {
+  if (ekey < skey) {
+    int64_t tmp = ekey;
+    ekey = skey;
+    skey = tmp;
+  }
+  if (unit != 'n' && unit != 'y') {
+    return (int32_t)((ekey - skey) / interval);
+  }
+
+  skey /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
+  ekey /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
+
+  struct tm tm;
+  time_t t = (time_t)skey;
+  localtime_r(&t, &tm);
+  int smon = tm.tm_year * 12 + tm.tm_mon;
+
+  t = (time_t)ekey;
+  localtime_r(&t, &tm);
+  int emon = tm.tm_year * 12 + tm.tm_mon;
+
+  if (unit == 'y') {
+    interval *= 12;
+  }
+
+  return (emon - smon) / (int32_t)interval;
+}
+
+int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision) {
+  if (pInterval->sliding == 0) {
+    assert(pInterval->interval == 0);
+    return t;
+  }
+
+  int64_t start = t;
+  if (pInterval->slidingUnit == 'n' || pInterval->slidingUnit == 'y') {
+    start /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
+    struct tm tm;
+    time_t tt = (time_t)start;
+    localtime_r(&tt, &tm);
+    tm.tm_sec = 0;
+    tm.tm_min = 0;
+    tm.tm_hour = 0;
+    tm.tm_mday = 1;
+
+    if (pInterval->slidingUnit == 'y') {
+      tm.tm_mon = 0;
+      tm.tm_year = (int)(tm.tm_year / pInterval->sliding * pInterval->sliding);
+    } else {
+      int mon = tm.tm_year * 12 + tm.tm_mon;
+      mon = (int)(mon / pInterval->sliding * pInterval->sliding);
+      tm.tm_year = mon / 12;
+      tm.tm_mon = mon % 12;
+    }
+
+    start = (int64_t)(mktime(&tm) * TSDB_TICK_PER_SECOND(precision));
+  } else {
+    int64_t delta = t - pInterval->interval;
+    int32_t factor = delta > 0 ? 1 : -1;
+
+    start = (delta / pInterval->sliding + factor) * pInterval->sliding;
+
+    if (pInterval->intervalUnit == 'd' || pInterval->intervalUnit == 'w') {
+      /*
+       * here we revised the start time of day according to the local time zone,
+       * but in case of DST, the start time of one day need to be dynamically decided.
+       */
+      // todo refactor to extract function that is available for Linux/Windows/Mac platform
+#if defined(WINDOWS) && _MSC_VER >= 1900
+      // see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
+      int64_t timezone = _timezone;
+      int32_t daylight = _daylight;
+      char**  tzname = _tzname;
+#endif
+
+      start += (int64_t)(timezone * TSDB_TICK_PER_SECOND(precision));
+    }
+
+    int64_t end = start + pInterval->interval - 1;
+    if (end < t) {
+      start += pInterval->sliding;
+    }
+  }
+
+  if (pInterval->offset > 0) {
+    start = taosTimeAdd(start, pInterval->offset, pInterval->offsetUnit, precision);
+    if (start > t) {
+      start = taosTimeAdd(start, -pInterval->interval, pInterval->intervalUnit, precision);
+    }
+  }
+  return start;
 }
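As a sanity check on the natural-month branch of taosTimeCountInterval above, a worked example (dates illustrative, millisecond precision; results depend on the local time zone used by localtime_r):

    // skey falls in 2020-01, ekey falls in 2020-04:
    //   smon = 120 * 12 + 0 = 1440, emon = 120 * 12 + 3 = 1443   (tm_year counts from 1900)
    //   taosTimeCountInterval(skey, ekey, 1, 'n', TSDB_TIME_PRECISION_MILLI) -> (1443 - 1440) / 1 = 3
    //   with interval = 2 the same call returns 1
    // For any fixed-length unit the calendar is never consulted and the function
    // simply returns (ekey - skey) / interval.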

 // internal function, when program is paused in debugger,
@@ -411,24 +524,38 @@ int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, ch
 // 2020-07-03 17:48:42
 // and the parameter can also be a variable.
 const char* fmtts(int64_t ts) {
-  static char buf[32];
+  static char buf[96];
+  size_t pos = 0;
+  struct tm tm;

-  time_t tt;
   if (ts > -62135625943 && ts < 32503651200) {
-    tt = ts;
-  } else if (ts > -62135625943000 && ts < 32503651200000) {
-    tt = ts / 1000;
-  } else {
-    tt = ts / 1000000;
+    time_t t = (time_t)ts;
+    localtime_r(&t, &tm);
+    pos += strftime(buf + pos, sizeof(buf), "s=%Y-%m-%d %H:%M:%S", &tm);
   }

-  struct tm* ptm = localtime(&tt);
-  size_t pos = strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", ptm);
+  if (ts > -62135625943000 && ts < 32503651200000) {
+    time_t t = (time_t)(ts / 1000);
+    localtime_r(&t, &tm);
+    if (pos > 0) {
+      buf[pos++] = ' ';
+      buf[pos++] = '|';
+      buf[pos++] = ' ';
+    }
+    pos += strftime(buf + pos, sizeof(buf), "ms=%Y-%m-%d %H:%M:%S", &tm);
+    pos += sprintf(buf + pos, ".%03d", (int)(ts % 1000));
+  }

-  if (ts <= -62135625943000 || ts >= 32503651200000) {
-    sprintf(buf + pos, ".%06d", (int)(ts % 1000000));
-  } else if (ts <= -62135625943 || ts >= 32503651200) {
-    sprintf(buf + pos, ".%03d", (int)(ts % 1000));
+  {
+    time_t t = (time_t)(ts / 1000000);
+    localtime_r(&t, &tm);
+    if (pos > 0) {
+      buf[pos++] = ' ';
+      buf[pos++] = '|';
+      buf[pos++] = ' ';
+    }
+    pos += strftime(buf + pos, sizeof(buf), "us=%Y-%m-%d %H:%M:%S", &tm);
+    pos += sprintf(buf + pos, ".%06d", (int)(ts % 1000000));
   }

   return buf;

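For reference, the reworked fmtts now renders the same raw value at each plausible precision in one static buffer, roughly "s=YYYY-MM-DD hh:mm:ss | ms=... .sss | us=... .ssssss"; the s= and ms= sections are skipped when the value is outside their range, while the us= section is always produced. A typical debug call, as a sketch:

    printf("%s\n", fmtts(taosGetTimestampSec()));  // static buffer: intended for tracing, not thread-safe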
@@ -36,7 +36,7 @@
 #define HTTP_BUFFER_SIZE 8388608
 #define HTTP_STEP_SIZE 4096 //http message get process step by step
 #define HTTP_METHOD_SCANNER_SIZE 7 //http method fp size
-#define TSDB_CODE_HTTP_GC_TARGET_SIZE 512
+#define HTTP_GC_TARGET_SIZE 512
 #define HTTP_WRITE_RETRY_TIMES 500
 #define HTTP_WRITE_WAIT_TIME_MS 5
 #define HTTP_SESSION_ID_LEN (TSDB_USER_LEN + TSDB_PASSWORD_LEN)

@@ -33,6 +33,6 @@ int32_t httpCurSqlCmdPos(HttpContext *pContext);

 void httpTrimTableName(char *name);
 int32_t httpShrinkTableName(HttpContext *pContext, int32_t pos, char *name);
-char *httpGetCmdsString(HttpContext *pContext, int32_t pos);
+char * httpGetCmdsString(HttpContext *pContext, int32_t pos);

 #endif

@@ -131,8 +131,6 @@ HttpContext *httpCreateContext(int32_t fd) {
 HttpContext *httpGetContext(void *ptr) {
   uint64_t handleVal = (uint64_t)ptr;
   HttpContext **ppContext = taosCacheAcquireByKey(tsHttpServer.contextCache, &handleVal, sizeof(HttpContext *));
-  ASSERT(ppContext);
-  ASSERT(*ppContext);

   if (ppContext) {
     HttpContext *pContext = *ppContext;
@@ -232,4 +230,5 @@ void httpCloseContextByServer(HttpContext *pContext) {

   pContext->parsed = false;
   httpRemoveContextFromEpoll(pContext);
+  httpReleaseContext(pContext, true);
 }

@@ -228,7 +228,7 @@ bool gcProcessQueryRequest(HttpContext* pContext) {
     cmd->values = refIdBuffer;
     cmd->table = aliasBuffer;
     cmd->numOfRows = 0; // hack way as target flags
-    cmd->timestamp = httpAddToSqlCmdBufferWithSize(pContext, TSDB_CODE_HTTP_GC_TARGET_SIZE + 1); // hack way
+    cmd->timestamp = httpAddToSqlCmdBufferWithSize(pContext, HTTP_GC_TARGET_SIZE + 1); // hack way

     if (cmd->timestamp == -1) {
       httpWarn("context:%p, fd:%d, user:%s, cant't malloc target size, sql buffer is full", pContext, pContext->fd,

@@ -129,48 +129,48 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,

   // for group by
   if (groupFields != -1) {
-    char target[TSDB_CODE_HTTP_GC_TARGET_SIZE] = {0};
+    char target[HTTP_GC_TARGET_SIZE] = {0};
     int32_t len;
-    len = snprintf(target,TSDB_CODE_HTTP_GC_TARGET_SIZE,"%s{",aliasBuffer);
+    len = snprintf(target,HTTP_GC_TARGET_SIZE,"%s{",aliasBuffer);
     for (int32_t i = dataFields + 1; i<num_fields; i++){
       switch (fields[i].type) {
         case TSDB_DATA_TYPE_BOOL:
         case TSDB_DATA_TYPE_TINYINT:
-          len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, "%s:%d", fields[i].name, *((int8_t *)row[i]));
+          len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%d", fields[i].name, *((int8_t *)row[i]));
           break;
         case TSDB_DATA_TYPE_SMALLINT:
-          len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, "%s:%d", fields[i].name, *((int16_t *)row[i]));
+          len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%d", fields[i].name, *((int16_t *)row[i]));
           break;
         case TSDB_DATA_TYPE_INT:
-          len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, "%s:%d,", fields[i].name, *((int32_t *)row[i]));
+          len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%d,", fields[i].name, *((int32_t *)row[i]));
           break;
         case TSDB_DATA_TYPE_BIGINT:
-          len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, "%s:%ld", fields[i].name, *((int64_t *)row[i]));
+          len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%ld", fields[i].name, *((int64_t *)row[i]));
           break;
         case TSDB_DATA_TYPE_FLOAT:
-          len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, "%s:%.5f", fields[i].name, *((float *)row[i]));
+          len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%.5f", fields[i].name, *((float *)row[i]));
           break;
         case TSDB_DATA_TYPE_DOUBLE:
-          len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, "%s:%.9f", fields[i].name, *((double *)row[i]));
+          len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%.9f", fields[i].name, *((double *)row[i]));
           break;
         case TSDB_DATA_TYPE_BINARY:
         case TSDB_DATA_TYPE_NCHAR:
           if (row[i]!= NULL){
-            len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, "%s:", fields[i].name);
+            len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:", fields[i].name);
             memcpy(target + len, (char *) row[i], length[i]);
             len = strlen(target);
           }
           break;
         default:
-          len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, "%s:%s", fields[i].name, "-");
+          len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%s", fields[i].name, "-");
           break;
       }
       if(i < num_fields - 1 ){
-        len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, ", ");
+        len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, ", ");
       }

     }
-    len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, "}");
+    len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "}");

     if (strcmp(target, targetBuffer) != 0) {
       // first target not write this section
@@ -180,7 +180,7 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,

       // start new target
       gcWriteTargetStartJson(jsonBuf, refIdBuffer, target);
-      strncpy(targetBuffer, target, TSDB_CODE_HTTP_GC_TARGET_SIZE);
+      strncpy(targetBuffer, target, HTTP_GC_TARGET_SIZE);
     }
   } // end of group by

@@ -238,6 +238,7 @@ static int32_t httpOnParseHeaderField(HttpParser *parser, const char *key, const
     httpTrace("context:%p, fd:%d, keepAlive:%d", pContext, pContext->fd, pContext->parser->keepAlive);
   }

+#if 0
   else if (0 == strcasecmp(key, "Content-Encoding")) {
     if (0 == strcmp(val, "gzip")) {
       parser->contentChunked = 1;
@@ -245,8 +246,9 @@ static int32_t httpOnParseHeaderField(HttpParser *parser, const char *key, const
     }
     return 0;
   }
+#endif

-  else if (0 == strcasecmp(key, "Transfer-Encoding")) {
+  else if (0 == strcasecmp(key, "Transfer-Encoding") || 0 == strcasecmp(key, "Content-Encoding")) {
     if (strstr(val, "gzip")) {
       parser->transferGzip = 1;
       ehttp_gzip_conf_t conf = {0};

@@ -87,15 +87,12 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
   JsonBuf *jsonBuf = httpMallocJsonBuf(pContext);
   if (jsonBuf == NULL) return false;

-  cmd->numOfRows += numOfRows;

   int32_t num_fields = taos_num_fields(result);
   TAOS_FIELD *fields = taos_fetch_fields(result);

   for (int32_t k = 0; k < numOfRows; ++k) {
     TAOS_ROW row = taos_fetch_row(result);
     if (row == NULL) {
-      cmd->numOfRows--;
       continue;
     }
     int32_t* length = taos_fetch_lengths(result);
@@ -152,23 +149,22 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,

     // data row array end
     httpJsonToken(jsonBuf, JsonArrEnd);
+    cmd->numOfRows ++;

+    if (pContext->fd <= 0) {
+      httpError("context:%p, fd:%d, user:%s, conn closed, abort retrieve", pContext, pContext->fd, pContext->user);
+      return false;
     }

     if (cmd->numOfRows >= tsRestRowLimit) {
       httpDebug("context:%p, fd:%d, user:%s, retrieve rows:%d larger than limit:%d, abort retrieve", pContext,
                 pContext->fd, pContext->user, cmd->numOfRows, tsRestRowLimit);
       return false;
-    } else {
-      if (pContext->fd <= 0) {
-        httpError("context:%p, fd:%d, user:%s, connection is closed, abort retrieve", pContext, pContext->fd,
-                  pContext->user);
-        return false;
-      } else {
-        httpDebug("context:%p, fd:%d, user:%s, total rows:%d retrieved", pContext, pContext->fd, pContext->user,
-                  cmd->numOfRows);
+    }
+  }
+
+  httpDebug("context:%p, fd:%d, user:%s, retrieved row:%d", pContext, pContext->fd, pContext->user, cmd->numOfRows);
+
   return true;
-      }
-    }
 }

 bool restBuildSqlTimestampJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int32_t numOfRows) {

|
||||||
httpInitParser(pParser);
|
httpInitParser(pParser);
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT(!pParser->parsed);
|
if (pParser->parsed) {
|
||||||
|
httpDebug("context:%p, fd:%d, not in ready state, parsed:%d", pContext, pContext->fd, pParser->parsed);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
pContext->accessTimes++;
|
pContext->accessTimes++;
|
||||||
pContext->lastAccessTime = taosGetTimestampSec();
|
pContext->lastAccessTime = taosGetTimestampSec();
|
||||||
|
|
||||||
char buf[HTTP_STEP_SIZE + 1] = {0};
|
char buf[HTTP_STEP_SIZE + 1] = {0};
|
||||||
int32_t nread = (int32_t)taosReadSocket(pContext->fd, buf, sizeof(buf));
|
|
||||||
|
while (1) {
|
||||||
|
int32_t nread = (int32_t)taosReadSocket(pContext->fd, buf, HTTP_STEP_SIZE);
|
||||||
if (nread > 0) {
|
if (nread > 0) {
|
||||||
buf[nread] = '\0';
|
buf[nread] = '\0';
|
||||||
httpTrace("context:%p, fd:%d, nread:%d", pContext, pContext->fd, nread);
|
httpTraceL("context:%p, fd:%d, nread:%d content:%s", pContext, pContext->fd, nread, buf);
|
||||||
int32_t ok = httpParseBuf(pParser, buf, nread);
|
int32_t ok = httpParseBuf(pParser, buf, nread);
|
||||||
|
|
||||||
if (ok) {
|
if (ok) {
|
||||||
httpError("context:%p, fd:%d, parse failed, ret:%d code:%d close connect", pContext, pContext->fd, ok, pParser->parseCode);
|
httpError("context:%p, fd:%d, parse failed, ret:%d code:%d close connect", pContext, pContext->fd, ok,
|
||||||
|
pParser->parseCode);
|
||||||
httpSendErrorResp(pContext, pParser->parseCode);
|
httpSendErrorResp(pContext, pParser->parseCode);
|
||||||
httpNotifyContextClose(pContext);
|
httpNotifyContextClose(pContext);
|
||||||
return false;
|
return false;
|
||||||
|
@ -334,10 +340,10 @@ static bool httpReadData(HttpContext *pContext) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!pParser->parsed) {
|
if (!pParser->parsed) {
|
||||||
httpTrace("context:%p, fd:%d, read not over yet, len:%d", pContext, pContext->fd, pParser->body.pos);
|
httpTrace("context:%p, fd:%d, read not finished", pContext, pContext->fd);
|
||||||
return false;
|
continue;
|
||||||
} else {
|
} else {
|
||||||
httpTraceL("context:%p, fd:%d, len:%d, body:%s", pContext, pContext->fd, pParser->body.pos, pParser->body.str);
|
httpDebug("context:%p, fd:%d, bodyLen:%d", pContext, pContext->fd, pParser->body.pos);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
} else if (nread < 0) {
|
} else if (nread < 0) {
|
||||||
|
@ -352,4 +358,5 @@ static bool httpReadData(HttpContext *pContext) {
|
||||||
httpError("context:%p, fd:%d, nread:%d, wait another event", pContext, pContext->fd, nread);
|
httpError("context:%p, fd:%d, nread:%d, wait another event", pContext, pContext->fd, nread);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@@ -51,10 +51,6 @@ void httpProcessMultiSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int n
     }
   }

-  // if (tscResultsetFetchCompleted(result)) {
-  //   isContinue = false;
-  // }

   if (isContinue) {
     // retrieve next batch of rows
     httpDebug("context:%p, fd:%d, user:%s, process pos:%d, continue retrieve, numOfRows:%d, sql:%s", pContext,
@@ -224,14 +220,6 @@ void httpProcessSingleSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int
     }
   }

-#if 0
-  // todo refactor
-  if (tscResultsetFetchCompleted(result)) {
-    httpDebug("context:%p, fd:%d, user:%s, resultset fetch completed", pContext, pContext->fd, pContext->user);
-    isContinue = false;
-  }
-#endif

   if (isContinue) {
     // retrieve next batch of rows
     httpDebug("context:%p, fd:%d, user:%s, continue retrieve, numOfRows:%d", pContext, pContext->fd, pContext->user,

@@ -32,6 +32,7 @@ struct tExprNode;
 struct SSchema;

 enum {
+  TSQL_NODE_DUMMY = 0x0,
   TSQL_NODE_EXPR = 0x1,
   TSQL_NODE_COL = 0x2,
   TSQL_NODE_VALUE = 0x4,

@@ -57,7 +57,7 @@ typedef struct SWindowResult {
   uint16_t numOfRows; // number of rows of current time window
   bool closed; // this result status: closed or opened
   SResultInfo* resultInfo; // For each result column, there is a resultInfo
-  TSKEY skey; // start key of current time window
+  union {STimeWindow win; char* key;}; // start key of current time window
 } SWindowResult;

 /**
@@ -132,12 +132,9 @@ typedef struct SQueryCostInfo {
 typedef struct SQuery {
   int16_t numOfCols;
   int16_t numOfTags;
-  char intervalTimeUnit;
-  char slidingTimeUnit; // interval data type, used for daytime revise
   SOrderVal order;
   STimeWindow window;
-  int64_t intervalTime;
-  int64_t slidingTime; // sliding time for sliding window query
+  SInterval interval;
   int16_t precision;
   int16_t numOfOutput;
   int16_t fillType;

@@ -51,12 +51,11 @@ typedef struct SFillInfo {
   int32_t rowSize; // size of each row
   // char ** pTags; // tags value for current interpolation
   SFillTagColInfo* pTags; // tags value for filling gap
-  int64_t slidingTime; // sliding value to determine the number of result for a given time window
+  SInterval interval;
   char * prevValues; // previous row of data, to generate the interpolation results
   char * nextValues; // next row of data
   char** pData; // original result data block involved in filling data
   int32_t capacityInRows; // data buffer size in rows
-  int8_t slidingUnit; // sliding time unit
   int8_t precision; // time resoluation
   SFillColInfo* pFillCol; // column info for fill operations
 } SFillInfo;

@@ -64,11 +64,11 @@ typedef struct tMemBucket {
   __perc_hash_func_t hashFunc;
 } tMemBucket;

-tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType);
+tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, double maxval);

 void tMemBucketDestroy(tMemBucket *pBucket);

-void tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size);
+int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size);

 double getPercentile(tMemBucket *pMemBucket, double percent);

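A usage sketch against the revised signatures; the value range passed to tMemBucketCreate, the element-count meaning of size, and the 0-100 percent scale are assumptions for illustration, not something this diff states:

    double vals[4] = {1.0, 5.0, 9.0, 20.0};
    tMemBucket *pBucket = tMemBucketCreate(sizeof(double), TSDB_DATA_TYPE_DOUBLE, 1.0, 20.0);  // min/max of the data (assumed)
    if (tMemBucketPut(pBucket, vals, 4) != 0) {   // now returns a code instead of void; nonzero assumed to mean failure
      tMemBucketDestroy(pBucket);
      // handle the error
    } else {
      double p90 = getPercentile(pBucket, 90);    // percent assumed to be on a 0-100 scale
      tMemBucketDestroy(pBucket);
    }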
@@ -77,8 +77,7 @@ typedef struct SDiskbasedResultBuf {
   SResultBufStatis statis;
 } SDiskbasedResultBuf;

-#define DEFAULT_INTERN_BUF_PAGE_SIZE (4096L)
-#define DEFAULT_INMEM_BUF_PAGES 10
+#define DEFAULT_INTERN_BUF_PAGE_SIZE (256L) // in bytes
 #define PAGE_INFO_INITIALIZER (SPageDiskInfo){-1, -1}

 /**

@@ -65,6 +65,11 @@ typedef struct tVariantList {
   tVariantListItem *a; /* One entry for each expression */
 } tVariantList;

+typedef struct SIntervalVal {
+  SStrToken interval;
+  SStrToken offset;
+} SIntervalVal;
+
 typedef struct SQuerySQL {
   struct tSQLExprList *pSelection; // select clause
   tVariantList * from; // from clause
@@ -72,6 +77,7 @@ typedef struct SQuerySQL {
   tVariantList * pGroupby; // groupby clause, only for tags[optional]
   tVariantList * pSortOrder; // orderby [optional]
   SStrToken interval; // interval [optional]
+  SStrToken offset; // offset window [optional]
   SStrToken sliding; // sliding window [optional]
   SLimitVal limit; // limit offset [optional]
   SLimitVal slimit; // group limit offset [optional]
@@ -259,7 +265,7 @@ tSQLExprList *tSQLExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SStrToken
 void tSQLExprListDestroy(tSQLExprList *pList);

 SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, tVariantList *pFrom, tSQLExpr *pWhere,
-                             tVariantList *pGroupby, tVariantList *pSortOrder, SStrToken *pInterval,
+                             tVariantList *pGroupby, tVariantList *pSortOrder, SIntervalVal *pInterval,
                              SStrToken *pSliding, tVariantList *pFill, SLimitVal *pLimit, SLimitVal *pGLimit);

 SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SStrToken *pMetricName,

@@ -39,7 +39,6 @@ static FORCE_INLINE SWindowResult *getWindowResult(SWindowResInfo *pWindowResInf
 }

 #define curTimeWindowIndex(_winres) ((_winres)->curIndex)
-#define GET_TIMEWINDOW(_winresInfo, _win) (STimeWindow) {(_win)->skey, ((_win)->skey + (_winresInfo)->interval - 1)}
 #define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!sq))? (_q)->pSelectExpr[1].base.arg->argValue.i64:1)

 bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot);
@@ -51,13 +50,15 @@ static FORCE_INLINE char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int3
   assert(pResult != NULL && pRuntimeEnv != NULL);

   SQuery *pQuery = pRuntimeEnv->pQuery;
-  // tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pResult->pos.pageId);

   int32_t realRowId = (int32_t)(pResult->pos.rowId * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery));
   return ((char *)page->data) + pRuntimeEnv->offset[columnIndex] * pRuntimeEnv->numOfRowsPerPage +
          pQuery->pSelectExpr[columnIndex].bytes * realRowId;
 }

+bool isNull_filter(SColumnFilterElem *pFilter, char* minval, char* maxval);
+bool notNull_filter(SColumnFilterElem *pFilter, char* minval, char* maxval);
+
 __filter_func_t *getRangeFilterFuncArray(int32_t type);
 __filter_func_t *getValueFilterFuncArray(int32_t type);

@@ -458,9 +458,10 @@ tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z) ids(F). {
 %type tmvar {SStrToken}
 tmvar(A) ::= VARIABLE(X). {A = X;}

-%type interval_opt {SStrToken}
-interval_opt(N) ::= INTERVAL LP tmvar(E) RP. {N = E; }
-interval_opt(N) ::= . {N.n = 0; N.z = NULL; N.type = 0; }
+%type interval_opt {SIntervalVal}
+interval_opt(N) ::= INTERVAL LP tmvar(E) RP. {N.interval = E; N.offset.n = 0; N.offset.z = NULL; N.offset.type = 0;}
+interval_opt(N) ::= INTERVAL LP tmvar(E) COMMA tmvar(O) RP. {N.interval = E; N.offset = O;}
+interval_opt(N) ::= . {memset(&N, 0, sizeof(N));}

 %type fill_opt {tVariantList*}
 %destructor fill_opt {tVariantListDestroy($$);}
@@ -583,21 +584,18 @@ expr(A) ::= STRING(X). {A = tSQLExprIdValueCreate(&X, TK_STRING);}
 expr(A) ::= NOW(X). {A = tSQLExprIdValueCreate(&X, TK_NOW); }
 expr(A) ::= VARIABLE(X). {A = tSQLExprIdValueCreate(&X, TK_VARIABLE);}
 expr(A) ::= BOOL(X). {A = tSQLExprIdValueCreate(&X, TK_BOOL);}
-// normal functions: min(x)
-expr(A) ::= ID(X) LP exprlist(Y) RP(E). {
-  A = tSQLExprCreateFunction(Y, &X, &E, X.type);
-}

-// this is for: count(*)/first(*)/last(*) operation
-expr(A) ::= ID(X) LP STAR RP(Y). {
-  A = tSQLExprCreateFunction(NULL, &X, &Y, X.type);
-}
+// ordinary functions: min(x), max(x), top(k, 20)
+expr(A) ::= ID(X) LP exprlist(Y) RP(E). { A = tSQLExprCreateFunction(Y, &X, &E, X.type); }

-//binary expression: a+2, b+3
-expr(A) ::= expr(X) AND expr(Y). {A = tSQLExprCreate(X, Y, TK_AND);}
-expr(A) ::= expr(X) OR expr(Y). {A = tSQLExprCreate(X, Y, TK_OR); }
+// for parsing sql functions with wildcard for parameters. e.g., count(*)/first(*)/last(*) operation
+expr(A) ::= ID(X) LP STAR RP(Y). { A = tSQLExprCreateFunction(NULL, &X, &Y, X.type); }

-//binary relational expression
+// is (not) null expression
+expr(A) ::= expr(X) IS NULL. {A = tSQLExprCreate(X, NULL, TK_ISNULL);}
+expr(A) ::= expr(X) IS NOT NULL. {A = tSQLExprCreate(X, NULL, TK_NOTNULL);}
+
+// relational expression
 expr(A) ::= expr(X) LT expr(Y). {A = tSQLExprCreate(X, Y, TK_LT);}
 expr(A) ::= expr(X) GT expr(Y). {A = tSQLExprCreate(X, Y, TK_GT);}
 expr(A) ::= expr(X) LE expr(Y). {A = tSQLExprCreate(X, Y, TK_LE);}
@@ -605,14 +603,17 @@ expr(A) ::= expr(X) GE expr(Y). {A = tSQLExprCreate(X, Y, TK_GE);}
 expr(A) ::= expr(X) NE expr(Y). {A = tSQLExprCreate(X, Y, TK_NE);}
 expr(A) ::= expr(X) EQ expr(Y). {A = tSQLExprCreate(X, Y, TK_EQ);}

-//binary arithmetic expression
+expr(A) ::= expr(X) AND expr(Y). {A = tSQLExprCreate(X, Y, TK_AND);}
+expr(A) ::= expr(X) OR expr(Y). {A = tSQLExprCreate(X, Y, TK_OR); }
+
+// binary arithmetic expression
 expr(A) ::= expr(X) PLUS expr(Y). {A = tSQLExprCreate(X, Y, TK_PLUS); }
 expr(A) ::= expr(X) MINUS expr(Y). {A = tSQLExprCreate(X, Y, TK_MINUS); }
 expr(A) ::= expr(X) STAR expr(Y). {A = tSQLExprCreate(X, Y, TK_STAR); }
 expr(A) ::= expr(X) SLASH expr(Y). {A = tSQLExprCreate(X, Y, TK_DIVIDE);}
 expr(A) ::= expr(X) REM expr(Y). {A = tSQLExprCreate(X, Y, TK_REM); }

-//like expression
+// like expression
 expr(A) ::= expr(X) LIKE expr(Y). {A = tSQLExprCreate(X, Y, TK_LIKE); }

 //in expression

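Taken together with the SIntervalVal and TK_ISNULL/TK_NOTNULL additions, the revised rules let a clause such as interval(1d, 2h) parse, with the 1d token landing in N.interval and the 2h token in N.offset, and they turn predicates written as col IS NULL or col IS NOT NULL into TK_ISNULL / TK_NOTNULL expression nodes that map onto the new TSDB_RELATION_ISNULL and TSDB_RELATION_NOTNULL operators defined earlier in this change.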
@@ -108,7 +108,7 @@ extern "C" {

 #define GET_FORWARD_DIRECTION_FACTOR(ord) (((ord) == TSDB_ORDER_ASC) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP)

-#define MAX_INTERVAL_TIME_WINDOW 10000000
+#define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results
 #define TOP_BOTTOM_QUERY_LIMIT 100

 enum {

@@ -188,6 +188,10 @@ uint8_t getBinaryExprOptr(SStrToken *pToken) {
       return TSDB_BINARY_OP_REMAINDER;
     case TK_LIKE:
       return TSDB_RELATION_LIKE;
+    case TK_ISNULL:
+      return TSDB_RELATION_ISNULL;
+    case TK_NOTNULL:
+      return TSDB_RELATION_NOTNULL;
     default: { return 0; }
   }
 }
@@ -489,8 +493,8 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr
     bool comp = true;
     int32_t ret = 0;

-    while(tSkipListIterNext(iter)) {
-      SSkipListNode* pNode = tSkipListIterGet(iter);
+    while (tSkipListIterNext(iter)) {
+      SSkipListNode *pNode = tSkipListIterGet(iter);

       if (comp) {
         ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.end->v);
@@ -500,11 +504,24 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr
         if (ret == 0 && optr == TSDB_RELATION_LESS) {
           continue;
         } else {
-          STableKeyInfo info = {.pTable = *(void**)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
+          STableKeyInfo info = {.pTable = *(void **)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
           taosArrayPush(result, &info);
           comp = false; // no need to compare anymore
         }
       }
+    } else {
+      assert(pQueryInfo->optr == TSDB_RELATION_ISNULL || pQueryInfo->optr == TSDB_RELATION_NOTNULL);
+
+      while (tSkipListIterNext(iter)) {
+        SSkipListNode *pNode = tSkipListIterGet(iter);
+
+        bool isnull = isNull(SL_GET_NODE_KEY(pSkipList, pNode), pQueryInfo->sch.type);
+        if ((pQueryInfo->optr == TSDB_RELATION_ISNULL && isnull) ||
+            (pQueryInfo->optr == TSDB_RELATION_NOTNULL && (!isnull))) {
+          STableKeyInfo info = {.pTable = *(void **)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
+          taosArrayPush(result, &info);
+        }
+      }
     }
   }

@@ -683,6 +700,7 @@ static void tQueryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo,
     char * pData = SL_GET_NODE_DATA(pNode);

     tstr *name = (tstr*) tsdbGetTableName(*(void**) pData);

     // todo speed up by using hash
     if (pQueryInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) {
       if (pQueryInfo->optr == TSDB_RELATION_IN) {
@@ -714,7 +732,7 @@ void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, S

   // column project
   if (pLeft->nodeType != TSQL_NODE_EXPR && pRight->nodeType != TSQL_NODE_EXPR) {
-    assert(pLeft->nodeType == TSQL_NODE_COL && pRight->nodeType == TSQL_NODE_VALUE);
+    assert(pLeft->nodeType == TSQL_NODE_COL && (pRight->nodeType == TSQL_NODE_VALUE || pRight->nodeType == TSQL_NODE_DUMMY));

     param->setupInfoFn(pExpr, param->pExtInfo);
     if (pSkipList == NULL) {

@ -131,21 +131,21 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
|
||||||
static void setQueryStatus(SQuery *pQuery, int8_t status);
|
static void setQueryStatus(SQuery *pQuery, int8_t status);
|
||||||
static void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv);
|
static void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv);
|
||||||
|
|
||||||
#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->intervalTime > 0)
|
+#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0)

 static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) {
   int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
-  if (pQuery->intervalTimeUnit != 'n' && pQuery->intervalTimeUnit != 'y') {
-    tw->skey += pQuery->slidingTime * factor;
-    tw->ekey = tw->skey + pQuery->intervalTime - 1;
+  if (pQuery->interval.intervalUnit != 'n' && pQuery->interval.intervalUnit != 'y') {
+    tw->skey += pQuery->interval.sliding * factor;
+    tw->ekey = tw->skey + pQuery->interval.interval - 1;
     return;
   }

-  int64_t key = tw->skey / 1000, interval = pQuery->intervalTime;
+  int64_t key = tw->skey / 1000, interval = pQuery->interval.interval;
   if (pQuery->precision == TSDB_TIME_PRECISION_MICRO) {
     key /= 1000;
   }
-  if (pQuery->intervalTimeUnit == 'y') {
+  if (pQuery->interval.intervalUnit == 'y') {
     interval *= 12;
   }

@@ -187,7 +187,7 @@ static void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData,
                           SDataStatis *pStatis, void *param, int32_t colIndex);

 static void initCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv);
-static void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo);
+static void destroyTableQueryInfoImpl(STableQueryInfo *pTableQueryInfo);
 static void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv);
 static bool hasMainOutput(SQuery *pQuery);
 static void buildTagQueryResult(SQInfo *pQInfo);
@@ -200,14 +200,28 @@ bool doFilterData(SQuery *pQuery, int32_t elemPos) {
     SSingleColumnFilterInfo *pFilterInfo = &pQuery->pFilterInfo[k];

     char *pElem = (char*)pFilterInfo->pData + pFilterInfo->info.bytes * elemPos;
-    if (isNull(pElem, pFilterInfo->info.type)) {
-      return false;
-    }

     bool qualified = false;
     for (int32_t j = 0; j < pFilterInfo->numOfFilters; ++j) {
       SColumnFilterElem *pFilterElem = &pFilterInfo->pFilters[j];

+      bool isnull = isNull(pElem, pFilterInfo->info.type);
+      if (isnull) {
+        if (pFilterElem->fp == isNull_filter) {
+          qualified = true;
+          break;
+        } else {
+          continue;
+        }
+      } else {
+        if (pFilterElem->fp == notNull_filter) {
+          qualified = true;
+          break;
+        } else if (pFilterElem->fp == isNull_filter) {
+          continue;
+        }
+      }
+
       if (pFilterElem->fp(pFilterElem, pElem, pElem)) {
         qualified = true;
         break;
@@ -496,35 +510,35 @@ static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t t

   if (pWindowResInfo->curIndex == -1) { // the first window, from the previous stored value
     w.skey = pWindowResInfo->prevSKey;
-    if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
-      w.ekey = taosAddNatualInterval(w.skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
+    if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') {
+      w.ekey = taosTimeAdd(w.skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision);
     } else {
-      w.ekey = w.skey + pQuery->intervalTime - 1;
+      w.ekey = w.skey + pQuery->interval.interval - 1;
     }
   } else {
     int32_t slot = curTimeWindowIndex(pWindowResInfo);
     SWindowResult* pWindowRes = getWindowResult(pWindowResInfo, slot);
-    w = GET_TIMEWINDOW(pWindowResInfo, pWindowRes);
+    w = pWindowRes->win;
   }

   if (w.skey > ts || w.ekey < ts) {
-    if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
-      w.skey = taosGetIntervalStartTimestamp(ts, pQuery->slidingTime, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision);
-      w.ekey = taosAddNatualInterval(w.skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
+    if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') {
+      w.skey = taosTimeTruncate(ts, &pQuery->interval, pQuery->precision);
+      w.ekey = taosTimeAdd(w.skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1;
     } else {
       int64_t st = w.skey;

       if (st > ts) {
-        st -= ((st - ts + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
+        st -= ((st - ts + pQuery->interval.sliding - 1) / pQuery->interval.sliding) * pQuery->interval.sliding;
       }

-      int64_t et = st + pQuery->intervalTime - 1;
+      int64_t et = st + pQuery->interval.interval - 1;
       if (et < ts) {
-        st += ((ts - et + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
+        st += ((ts - et + pQuery->interval.sliding - 1) / pQuery->interval.sliding) * pQuery->interval.sliding;
       }

       w.skey = st;
-      w.ekey = w.skey + pQuery->intervalTime - 1;
+      w.ekey = w.skey + pQuery->interval.interval - 1;
     }
   }

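Illustrative note (not part of the diff): the sliding-window alignment above is easiest to check with concrete numbers. The following standalone C sketch re-derives just that arithmetic with plain int64_t parameters instead of the real SQuery/STimeWindow structs; the simplified types and values are assumptions for illustration only.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for STimeWindow; the real struct carries more context. */
typedef struct { int64_t skey, ekey; } TimeWindow;

/* Mirror of the 'else' branch above: shift skey backward/forward in whole
 * sliding steps until ts falls inside [skey, skey + interval - 1]. */
static TimeWindow alignWindow(int64_t skey, int64_t ts, int64_t interval, int64_t sliding) {
  int64_t st = skey;
  if (st > ts) {                      // window starts after ts: move it back
    st -= ((st - ts + sliding - 1) / sliding) * sliding;
  }
  int64_t et = st + interval - 1;
  if (et < ts) {                      // window ends before ts: move it forward
    st += ((ts - et + sliding - 1) / sliding) * sliding;
  }
  TimeWindow w = { .skey = st, .ekey = st + interval - 1 };
  assert(w.skey <= ts && ts <= w.ekey);
  return w;
}

int main(void) {
  // interval 10000 ms, sliding 5000 ms, previous window anchored at 100000
  TimeWindow w = alignWindow(100000, 123456, 10000, 5000);
  printf("[%lld, %lld]\n", (long long)w.skey, (long long)w.ekey);  // [115000, 124999]
  return 0;
}

With interval 10000 and sliding 5000, the window anchored at 100000 is pushed forward three sliding steps to [115000, 124999] so that ts = 123456 falls inside it, which is exactly what the ceiling division in the code computes.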
@@ -536,8 +550,6 @@ static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t t
     w.ekey = pQuery->window.ekey;
   }

-  assert(ts >= w.skey && ts <= w.ekey);
-
   return w;
 }

@@ -610,7 +622,7 @@ static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowRes
   }

   // set time window for current result
-  pWindowRes->skey = win->skey;
+  pWindowRes->win = (*win);

   setWindowResOutputBufInitCtx(pRuntimeEnv, pWindowRes);
   return TSDB_CODE_SUCCESS;
@@ -683,12 +695,12 @@ static int32_t doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKe
       continue;
     }

-    TSKEY ekey = pResult->skey + pWindowResInfo->interval;
+    TSKEY ekey = pResult->win.ekey;
     if ((ekey <= lastKey && QUERY_IS_ASC_QUERY(pQuery)) ||
-        (pResult->skey >= lastKey && !QUERY_IS_ASC_QUERY(pQuery))) {
+        (pResult->win.skey >= lastKey && !QUERY_IS_ASC_QUERY(pQuery))) {
       closeTimeWindow(pWindowResInfo, i);
     } else {
-      skey = pResult->skey;
+      skey = pResult->win.skey;
       break;
     }
   }
@@ -701,7 +713,7 @@ static int32_t doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKe
     pWindowResInfo->curIndex = i;
   }

-  pWindowResInfo->prevSKey = pWindowResInfo->pResult[pWindowResInfo->curIndex].skey;
+  pWindowResInfo->prevSKey = pWindowResInfo->pResult[pWindowResInfo->curIndex].win.skey;

   // the number of completed slots are larger than the threshold, return current generated results to client.
   if (numOfClosed > pWindowResInfo->threshold) {
@@ -765,31 +777,36 @@ static int32_t getNumOfRowsInTimeWindow(SQuery *pQuery, SDataBlockInfo *pDataBlo
   return num;
 }

-static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, bool closed, STimeWindow *pWin,
-                                      int32_t offset, int32_t forwardStep, TSKEY *tsBuf, int32_t numOfTotal) {
+static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, bool closed, STimeWindow *pWin, int32_t offset,
+                                      int32_t forwardStep, TSKEY *tsCol, int32_t numOfTotal) {
   SQuery *        pQuery = pRuntimeEnv->pQuery;
   SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx;

+  bool hasPrev = pCtx[0].preAggVals.isSet;

   if (IS_MASTER_SCAN(pRuntimeEnv) || closed) {
     for (int32_t k = 0; k < pQuery->numOfOutput; ++k) {
-      int32_t functionId = pQuery->pSelectExpr[k].base.functionId;
-
       pCtx[k].nStartQueryTimestamp = pWin->skey;
       pCtx[k].size = forwardStep;
       pCtx[k].startOffset = (QUERY_IS_ASC_QUERY(pQuery)) ? offset : offset - (forwardStep - 1);

+      int32_t functionId = pQuery->pSelectExpr[k].base.functionId;
       if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) {
-        pCtx[k].ptsList = &tsBuf[offset];
+        pCtx[k].ptsList = &tsCol[pCtx[k].startOffset];
       }

       // not a whole block involved in query processing, statistics data can not be used
-      if (forwardStep != numOfTotal) {
+      // NOTE: the original value of isSet have been changed here
+      if (pCtx[k].preAggVals.isSet && forwardStep < numOfTotal) {
         pCtx[k].preAggVals.isSet = false;
       }

       if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
         aAggs[functionId].xFunction(&pCtx[k]);
       }

+      // restore it
+      pCtx[k].preAggVals.isSet = hasPrev;
     }
   }
 }
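Illustrative note (not part of the diff): the hunk above snapshots pCtx[0].preAggVals.isSet before the per-column loop and restores it after each function runs, so clearing the flag for a partially covered block does not leak into later blocks. A minimal, self-contained sketch of that save/restore pattern, using a made-up stand-in for SQLFunctionCtx:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for SQLFunctionCtx.preAggVals; the field name is the only thing
 * borrowed from the real code, everything else here is illustrative. */
typedef struct { bool isSet; } PreAggVals;

static void applyOnBlock(PreAggVals *ctx, int forwardStep, int numOfTotal) {
  bool hasPrev = ctx->isSet;                    // remember the original state
  if (ctx->isSet && forwardStep < numOfTotal) {
    ctx->isSet = false;                         // partial block: stats unusable
  }
  // ... the aggregate function would run here and may read ctx->isSet ...
  ctx->isSet = hasPrev;                         // restore for the next window
}

int main(void) {
  PreAggVals ctx = { .isSet = true };
  applyOnBlock(&ctx, 50, 100);                  // only half the block qualifies
  printf("isSet after call: %d\n", ctx.isSet);  // prints 1: the flag came back
  return 0;
}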
@@ -837,7 +854,7 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow

   int32_t startPos = 0;
   // tumbling time window query, a special case of sliding time window query
-  if (pQuery->slidingTime == pQuery->intervalTime && prevPosition != -1) {
+  if (pQuery->interval.sliding == pQuery->interval.interval && prevPosition != -1) {
     int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
     startPos = prevPosition + factor;
   } else {
@@ -850,21 +867,21 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow
      */
     if (QUERY_IS_ASC_QUERY(pQuery) && primaryKeys[startPos] > pNext->ekey) {
       TSKEY next = primaryKeys[startPos];
-      if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
-        pNext->skey = taosGetIntervalStartTimestamp(next, pQuery->slidingTime, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision);
-        pNext->ekey = taosAddNatualInterval(pNext->skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
+      if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') {
+        pNext->skey = taosTimeTruncate(next, &pQuery->interval, pQuery->precision);
+        pNext->ekey = taosTimeAdd(pNext->skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1;
       } else {
-        pNext->ekey += ((next - pNext->ekey + pQuery->slidingTime - 1)/pQuery->slidingTime) * pQuery->slidingTime;
-        pNext->skey = pNext->ekey - pQuery->intervalTime + 1;
+        pNext->ekey += ((next - pNext->ekey + pQuery->interval.sliding - 1)/pQuery->interval.sliding) * pQuery->interval.sliding;
+        pNext->skey = pNext->ekey - pQuery->interval.interval + 1;
       }
     } else if ((!QUERY_IS_ASC_QUERY(pQuery)) && primaryKeys[startPos] < pNext->skey) {
       TSKEY next = primaryKeys[startPos];
-      if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
-        pNext->skey = taosGetIntervalStartTimestamp(next, pQuery->slidingTime, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision);
-        pNext->ekey = taosAddNatualInterval(pNext->skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
+      if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') {
+        pNext->skey = taosTimeTruncate(next, &pQuery->interval, pQuery->precision);
+        pNext->ekey = taosTimeAdd(pNext->skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1;
       } else {
-        pNext->skey -= ((pNext->skey - next + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
-        pNext->ekey = pNext->skey + pQuery->intervalTime - 1;
+        pNext->skey -= ((pNext->skey - next + pQuery->interval.sliding - 1) / pQuery->interval.sliding) * pQuery->interval.sliding;
+        pNext->ekey = pNext->skey + pQuery->interval.interval - 1;
       }
     }

@@ -910,19 +927,11 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas

   char *dataBlock = NULL;
   SQuery *pQuery = pRuntimeEnv->pQuery;
-  SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx;

   int32_t functionId = pQuery->pSelectExpr[col].base.functionId;
   if (functionId == TSDB_FUNC_ARITHM) {
     sas->pArithExpr = &pQuery->pSelectExpr[col];

-    // set the start offset to be the lowest start position, no matter asc/desc query order
-    if (QUERY_IS_ASC_QUERY(pQuery)) {
-      pCtx->startOffset = pQuery->pos;
-    } else {
-      pCtx->startOffset = pQuery->pos - (size - 1);
-    }
-
     sas->offset = 0;
     sas->colList = pQuery->colList;
     sas->numOfCols = pQuery->numOfCols;
@@ -1002,7 +1011,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
   }

   int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
-  if (QUERY_IS_INTERVAL_QUERY(pQuery)/* && tsCols != NULL*/) {
+  if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
     TSKEY ts = TSKEY_INITIAL_VAL;

     if (tsCols == NULL) {
@@ -1094,8 +1103,25 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pDat

   SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf;

-  int64_t v = -1;
   // not assign result buffer yet, add new result buffer
+  char* d = pData;
+  int16_t len = bytes;
+  if (type == TSDB_DATA_TYPE_BINARY||type == TSDB_DATA_TYPE_NCHAR) {
+    d = varDataVal(pData);
+    len = varDataLen(pData);
+  } else if (type == TSDB_DATA_TYPE_FLOAT || type == TSDB_DATA_TYPE_DOUBLE) {
+    SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv);
+    qError("QInfo:%p group by not supported on double/float/binary/nchar columns, abort", pQInfo);
+
+    longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
+  }
+
+  SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, d, len, true);
+  if (pWindowRes == NULL) {
+    return -1;
+  }
+
+  int64_t v = -1;
   switch(type) {
     case TSDB_DATA_TYPE_BOOL:
     case TSDB_DATA_TYPE_TINYINT: v = GET_INT8_VAL(pData); break;
@@ -1104,12 +1130,14 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pDat
     case TSDB_DATA_TYPE_BIGINT: v = GET_INT64_VAL(pData); break;
   }

-  SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, pData, bytes, true);
-  if (pWindowRes == NULL) {
-    return -1;
+  if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
+    pWindowRes->key = malloc(varDataTLen(pData));
+    varDataCopy(pWindowRes->key, pData);
+  } else {
+    pWindowRes->win.skey = v;
+    pWindowRes->win.ekey = v;
   }

-  pWindowRes->skey = v;
   assert(pRuntimeEnv->windowResInfo.interval == 0);

   if (pWindowRes->pos.pageId == -1) {
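Illustrative note (not part of the diff): for BINARY/NCHAR group-by keys the new code hashes on the var-data payload (varDataVal/varDataLen) and keeps a private copy of the whole cell (varDataTLen bytes). The sketch below re-derives those macros from their usage in the hunk above; the 2-byte length header is an assumption and may not match the real headers exactly.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Assumed var-data layout: a 2-byte length header followed by the payload. */
#define VARSTR_HEADER_SIZE  sizeof(uint16_t)
#define varDataLen(v)       (*(uint16_t *)(v))
#define varDataVal(v)       ((char *)(v) + VARSTR_HEADER_SIZE)
#define varDataTLen(v)      (VARSTR_HEADER_SIZE + varDataLen(v))

int main(void) {
  // Build a var-data cell holding the string "beijing"
  const char *s = "beijing";
  uint16_t len = (uint16_t)strlen(s);
  char cell[64];
  memcpy(cell, &len, VARSTR_HEADER_SIZE);
  memcpy(cell + VARSTR_HEADER_SIZE, s, len);

  // What the hunk does for BINARY/NCHAR keys: hash on (payload, length)
  // and keep a private copy of the full cell for the window result.
  char *d = varDataVal(cell);
  uint16_t l = varDataLen(cell);
  char *key = malloc(varDataTLen(cell));
  memcpy(key, cell, varDataTLen(cell));   // same effect as varDataCopy()

  printf("group key: %.*s (%u bytes)\n", l, d, (unsigned)l);
  free(key);
  return 0;
}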
@@ -1180,7 +1208,7 @@ static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, int32_t offset) {

 #if defined(_DEBUG_VIEW)
   printf("elem in comp ts file:%" PRId64 ", key:%" PRId64 ", tag:%"PRIu64", query order:%d, ts order:%d, traverse:%d, index:%d\n",
-         elem.ts, key, elem.tag, pQuery->order.order, pRuntimeEnv->pTSBuf->tsOrder,
+         elem.ts, key, elem.tag.i64Key, pQuery->order.order, pRuntimeEnv->pTSBuf->tsOrder,
          pRuntimeEnv->pTSBuf->cur.order, pRuntimeEnv->pTSBuf->cur.tsIndex);
 #endif

@@ -1461,12 +1489,15 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY
   pCtx->preAggVals.dataBlockLoaded = (inputData != NULL);

   // limit/offset query will affect this value
-  pCtx->startOffset = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->pos:0;
   pCtx->size = QUERY_IS_ASC_QUERY(pQuery) ? pBlockInfo->rows - pQuery->pos : pQuery->pos + 1;

+  // minimum value no matter ascending/descending order query
+  pCtx->startOffset = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->pos: (pQuery->pos - pCtx->size + 1);
+  assert(pCtx->startOffset >= 0);
+
   uint32_t status = aAggs[functionId].nStatus;
   if (((status & (TSDB_FUNCSTATE_SELECTIVITY | TSDB_FUNCSTATE_NEED_TS)) != 0) && (tsCol != NULL)) {
-    pCtx->ptsList = tsCol;
+    pCtx->ptsList = &tsCol[pCtx->startOffset];
   }

   if (functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_LAST_DST) {
@@ -1846,20 +1877,20 @@ static bool onlyQueryTags(SQuery* pQuery) {
 /////////////////////////////////////////////////////////////////////////////////////////////

 void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *win) {
-  assert(key >= keyFirst && key <= keyLast && pQuery->slidingTime <= pQuery->intervalTime);
-  win->skey = taosGetIntervalStartTimestamp(key, pQuery->slidingTime, pQuery->intervalTime, pQuery->slidingTimeUnit, pQuery->precision);
+  assert(key >= keyFirst && key <= keyLast && pQuery->interval.sliding <= pQuery->interval.interval);
+  win->skey = taosTimeTruncate(key, &pQuery->interval, pQuery->precision);

   /*
-   * if the realSkey > INT64_MAX - pQuery->intervalTime, the query duration between
+   * if the realSkey > INT64_MAX - pQuery->interval.interval, the query duration between
    * realSkey and realEkey must be less than one interval.Therefore, no need to adjust the query ranges.
    */
-  if (keyFirst > (INT64_MAX - pQuery->intervalTime)) {
-    assert(keyLast - keyFirst < pQuery->intervalTime);
+  if (keyFirst > (INT64_MAX - pQuery->interval.interval)) {
+    assert(keyLast - keyFirst < pQuery->interval.interval);
     win->ekey = INT64_MAX;
-  } else if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
-    win->ekey = taosAddNatualInterval(win->skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
+  } else if (pQuery->interval.intervalUnit == 'n' || pQuery->interval.intervalUnit == 'y') {
+    win->ekey = taosTimeAdd(win->skey, pQuery->interval.interval, pQuery->interval.intervalUnit, pQuery->precision) - 1;
   } else {
-    win->ekey = win->skey + pQuery->intervalTime - 1;
+    win->ekey = win->skey + pQuery->interval.interval - 1;
   }
 }

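Illustrative note (not part of the diff): the 'n'/'y' branches keep going through taosTimeTruncate()/taosTimeAdd() because calendar months and years have no fixed length, so win->ekey cannot be computed as win->skey + interval - 1. The sketch below makes that point with the C library's calendar math (glibc timegm/gmtime_r, assumed available on the Linux targets); it is not the TDengine implementation.

#include <stdio.h>
#include <time.h>

/* Add whole calendar months by letting mktime-style normalization carry the
 * overflowed month into the year; illustration only. */
static time_t addMonths(time_t t, int months) {
  struct tm tmv;
  gmtime_r(&t, &tmv);
  tmv.tm_mon += months;
  return timegm(&tmv);
}

int main(void) {
  struct tm jan = {.tm_year = 2015 - 1900, .tm_mon = 0, .tm_mday = 1};
  struct tm feb = {.tm_year = 2015 - 1900, .tm_mon = 1, .tm_mday = 1};
  time_t janStart = timegm(&jan);
  time_t febStart = timegm(&feb);

  // One "month" window starting Jan 1 spans 31 days; starting Feb 1 only 28.
  printf("Jan window: %ld s\n", (long)(addMonths(janStart, 1) - janStart));  // 2678400
  printf("Feb window: %ld s\n", (long)(addMonths(febStart, 1) - febStart));  // 2419200
  return 0;
}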
@@ -1951,40 +1982,39 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo

   // todo handle the case the the order irrelevant query type mixed up with order critical query type
   // descending order query for last_row query
-  if (isFirstLastRowQuery(pQuery) && !QUERY_IS_ASC_QUERY(pQuery)) {
-    qDebug("QInfo:%p scan order changed for last_row query, old:%d, new:%d", GET_QINFO_ADDR(pQuery),
-           pQuery->order.order, TSDB_ORDER_ASC);
+  if (isFirstLastRowQuery(pQuery)) {
+    qDebug("QInfo:%p scan order changed for last_row query, old:%d, new:%d", pQInfo, pQuery->order.order, TSDB_ORDER_ASC);

-    SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
     pQuery->order.order = TSDB_ORDER_ASC;
-    assert (pQuery->window.skey <= pQuery->window.ekey);
+    if (pQuery->window.skey > pQuery->window.ekey) {
+      SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
+    }

+    return;
+  }

+  if (isGroupbyNormalCol(pQuery->pGroupbyExpr) && pQuery->order.order == TSDB_ORDER_DESC) {
+    pQuery->order.order = TSDB_ORDER_ASC;
+    if (pQuery->window.skey > pQuery->window.ekey) {
+      SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
+    }

     doExchangeTimeWindow(pQInfo, &pQuery->window);
     return;
   }

-  if (isGroupbyNormalCol(pQuery->pGroupbyExpr) && !QUERY_IS_ASC_QUERY(pQuery)) {
-    pQuery->order.order = TSDB_ORDER_ASC;
-    SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
-    assert (pQuery->window.skey <= pQuery->window.ekey);
+  if (isPointInterpoQuery(pQuery) && pQuery->interval.interval == 0) {
+    if (!QUERY_IS_ASC_QUERY(pQuery)) {

-    doExchangeTimeWindow(pQInfo, &pQuery->window);
-    return;
-  }

-  if (isPointInterpoQuery(pQuery) && (pQuery->intervalTime == 0) && !QUERY_IS_ASC_QUERY(pQuery)) {
-    qDebug(msg, GET_QINFO_ADDR(pQuery), "interp", pQuery->order.order, TSDB_ORDER_ASC, pQuery->window.skey,
-           pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
-    SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
+      qDebug(msg, GET_QINFO_ADDR(pQuery), "interp", pQuery->order.order, TSDB_ORDER_ASC, pQuery->window.skey,
+             pQuery->window.ekey, pQuery->window.ekey, pQuery->window.skey);
+      SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
+    }

     pQuery->order.order = TSDB_ORDER_ASC;

-    assert (pQuery->window.skey <= pQuery->window.ekey);
-    doExchangeTimeWindow(pQInfo, &pQuery->window);
     return;
   }

-  if (pQuery->intervalTime == 0) {
+  if (pQuery->interval.interval == 0) {
     if (onlyFirstQuery(pQuery)) {
       if (!QUERY_IS_ASC_QUERY(pQuery)) {
         qDebug(msg, GET_QINFO_ADDR(pQuery), "only-first", pQuery->order.order, TSDB_ORDER_ASC, pQuery->window.skey,
@@ -2055,13 +2085,14 @@ static int32_t getInitialPageNum(SQInfo *pQInfo) {

 static void getIntermediateBufInfo(SQueryRuntimeEnv* pRuntimeEnv, int32_t* ps, int32_t* rowsize) {
   SQuery* pQuery = pRuntimeEnv->pQuery;
+  int32_t MIN_ROWS_PER_PAGE = 4;

   *rowsize = (int32_t)(pQuery->rowSize * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery));
   int32_t overhead = sizeof(tFilePage);

   // one page contains at least two rows
   *ps = DEFAULT_INTERN_BUF_PAGE_SIZE;
-  while(((*rowsize) * 2) > (*ps) - overhead) {
+  while(((*rowsize) * MIN_ROWS_PER_PAGE) > (*ps) - overhead) {
     *ps = (*ps << 1u);
   }

@@ -2185,43 +2216,43 @@ static bool overlapWithTimeWindow(SQuery* pQuery, SDataBlockInfo* pBlockInfo) {
   return false;
 }

-int32_t loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle, SDataBlockInfo* pBlockInfo, SDataStatis **pStatis, SArray** pDataBlock) {
+int32_t loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle, SDataBlockInfo* pBlockInfo, SDataStatis **pStatis, SArray** pDataBlock, uint32_t* status) {
   SQuery *pQuery = pRuntimeEnv->pQuery;

-  uint32_t status = 0;
+  *status = 0;
   if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf > 0) {
-    status = BLK_DATA_ALL_NEEDED;
+    *status = BLK_DATA_ALL_NEEDED;
   } else { // check if this data block is required to load

     // Calculate all time windows that are overlapping or contain current data block.
     // If current data block is contained by all possible time window, do not load current data block.
     if (QUERY_IS_INTERVAL_QUERY(pQuery) && overlapWithTimeWindow(pQuery, pBlockInfo)) {
-      status = BLK_DATA_ALL_NEEDED;
+      *status = BLK_DATA_ALL_NEEDED;
     }

-    if (status != BLK_DATA_ALL_NEEDED) {
+    if ((*status) != BLK_DATA_ALL_NEEDED) {
      for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
        SSqlFuncMsg* pSqlFunc = &pQuery->pSelectExpr[i].base;

        int32_t functionId = pSqlFunc->functionId;
        int32_t colId = pSqlFunc->colInfo.colId;

-        status |= aAggs[functionId].dataReqFunc(&pRuntimeEnv->pCtx[i], pBlockInfo->window.skey, pBlockInfo->window.ekey, colId);
-        if ((status & BLK_DATA_ALL_NEEDED) == BLK_DATA_ALL_NEEDED) {
+        (*status) |= aAggs[functionId].dataReqFunc(&pRuntimeEnv->pCtx[i], pBlockInfo->window.skey, pBlockInfo->window.ekey, colId);
+        if (((*status) & BLK_DATA_ALL_NEEDED) == BLK_DATA_ALL_NEEDED) {
          break;
        }
      }
     }
   }

-  if (status == BLK_DATA_NO_NEEDED) {
+  if ((*status) == BLK_DATA_NO_NEEDED) {
     qDebug("QInfo:%p data block discard, brange:%"PRId64 "-%"PRId64", rows:%d", GET_QINFO_ADDR(pRuntimeEnv),
            pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
     pRuntimeEnv->summary.discardBlocks += 1;
-  } else if (status == BLK_DATA_STATIS_NEEDED) {
-    if (tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis) != TSDB_CODE_SUCCESS) {
-      // return DISK_DATA_LOAD_FAILED;
-    }
+  } else if ((*status) == BLK_DATA_STATIS_NEEDED) {
+    // this function never returns error?
+    tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis);

     pRuntimeEnv->summary.loadBlockStatis += 1;

@@ -2230,24 +2261,26 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle,
       pRuntimeEnv->summary.totalCheckedRows += pBlockInfo->rows;
     }
   } else {
-    assert(status == BLK_DATA_ALL_NEEDED);
+    assert((*status) == BLK_DATA_ALL_NEEDED);

     // load the data block statistics to perform further filter
     pRuntimeEnv->summary.loadBlockStatis += 1;
-    if (tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis) != TSDB_CODE_SUCCESS) {
-    }
+    tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis);

     if (!needToLoadDataBlock(pRuntimeEnv, *pStatis, pRuntimeEnv->pCtx, pBlockInfo->rows)) {
       // current block has been discard due to filter applied
       pRuntimeEnv->summary.discardBlocks += 1;
       qDebug("QInfo:%p data block discard, brange:%"PRId64 "-%"PRId64", rows:%d", GET_QINFO_ADDR(pRuntimeEnv),
              pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
-      return BLK_DATA_DISCARD;
+      (*status) = BLK_DATA_DISCARD;
     }

     pRuntimeEnv->summary.totalCheckedRows += pBlockInfo->rows;
     pRuntimeEnv->summary.loadBlocks += 1;
     *pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL);
+    if (*pDataBlock == NULL) {
+      return terrno;
+    }
   }

   return TSDB_CODE_SUCCESS;
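Illustrative note (not part of the diff): loadDataBlockOnDemand() now reports two separate things: the return code says whether reading the block succeeded, and *status tells the caller what to do with the block. A minimal sketch of that out-parameter contract follows; the BLK_DATA_* values and the helper name are placeholders, not the real defines.

#include <stdint.h>
#include <stdio.h>

#define BLK_DATA_ALL_NEEDED  0x3   /* placeholder value for illustration */
#define BLK_DATA_DISCARD     0x4   /* placeholder value for illustration */

/* The return value carries only success/failure; the block decision travels
 * through the out parameter, mirroring the refactored signature above. */
static int32_t loadBlockSketch(int rows, uint32_t *status) {
  *status = (rows > 0) ? BLK_DATA_ALL_NEEDED : BLK_DATA_DISCARD;
  return 0;  // stands in for TSDB_CODE_SUCCESS
}

int main(void) {
  uint32_t status = 0;
  int32_t ret = loadBlockSketch(0, &status);
  if (ret != 0) {
    return 1;                       // a real error aborts the scan loop
  }
  if (status == BLK_DATA_DISCARD) { // mirrors the caller hunks that follow
    printf("skip this block and advance lastKey\n");
  }
  return 0;
}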
@@ -2431,15 +2464,18 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
     ensureOutputBuffer(pRuntimeEnv, &blockInfo);

     SDataStatis *pStatis = NULL;
-    SArray *pDataBlock = NULL;
-    if (loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis, &pDataBlock) == BLK_DATA_DISCARD) {
-      pQuery->current->lastKey = QUERY_IS_ASC_QUERY(pQuery)? blockInfo.window.ekey + step:blockInfo.window.skey + step;
-      continue;
+    SArray * pDataBlock = NULL;
+    uint32_t status = 0;
+
+    int32_t ret = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis, &pDataBlock, &status);
+    if (ret != TSDB_CODE_SUCCESS) {
+      break;
     }

-    if (terrno != TSDB_CODE_SUCCESS) { // load data block failed, abort query
-      longjmp(pRuntimeEnv->env, terrno);
-      break;
+    if (status == BLK_DATA_DISCARD) {
+      pQuery->current->lastKey =
+          QUERY_IS_ASC_QUERY(pQuery) ? blockInfo.window.ekey + step : blockInfo.window.skey + step;
+      continue;
     }

     // query start position can not move into tableApplyFunctionsOnBlock due to limit/offset condition
@@ -2806,6 +2842,7 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) {
     // all results have been return to client, try next group
     if (pGroupResInfo->pos.pageId == pGroupResInfo->numOfDataPages) {
       pGroupResInfo->numOfDataPages = 0;
+      pGroupResInfo->pos.pageId = 0;
       pGroupResInfo->pos.rowId = 0;

       // current results of group has been sent to client, try next group
@@ -2920,11 +2957,11 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
     STableQueryInfo *item = taosArrayGetP(pGroup, i);

     SIDList list = getDataBufPagesIdList(pRuntimeEnv->pResultBuf, TSDB_TABLEID(item->pTable)->tid);
+    pageList = list;
+    tid = TSDB_TABLEID(item->pTable)->tid;

     if (taosArrayGetSize(list) > 0 && item->windowResInfo.size > 0) {
       pTableList[numOfTables++] = item;
-      tid = TSDB_TABLEID(item->pTable)->tid;
-      pageList = list;
     }
   }

@@ -2993,7 +3030,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
     char *b = getPosInResultPage(pRuntimeEnv, PRIMARYKEY_TIMESTAMP_COL_INDEX, pWindowRes, page);
     TSKEY ts = GET_INT64_VAL(b);

-    assert(ts == pWindowRes->skey);
+    assert(ts == pWindowRes->win.skey);
     int64_t num = getNumOfResultWindowRes(pQuery, pWindowRes);
     if (num <= 0) {
       cs.position[pos] += 1;
@@ -3678,7 +3715,7 @@ static STableQueryInfo *createTableQueryInfo(SQueryRuntimeEnv *pRuntimeEnv, void
   return pTableQueryInfo;
 }

-void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo) {
+void destroyTableQueryInfoImpl(STableQueryInfo *pTableQueryInfo) {
   if (pTableQueryInfo == NULL) {
     return;
   }
@@ -4238,8 +4275,8 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) {
   }

   /*
-   * 1. for interval without interpolation query we forward pQuery->intervalTime at a time for
-   *    pQuery->limit.offset times. Since hole exists, pQuery->intervalTime*pQuery->limit.offset value is
+   * 1. for interval without interpolation query we forward pQuery->interval.interval at a time for
+   *    pQuery->limit.offset times. Since hole exists, pQuery->interval.interval*pQuery->limit.offset value is
    *    not valid. otherwise, we only forward pQuery->limit.offset number of points
    */
   assert(pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL);
@@ -4354,31 +4391,7 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) {
   return true;
 }

-static void freeTableQueryInfo(STableGroupInfo* pTableGroupInfo) {
-  if (pTableGroupInfo->pGroupList == NULL) {
-    assert(pTableGroupInfo->numOfTables == 0);
-  } else {
-    size_t numOfGroups = taosArrayGetSize(pTableGroupInfo->pGroupList);
-    for (int32_t i = 0; i < numOfGroups; ++i) {
-      SArray *p = taosArrayGetP(pTableGroupInfo->pGroupList, i);
-
-      size_t num = taosArrayGetSize(p);
-      for(int32_t j = 0; j < num; ++j) {
-        STableQueryInfo* item = taosArrayGetP(p, j);
-        destroyTableQueryInfo(item);
-      }
-
-      taosArrayDestroy(p);
-    }
-
-    taosArrayDestroy(pTableGroupInfo->pGroupList);
-    pTableGroupInfo->pGroupList = NULL;
-    pTableGroupInfo->numOfTables = 0;
-  }
-
-  taosHashCleanup(pTableGroupInfo->map);
-  pTableGroupInfo->map = NULL;
-}
+static void doDestroyTableQueryInfo(STableGroupInfo* pTableqinfoGroupInfo);

 static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) {
   SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
@@ -4415,13 +4428,15 @@ static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery)
   terrno = TSDB_CODE_SUCCESS;
   if (isFirstLastRowQuery(pQuery)) {
     pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo);
-    if (pRuntimeEnv->pQueryHandle == NULL) { // no data in current stable, clear all
-      freeTableQueryInfo(&pQInfo->tableqinfoGroupInfo);
-    } else { // update the query time window
+
+    // update the query time window
     pQuery->window = cond.twindow;

+    if (pQInfo->tableGroupInfo.numOfTables == 0) {
+      pQInfo->tableqinfoGroupInfo.numOfTables = 0;
+    } else {
       size_t numOfGroups = GET_NUM_OF_TABLEGROUP(pQInfo);
-      for (int32_t i = 0; i < numOfGroups; ++i) {
+      for(int32_t i = 0; i < numOfGroups; ++i) {
         SArray *group = GET_TABLEGROUP(pQInfo, i);

         size_t t = taosArrayGetSize(group);
@@ -4484,12 +4499,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
     return code;
   }

-  if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) {
-    qDebug("QInfo:%p no table qualified for tag filter, abort query", pQInfo);
-    setQueryStatus(pQuery, QUERY_COMPLETED);
-    return TSDB_CODE_SUCCESS;
-  }
-
   pQInfo->tsdb = tsdb;
   pQInfo->vgId = vgId;

@@ -4572,7 +4581,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
     getAlignQueryTimeWindow(pQuery, pQuery->window.skey, sk, ek, &w);

     pRuntimeEnv->pFillInfo = taosInitFillInfo(pQuery->order.order, w.skey, 0, (int32_t)pQuery->rec.capacity, pQuery->numOfOutput,
-                                              pQuery->slidingTime, pQuery->slidingTimeUnit, (int8_t)pQuery->precision,
+                                              pQuery->interval.sliding, pQuery->interval.slidingUnit, (int8_t)pQuery->precision,
                                               pQuery->fillType, pColInfo);
   }

@@ -4651,9 +4660,17 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
     }

     SDataStatis *pStatis = NULL;
-    SArray *pDataBlock = NULL;
-    if (loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis, &pDataBlock) == BLK_DATA_DISCARD) {
-      pQuery->current->lastKey = QUERY_IS_ASC_QUERY(pQuery)? blockInfo.window.ekey + step:blockInfo.window.skey + step;
+    SArray * pDataBlock = NULL;
+    uint32_t status = 0;
+
+    int32_t ret = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis, &pDataBlock, &status);
+    if (ret != TSDB_CODE_SUCCESS) {
+      break;
+    }

+    if (status == BLK_DATA_DISCARD) {
+      pQuery->current->lastKey =
+          QUERY_IS_ASC_QUERY(pQuery) ? blockInfo.window.ekey + step : blockInfo.window.skey + step;
       continue;
     }

@@ -5425,7 +5442,7 @@ static void stableQueryImpl(SQInfo *pQInfo) {
       (isFixedOutputQuery(pRuntimeEnv) && (!isPointInterpoQuery(pQuery)) && (!pRuntimeEnv->groupbyNormalCol))) {
     multiTableQueryProcess(pQInfo);
   } else {
-    assert((pQuery->checkBuffer == 1 && pQuery->intervalTime == 0) || isPointInterpoQuery(pQuery) ||
+    assert((pQuery->checkBuffer == 1 && pQuery->interval.interval == 0) || isPointInterpoQuery(pQuery) ||
            isFirstLastRowQuery(pQuery) || pRuntimeEnv->groupbyNormalCol);

     sequentialTableProcess(pQInfo);
@@ -5463,6 +5480,7 @@ static int32_t getColumnIndexInSource(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pE
     }
   }
   assert(0);
+  return -1;
 }

 bool validateExprColumnInfo(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pExprMsg, SColumnInfo* pTagCols) {
@@ -5471,8 +5489,8 @@ bool validateExprColumnInfo(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pExprMsg, SC
 }

 static bool validateQueryMsg(SQueryTableMsg *pQueryMsg) {
-  if (pQueryMsg->intervalTime < 0) {
-    qError("qmsg:%p illegal value of interval time %" PRId64, pQueryMsg, pQueryMsg->intervalTime);
+  if (pQueryMsg->interval.interval < 0) {
+    qError("qmsg:%p illegal value of interval time %" PRId64, pQueryMsg, pQueryMsg->interval.interval);
     return false;
   }

@@ -5551,8 +5569,12 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,

   pQueryMsg->window.skey = htobe64(pQueryMsg->window.skey);
   pQueryMsg->window.ekey = htobe64(pQueryMsg->window.ekey);
-  pQueryMsg->intervalTime = htobe64(pQueryMsg->intervalTime);
-  pQueryMsg->slidingTime = htobe64(pQueryMsg->slidingTime);
+  pQueryMsg->interval.interval = htobe64(pQueryMsg->interval.interval);
+  pQueryMsg->interval.sliding = htobe64(pQueryMsg->interval.sliding);
+  pQueryMsg->interval.offset = htobe64(pQueryMsg->interval.offset);
+  pQueryMsg->interval.intervalUnit = pQueryMsg->interval.intervalUnit;
+  pQueryMsg->interval.slidingUnit = pQueryMsg->interval.slidingUnit;
+  pQueryMsg->interval.offsetUnit = pQueryMsg->interval.offsetUnit;
   pQueryMsg->limit = htobe64(pQueryMsg->limit);
   pQueryMsg->offset = htobe64(pQueryMsg->offset);

@@ -5765,7 +5787,7 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
   qDebug("qmsg:%p query %d tables, type:%d, qrange:%" PRId64 "-%" PRId64 ", numOfGroupbyTagCols:%d, order:%d, "
          "outputCols:%d, numOfCols:%d, interval:%" PRId64 ", fillType:%d, comptsLen:%d, compNumOfBlocks:%d, limit:%" PRId64 ", offset:%" PRId64,
          pQueryMsg, pQueryMsg->numOfTables, pQueryMsg->queryType, pQueryMsg->window.skey, pQueryMsg->window.ekey, pQueryMsg->numOfGroupCols,
-         pQueryMsg->order, pQueryMsg->numOfOutput, pQueryMsg->numOfCols, pQueryMsg->intervalTime,
+         pQueryMsg->order, pQueryMsg->numOfOutput, pQueryMsg->numOfCols, pQueryMsg->interval.interval,
          pQueryMsg->fillType, pQueryMsg->tsLen, pQueryMsg->tsNumOfBlocks, pQueryMsg->limit, pQueryMsg->offset);

   return TSDB_CODE_SUCCESS;
@@ -6104,10 +6126,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou
   pQuery->order.orderColId = pQueryMsg->orderColId;
   pQuery->pSelectExpr = pExprs;
   pQuery->pGroupbyExpr = pGroupbyExpr;
-  pQuery->intervalTime = pQueryMsg->intervalTime;
-  pQuery->slidingTime = pQueryMsg->slidingTime;
-  pQuery->intervalTimeUnit = pQueryMsg->intervalTimeUnit;
-  pQuery->slidingTimeUnit = pQueryMsg->slidingTimeUnit;
+  memcpy(&pQuery->interval, &pQueryMsg->interval, sizeof(pQuery->interval));
   pQuery->fillType = pQueryMsg->fillType;
   pQuery->numOfTags = pQueryMsg->numOfTags;
   pQuery->tagColList = pTagCols;
@@ -6312,17 +6331,43 @@ _error:
 }

 static void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) {
-  if (pFilter == NULL) {
+  if (pFilter == NULL || numOfFilters == 0) {
     return;
   }

   for (int32_t i = 0; i < numOfFilters; i++) {
     if (pFilter[i].filterstr) {
       free((void*)(pFilter[i].pz));
     }
   }

   free(pFilter);
 }

+static void doDestroyTableQueryInfo(STableGroupInfo* pTableqinfoGroupInfo) {
+  if (pTableqinfoGroupInfo->pGroupList != NULL) {
+    int32_t numOfGroups = (int32_t) taosArrayGetSize(pTableqinfoGroupInfo->pGroupList);
+    for (int32_t i = 0; i < numOfGroups; ++i) {
+      SArray *p = taosArrayGetP(pTableqinfoGroupInfo->pGroupList, i);
+
+      size_t num = taosArrayGetSize(p);
+      for(int32_t j = 0; j < num; ++j) {
+        STableQueryInfo* item = taosArrayGetP(p, j);
+        destroyTableQueryInfoImpl(item);
+      }
+
+      taosArrayDestroy(p);
+    }
+  }
+
+  taosArrayDestroy(pTableqinfoGroupInfo->pGroupList);
+  taosHashCleanup(pTableqinfoGroupInfo->map);
+
+  pTableqinfoGroupInfo->pGroupList = NULL;
+  pTableqinfoGroupInfo->map = NULL;
+  pTableqinfoGroupInfo->numOfTables = 0;
+}

 static void freeQInfo(SQInfo *pQInfo) {
   if (!isValidQInfo(pQInfo)) {
     return;
@@ -6383,10 +6428,9 @@ static void freeQInfo(SQInfo *pQInfo) {
     taosTFree(pQuery);
   }

-  freeTableQueryInfo(&pQInfo->tableqinfoGroupInfo);
+  doDestroyTableQueryInfo(&pQInfo->tableqinfoGroupInfo);

   taosTFree(pQInfo->pBuf);

   tsdbDestroyTableGroup(&pQInfo->tableGroupInfo);
   taosArrayDestroy(pQInfo->arrTableIdInfo);

@@ -502,22 +502,22 @@ FORCE_INLINE int32_t compare_sd(tOrderDescriptor *pDescriptor, int32_t numOfRows
   return compare_d(pDescriptor, numOfRows, idx1, data, numOfRows, idx2, data);
 }

-static void swap(SColumnModel *pColumnModel, int32_t count, int32_t s1, char *data1, int32_t s2) {
+static void swap(SColumnModel *pColumnModel, int32_t count, int32_t s1, char *data1, int32_t s2, void* buf) {
   for (int32_t i = 0; i < pColumnModel->numOfCols; ++i) {
     void *first = COLMODEL_GET_VAL(data1, pColumnModel, count, s1, i);
     void *second = COLMODEL_GET_VAL(data1, pColumnModel, count, s2, i);

     SSchema* pSchema = &pColumnModel->pFields[i].field;
-    tsDataSwap(first, second, pSchema->type, pSchema->bytes);
+    tsDataSwap(first, second, pSchema->type, pSchema->bytes, buf);
   }
 }

 static void tColDataInsertSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data,
-                               __col_compar_fn_t compareFn) {
+                               __col_compar_fn_t compareFn, void* buf) {
   for (int32_t i = start + 1; i <= end; ++i) {
     for (int32_t j = i; j > start; --j) {
       if (compareFn(pDescriptor, numOfRows, j, j - 1, data) == -1) {
-        swap(pDescriptor->pColumnModel, numOfRows, j - 1, data, j);
+        swap(pDescriptor->pColumnModel, numOfRows, j - 1, data, j, buf);
       } else {
         break;
       }
@@ -553,7 +553,7 @@ static void UNUSED_FUNC tSortDataPrint(int32_t type, char *prefix, char *startx,
 }

 static void median(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data,
-                   __col_compar_fn_t compareFn) {
+                   __col_compar_fn_t compareFn, void* buf) {
   int32_t midIdx = ((end - start) >> 1) + start;

 #if defined(_DEBUG_VIEW)
@@ -567,15 +567,16 @@ static void median(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta
   tSortDataPrint(pDescriptor->pColumnModel->pFields[colIdx].field.type, "before", startx, midx, endx);
 #endif

+  SColumnModel* pModel = pDescriptor->pColumnModel;
   if (compareFn(pDescriptor, numOfRows, midIdx, start, data) == 1) {
-    swap(pDescriptor->pColumnModel, numOfRows, start, data, midIdx);
+    swap(pModel, numOfRows, start, data, midIdx, buf);
   }

   if (compareFn(pDescriptor, numOfRows, midIdx, end, data) == 1) {
-    swap(pDescriptor->pColumnModel, numOfRows, midIdx, data, start);
-    swap(pDescriptor->pColumnModel, numOfRows, midIdx, data, end);
+    swap(pModel, numOfRows, midIdx, data, start, buf);
+    swap(pModel, numOfRows, midIdx, data, end, buf);
   } else if (compareFn(pDescriptor, numOfRows, start, end, data) == 1) {
-    swap(pDescriptor->pColumnModel, numOfRows, start, data, end);
+    swap(pModel, numOfRows, start, data, end, buf);
   }

   assert(compareFn(pDescriptor, numOfRows, midIdx, start, data) <= 0 &&
@@ -626,32 +627,20 @@ static UNUSED_FUNC void tRowModelDisplay(tOrderDescriptor *pDescriptor, int32_t
   printf("\n");
 }

-static int32_t qsort_call = 0;
-
-void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data,
-                   int32_t orderType) {
-  // short array sort, incur another sort procedure instead of quick sort process
-  __col_compar_fn_t compareFn = (orderType == TSDB_ORDER_ASC) ? compare_sa : compare_sd;
-
-  if (end - start + 1 <= 8) {
-    tColDataInsertSort(pDescriptor, numOfRows, start, end, data, compareFn);
-    return;
-  }
-
+static void columnwiseQSortImpl(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data,
+                                int32_t orderType, __col_compar_fn_t compareFn, void* buf) {
 #ifdef _DEBUG_VIEW
-  // printf("before sort:\n");
-  // tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1);
+  printf("before sort:\n");
+  tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1);
 #endif

   int32_t s = start, e = end;
-  median(pDescriptor, numOfRows, start, end, data, compareFn);
+  median(pDescriptor, numOfRows, start, end, data, compareFn, buf);

 #ifdef _DEBUG_VIEW
   // printf("%s called: %d\n", __FUNCTION__, qsort_call++);
 #endif

-  UNUSED(qsort_call);
-
   int32_t end_same = end;
   int32_t start_same = start;

@@ -663,17 +652,17 @@ static void columnwiseQSortImpl(tOrderDescriptor *pDescriptor, int32_t numOfRows
     }

     if (ret == 0 && e != end_same) {
-      swap(pDescriptor->pColumnModel, numOfRows, e, data, end_same--);
+      swap(pDescriptor->pColumnModel, numOfRows, e, data, end_same--, buf);
     }
     e--;
   }

   if (e != s) {
-    swap(pDescriptor->pColumnModel, numOfRows, s, data, e);
+    swap(pDescriptor->pColumnModel, numOfRows, s, data, e, buf);
   }

 #ifdef _DEBUG_VIEW
   // tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1);
 #endif

   while (s < e) {
@@ -683,16 +672,16 @@ static void columnwiseQSortImpl(tOrderDescriptor *pDescriptor, int32_t numOfRows
     }

     if (ret == 0 && s != start_same) {
-      swap(pDescriptor->pColumnModel, numOfRows, s, data, start_same++);
+      swap(pDescriptor->pColumnModel, numOfRows, s, data, start_same++, buf);
     }
     s++;
   }

   if (s != e) {
-    swap(pDescriptor->pColumnModel, numOfRows, s, data, e);
+    swap(pDescriptor->pColumnModel, numOfRows, s, data, e, buf);
   }
 #ifdef _DEBUG_VIEW
   // tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1);
 #endif
 }

@@ -702,14 +691,14 @@ static void columnwiseQSortImpl(tOrderDescriptor *pDescriptor, int32_t numOfRows
     int32_t right = end;

     while (right > end_same && left <= end_same) {
-      swap(pDescriptor->pColumnModel, numOfRows, left++, data, right--);
+      swap(pDescriptor->pColumnModel, numOfRows, left++, data, right--, buf);
     }

     // (pivotal+1) + steps of number that are identical pivotal
     rightx += (end - end_same);

 #ifdef _DEBUG_VIEW
     // tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1);
 #endif
   }

@@ -719,26 +708,52 @@ static void columnwiseQSortImpl(tOrderDescriptor *pDescriptor, int32_t numOfRows
     int32_t right = e - 1;

     while (left < start_same && right >= start_same) {
-      swap(pDescriptor->pColumnModel, numOfRows, left++, data, right--);
+      swap(pDescriptor->pColumnModel, numOfRows, left++, data, right--, buf);
     }

     // (pivotal-1) - steps of number that are identical pivotal
     leftx -= (start_same - start);

 #ifdef _DEBUG_VIEW
     // tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1);
 #endif
   }

   if (leftx > start) {
-    tColDataQSort(pDescriptor, numOfRows, start, leftx, data, orderType);
+    columnwiseQSortImpl(pDescriptor, numOfRows, start, leftx, data, orderType, compareFn, buf);
   }

   if (rightx < end) {
-    tColDataQSort(pDescriptor, numOfRows, rightx, end, data, orderType);
+    columnwiseQSortImpl(pDescriptor, numOfRows, rightx, end, data, orderType, compareFn, buf);
   }
 }

+void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data, int32_t order) {
+  // short array sort, incur another sort procedure instead of quick sort process
+  __col_compar_fn_t compareFn = (order == TSDB_ORDER_ASC) ? compare_sa : compare_sd;
+
+  SColumnModel* pModel = pDescriptor->pColumnModel;
+
+  size_t width = 0;
+  for(int32_t i = 0; i < pModel->numOfCols; ++i) {
+    SSchema* pSchema = &pModel->pFields[i].field;
+    if (width < pSchema->bytes) {
+      width = pSchema->bytes;
+    }
+  }
+
+  char* buf = malloc(width);
+  assert(width > 0 && buf != NULL);
+
+  if (end - start + 1 <= 8) {
+    tColDataInsertSort(pDescriptor, numOfRows, start, end, data, compareFn, buf);
+  } else {
+    columnwiseQSortImpl(pDescriptor, numOfRows, start, end, data, order, compareFn, buf);
+  }
+
+  free(buf);
+}

 /*
  * deep copy of sschema
  */
@@ -38,8 +38,11 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_
   pFillInfo->numOfTags = numOfTags;
   pFillInfo->numOfCols = numOfCols;
   pFillInfo->precision = precision;
-  pFillInfo->slidingTime = slidingTime;
-  pFillInfo->slidingUnit = slidingUnit;
+  pFillInfo->interval.interval = slidingTime;
+  pFillInfo->interval.intervalUnit = slidingUnit;
+  pFillInfo->interval.sliding = slidingTime;
+  pFillInfo->interval.slidingUnit = slidingUnit;

   pFillInfo->pData = malloc(POINTER_BYTES * numOfCols);
   if (numOfTags > 0) {
@@ -108,21 +111,15 @@ void* taosDestoryFillInfo(SFillInfo* pFillInfo) {
   return NULL;
 }

-static TSKEY taosGetRevisedEndKey(TSKEY ekey, int32_t order, int64_t timeInterval, int8_t slidingTimeUnit, int8_t precision) {
-  if (order == TSDB_ORDER_ASC) {
-    return ekey;
-  } else {
-    return taosGetIntervalStartTimestamp(ekey, timeInterval, timeInterval, slidingTimeUnit, precision);
-  }
-}
-
 void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey) {
   if (pFillInfo->fillType == TSDB_FILL_NONE) {
     return;
   }

-  pFillInfo->endKey = taosGetRevisedEndKey(endKey, pFillInfo->order, pFillInfo->slidingTime, pFillInfo->slidingUnit,
-                                           pFillInfo->precision);
+  pFillInfo->endKey = endKey;
+  if (pFillInfo->order != TSDB_ORDER_ASC) {
+    pFillInfo->endKey = taosTimeTruncate(endKey, &pFillInfo->interval, pFillInfo->precision);
+  }

   pFillInfo->rowIdx = 0;
   pFillInfo->numOfRows = numOfRows;
@@ -172,30 +169,34 @@ int64_t getFilledNumOfRes(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows

   int32_t numOfRows = taosNumOfRemainRows(pFillInfo);

-  TSKEY ekey1 = taosGetRevisedEndKey(ekey, pFillInfo->order, pFillInfo->slidingTime, pFillInfo->slidingUnit,
-                                     pFillInfo->precision);
+  TSKEY ekey1 = ekey;
+  if (pFillInfo->order != TSDB_ORDER_ASC) {
+    pFillInfo->endKey = taosTimeTruncate(ekey, &pFillInfo->interval, pFillInfo->precision);
+  }

   int64_t numOfRes = -1;
   if (numOfRows > 0) { // still fill gap within current data block, not generating data after the result set.
     TSKEY lastKey = tsList[pFillInfo->numOfRows - 1];
-    if (pFillInfo->slidingUnit != 'y' && pFillInfo->slidingUnit != 'n') {
-      numOfRes = (int64_t)(ABS(lastKey - pFillInfo->start) / pFillInfo->slidingTime) + 1;
-    } else {
-      numOfRes = taosCountNatualInterval(lastKey, pFillInfo->start, pFillInfo->slidingTime, pFillInfo->slidingUnit, pFillInfo->precision) + 1;
-    }
+    numOfRes = taosTimeCountInterval(
+      lastKey,
+      pFillInfo->start,
+      pFillInfo->interval.sliding,
+      pFillInfo->interval.slidingUnit,
+      pFillInfo->precision);
+    numOfRes += 1;
     assert(numOfRes >= numOfRows);
   } else { // reach the end of data
     if ((ekey1 < pFillInfo->start && FILL_IS_ASC_FILL(pFillInfo)) ||
         (ekey1 > pFillInfo->start && !FILL_IS_ASC_FILL(pFillInfo))) {
       return 0;
     }
-    // the numOfRes rows are all filled with specified policy
-    if (pFillInfo->slidingUnit != 'y' && pFillInfo->slidingUnit != 'n') {
-      numOfRes = (ABS(ekey1 - pFillInfo->start) / pFillInfo->slidingTime) + 1;
-    } else {
-      numOfRes = taosCountNatualInterval(ekey1, pFillInfo->start, pFillInfo->slidingTime, pFillInfo->slidingUnit, pFillInfo->precision) + 1;
-    }
+    numOfRes = taosTimeCountInterval(
+      ekey1,
+      pFillInfo->start,
+      pFillInfo->interval.sliding,
+      pFillInfo->interval.slidingUnit,
+      pFillInfo->precision);
+    numOfRes += 1;
   }

   return (numOfRes > maxNumOfRows) ? maxNumOfRows : numOfRes;
@@ -374,12 +375,7 @@ static void doFillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* nu
     setTagsValue(pFillInfo, data, *num);
   }

-  // TODO natual sliding time
-  if (pFillInfo->slidingUnit != 'n' && pFillInfo->slidingUnit != 'y') {
-    pFillInfo->start += (pFillInfo->slidingTime * step);
-  } else {
-    pFillInfo->start = taosAddNatualInterval(pFillInfo->start, pFillInfo->slidingTime*step, pFillInfo->slidingUnit, pFillInfo->precision);
-  }
+  pFillInfo->start = taosTimeAdd(pFillInfo->start, pFillInfo->interval.sliding * step, pFillInfo->interval.slidingUnit, pFillInfo->precision);
   pFillInfo->numOfCurrent++;

   (*num) += 1;
@@ -486,12 +482,7 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu
   // set the tag value for final result
   setTagsValue(pFillInfo, data, num);

-  // TODO natual sliding time
-  if (pFillInfo->slidingUnit != 'n' && pFillInfo->slidingUnit != 'y') {
-    pFillInfo->start += (pFillInfo->slidingTime * step);
-  } else {
-    pFillInfo->start = taosAddNatualInterval(pFillInfo->start, pFillInfo->slidingTime*step, pFillInfo->slidingUnit, pFillInfo->precision);
-  }
+  pFillInfo->start = taosTimeAdd(pFillInfo->start, pFillInfo->interval.sliding*step, pFillInfo->interval.slidingUnit, pFillInfo->precision);
   pFillInfo->rowIdx += 1;

   pFillInfo->numOfCurrent +=1;

@@ -284,6 +284,14 @@ bool nequal_nchar(SColumnFilterElem *pFilter, char* minval, char *maxval) {

   return wcsncmp((wchar_t *)pFilter->filterInfo.pz, varDataVal(minval), varDataLen(minval)/TSDB_NCHAR_SIZE) != 0;
 }

+////////////////////////////////////////////////////////////////
+bool isNull_filter(SColumnFilterElem *pFilter, char* minval, char* maxval) {
+  return true;
+}
+
+bool notNull_filter(SColumnFilterElem *pFilter, char* minval, char* maxval) {
+  return true;
+}
+
 ////////////////////////////////////////////////////////////////

@@ -398,6 +406,8 @@ bool (*filterFunc_i8[])(SColumnFilterElem *pFilter, char *minval, char *maxval)
   largeEqual_i8,
   nequal_i8,
   NULL,
+  isNull_filter,
+  notNull_filter,
 };

 bool (*filterFunc_i16[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = {
@@ -409,6 +419,8 @@ bool (*filterFunc_i16[])(SColumnFilterElem *pFilter, char *minval, char *maxval)
   largeEqual_i16,
   nequal_i16,
   NULL,
+  isNull_filter,
+  notNull_filter,
 };

 bool (*filterFunc_i32[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = {
@@ -420,6 +432,8 @@ bool (*filterFunc_i32[])(SColumnFilterElem *pFilter, char *minval, char *maxval)
   largeEqual_i32,
   nequal_i32,
   NULL,
+  isNull_filter,
+  notNull_filter,
 };

 bool (*filterFunc_i64[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = {
@@ -431,6 +445,8 @@ bool (*filterFunc_i64[])(SColumnFilterElem *pFilter, char *minval, char *maxval)
   largeEqual_i64,
   nequal_i64,
   NULL,
+  isNull_filter,
+  notNull_filter,
 };

 bool (*filterFunc_ds[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = {
@@ -442,6 +458,8 @@ bool (*filterFunc_ds[])(SColumnFilterElem *pFilter, char *minval, char *maxval)
   largeEqual_ds,
   nequal_ds,
   NULL,
+  isNull_filter,
+  notNull_filter,
 };

 bool (*filterFunc_dd[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = {
@@ -453,6 +471,8 @@ bool (*filterFunc_dd[])(SColumnFilterElem *pFilter, char *minval, char *maxval)
   largeEqual_dd,
   nequal_dd,
   NULL,
+  isNull_filter,
+  notNull_filter,
 };

 bool (*filterFunc_str[])(SColumnFilterElem* pFilter, char* minval, char *maxval) = {
@@ -464,6 +484,8 @@ bool (*filterFunc_str[])(SColumnFilterElem* pFilter, char* minval, char *maxval)
   NULL,
   nequal_str,
   like_str,
+  isNull_filter,
+  notNull_filter,
 };

 bool (*filterFunc_nchar[])(SColumnFilterElem* pFitler, char* minval, char* maxval) = {
@@ -475,6 +497,8 @@ bool (*filterFunc_nchar[])(SColumnFilterElem* pFitler, char* minval, char* maxva
   NULL,
   nequal_nchar,
   like_nchar,
+  isNull_filter,
+  notNull_filter,
 };

 bool (*rangeFilterFunc_i8[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = {

@@ -135,7 +135,7 @@ tSQLExpr *tSQLExprIdValueCreate(SStrToken *pToken, int32_t optrType) {
     pSQLExpr->val.nType = TSDB_DATA_TYPE_BIGINT;
     pSQLExpr->nSQLOptr = TK_TIMESTAMP;  // TK_TIMESTAMP used to denote the time value is in microsecond
   } else if (optrType == TK_VARIABLE) {
-    int32_t ret = getTimestampInUsFromStr(pToken->z, pToken->n, &pSQLExpr->val.i64Key);
+    int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSQLExpr->val.i64Key);
     UNUSED(ret);

     pSQLExpr->val.nType = TSDB_DATA_TYPE_BIGINT;
@@ -179,7 +179,7 @@ tSQLExpr *tSQLExprCreateFunction(tSQLExprList *pList, SStrToken *pFuncToken, SSt
 tSQLExpr *tSQLExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) {
   tSQLExpr *pExpr = calloc(1, sizeof(tSQLExpr));

-  if (pRight != NULL && pLeft != NULL) {
+  if (pLeft != NULL && pRight != NULL && (optrType != TK_IN)) {
     char* endPos = pRight->token.z + pRight->token.n;
     pExpr->token.z = pLeft->token.z;
     pExpr->token.n = (uint32_t)(endPos - pExpr->token.z);
@@ -275,6 +275,11 @@ tSQLExpr *tSQLExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) {
   } else {
     pExpr->nSQLOptr = optrType;
     pExpr->pLeft = pLeft;

+    if (pRight == NULL) {
+      pRight = calloc(1, sizeof(tSQLExpr));
+    }
+
     pExpr->pRight = pRight;
   }

@@ -438,44 +443,6 @@ void setDBName(SStrToken *pCpxName, SStrToken *pDB) {
   pCpxName->n = pDB->n;
 }

-int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t *result) {
-  *result = val;
-
-  switch (unit) {
-    case 's':
-      (*result) *= MILLISECOND_PER_SECOND;
-      break;
-    case 'm':
-      (*result) *= MILLISECOND_PER_MINUTE;
-      break;
-    case 'h':
-      (*result) *= MILLISECOND_PER_HOUR;
-      break;
-    case 'd':
-      (*result) *= MILLISECOND_PER_DAY;
-      break;
-    case 'w':
-      (*result) *= MILLISECOND_PER_WEEK;
-      break;
-    case 'n':
-      (*result) *= MILLISECOND_PER_MONTH;
-      break;
-    case 'y':
-      (*result) *= MILLISECOND_PER_YEAR;
-      break;
-    case 'a':
-      break;
-    default: {
-      ;
-      return -1;
-    }
-  }
-
-  /* get the value in microsecond */
-  (*result) *= 1000L;
-  return 0;
-}
-
 void tSQLSetColumnInfo(TAOS_FIELD *pField, SStrToken *pName, TAOS_FIELD *pType) {
   int32_t maxLen = sizeof(pField->name) / sizeof(pField->name[0]);

@@ -530,7 +497,7 @@ void tSQLSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
  * extract the select info out of sql string
  */
 SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, tVariantList *pFrom, tSQLExpr *pWhere,
-                             tVariantList *pGroupby, tVariantList *pSortOrder, SStrToken *pInterval,
+                             tVariantList *pGroupby, tVariantList *pSortOrder, SIntervalVal *pInterval,
                              SStrToken *pSliding, tVariantList *pFill, SLimitVal *pLimit, SLimitVal *pGLimit) {
   assert(pSelection != NULL);

@@ -553,7 +520,8 @@ SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection,
   }

   if (pInterval != NULL) {
-    pQuery->interval = *pInterval;
+    pQuery->interval = pInterval->interval;
+    pQuery->offset = pInterval->offset;
   }

   if (pSliding != NULL) {

@@ -70,6 +70,33 @@ static void resetBoundingBox(MinMaxEntry* range, int32_t type) {
   }
 }

+static int32_t setBoundingBox(MinMaxEntry* range, int16_t type, double minval, double maxval) {
+  if (minval > maxval) {
+    return -1;
+  }
+
+  switch(type) {
+    case TSDB_DATA_TYPE_TINYINT:
+    case TSDB_DATA_TYPE_SMALLINT:
+    case TSDB_DATA_TYPE_INT:
+      range->iMinVal = (int32_t) minval;
+      range->iMaxVal = (int32_t) maxval;
+      break;
+
+    case TSDB_DATA_TYPE_BIGINT:
+      range->i64MinVal = (int64_t) minval;
+      range->i64MaxVal = (int64_t) maxval;
+      break;
+    case TSDB_DATA_TYPE_FLOAT:
+    case TSDB_DATA_TYPE_DOUBLE:
+      range->dMinVal = minval;
+      range->dMaxVal = maxval;
+      break;
+  }
+
+  return 0;
+}
+
 static void resetPosInfo(SSlotInfo* pInfo) {
   pInfo->size = 0;
   pInfo->pageId = -1;
@@ -135,6 +162,11 @@ int32_t tBucketBigIntHash(tMemBucket *pBucket, const void *value) {

     return index;
   } else {
+    // out of range
+    if (v < pBucket->range.i64MinVal || v > pBucket->range.i64MaxVal) {
+      return -1;
+    }
+
     // todo hash for bigint and float and double
     int64_t span = pBucket->range.i64MaxVal - pBucket->range.i64MinVal;
     if (span < pBucket->numOfSlots) {
@@ -179,6 +211,11 @@ int32_t tBucketIntHash(tMemBucket *pBucket, const void *value) {

     return index;
   } else {
+    // out of range
+    if (v < pBucket->range.iMinVal || v > pBucket->range.iMaxVal) {
+      return -1;
+    }
+
     // divide a range of [iMinVal, iMaxVal] into 1024 buckets
     int32_t span = pBucket->range.iMaxVal - pBucket->range.iMinVal;
     if (span < pBucket->numOfSlots) {
@@ -209,6 +246,12 @@ int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value) {
     double posx = (v + DBL_MAX) / x;
     return ((int32_t)posx) % pBucket->numOfSlots;
   } else {

+    // out of range
+    if (v < pBucket->range.dMinVal || v > pBucket->range.dMaxVal) {
+      return -1;
+    }
+
     // divide a range of [dMinVal, dMaxVal] into 1024 buckets
     double span = pBucket->range.dMaxVal - pBucket->range.dMinVal;
     if (span < pBucket->numOfSlots) {
@@ -262,7 +305,7 @@ static void resetSlotInfo(tMemBucket* pBucket) {
   }
 }

-tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType) {
+tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, double maxval) {
   tMemBucket *pBucket = (tMemBucket *)calloc(1, sizeof(tMemBucket));
   if (pBucket == NULL) {
     return NULL;
@@ -278,9 +321,14 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType) {

   pBucket->maxCapacity = 200000;

+  if (setBoundingBox(&pBucket->range, pBucket->type, minval, maxval) != 0) {
+    uError("MemBucket:%p, invalid value range: %f-%f", pBucket, minval, maxval);
+    free(pBucket);
+    return NULL;
+  }
+
   pBucket->elemPerPage = (pBucket->bufPageSize - sizeof(tFilePage))/pBucket->bytes;
   pBucket->comparFn = getKeyComparFunc(pBucket->type);
-  resetBoundingBox(&pBucket->range, pBucket->type);

   pBucket->hashFunc = getHashFunc(pBucket->type);
   if (pBucket->hashFunc == NULL) {
@@ -395,23 +443,25 @@ void tMemBucketUpdateBoundingBox(MinMaxEntry *r, char *data, int32_t dataType) {
 /*
  * in memory bucket, we only accept data array list
  */
-void tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
+int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
   assert(pBucket != NULL && data != NULL && size > 0);

   pBucket->total += (int32_t)size;

   int32_t bytes = pBucket->bytes;

   for (int32_t i = 0; i < size; ++i) {
     char *d = (char *) data + i * bytes;

-    int32_t slotIdx = (pBucket->hashFunc)(pBucket, d);
-    assert(slotIdx >= 0);
+    int32_t index = (pBucket->hashFunc)(pBucket, d);
+    if (index == -1) {  // the value is out of range, do not add it into bucket
+      return -1;
+    }

-    tMemBucketSlot *pSlot = &pBucket->pSlots[slotIdx];
+    tMemBucketSlot *pSlot = &pBucket->pSlots[index];
     tMemBucketUpdateBoundingBox(&pSlot->range, d, pBucket->type);

     // ensure available memory pages to allocate
-    int32_t groupId = getGroupId(pBucket->numOfSlots, slotIdx, pBucket->times);
+    int32_t groupId = getGroupId(pBucket->numOfSlots, index, pBucket->times);
     int32_t pageId = -1;

     if (pSlot->info.data == NULL || pSlot->info.data->num >= pBucket->elemPerPage) {
@@ -432,10 +482,12 @@ void tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
     pSlot->info.data->num += 1;
     pSlot->info.size += 1;
   }

+  return 0;
 }

 ////////////////////////////////////////////////////////////////////////////////////////////
-static void findMaxMinValue(tMemBucket *pMemBucket, double *maxVal, double *minVal) {
+static UNUSED_FUNC void findMaxMinValue(tMemBucket *pMemBucket, double *maxVal, double *minVal) {
   *minVal = DBL_MAX;
   *maxVal = -DBL_MAX;

@@ -681,16 +733,29 @@ double getPercentile(tMemBucket *pMemBucket, double percent) {

   // find the min/max value, no need to scan all data in bucket
   if (fabs(percent - 100.0) < DBL_EPSILON || (percent < DBL_EPSILON)) {
-    double minx = 0, maxx = 0;
-    findMaxMinValue(pMemBucket, &maxx, &minx);
-
-    return fabs(percent - 100) < DBL_EPSILON ? maxx : minx;
+    MinMaxEntry* pRange = &pMemBucket->range;
+
+    switch(pMemBucket->type) {
+      case TSDB_DATA_TYPE_TINYINT:
+      case TSDB_DATA_TYPE_SMALLINT:
+      case TSDB_DATA_TYPE_INT:
+        return fabs(percent - 100) < DBL_EPSILON? pRange->iMaxVal:pRange->iMinVal;
+      case TSDB_DATA_TYPE_BIGINT: {
+        double v = (double)(fabs(percent - 100) < DBL_EPSILON ? pRange->i64MaxVal : pRange->i64MinVal);
+        return v;
+      }
+      case TSDB_DATA_TYPE_FLOAT:
+      case TSDB_DATA_TYPE_DOUBLE:
+        return fabs(percent - 100) < DBL_EPSILON? pRange->dMaxVal:pRange->dMinVal;
+      default:
+        return -1;
+    }
   }

   double percentVal = (percent * (pMemBucket->total - 1)) / ((double)100.0);
-  int32_t orderIdx = (int32_t)percentVal;

   // do put data by using buckets
+  int32_t orderIdx = (int32_t)percentVal;
   return getPercentileImpl(pMemBucket, orderIdx, percentVal - orderIdx);
 }

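A note on the reworked tMemBucket API above: the bucket now takes its value range at creation time, and tMemBucketPut() reports out-of-range values with -1 instead of asserting. The following is only a hypothetical usage sketch; the percentileOfInts() helper, the include path, and the tMemBucketDestroy() cleanup call are assumptions for illustration, not taken from this diff.

#include <stdio.h>
#include <stdlib.h>
#include "qPercentile.h"   // assumed header name for the tMemBucket declarations

// Sketch: compute the 90th percentile of a batch of int32 samples with the new API.
static double percentileOfInts(int32_t *values, int32_t num) {
  // The new tMemBucketCreate() needs the value range up front, so scan it first.
  int32_t minv = values[0], maxv = values[0];
  for (int32_t i = 1; i < num; ++i) {
    if (values[i] < minv) minv = values[i];
    if (values[i] > maxv) maxv = values[i];
  }

  tMemBucket *pBucket = tMemBucketCreate(sizeof(int32_t), TSDB_DATA_TYPE_INT, minv, maxv);
  if (pBucket == NULL) {  // creation now fails when minval > maxval
    return -1;
  }

  // tMemBucketPut() now returns -1 for out-of-range values instead of asserting.
  if (tMemBucketPut(pBucket, values, num) != 0) {
    fprintf(stderr, "some values fall outside the declared range\n");
  }

  double p90 = getPercentile(pBucket, 90.0);
  tMemBucketDestroy(pBucket);  // assumed cleanup counterpart of tMemBucketCreate()
  return p90;
}
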
@@ -54,7 +54,7 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun
     return TSDB_CODE_QRY_OUT_OF_MEMORY;
   }

-  pWindowResInfo->interval = pRuntimeEnv->pQuery->intervalTime;
+  pWindowResInfo->interval = pRuntimeEnv->pQuery->interval.interval;

   pSummary->internalSupSize += sizeof(SWindowResult) * threshold;
   pSummary->internalSupSize += (pRuntimeEnv->pQuery->numOfOutput * sizeof(SResultInfo) + pRuntimeEnv->interBufSize) * pWindowResInfo->capacity;
@@ -127,10 +127,25 @@ void clearFirstNTimeWindow(SQueryRuntimeEnv *pRuntimeEnv, int32_t num) {
   int32_t numOfClosed = numOfClosedTimeWindow(pWindowResInfo);
   assert(num >= 0 && num <= numOfClosed);

+  int16_t type = pWindowResInfo->type;
+
+  char *key = NULL;
+  int16_t bytes = -1;
+
   for (int32_t i = 0; i < num; ++i) {
     SWindowResult *pResult = &pWindowResInfo->pResult[i];
     if (pResult->closed) {  // remove the window slot from hash table
-      taosHashRemove(pWindowResInfo->hashList, (const char *)&pResult->skey, pWindowResInfo->type);
+      // todo refactor
+      if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
+        key = varDataVal(pResult->key);
+        bytes = varDataLen(pResult->key);
+      } else {
+        key = (char*) &pResult->win.skey;
+        bytes = tDataTypeDesc[pWindowResInfo->type].nSize;
+      }
+
+      taosHashRemove(pWindowResInfo->hashList, (const char *)key, bytes);
     } else {
       break;
     }
@@ -150,15 +165,24 @@ void clearFirstNTimeWindow(SQueryRuntimeEnv *pRuntimeEnv, int32_t num) {
   }

   pWindowResInfo->size = remain;

   for (int32_t k = 0; k < pWindowResInfo->size; ++k) {
     SWindowResult *pResult = &pWindowResInfo->pResult[k];
-    int32_t *p = (int32_t *)taosHashGet(pWindowResInfo->hashList, (const char *)&pResult->skey,
-                                        tDataTypeDesc[pWindowResInfo->type].nSize);
+    if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
+      key = varDataVal(pResult->key);
+      bytes = varDataLen(pResult->key);
+    } else {
+      key = (char*) &pResult->win.skey;
+      bytes = tDataTypeDesc[pWindowResInfo->type].nSize;
+    }
+
+    int32_t *p = (int32_t *)taosHashGet(pWindowResInfo->hashList, (const char *)key, bytes);
     assert(p != NULL);

     int32_t v = (*p - num);
     assert(v >= 0 && v <= pWindowResInfo->size);
-    taosHashPut(pWindowResInfo->hashList, (char *)&pResult->skey, tDataTypeDesc[pWindowResInfo->type].nSize,
-                (char *)&v, sizeof(int32_t));
+    taosHashPut(pWindowResInfo->hashList, (char *)key, bytes, (char *)&v, sizeof(int32_t));
   }

   pWindowResInfo->curIndex = -1;
@@ -207,20 +231,19 @@ void removeRedundantWindow(SWindowResInfo *pWindowResInfo, TSKEY lastKey, int32_
   }

   // get the result order
-  int32_t resultOrder = (pWindowResInfo->pResult[0].skey < pWindowResInfo->pResult[1].skey)? 1:-1;
+  int32_t resultOrder = (pWindowResInfo->pResult[0].win.skey < pWindowResInfo->pResult[1].win.skey)? 1:-1;

   if (order != resultOrder) {
     return;
   }

   int32_t i = 0;
   if (order == QUERY_ASC_FORWARD_STEP) {
-    TSKEY ekey = pWindowResInfo->pResult[i].skey + pWindowResInfo->interval;
+    TSKEY ekey = pWindowResInfo->pResult[i].win.ekey;
     while (i < pWindowResInfo->size && (ekey < lastKey)) {
       ++i;
     }
   } else if (order == QUERY_DESC_FORWARD_STEP) {
-    while (i < pWindowResInfo->size && (pWindowResInfo->pResult[i].skey > lastKey)) {
+    while (i < pWindowResInfo->size && (pWindowResInfo->pResult[i].win.skey > lastKey)) {
       ++i;
     }
   }
@@ -258,7 +281,7 @@ void clearTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pWindow
   pWindowRes->numOfRows = 0;
   pWindowRes->pos = (SPosInfo){-1, -1};
   pWindowRes->closed = false;
-  pWindowRes->skey = TSKEY_INITIAL_VAL;
+  pWindowRes->win = TSWINDOW_INITIALIZER;
 }

 /**
@@ -268,7 +291,7 @@ void clearTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pWindow
  */
 void copyTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *dst, const SWindowResult *src) {
   dst->numOfRows = src->numOfRows;
-  dst->skey = src->skey;
+  dst->win = src->win;
   dst->closed = src->closed;

   int32_t nOutputCols = pRuntimeEnv->pQuery->numOfOutput;

src/query/src/sql.c (2277 lines changed)
File diff suppressed because it is too large

|
||||||
sInfo("%s, it is configured", pPeer->id);
|
sInfo("%s, it is configured", pPeer->id);
|
||||||
int ret = strcmp(pPeer->fqdn, tsNodeFqdn);
|
int ret = strcmp(pPeer->fqdn, tsNodeFqdn);
|
||||||
if (pPeer->nodeId == 0 || (ret > 0) || (ret == 0 && pPeer->port > tsSyncPort)) {
|
if (pPeer->nodeId == 0 || (ret > 0) || (ret == 0 && pPeer->port > tsSyncPort)) {
|
||||||
sDebug("%s, start to check peer connection", pPeer->id);
|
|
||||||
int32_t checkMs = 100 + (pNode->vgId * 10) % 100;
|
int32_t checkMs = 100 + (pNode->vgId * 10) % 100;
|
||||||
if (pNode->vgId) checkMs = tsStatusInterval * 2000 + 100;
|
if (pNode->vgId > 1) checkMs = tsStatusInterval * 2000 + checkMs;
|
||||||
|
sDebug("%s, start to check peer connection after %d ms", pPeer->id, checkMs);
|
||||||
taosTmrReset(syncCheckPeerConnection, checkMs, pPeer, syncTmrCtrl, &pPeer->timer);
|
taosTmrReset(syncCheckPeerConnection, checkMs, pPeer, syncTmrCtrl, &pPeer->timer);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -248,6 +248,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
   STsdbMeta* pMeta = tsdbGetMeta(tsdb);
   assert(pMeta != NULL && sizeOfGroup >= 1 && pCond != NULL && pCond->numOfCols > 0);

+  // todo apply the lastkey of table check to avoid to load header file
   for (int32_t i = 0; i < sizeOfGroup; ++i) {
     SArray* group = *(SArray**) taosArrayGet(groupList->pGroupList, i);

@@ -388,9 +389,9 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
     SDataRow row = *(SDataRow *)SL_GET_NODE_DATA(node);
     TSKEY key = dataRowKey(row);  // first timestamp in buffer
     tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
-              "-%" PRId64 ", lastKey:%" PRId64 ", %p",
+              "-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", %p",
              pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pMem->keyFirst, pMem->keyLast,
-             pCheckInfo->lastKey, pHandle->qinfo);
+             pCheckInfo->lastKey, pMem->numOfRows, pHandle->qinfo);

     if (ASCENDING_TRAVERSE(order)) {
       assert(pCheckInfo->lastKey <= key);
@@ -410,9 +411,9 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
     SDataRow row = *(SDataRow *)SL_GET_NODE_DATA(node);
     TSKEY key = dataRowKey(row);  // first timestamp in buffer
     tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
-              "-%" PRId64 ", lastKey:%" PRId64 ", %p",
+              "-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", %p",
              pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pIMem->keyFirst, pIMem->keyLast,
-             pCheckInfo->lastKey, pHandle->qinfo);
+             pCheckInfo->lastKey, pIMem->numOfRows, pHandle->qinfo);

     if (ASCENDING_TRAVERSE(order)) {
       assert(pCheckInfo->lastKey <= key);
@@ -734,6 +735,7 @@ static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* p
   return TSDB_CODE_SUCCESS;
 }

+static int32_t getEndPosInDataBlock(STsdbQueryHandle* pQueryHandle, SDataBlockInfo* pBlockInfo);
 static int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, int32_t start, int32_t end);
 static void moveDataToFront(STsdbQueryHandle* pQueryHandle, int32_t numOfRows, int32_t numOfCols);
 static void doCheckGeneratedBlockRange(STsdbQueryHandle* pQueryHandle);
@@ -790,9 +792,10 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBloc
      * Here the buffer is not enough, so only part of file block can be loaded into memory buffer
      */
     assert(pQueryHandle->outputCapacity >= binfo.rows);
+    int32_t endPos = getEndPosInDataBlock(pQueryHandle, &binfo);

-    if ((cur->pos == 0 && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
-        (cur->pos == (binfo.rows - 1) && (!ASCENDING_TRAVERSE(pQueryHandle->order)))) {
+    if ((cur->pos == 0 && endPos == binfo.rows -1 && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
+        (cur->pos == (binfo.rows - 1) && endPos == 0 && (!ASCENDING_TRAVERSE(pQueryHandle->order)))) {
       pQueryHandle->realNumOfRows = binfo.rows;

       cur->rows = binfo.rows;
@@ -808,7 +811,6 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBloc
         cur->pos = -1;
       }
     } else {  // partially copy to dest buffer
-      int32_t endPos = ASCENDING_TRAVERSE(pQueryHandle->order)? (binfo.rows - 1): 0;
       copyAllRemainRowsFromFileBlock(pQueryHandle, pCheckInfo, &binfo, endPos);
       cur->mixBlock = true;
     }
@@ -1203,6 +1205,29 @@ static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STabl
             cur->win.ekey, cur->rows, pQueryHandle->qinfo);
 }

+int32_t getEndPosInDataBlock(STsdbQueryHandle* pQueryHandle, SDataBlockInfo* pBlockInfo) {
+  // NOTE: reverse the order to find the end position in data block
+  int32_t endPos = -1;
+  int32_t order = ASCENDING_TRAVERSE(pQueryHandle->order)? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
+
+  SQueryFilePos* cur = &pQueryHandle->cur;
+  SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0];
+
+  if (ASCENDING_TRAVERSE(pQueryHandle->order) && pQueryHandle->window.ekey >= pBlockInfo->window.ekey) {
+    endPos = pBlockInfo->rows - 1;
+    cur->mixBlock = (cur->pos != 0);
+  } else if (!ASCENDING_TRAVERSE(pQueryHandle->order) && pQueryHandle->window.ekey <= pBlockInfo->window.skey) {
+    endPos = 0;
+    cur->mixBlock = (cur->pos != pBlockInfo->rows - 1);
+  } else {
+    assert(pCols->numOfRows > 0);
+    endPos = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pQueryHandle->window.ekey, order);
+    cur->mixBlock = true;
+  }
+
+  return endPos;
+}
+
 // only return the qualified data to client in terms of query time window, data rows in the same block but do not
 // be included in the query time window will be discarded
 static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock) {
@@ -1224,19 +1249,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
   int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pQueryHandle));

   STable* pTable = pCheckInfo->pTableObj;
-  int32_t endPos = cur->pos;
-
-  if (ASCENDING_TRAVERSE(pQueryHandle->order) && pQueryHandle->window.ekey > blockInfo.window.ekey) {
-    endPos = blockInfo.rows - 1;
-    cur->mixBlock = (cur->pos != 0);
-  } else if (!ASCENDING_TRAVERSE(pQueryHandle->order) && pQueryHandle->window.ekey < blockInfo.window.skey) {
-    endPos = 0;
-    cur->mixBlock = (cur->pos != blockInfo.rows - 1);
-  } else {
-    assert(pCols->numOfRows > 0);
-    endPos = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pQueryHandle->window.ekey, order);
-    cur->mixBlock = true;
-  }
+  int32_t endPos = getEndPosInDataBlock(pQueryHandle, &blockInfo);

   tsdbDebug("%p uid:%" PRIu64",tid:%d start merge data block, file block range:%"PRIu64"-%"PRIu64" rows:%d, start:%d,"
             "end:%d, %p",
@@ -1338,8 +1351,8 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
   }

   cur->blockCompleted =
-      (((pos >= endPos || cur->lastKey > pQueryHandle->window.ekey) && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
-       ((pos <= endPos || cur->lastKey < pQueryHandle->window.ekey) && !ASCENDING_TRAVERSE(pQueryHandle->order)));
+      (((pos > endPos || cur->lastKey > pQueryHandle->window.ekey) && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
+       ((pos < endPos || cur->lastKey < pQueryHandle->window.ekey) && !ASCENDING_TRAVERSE(pQueryHandle->order)));

   if (!ASCENDING_TRAVERSE(pQueryHandle->order)) {
     SWAP(cur->win.skey, cur->win.ekey, TSKEY);
@@ -2071,13 +2084,19 @@ STimeWindow changeTableGroupByLastrow(STableGroupInfo *groupList) {
     if (keyInfo.pTable != NULL) {
       totalNumOfTable++;
       taosArrayPush(pGroup, &keyInfo);
+    } else {
+      taosArrayDestroy(pGroup);
+
+      taosArrayRemove(groupList->pGroupList, j);
+      numOfGroups -= 1;
+      j -= 1;
     }
   }

   // window does not being updated, so set the original
   if (window.skey == INT64_MAX && window.ekey == INT64_MIN) {
     window = TSWINDOW_INITIALIZER;
-    assert(totalNumOfTable == 0);
+    assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == 0);
   }

   groupList->numOfTables = totalNumOfTable;
@@ -2398,6 +2417,14 @@ static bool indexedNodeFilterFp(const void* pNode, void* param) {
     val = tdGetKVRowValOfCol(pTable->tagVal, pInfo->sch.colId);
   }

+  if (pInfo->optr == TSDB_RELATION_ISNULL || pInfo->optr == TSDB_RELATION_NOTNULL) {
+    if (pInfo->optr == TSDB_RELATION_ISNULL) {
+      return (val == NULL) || isNull(val, pInfo->sch.type);
+    } else if (pInfo->optr == TSDB_RELATION_NOTNULL) {
+      return (val != NULL) && (!isNull(val, pInfo->sch.type));
+    }
+  }
+
   int32_t ret = 0;
   if (val == NULL) { //the val is possible to be null, so check it out carefully
     ret = -1; // val is missing in table tags value pairs
@@ -2682,4 +2709,5 @@ void tsdbDestroyTableGroup(STableGroupInfo *pGroupList) {
   }

   taosArrayDestroy(pGroupList->pGroupList);
+  pGroupList->numOfTables = 0;
 }

@@ -71,7 +71,7 @@ static SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const
  * @param pCacheObj Cache object
  * @param pNode     Cache slot object
  */
-static void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode);
+static void taosAddToTrashcan(SCacheObj *pCacheObj, SCacheDataNode *pNode);

 /**
  * remove nodes in trash with refCount == 0 in cache
@@ -80,7 +80,7 @@ static void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode);
  * @param force   force model, if true, remove data in trash without check refcount.
  *                may cause corruption. So, forece model only applys before cache is closed
  */
-static void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force);
+static void taosTrashcanEmpty(SCacheObj *pCacheObj, bool force);

 /**
  * release node
@@ -165,7 +165,7 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext
     return NULL;
   }

-  // set free cache node callback function for hash table
+  // set free cache node callback function
   pCacheObj->freeFp = fn;
   pCacheObj->refreshTime = refreshTimeInSeconds * 1000;
   pCacheObj->extendLifespan = extendLifespan;
@@ -222,7 +222,7 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v

       taosTFree(p);
     } else {
-      taosAddToTrash(pCacheObj, p);
+      taosAddToTrashcan(pCacheObj, p);
       uDebug("cache:%s, key:%p, %p exist in cache, updated old:%p", pCacheObj->name, key, pNode1->data, p->data);
     }
   }
@@ -322,7 +322,12 @@ void *taosCacheTransfer(SCacheObj *pCacheObj, void **data) {
 }

 void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
-  if (pCacheObj == NULL || (*data) == NULL || (taosHashGetSize(pCacheObj->pHashTable) + pCacheObj->numOfElemsInTrash == 0)) {
+  if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) + pCacheObj->numOfElemsInTrash == 0) {
+    return;
+  }
+
+  if ((*data) == NULL) {
+    uError("cache:%s, NULL data to release", pCacheObj->name);
     return;
   }

@@ -394,19 +399,19 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
             "others already", pCacheObj->name, pNode->key, p->data, T_REF_VAL_GET(p), pNode->data);

       assert(p->pTNodeHeader == NULL);
-      taosAddToTrash(pCacheObj, p);
+      taosAddToTrashcan(pCacheObj, p);
     } else {
       uDebug("cache:%s, key:%p, %p successfully removed from hash table, refcnt:%d", pCacheObj->name, pNode->key,
              pNode->data, ref);
       if (ref > 0) {
         assert(pNode->pTNodeHeader == NULL);

-        taosAddToTrash(pCacheObj, pNode);
+        taosAddToTrashcan(pCacheObj, pNode);
       } else {  // ref == 0
         atomic_sub_fetch_64(&pCacheObj->totalSize, pNode->size);

         int32_t size = (int32_t)taosHashGetSize(pCacheObj->pHashTable);
-        uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, num:%d size:%" PRId64 "bytes",
+        uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, totalNum:%d size:%" PRId64 "bytes",
               pCacheObj->name, pNode->key, pNode->data, pNode->size, size, pCacheObj->totalSize);

         if (pCacheObj->freeFp) {
@@ -427,6 +432,26 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
     char* key = pNode->key;
     char* p = pNode->data;

+//    int32_t ref = T_REF_VAL_GET(pNode);
+//
+//    if (ref == 1 && inTrashcan) {
+//      // If it is the last ref, remove it from trashcan linked-list first, and then destroy it.Otherwise, it may be
+//      // destroyed by refresh worker if decrease ref count before removing it from linked-list.
+//      assert(pNode->pTNodeHeader->pData == pNode);
+//
+//      __cache_wr_lock(pCacheObj);
+//      doRemoveElemInTrashcan(pCacheObj, pNode->pTNodeHeader);
+//      __cache_unlock(pCacheObj);
+//
+//      ref = T_REF_DEC(pNode);
+//      assert(ref == 0);
+//
+//      doDestroyTrashcanElem(pCacheObj, pNode->pTNodeHeader);
+//    } else {
+//      ref = T_REF_DEC(pNode);
+//      assert(ref >= 0);
+//    }
+
     int32_t ref = T_REF_DEC(pNode);
     uDebug("cache:%s, key:%p, %p released, refcnt:%d, data in trashcan:%d", pCacheObj->name, key, p, ref, inTrashcan);
   }
@@ -447,7 +472,7 @@ static bool travHashTableEmptyFn(void* param, void* data) {
   if (T_REF_VAL_GET(pNode) == 0) {
     taosCacheReleaseNode(pCacheObj, pNode);
   } else { // do add to trashcan
-    taosAddToTrash(pCacheObj, pNode);
+    taosAddToTrashcan(pCacheObj, pNode);
   }

   // this node should be remove from hash table
@@ -458,7 +483,7 @@ void taosCacheEmpty(SCacheObj *pCacheObj) {
   SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = NULL, .time = taosGetTimestampMs()};

   taosHashCondTraverse(pCacheObj->pHashTable, travHashTableEmptyFn, &sup);
-  taosTrashCanEmpty(pCacheObj, false);
+  taosTrashcanEmpty(pCacheObj, false);
 }

 void taosCacheCleanup(SCacheObj *pCacheObj) {
@@ -498,7 +523,7 @@ SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *
   return pNewNode;
 }

-void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
+void taosAddToTrashcan(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
   if (pNode->inTrashcan) { /* node is already in trash */
     assert(pNode->pTNodeHeader != NULL && pNode->pTNodeHeader->pData == pNode);
     return;
@@ -520,11 +545,11 @@ void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
   pCacheObj->numOfElemsInTrash++;
   __cache_unlock(pCacheObj);

-  uDebug("cache:%s key:%p, %p move to trash, numOfElem in trash:%d", pCacheObj->name, pNode->key, pNode->data,
+  uDebug("cache:%s key:%p, %p move to trashcan, numOfElem in trashcan:%d", pCacheObj->name, pNode->key, pNode->data,
          pCacheObj->numOfElemsInTrash);
 }

-void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) {
+void taosTrashcanEmpty(SCacheObj *pCacheObj, bool force) {
   __cache_wr_lock(pCacheObj);

   if (pCacheObj->numOfElemsInTrash == 0) {
@@ -568,7 +593,7 @@ void doCleanupDataCache(SCacheObj *pCacheObj) {

   // todo memory leak if there are object with refcount greater than 0 in hash table?
   taosHashCleanup(pCacheObj->pHashTable);
-  taosTrashCanEmpty(pCacheObj, true);
+  taosTrashcanEmpty(pCacheObj, true);

   __cache_lock_destroy(pCacheObj);

@@ -643,7 +668,7 @@ void* taosCacheTimedRefresh(void *handle) {
       doCacheRefresh(pCacheObj, now, NULL);
     }

-    taosTrashCanEmpty(pCacheObj, false);
+    taosTrashcanEmpty(pCacheObj, false);
   }

   return NULL;

@@ -16,7 +16,7 @@
 #include "os.h"
 #include "tulog.h"
 #include "tsocket.h"
-#include "tutil.h"
+#include "taoserror.h"

 int taosGetFqdn(char *fqdn) {
   char hostname[1024];

@@ -56,7 +56,16 @@ uint32_t taosGetIpFromFqdn(const char *fqdn) {
     freeaddrinfo(result);
     return ip;
   } else {
-    uError("failed get the ip address, fqdn:%s, code:%d, reason:%s", fqdn, ret, gai_strerror(ret));
+#ifdef EAI_SYSTEM
+    if (ret == EAI_SYSTEM) {
+      uError("failed to get the ip address, fqdn:%s, code:%d, reason:%s", fqdn, ret, strerror(errno));
+      terrno = TAOS_SYSTEM_ERROR(errno);
+    } else {
+      uError("failed to get the ip address, fqdn:%s, code:%d, reason:%s", fqdn, ret, gai_strerror(ret));
+    }
+#else
+    uError("failed to get the ip address, fqdn:%s, code:%d, reason:%s", fqdn, ret, gai_strerror(ret));
+#endif
     return 0xFFFFFFFF;
   }
 }
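The new branch above separates resolver-level failures, reported through gai_strerror(), from EAI_SYSTEM, where the real cause is carried in errno. As a hedged illustration of the same split outside the C code (my own sketch using Python's standard library, not part of this change), the resolver error code and message are available on socket.gaierror:

import socket

def get_ip_from_fqdn(fqdn):
    """Resolve an FQDN to an IPv4 address; report resolver failures like the C code above."""
    try:
        # getaddrinfo is the same call the C implementation wraps
        info = socket.getaddrinfo(fqdn, None, socket.AF_INET, socket.SOCK_STREAM)
        return info[0][4][0]
    except socket.gaierror as err:
        # err.errno carries the EAI_* code; err.strerror is the gai_strerror() text
        print("failed to get the ip address, fqdn:%s, code:%d, reason:%s"
              % (fqdn, err.errno, err.strerror))
        return None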
@@ -12,65 +12,65 @@ int32_t tsMaxMeterConnections = 200;
 // test cache
 TEST(testCase, client_cache_test) {
   const int32_t REFRESH_TIME_IN_SEC = 2;
-  SCacheObj* tscCacheHandle = taosCacheInit(TSDB_DATA_TYPE_BINARY, REFRESH_TIME_IN_SEC, 0, NULL, "test");
+  SCacheObj* tscMetaCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, REFRESH_TIME_IN_SEC, 0, NULL, "test");

   const char* key1 = "test1";
   char data1[] = "test11";

-  char* cachedObj = (char*) taosCachePut(tscCacheHandle, key1, strlen(key1), data1, strlen(data1)+1, 1);
+  char* cachedObj = (char*) taosCachePut(tscMetaCache, key1, strlen(key1), data1, strlen(data1)+1, 1);
   sleep(REFRESH_TIME_IN_SEC+1);

   printf("obj is still valid: %s\n", cachedObj);

   char data2[] = "test22";
-  taosCacheRelease(tscCacheHandle, (void**) &cachedObj, false);
+  taosCacheRelease(tscMetaCache, (void**) &cachedObj, false);

   /* the object is cleared by cache clean operation */
-  cachedObj = (char*) taosCachePut(tscCacheHandle, key1, strlen(key1), data2, strlen(data2)+1, 20);
+  cachedObj = (char*) taosCachePut(tscMetaCache, key1, strlen(key1), data2, strlen(data2)+1, 20);
   printf("after updated: %s\n", cachedObj);

   printf("start to remove data from cache\n");
-  taosCacheRelease(tscCacheHandle, (void**) &cachedObj, false);
+  taosCacheRelease(tscMetaCache, (void**) &cachedObj, false);
   printf("end of removing data from cache\n");

   const char* key3 = "test2";
   const char* data3 = "kkkkkkk";

-  char* cachedObj2 = (char*) taosCachePut(tscCacheHandle, key3, strlen(key3), data3, strlen(data3) + 1, 1);
+  char* cachedObj2 = (char*) taosCachePut(tscMetaCache, key3, strlen(key3), data3, strlen(data3) + 1, 1);
   printf("%s\n", cachedObj2);

-  taosCacheRelease(tscCacheHandle, (void**) &cachedObj2, false);
+  taosCacheRelease(tscMetaCache, (void**) &cachedObj2, false);

   sleep(3);
-  char* d = (char*) taosCacheAcquireByKey(tscCacheHandle, key3, strlen(key3));
+  char* d = (char*) taosCacheAcquireByKey(tscMetaCache, key3, strlen(key3));
   // assert(d == NULL);

   char key5[] = "test5";
   char data5[] = "data5kkkkk";
-  cachedObj2 = (char*) taosCachePut(tscCacheHandle, key5, strlen(key5), data5, strlen(data5) + 1, 20);
+  cachedObj2 = (char*) taosCachePut(tscMetaCache, key5, strlen(key5), data5, strlen(data5) + 1, 20);

   const char* data6= "new Data after updated";
-  taosCacheRelease(tscCacheHandle, (void**) &cachedObj2, false);
+  taosCacheRelease(tscMetaCache, (void**) &cachedObj2, false);

-  cachedObj2 = (char*) taosCachePut(tscCacheHandle, key5, strlen(key5), data6, strlen(data6) + 1, 20);
+  cachedObj2 = (char*) taosCachePut(tscMetaCache, key5, strlen(key5), data6, strlen(data6) + 1, 20);
   printf("%s\n", cachedObj2);

-  taosCacheRelease(tscCacheHandle, (void**) &cachedObj2, true);
+  taosCacheRelease(tscMetaCache, (void**) &cachedObj2, true);

   const char* data7 = "add call update procedure";
-  cachedObj2 = (char*) taosCachePut(tscCacheHandle, key5, strlen(key5), data7, strlen(data7) + 1, 20);
+  cachedObj2 = (char*) taosCachePut(tscMetaCache, key5, strlen(key5), data7, strlen(data7) + 1, 20);
   printf("%s\n=======================================\n\n", cachedObj2);

-  char* cc = (char*) taosCacheAcquireByKey(tscCacheHandle, key5, strlen(key5));
+  char* cc = (char*) taosCacheAcquireByKey(tscMetaCache, key5, strlen(key5));

-  taosCacheRelease(tscCacheHandle, (void**) &cachedObj2, true);
-  taosCacheRelease(tscCacheHandle, (void**) &cc, false);
+  taosCacheRelease(tscMetaCache, (void**) &cachedObj2, true);
+  taosCacheRelease(tscMetaCache, (void**) &cc, false);

   const char* data8 = "ttft";
   const char* key6 = "key6";

-  char* ft = (char*) taosCachePut(tscCacheHandle, key6, strlen(key6), data8, strlen(data8), 20);
-  taosCacheRelease(tscCacheHandle, (void**) &ft, false);
+  char* ft = (char*) taosCachePut(tscMetaCache, key6, strlen(key6), data8, strlen(data8), 20);
+  taosCacheRelease(tscMetaCache, (void**) &ft, false);

   /**
    * 140ns

@@ -78,14 +78,14 @@ TEST(testCase, client_cache_test) {
   uint64_t startTime = taosGetTimestampUs();
   printf("Cache Performance Test\nstart time:%" PRIu64 "\n", startTime);
   for(int32_t i=0; i<1000; ++i) {
-    char* dd = (char*) taosCacheAcquireByKey(tscCacheHandle, key6, strlen(key6));
+    char* dd = (char*) taosCacheAcquireByKey(tscMetaCache, key6, strlen(key6));
     if (dd != NULL) {
       // printf("get the data\n");
     } else {
       printf("data has been released\n");
     }

-    taosCacheRelease(tscCacheHandle, (void**) &dd, false);
+    taosCacheRelease(tscMetaCache, (void**) &dd, false);
   }

   uint64_t endTime = taosGetTimestampUs();

@@ -93,7 +93,7 @@ TEST(testCase, client_cache_test) {

   printf("End of Test, %" PRIu64 "\nTotal Elapsed Time:%" PRIu64 " us.avg:%f us\n", endTime, el, el/1000.0);

-  taosCacheCleanup(tscCacheHandle);
+  taosCacheCleanup(tscMetaCache);
 }

 TEST(testCase, cache_resize_test) {

@@ -0,0 +1,96 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import os
import sys
sys.path.insert(0, os.getcwd())
from util.log import *
from util.sql import *
from util.dnodes import *
import taos


class TwoClients:
    def initConnection(self):
        self.host = "127.0.0.1"
        self.user = "root"
        self.password = "taosdata"
        self.config = "/home/xp/git/TDengine/sim/dnode1/cfg"

    def run(self):
        tdDnodes.init("")
        tdDnodes.setTestCluster(False)
        tdDnodes.setValgrind(False)

        tdDnodes.stopAll()
        tdDnodes.deploy(1)
        tdDnodes.start(1)

        # first client creates a stable and inserts data
        conn1 = taos.connect(self.host, self.user, self.password, self.config)
        cursor1 = conn1.cursor()
        cursor1.execute("drop database if exists db")
        cursor1.execute("create database db")
        cursor1.execute("use db")
        cursor1.execute("create table tb (ts timestamp, id int) tags(loc nchar(30))")
        cursor1.execute("insert into t0 using tb tags('beijing') values(now, 1)")

        # second client alters the table created by the first client
        conn2 = taos.connect(self.host, self.user, self.password, self.config)
        cursor2 = conn2.cursor()
        cursor2.execute("use db")
        cursor2.execute("alter table tb add column name nchar(30)")

        # first client should not be able to use the original metadata
        tdSql.init(cursor1, True)
        tdSql.error("insert into t0 values(now, 2)")

        # first client should be able to insert data with updated metadata
        tdSql.execute("insert into t0 values(now, 2, 'test')")
        tdSql.query("select * from tb")
        tdSql.checkRows(2)

        # second client drops the table
        cursor2.execute("drop table t0")
        cursor2.execute("create table t0 using tb tags('beijing')")

        tdSql.execute("insert into t0 values(now, 2, 'test')")
        tdSql.query("select * from tb")
        tdSql.checkRows(1)

        # error expected when two clients drop the same column
        cursor2.execute("alter table tb drop column name")
        tdSql.error("alter table tb drop column name")

        cursor2.execute("alter table tb add column speed int")
        tdSql.error("alter table tb add column speed int")

        tdSql.execute("alter table tb add column size int")
        tdSql.query("describe tb")
        tdSql.checkRows(5)
        tdSql.checkData(0, 0, "ts")
        tdSql.checkData(1, 0, "id")
        tdSql.checkData(2, 0, "speed")
        tdSql.checkData(3, 0, "size")
        tdSql.checkData(4, 0, "loc")

        cursor1.close()
        cursor2.close()
        conn1.close()
        conn2.close()


clients = TwoClients()
clients.initConnection()
clients.run()
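The new test relies on the first failed insert to refresh the client's cached table schema before the next statement. A hedged sketch of how an application can rebuild its statements against the live schema after such an error (the helper below is hypothetical and only uses connector calls already shown in the test):

import taos

def describe_columns(cursor, table):
    """Return the current column (and tag) names of `table` so statements match the live schema."""
    cursor.execute("describe %s" % table)
    return [row[0] for row in cursor.fetchall()]

# Usage against the tables created above (connection settings as in TwoClients):
# conn = taos.connect("127.0.0.1", "root", "taosdata", "/home/xp/git/TDengine/sim/dnode1/cfg")
# cols = describe_columns(conn.cursor(), "db.tb")   # e.g. ['ts', 'id', 'name', 'loc'] after the alter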
@@ -186,6 +186,9 @@ python3 ./test.py -f functions/function_sum.py
 python3 ./test.py -f functions/function_top.py
 #python3 ./test.py -f functions/function_twa.py
 python3 queryCount.py
+python3 ./test.py -f query/queryGroupbyWithInterval.py
+python3 client/twoClients.py
+python3 test.py -f query/queryInterval.py

 # tools
 python3 test.py -f tools/taosdemo.py

@@ -33,7 +33,7 @@ class TDTestCase:
         tdDnodes.start(1)
         tdSql.execute('reset query cache')
         tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db cache 128 maxtables 10')
+        tdSql.execute('create database db cache 128')
         tdSql.execute('use db')

         tdLog.info("================= step1")
@@ -0,0 +1,225 @@
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

    def general(self):
        tdSql.execute("create table meters(ts timestamp, s int) tags(id int)")
        tdSql.execute("create table t0 using meters tags(0)")
        tdSql.execute("create table t1 using meters tags(1)")
        tdSql.execute("create table t2 using meters tags(2)")
        tdSql.execute("create table t3 using meters tags(3)")
        tdSql.execute("create table t4 using meters tags(4)")

        tdSql.execute("insert into t0 values('2019-01-01 00:00:00', 1)")
        tdSql.execute("insert into t1 values('2019-01-01 00:00:01', 1)")
        tdSql.execute("insert into t2 values('2019-01-01 00:01:00', 1)")
        tdSql.execute("insert into t1 values('2019-01-01 00:01:01', 1)")
        tdSql.execute("insert into t1 values('2019-01-01 00:01:02', 1)")
        tdSql.execute("insert into t1 values('2019-01-01 00:01:03', 1)")
        tdSql.execute("insert into t1 values('2019-01-01 00:01:30', 1)")
        tdSql.execute("insert into t1 values('2019-01-01 00:01:50', 1)")
        tdSql.execute("insert into t2 values('2019-01-01 00:02:00', 1)")
        tdSql.execute("insert into t3 values('2019-01-01 00:02:02', 1)")
        tdSql.execute("insert into t3 values('2019-01-01 00:02:59', 1)")
        tdSql.execute("insert into t4 values('2019-01-01 00:02:59', 1)")
        tdSql.execute("insert into t1 values('2019-01-01 00:03:10', 1)")
        tdSql.execute("insert into t2 values('2019-01-01 00:08:00', 1)")
        tdSql.execute("insert into t1 values('2019-01-01 00:08:00', 1)")

        tdSql.query("select count(*) from meters interval(1m, 1s)")
        tdSql.checkData(0, 1, 1)
        tdSql.checkData(1, 1, 2)
        tdSql.checkData(2, 1, 6)
        tdSql.checkData(3, 1, 3)
        tdSql.checkData(4, 1, 1)
        tdSql.checkData(5, 1, 2)

        tdSql.query("select count(*) from meters interval(1m, 2s)")
        tdSql.checkData(0, 1, 2)
        tdSql.checkData(1, 1, 2)
        tdSql.checkData(2, 1, 5)
        tdSql.checkData(3, 1, 3)
        tdSql.checkData(4, 1, 1)
        tdSql.checkData(5, 1, 2)

        tdSql.query("select count(*) from meters interval(90s, 1500a)")
        tdSql.checkData(0, 1, 2)
        tdSql.checkData(1, 1, 5)
        tdSql.checkData(2, 1, 5)
        tdSql.checkData(3, 1, 1)
        tdSql.checkData(4, 1, 2)

    def singleTable(self):
        tdSql.execute("create table car(ts timestamp, s int)")
        tdSql.execute("insert into car values('2019-01-01 00:00:00', 1)")
        tdSql.execute("insert into car values('2019-05-13 12:00:00', 1)")
        tdSql.execute("insert into car values('2019-12-31 23:59:59', 1)")
        tdSql.execute("insert into car values('2020-01-01 12:00:00', 1)")
        tdSql.execute("insert into car values('2020-01-02 12:00:00', 1)")
        tdSql.execute("insert into car values('2020-01-03 12:00:00', 1)")
        tdSql.execute("insert into car values('2020-01-04 12:00:00', 1)")
        tdSql.execute("insert into car values('2020-01-05 12:00:00', 1)")
        tdSql.execute("insert into car values('2020-01-31 12:00:00', 1)")
        tdSql.execute("insert into car values('2020-02-01 12:00:00', 1)")
        tdSql.execute("insert into car values('2020-02-02 12:00:00', 1)")
        tdSql.execute("insert into car values('2020-02-29 12:00:00', 1)")
        tdSql.execute("insert into car values('2020-03-01 12:00:00', 1)")
        tdSql.execute("insert into car values('2020-03-02 12:00:00', 1)")
        tdSql.execute("insert into car values('2020-03-15 12:00:00', 1)")
        tdSql.execute("insert into car values('2020-03-31 12:00:00', 1)")
        tdSql.execute("insert into car values('2020-05-01 12:00:00', 1)")

        tdSql.query("select count(*) from car interval(1n, 10d)")
        tdSql.checkData(0, 1, 1)
        tdSql.checkData(1, 1, 1)
        tdSql.checkData(2, 1, 6)
        tdSql.checkData(3, 1, 3)
        tdSql.checkData(4, 1, 3)
        tdSql.checkData(5, 1, 2)
        tdSql.checkData(6, 1, 1)

        tdSql.query("select count(*) from car interval(1n, 10d) order by ts desc")
        tdSql.checkData(0, 1, 1)
        tdSql.checkData(1, 1, 2)
        tdSql.checkData(2, 1, 3)
        tdSql.checkData(3, 1, 3)
        tdSql.checkData(4, 1, 6)
        tdSql.checkData(5, 1, 1)
        tdSql.checkData(6, 1, 1)

        tdSql.query("select count(*) from car interval(2n, 5d)")
        tdSql.checkData(0, 1, 1)
        tdSql.checkData(1, 1, 1)
        tdSql.checkData(2, 1, 6)
        tdSql.checkData(3, 1, 6)
        tdSql.checkData(4, 1, 3)

        tdSql.query("select count(*) from car interval(2n) order by ts desc")
        tdSql.checkData(0, 1, 3)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 6)
        tdSql.checkData(3, 1, 1)
        tdSql.checkData(4, 1, 1)

        tdSql.query("select count(*) from car interval(1y, 1n)")
        tdSql.checkData(0, 1, 1)
        tdSql.checkData(1, 1, 8)
        tdSql.checkData(2, 1, 8)

        tdSql.query("select count(*) from car interval(1y, 2n)")
        tdSql.checkData(0, 1, 1)
        tdSql.checkData(1, 1, 11)
        tdSql.checkData(2, 1, 5)

        tdSql.query("select count(*) from car where ts > '2019-05-14 00:00:00' interval(1y, 5d)")
        tdSql.checkData(0, 1, 6)
        tdSql.checkData(1, 1, 9)

    def superTable(self):
        tdSql.execute("create table cars(ts timestamp, s int) tags(id int)")
        tdSql.execute("create table car0 using cars tags(0)")
        tdSql.execute("create table car1 using cars tags(1)")
        tdSql.execute("create table car2 using cars tags(2)")
        tdSql.execute("create table car3 using cars tags(3)")
        tdSql.execute("create table car4 using cars tags(4)")

        tdSql.execute("insert into car0 values('2019-01-01 00:00:00', 1)")
        tdSql.execute("insert into car1 values('2019-05-13 12:00:00', 1)")
        tdSql.execute("insert into car2 values('2019-12-31 23:59:59', 1)")
        tdSql.execute("insert into car1 values('2020-01-01 12:00:00', 1)")
        tdSql.execute("insert into car1 values('2020-01-02 12:00:00', 1)")
        tdSql.execute("insert into car1 values('2020-01-03 12:00:00', 1)")
        tdSql.execute("insert into car1 values('2020-01-04 12:00:00', 1)")
        tdSql.execute("insert into car1 values('2020-01-05 12:00:00', 1)")
        tdSql.execute("insert into car1 values('2020-01-31 12:00:00', 1)")
        tdSql.execute("insert into car1 values('2020-02-01 12:00:00', 1)")
        tdSql.execute("insert into car2 values('2020-02-02 12:00:00', 1)")
        tdSql.execute("insert into car2 values('2020-02-29 12:00:00', 1)")
        tdSql.execute("insert into car3 values('2020-03-01 12:00:00', 1)")
        tdSql.execute("insert into car3 values('2020-03-02 12:00:00', 1)")
        tdSql.execute("insert into car3 values('2020-03-15 12:00:00', 1)")
        tdSql.execute("insert into car4 values('2020-03-31 12:00:00', 1)")
        tdSql.execute("insert into car3 values('2020-05-01 12:00:00', 1)")

        tdSql.query("select count(*) from cars interval(1n, 10d)")
        tdSql.checkData(0, 1, 1)
        tdSql.checkData(1, 1, 1)
        tdSql.checkData(2, 1, 6)
        tdSql.checkData(3, 1, 3)
        tdSql.checkData(4, 1, 3)
        tdSql.checkData(5, 1, 2)
        tdSql.checkData(6, 1, 1)

        tdSql.query("select count(*) from cars interval(1n, 10d) order by ts desc")
        tdSql.checkData(0, 1, 1)
        tdSql.checkData(1, 1, 2)
        tdSql.checkData(2, 1, 3)
        tdSql.checkData(3, 1, 3)
        tdSql.checkData(4, 1, 6)
        tdSql.checkData(5, 1, 1)
        tdSql.checkData(6, 1, 1)

        tdSql.query("select count(*) from cars interval(2n, 5d)")
        tdSql.checkData(0, 1, 1)
        tdSql.checkData(1, 1, 1)
        tdSql.checkData(2, 1, 6)
        tdSql.checkData(3, 1, 6)
        tdSql.checkData(4, 1, 3)

        tdSql.query("select count(*) from cars interval(2n) order by ts desc")
        tdSql.checkData(0, 1, 3)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 6)
        tdSql.checkData(3, 1, 1)
        tdSql.checkData(4, 1, 1)

        tdSql.query("select count(*) from cars interval(1y, 1n)")
        tdSql.checkData(0, 1, 1)
        tdSql.checkData(1, 1, 8)
        tdSql.checkData(2, 1, 8)

        tdSql.query("select count(*) from cars interval(1y, 2n)")
        tdSql.checkData(0, 1, 1)
        tdSql.checkData(1, 1, 11)
        tdSql.checkData(2, 1, 5)

        tdSql.query("select count(*) from cars where ts > '2019-05-14 00:00:00' interval(1y, 5d)")
        tdSql.checkData(0, 1, 6)
        tdSql.checkData(1, 1, 9)

    def run(self):
        tdSql.prepare()
        self.general()
        self.singleTable()
        self.superTable()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
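The expected counts in the new test follow from how the second argument of interval() shifts window boundaries. The sketch below is my own illustration of the assumed interval(length, offset) bucketing rule, not TDengine code; it reproduces the counts checked for interval(1m, 1s) in general() above:

from collections import Counter

def bucket_start(ts, length, offset):
    """Assumed rule: windows start at offset, offset+length, ...; each ts falls into one window."""
    return ((ts - offset) // length) * length + offset

# timestamps (in seconds) from the general() case, with 2019-01-01 00:00:00 taken as 0
ts = [0, 1, 60, 61, 62, 63, 90, 110, 120, 122, 179, 179, 190, 480, 480]
counts = Counter(bucket_start(t, 60, 1) for t in ts)   # interval(1m, 1s)
print(sorted(counts.items()))
# -> [(-59, 1), (1, 2), (61, 6), (121, 3), (181, 1), (421, 2)], i.e. counts 1, 2, 6, 3, 1, 2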
@@ -89,10 +89,10 @@ class TDTestCase:
     def superTable(self):
         tdSql.execute("create table cars(ts timestamp, s int) tags(id int)")
         tdSql.execute("create table car0 using cars tags(0)")
-        tdSql.execute("create table car1 using cars tags(0)")
-        tdSql.execute("create table car2 using cars tags(0)")
-        tdSql.execute("create table car3 using cars tags(0)")
-        tdSql.execute("create table car4 using cars tags(0)")
+        tdSql.execute("create table car1 using cars tags(1)")
+        tdSql.execute("create table car2 using cars tags(2)")
+        tdSql.execute("create table car3 using cars tags(3)")
+        tdSql.execute("create table car4 using cars tags(4)")

         tdSql.execute("insert into car0 values('2019-01-01 00:00:00', 1)")
         tdSql.execute("insert into car1 values('2019-05-13 12:00:00', 1)")
@@ -0,0 +1,53 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        tdSql.prepare()

        tdSql.execute(
            "create table stest(ts timestamp,size INT,filenum INT) tags (appname binary(500),tenant binary(500))")
        tdSql.execute(
            "insert into test1 using stest tags('test1','aaa') values ('2020-09-04 16:53:54.003',210,3)")
        tdSql.execute(
            "insert into test2 using stest tags('test1','aaa') values ('2020-09-04 16:53:56.003',210,3)")
        tdSql.execute(
            "insert into test11 using stest tags('test11','bbb') values ('2020-09-04 16:53:57.003',210,3)")
        tdSql.execute(
            "insert into test12 using stest tags('test11','bbb') values ('2020-09-04 16:53:58.003',210,3)")
        tdSql.execute(
            "insert into test21 using stest tags('test21','ccc') values ('2020-09-04 16:53:59.003',210,3)")
        tdSql.execute(
            "insert into test22 using stest tags('test21','ccc') values ('2020-09-04 16:54:54.003',210,3)")

        tdSql.query("select sum(size) from stest interval(1d) group by appname")
        tdSql.checkRows(3)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -0,0 +1,62 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

        self.ts = 1538548685000

    def run(self):
        tdSql.prepare()

        tdSql.execute("create table st (ts timestamp, voltage int) tags (loc nchar(30))")
        tdSql.execute("insert into t0 using st tags('beijing') values(now, 220) (now - 15d, 221) (now - 30d, 225) (now - 35d, 228) (now - 45d, 222)")
        tdSql.execute("insert into t1 using st tags('shanghai') values(now, 220) (now - 60d, 221) (now - 50d, 225) (now - 40d, 228) (now - 20d, 222)")

        tdSql.query("select avg(voltage) from st interval(1n)")
        tdSql.checkRows(3)
        tdSql.checkData(0, 1, 223.0)
        tdSql.checkData(1, 1, 225.0)
        tdSql.checkData(2, 1, 220.333333)

        tdSql.query("select avg(voltage) from st interval(1n, 15d)")
        tdSql.checkRows(3)
        tdSql.checkData(0, 1, 224.8)
        tdSql.checkData(1, 1, 222.666666)
        tdSql.checkData(2, 1, 220.0)

        tdSql.query("select avg(voltage) from st interval(1n, 15d) group by loc")
        tdSql.checkRows(6)
        tdSql.checkData(0, 1, 225.0)
        tdSql.checkData(1, 1, 223.0)
        tdSql.checkData(2, 1, 220.0)
        tdSql.checkData(3, 1, 224.666666)
        tdSql.checkData(4, 1, 222.0)
        tdSql.checkData(5, 1, 220.0)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -38,7 +38,7 @@ class TDTestCase:
         tdLog.info("drop database db if exits")
         tdSql.execute('drop database if exists db')
         tdLog.info("================= step1")
-        tdSql.execute('create database db maxtables 4')
+        tdSql.execute('create database db')
         tdLog.sleep(5)
         tdSql.execute('use db')

@@ -38,7 +38,7 @@ class TDTestCase:
         tdLog.info("drop database db if exits")
         tdSql.execute('drop database if exists db')
         tdLog.info("================= step1")
-        tdSql.execute('create database db maxtables 4')
+        tdSql.execute('create database db')
         tdLog.sleep(5)
         tdSql.execute('use db')

@@ -123,8 +123,12 @@ class TDSql:

     def checkData(self, row, col, data):
         self.checkRowCol(row, col)
-        if str(self.queryResult[row][col]) != str(data):
-            if isinstance(data, float) and abs(self.queryResult[row][col] - data) <= 0.000001:
+        if self.queryResult[row][col] != data:
+            if str(self.queryResult[row][col]) == str(data):
+                tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
+                           (self.sql, row, col, self.queryResult[row][col], data))
+                return
+            elif isinstance(data, float) and abs(self.queryResult[row][col] - data) <= 0.000001:
                 tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
                            (self.sql, row, col, self.queryResult[row][col], data))
                 return
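The reworked comparison accepts an exact match first, then a match on the string form, and only then a small float tolerance. A standalone sketch of that ordering (the helper name and epsilon below are illustrative, not part of the test framework):

def values_match(actual, expected, eps=0.000001):
    """Mirror checkData: exact equality, then string form, then a float tolerance."""
    if actual == expected:
        return True
    if str(actual) == str(expected):
        return True
    if isinstance(expected, float) and isinstance(actual, (int, float)):
        return abs(actual - expected) <= eps
    return False

# values_match(220.3333333, 220.333333) -> True (within 1e-6)
# values_match("210", 210)              -> True (string forms agree)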
@@ -130,7 +130,6 @@ run general/parser/join.sim
 run general/parser/join_multivnode.sim
 run general/parser/select_with_tags.sim
 run general/parser/groupby.sim
-run general/parser/bug.sim
 run general/parser/tags_dynamically_specifiy.sim
 run general/parser/set_tag_vals.sim
 #unsupport run general/parser/repeatAlter.sim

@@ -1,5 +1,6 @@
 system sh/stop_dnodes.sh

+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
 system sh/deploy.sh -n dnode1 -i 1

 print ========= start dnodes

@@ -1,5 +1,6 @@
 system sh/stop_dnodes.sh
 system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4

 print ========= start dnodes
 system sh/exec.sh -n dnode1 -s start
@@ -4,6 +4,8 @@ system sh/deploy.sh -n dnode1 -i 1
 system sh/cfg.sh -n dnode1 -c walLevel -v 2
 system sh/deploy.sh -n dnode2 -i 2
 system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4

 print ========== prepare data
 system sh/exec.sh -n dnode1 -s start

@@ -24,5 +24,10 @@ print curl 127.0.0.1:7111/rest/sql -----> $system_content
 # return -1
 #endi

+sql select * from db.win_cpu_windows_1_processor
+print rows: $rows
+if $rows != 1 then
+  return -1
+endi

 #system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -0,0 +1,37 @@
system sh/stop_dnodes.sh
sleep 3000
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode1 -c maxSQLLength -v 340032
system sh/exec.sh -n dnode1 -s start

sleep 3000
sql connect

print ============================ dnode1 start

print =============== step1 - prepare data
sql create database d1
sql use d1

sql create table table_rest (ts timestamp, i int)
print sql length is 270KB
restful d1 table_rest 1591072800 10000
restful d1 table_rest 1591172800 10000
restful d1 table_rest 1591272800 10000
restful d1 table_rest 1591372800 10000
restful d1 table_rest 1591472800 10000
restful d1 table_rest 1591572800 10000
restful d1 table_rest 1591672800 10000
restful d1 table_rest 1591772800 10000
restful d1 table_rest 1591872800 10000
restful d1 table_rest 1591972800 10000

sql select * from table_rest;
print rows: $rows
if $rows != 100000 then
  return -1
endi

system sh/exec.sh -n dnode1 -s stop -x SIGINT

@@ -0,0 +1,27 @@
system sh/stop_dnodes.sh
sleep 3000
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode1 -c maxSQLLength -v 7340032
system sh/exec.sh -n dnode1 -s start

sleep 3000
sql connect

print ============================ dnode1 start

print =============== step1 - prepare data
sql create database d1
sql use d1

sql create table table_rest (ts timestamp, i int)
print sql length is 270KB
restful d1 table_rest 1591072800 10000 gzip
sql select * from table_rest;
print rows: $rows
if $rows != 10000 then
  return -1
endi

system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -1,3 +1,5 @@
+run general/http/autocreate.sim
+run general/http/chunked.sim
 run general/http/restful.sim
 run general/http/restful_insert.sim
 run general/http/restful_limit.sim

@@ -1,7 +1,7 @@
 system sh/stop_dnodes.sh
 system sh/deploy.sh -n dnode1 -i 1
 system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 1
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
 system sh/cfg.sh -n dnode1 -c ctime -v 30
 system sh/exec.sh -n dnode1 -s start
 sleep 3000
@@ -86,8 +86,6 @@ print ========== insert data by multi-format
 sql create table abc.tk_mt (ts timestamp, a int, b binary(16), c bool, d float, e double, f nchar(16)) tags (t1 int, t2 binary(16))

 sql create table abc.tk_subt001 using tk_mt tags(1, 'subt001')
-sql insert into abc.tk_subt001 values (now-1y, 1, 'binary_1', true, 1.001, 2.001, 'nchar_1')
-sql insert into abc.tk_subt001 values (now-1n, 2, 'binary_2', true, 1.002, 2.002, 'nchar_2')
 sql insert into abc.tk_subt001 values (now-1w, 3, 'binary_3', true, 1.003, 2.003, 'nchar_3')
 sql insert into abc.tk_subt001 (ts, a, c, e, f) values (now-1d, 4, false, 2.004, 'nchar_4')
 sql insert into abc.tk_subt001 (ts, a, c, e, f) values (now-1h, 5, false, 2.005, 'nchar_5')

@@ -95,35 +93,29 @@ sql insert into abc.tk_subt001 (ts, b, d) values (now-1m, 'binary_6',
 sql insert into abc.tk_subt001 (ts, b, d) values (now-1s, 'binary_7', 1.007)
 sql insert into abc.tk_subt001 (ts, b, d) values (now-1a, 'binary_8', 1.008)
 sql select * from tk_subt001
-if $rows != 8 then
-  print ==== expect rows is 8, but actually is $rows
+if $rows != 6 then
+  print ==== expect rows is 6, but actually is $rows
   return -1
 endi

-sql insert into abc.tk_subt002 using tk_mt tags (22,'subt002x') values (now-2y, 2008, 'binary_2008', false, 2008.001, 2008.001, 'nchar_2008')
-sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now-1y, 2007, 'binary_2007', false, 2007.001, 2007.001, 'nchar_2007')
-sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now-1n, 2006, 'binary_2006', true, 2006.001, 2006.001, 'nchar_2006')
-sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1s, 2001, 'binary_2001', true, 2001.001, 2001.001, 'nchar_2001')
+sql insert into abc.tk_subt002 using tk_mt tags (22, 'subt002x') values (now+1s, 2001, 'binary_2001', true, 2001.001, 2001.001, 'nchar_2001')
 sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1m, 2002, 'binary_2002', false, 2002.001, 2002.001, 'nchar_2002')
 sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1h, 2003, 'binary_2003', false, 2003.001, 2003.001, 'nchar_2003')
 sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1d, 2004, 'binary_2004', true, 2004.001, 2004.001, 'nchar_2004')
 sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1w, 2005, 'binary_2005', false, 2005.001, 2005.001, 'nchar_2005')
 sql select * from tk_subt002
-if $rows != 8 then
-  print ==== expect rows is 8, but actually is $rows
+if $rows != 5 then
+  print ==== expect rows is 5, but actually is $rows
   return -1
 endi

-sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-5y, 3001, false, 3001.001, 'nchar_3001')
-sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-4y, 3002, false, 3002.001, 'nchar_3002')
-sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-3y, 3003, true , 3003.001, 'nchar_3003')
-sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-2y, 3004, false, 3004.001, 'nchar_3004')
-sql insert into abc.tk_subt003 values (now-37d, 3005, 'binary_3005', false, 3005.001, 3005.001, 'nchar_3005')
+sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-38d, 3004, false, 3004.001, 'nchar_3004')
+sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-37d, 3005, false, 3005.001, 'nchar_3005')
 sql insert into abc.tk_subt003 values (now-36d, 3006, 'binary_3006', true, 3006.001, 3006.001, 'nchar_3006')
 sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (33, 'subt003x') values (now-35d, 3007, false, 3007.001, 'nchar_3007')
 sql select * from tk_subt003
-if $rows != 7 then
-  print ==== expect rows is 7, but actually is $rows
+if $rows != 4 then
+  print ==== expect rows is 4, but actually is $rows
   return -1
 endi

@@ -850,6 +850,8 @@ if $rows != 12 then
   return -1
 endi

+print =====================>td-1442
+sql_error select count(*) from m_fl_tb0 interval(1s) fill(prev);

 print =============== clear
 sql drop database $db

@@ -65,22 +65,23 @@ endi
 if $data00 != @18-09-18 01:40:00.000@ then
   return -1
 endi
-#if $data01 != NULL then
 if $data01 != 999 then
   return -1
 endi
-#if $data02 != NULL then
 if $data02 != 999 then
   return -1
 endi
-#if $data03 != NULL then
 if $data03 != 999.00000 then
   return -1
 endi
-#if $data04 != NULL then
 if $data04 != 999.000000000 then
   return -1
 endi

 #if $data05 != NULL then
 if $data05 != 999 then
   return -1

@@ -127,7 +128,7 @@ if $data01 != 0 then
   return -1
 endi

-#add check for out of range first/last query
+print =============> add check for out of range first/last query
 sql select first(ts),last(ts) from first_tb4 where ts>'2018-9-18 1:40:01';
 if $row != 0 then
   return -1
@@ -137,3 +138,129 @@ sql select first(ts),last(ts) from first_tb4 where ts<'2018-9-17 8:50:0';
 if $row != 0 then
   return -1
 endi
+
+#first/last mix up query
+#select first(size),last(size) from stest interval(1d) group by tbname;
+print =====================>td-1477
+
+sql create table stest(ts timestamp,size INT,filenum INT) tags (appname binary(500),tenant binary(500));
+sql insert into test1 using stest tags('test1','aaa') values ('2020-09-04 16:53:54.003',210,3);
+sql insert into test2 using stest tags('test1','aaa') values ('2020-09-04 16:53:56.003',210,3);
+sql insert into test11 using stest tags('test11','bbb') values ('2020-09-04 16:53:57.003',210,3);
+sql insert into test12 using stest tags('test11','bbb') values ('2020-09-04 16:53:58.003',210,3);
+sql insert into test21 using stest tags('test21','ccc') values ('2020-09-04 16:53:59.003',210,3);
+sql insert into test22 using stest tags('test21','ccc') values ('2020-09-04 16:54:54.003',210,3);
+sql select sum(size) from stest group by appname;
+if $rows != 3 then
+  return -1
+endi
+
+if $data00 != 420 then
+  return -1
+endi
+if $data10 != 420 then
+  return -1
+endi
+if $data20 != 420 then
+  return -1
+endi
+
+if $data01 != @test1@ then
+  return -1
+endi
+if $data11 != @test11@ then
+  return -1
+endi
+if $data21 != @test21@ then
+  return -1
+endi
+
+sql select sum(size) from stest interval(1d) group by appname;
+if $rows != 3 then
+  return -1
+endi
+
+#2020-09-04 00:00:00.000 | 420 | test1 |
+#2020-09-04 00:00:00.000 | 420 | test11 |
+#2020-09-04 00:00:00.000 | 420 | test21 |
+if $data00 != @20-09-04 00:00:00.000@ then
+  return -1
+endi
+
+if $data10 != @20-09-04 00:00:00.000@ then
+  return -1
+endi
+
+if $data20 != @20-09-04 00:00:00.000@ then
+  return -1
+endi
+
+if $data01 != 420 then
+  print expect 420 , actual $data01
+  return -1
+endi
+
+if $data11 != 420 then
+  return -1
+endi
+
+if $data21 != 420 then
+  return -1
+endi
+
+if $data02 != @test1@ then
+  return -1
+endi
+if $data12 != @test11@ then
+  return -1
+endi
+if $data22 != @test21@ then
+  return -1
+endi
+
+print ===================>td-1477, one table has only one block occurs this bug.
+sql select first(size),count(*),LAST(SIZE) from stest where tbname in ('test1', 'test2') interval(1d) group by tbname;
+if $rows != 2 then
+  return -1
+endi
+
+if $data00 != @20-09-04 00:00:00.000@ then
+  return -1
+endi
+
+if $data01 != 210 then
+  return -1
+endi
+
+if $data02 != 1 then
+  return -1
+endi
+
+if $data03 != 210 then
+  return -1
+endi
+
+if $data04 != @test1@ then
+  return -1
+endi
+
+if $data10 != @20-09-04 00:00:00.000@ then
+  return -1
+endi
+
+if $data11 != 210 then
+  return -1
+endi
+
+if $data12 != 1 then
+  return -1
+endi
+
+if $data13 != 210 then
+  return -1
+endi
+
+if $data14 != @test2@ then
+  print expect test2 , actual: $data14
+  return -1
+endi
@@ -423,6 +423,8 @@ if $data97 != @group_tb0@ then
   return -1
 endi

+print ---------------------------------> group by binary|nchar data add cases
+
 #=========================== group by multi tags ======================
 sql create table st (ts timestamp, c int) tags (t1 int, t2 int, t3 int, t4 int);

@@ -205,10 +205,12 @@ if $rows != 9 then
 endi

 if $data00 != @70-01-01 08:01:40.100@ then
+  print $data00
   return -1
 endi

 if $data10 != @70-01-01 08:01:40.200@ then
+  print $data10
   return -1
 endi
