[td-255] merge develop.

Haojun Liao 2021-06-01 22:45:47 +08:00
commit d9f403ad3d
72 changed files with 4568 additions and 2331 deletions

.gitmodules (vendored, 3 changed lines)

@ -10,3 +10,6 @@
[submodule "tests/examples/rust"] [submodule "tests/examples/rust"]
path = tests/examples/rust path = tests/examples/rust
url = https://github.com/songtianyi/tdengine-rust-bindings.git url = https://github.com/songtianyi/tdengine-rust-bindings.git
[submodule "deps/jemalloc"]
path = deps/jemalloc
url = https://github.com/jemalloc/jemalloc
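Since jemalloc now comes in as a git submodule rather than a system dependency, the working tree has to be populated before the build can see it. A minimal sketch using standard git commands, run from the repository root:

```bash
# pull the jemalloc sources referenced by .gitmodules into deps/jemalloc
git submodule update --init deps/jemalloc
```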


@ -59,6 +59,11 @@ IF (TD_LINUX_64)
MESSAGE(STATUS "linux64 is defined") MESSAGE(STATUS "linux64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ADD_DEFINITIONS(-DUSE_LIBICONV) ADD_DEFINITIONS(-DUSE_LIBICONV)
IF (JEMALLOC_ENABLED)
ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
ENDIF ()
ENDIF ()
IF (TD_LINUX_32)


@ -72,3 +72,8 @@ IF (${RANDOM_NETWORK_FAIL} MATCHES "true")
SET(TD_RANDOM_NETWORK_FAIL TRUE)
MESSAGE(STATUS "build with random-network-fail enabled")
ENDIF ()
IF (${JEMALLOC_ENABLED} MATCHES "true")
SET(TD_JEMALLOC_ENABLED TRUE)
MESSAGE(STATUS "build with jemalloc enabled")
ENDIF ()
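The option is read from the cmake command line and compared against "true". A minimal sketch of an out-of-tree configure with the allocator switched on (the build directory name is arbitrary):

```bash
mkdir -p debug && cd debug
# -DJEMALLOC_ENABLED=true sets TD_JEMALLOC_ENABLED, which the compile flags above key off
cmake .. -DJEMALLOC_ENABLED=true
make
```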

deps/CMakeLists.txt (vendored, 13 changed lines)

@ -19,3 +19,16 @@ ENDIF ()
IF (TD_DARWIN AND TD_MQTT)
ADD_SUBDIRECTORY(MQTT-C)
ENDIF ()
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
MESSAGE("setup dpes/jemalloc, current source dir:" ${CMAKE_CURRENT_SOURCE_DIR})
MESSAGE("binary dir:" ${CMAKE_BINARY_DIR})
include(ExternalProject)
ExternalProject_Add(jemalloc
PREFIX "jemalloc"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/
BUILD_COMMAND ${MAKE}
)
ENDIF ()
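The ExternalProject configures jemalloc with --prefix=${CMAKE_BINARY_DIR}/build/, the same prefix the -I/-L/-rpath flags added in the top-level CMake file point at. After a configure-and-make pass, the staged artifacts can be sanity-checked from the cmake build directory (paths assumed from the diff above):

```bash
# headers and libraries staged by the jemalloc ExternalProject
ls build/include/jemalloc/jemalloc.h build/lib/libjemalloc.so.2
```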

deps/jemalloc (vendored submodule, 1 changed line)

@ -0,0 +1 @@
Subproject commit ea6b3e973b477b8061e0076bb257dbd7f3faa756


@ -107,9 +107,9 @@ TDengine uses a Master-Slave model for synchronization; compared with the popular RAFT consensus
![replica-forward.png](page://images/architecture/replica-forward.png)
1. The application performs a basic validity check on the write request; if it passes, the request packet is stamped with a version number (version, monotonically increasing).
2. The application wraps the versioned write request in a WAL Head and writes it to the WAL (Write Ahead Log).
3. The application calls the API syncForwardToPeer; if vnode B is in slave state, the sync module sends the packet containing the WAL Head to vnode B in a Forward message, otherwise nothing is forwarded.
4. When vnode B receives the Forward message, it invokes the callback writeToCache and hands the data to the application for processing.
5. After a successful write, the application on vnode B must call syncAckForward to notify the sync module that the write succeeded.
6. If quorum is greater than 1, vnode B waits for the application's acknowledgement; once it arrives, vnode B sends a Forward Response message to node A.
@ -219,7 +219,7 @@ The Arbitrator program tarbitrator.c lives in the same directory as the replication module; building the entire sys
Differences:
- The election process is different. In Raft, a node in candidate state actively sends vote requests to the other nodes, and if more than half answer Yes it becomes Leader and starts a new term. In TDengine's implementation, a node going online, going offline, or changing role triggers status messages that propagate within the node group, and the election process is triggered only after the group's status has stabilized and converged. Because the status is stable and consistent, every node reaches the same decision from the same status information: once a node meets the conditions to become master, it sets itself as master without needing approval from the other nodes. In TDengine, any node that detects a role change of another node or of itself broadcasts it to the other nodes in the group; Raft has no such mechanism, so it relies on voting instead.
- For a WAL record, Raft uses term + index as the unique identifier, while TDengine uses only version (similar to index). In TDengine's implementation, using only version is entirely workable, because TDengine's election mechanism has no notion of term.
If the whole virtual node group goes down and restarts but not all virtual nodes come back online, TDengine will not elect a master, because a node that is still offline may hold the data with the highest version. The RAFT protocol, by contrast, elects a Leader as soon as more than half of the nodes are online.


@ -343,7 +343,7 @@ TDengine uses a data-driven approach to persist cached data to disk
Collected data generally has a retention period, determined by the system configuration parameter keep; data files older than that number of days are automatically deleted by the system to free storage space.
Given the two parameters days and keep, the total number of data files for a vnode in a typical working state is `ceil(keep/days)+1`. The total number of data files should be neither too large nor too small; somewhere between 10 and 100 is appropriate, and a reasonable days can be chosen on that basis. In the current version the parameter keep can be modified, but once set, the parameter days cannot be changed.
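As a quick check of the `ceil(keep/days)+1` formula, with hypothetical settings keep=3650 and days=10 a vnode would hold 366 data files; shell integer arithmetic gives the same number:

```bash
# ceil(keep/days) + 1, here with keep=3650 days of retention and days=10 days per file
keep=3650; days=10
echo $(( (keep + days - 1) / days + 1 ))   # prints 366
```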
Within each data file, a table's data is stored block by block; a table can have one or more file blocks. Inside a file block the data is stored column-wise in a contiguous region of storage, which greatly improves read speed. The size of a file block is controlled by the system parameter maxRows (maximum records per block), with a default of 4096. This value should be neither too large nor too small: too large, and locating the data of a specific time range takes longer, hurting read speed; too small, and the data-block index becomes too large while compression efficiency drops, which also hurts read speed.


@ -516,7 +516,7 @@ conn.close()
- _TDengineCursor_
See help(taos.TDengineCursor) in Python.
This class corresponds to the write and query operations performed by the client. In multi-threaded client code, a cursor instance must stay dedicated to a single thread and must not be shared between threads; otherwise the returned results will be wrong.
- The _connect_ method


@ -37,7 +37,7 @@ taos> DESCRIBE meters;
- Epoch Time: a timestamp can also be a long integer representing the number of milliseconds since 1970-01-01 08:00:00.000
- Times can be added and subtracted; for example, now-2h means two hours before the query time (the last two hours). The unit after the number can be u (microseconds), a (milliseconds), s (seconds), m (minutes), h (hours), d (days) or w (weeks). For example, `select * from t1 where ts > now-2w and ts <= now-1w` queries exactly one week of data from two weeks ago. When specifying the time window (interval) of a down-sampling operation, the units n (natural month) and y (natural year) can also be used.
TDengine's default timestamp precision is milliseconds, but microsecond precision is supported via the PRECISION parameter passed at CREATE DATABASE time.
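A minimal sketch of creating a microsecond-precision database from the taos shell, based on the PRECISION option mentioned above (the database name is made up):

```bash
# 'us' selects microsecond timestamps for this database only; the default stays milliseconds
taos -s "CREATE DATABASE IF NOT EXISTS power PRECISION 'us';"
```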
In TDengine, the data model of a regular table can use the following 10 data types.
@ -400,6 +400,7 @@ TDengine's default timestamp precision is milliseconds, but by modifying the configuration parameter enableM
tb2_name (tb2_field1_name, ...) [USING stb2_name TAGS (tag_value2, ...)] VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
Insert multiple records, by column, into tables tb1_name and tb2_name at the same time, creating the tables automatically.
Note: the `(tb1_field1_name, ...)` part can be omitted, in which case full-column mode is used: the VALUES section must then explicitly supply a value for every column of the table. Full-column writes are much faster than writes that name a subset of columns, so full-column mode is recommended wherever possible; columns with no data can be filled with NULL.
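A hedged sketch of the full-column form described in this note, reusing the meters/d1001 example that appears later in this section and assuming the usual four-column meters schema (ts, current, voltage, phase); the timestamp and measurement values are made up:

```bash
# auto-create subtable d1001 under supertable meters and insert one full row; NULL fills the empty column
taos -s "INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES (now, 10.3, 219, NULL);"
```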
Starting from version 2.0.20.5, the subtable's column names no longer have to follow the subtable name; they can instead be placed between TAGS and VALUES, for example:
```mysql
INSERT INTO tb1_name [USING stb1_name TAGS (tag_value1, ...)] (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
@ -423,9 +424,9 @@ Query OK, 1 row(s) in set (0.001029s)
taos> SHOW TABLES;
Query OK, 0 row(s) in set (0.000946s)
taos> INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
DB error: invalid SQL: 'a' (invalid timestamp) (0.039494s)
taos> SHOW TABLES;
table_name | created_time | columns | stable_name |


@ -24,14 +24,14 @@ echo "compile_dir: ${compile_dir}"
echo "pkg_dir: ${pkg_dir}" echo "pkg_dir: ${pkg_dir}"
if [ -d ${pkg_dir} ]; then if [ -d ${pkg_dir} ]; then
rm -rf ${pkg_dir} rm -rf ${pkg_dir}
fi fi
mkdir -p ${pkg_dir} mkdir -p ${pkg_dir}
cd ${pkg_dir} cd ${pkg_dir}
libfile="libtaos.so.${tdengine_ver}" libfile="libtaos.so.${tdengine_ver}"
# create install dir # create install dir
install_home_path="/usr/local/taos" install_home_path="/usr/local/taos"
mkdir -p ${pkg_dir}${install_home_path} mkdir -p ${pkg_dir}${install_home_path}
mkdir -p ${pkg_dir}${install_home_path}/bin mkdir -p ${pkg_dir}${install_home_path}/bin
@ -42,7 +42,7 @@ mkdir -p ${pkg_dir}${install_home_path}/examples
mkdir -p ${pkg_dir}${install_home_path}/include mkdir -p ${pkg_dir}${install_home_path}/include
mkdir -p ${pkg_dir}${install_home_path}/init.d mkdir -p ${pkg_dir}${install_home_path}/init.d
mkdir -p ${pkg_dir}${install_home_path}/script mkdir -p ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
@ -54,7 +54,7 @@ cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_pat
cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples
@ -67,7 +67,41 @@ fi
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||:
if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
install_user_local_path="/usr/local"
mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/
if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then
cp ${compile_dir}/build/bin/jemalloc.sh ${pkg_dir}${install_user_local_path}/bin/
fi
if [ -f ${compile_dir}/build/bin/jeprof ]; then
cp ${compile_dir}/build/bin/jeprof ${pkg_dir}${install_user_local_path}/bin/
fi
if [ -f ${compile_dir}/build/include/jemalloc/jemalloc.h ]; then
cp ${compile_dir}/build/include/jemalloc/jemalloc.h ${pkg_dir}${install_user_local_path}/include/jemalloc/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.so.2 ]; then
cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/
ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/
fi
if [ -f ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ]; then
cp ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ${pkg_dir}${install_user_local_path}/share/doc/jemalloc/
fi
if [ -f ${compile_dir}/build/share/man/man3/jemalloc.3 ]; then
cp ${compile_dir}/build/share/man/man3/jemalloc.3 ${pkg_dir}${install_user_local_path}/share/man/man3/
fi
fi
cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/
chmod 755 ${pkg_dir}/DEBIAN/*
@ -75,7 +109,7 @@ chmod 755 ${pkg_dir}/DEBIAN/*
# modify version of control
debver="Version: "$tdengine_ver
sed -i "2c$debver" ${pkg_dir}/DEBIAN/control
#get taos version, then set deb name
@ -90,7 +124,7 @@ fi
if [ "$verType" == "beta" ]; then
debname=${debname}-${verType}".deb"
elif [ "$verType" == "stable" ]; then
debname=${debname}".deb"
else
echo "unknow verType, nor stabel or beta"
@ -101,7 +135,7 @@ fi
dpkg -b ${pkg_dir} $debname
echo "make deb package success!"
cp ${pkg_dir}/*.deb ${output_dir}
# clean tmep dir
rm -rf ${pkg_dir}
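Because the deb now optionally carries jemalloc binaries under /usr/local, it is worth confirming what actually got packaged. A quick check against the artifact this script just wrote to ${output_dir} (its own output variable):

```bash
# list the package contents and look for the optional jemalloc pieces
pkg=$(ls "${output_dir}"/*.deb | head -n1)
dpkg -c "$pkg" | grep jemalloc || echo "no jemalloc files in this package"
```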


@ -6,7 +6,7 @@ set -e
#set -x
# release.sh -v [cluster | edge]
# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...]
# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
# -V [stable | beta]
# -l [full | lite]
@ -22,11 +22,12 @@ cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
pagMode=full # [full | lite]
soMode=dynamic # [static | dynamic]
allocator=glibc # [glibc | jemalloc]
dbName=taos # [taos | power]
verNumber=""
verNumberComp="2.0.0.0"
while getopts "hv:V:c:o:l:s:d:a:n:m:" arg
do
case $arg in
v)
@ -53,6 +54,10 @@ do
#echo "dbName=$OPTARG"
dbName=$(echo $OPTARG)
;;
a)
#echo "allocator=$OPTARG"
allocator=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
@ -71,20 +76,21 @@ do
echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] "
echo " -V [stable | beta] "
echo " -l [full | lite] "
echo " -a [glibc | jemalloc] "
echo " -s [static | dynamic] " echo " -s [static | dynamic] "
echo " -d [taos | power] " echo " -d [taos | power] "
echo " -n [version number] " echo " -n [version number] "
echo " -m [compatible version number] " echo " -m [compatible version number] "
exit 0 exit 0
;; ;;
?) #unknow option ?) #unknow option
echo "unkonw argument" echo "unkonw argument"
exit 1 exit 1
;; ;;
esac esac
done done
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} verNumber=${verNumber} verNumberComp=${verNumberComp}" echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp}"
curr_dir=$(pwd) curr_dir=$(pwd)
@ -118,7 +124,7 @@ function vercomp () {
echo 0 echo 0
exit 0 exit 0
fi fi
local IFS=. local IFS=.
local i ver1=($1) ver2=($2) local i ver1=($1) ver2=($2)
@ -164,7 +170,7 @@ if [[ "$verMode" == "cluster" ]]; then
else else
gitinfoOfInternal=NULL gitinfoOfInternal=NULL
fi fi
cd ${curr_dir} cd ${curr_dir}
# 2. cmake executable file # 2. cmake executable file
@ -180,12 +186,18 @@ else
fi
cd ${compile_dir}
if [[ "$allocator" == "jemalloc" ]]; then
allocator_macro="-DJEMALLOC_ENABLED=true"
else
allocator_macro=""
fi
# check support cpu type
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then
if [ "$verMode" != "cluster" ]; then
cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} ${allocator_macro}
else
cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} ${allocator_macro}
fi
else
echo "input cpuType=${cpuType} error!!!"
@ -199,9 +211,9 @@ cd ${curr_dir}
# 3. Call the corresponding script for packaging
if [ "$osType" != "Darwin" ]; then
if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
ret='0'
command -v dpkg >/dev/null 2>&1 || { ret='1'; }
if [ "$ret" -eq 0 ]; then
echo "====do deb package for the ubuntu system===="
output_dir="${top_dir}/debs"
if [ -d ${output_dir} ]; then
@ -214,9 +226,9 @@ if [ "$osType" != "Darwin" ]; then
echo "==========dpkg command not exist, so not release deb package!!!"
fi
ret='0'
command -v rpmbuild >/dev/null 2>&1 || { ret='1'; }
if [ "$ret" -eq 0 ]; then
echo "====do rpm package for the centos system===="
output_dir="${top_dir}/rpms"
if [ -d ${output_dir} ]; then
@ -229,11 +241,11 @@ if [ "$osType" != "Darwin" ]; then
echo "==========rpmbuild command not exist, so not release rpm package!!!"
fi
fi
echo "====do tar.gz package for all systems===="
cd ${script_dir}/tools
if [[ "$dbName" == "taos" ]]; then
${csudo} ./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
${csudo} ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
${csudo} ./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
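Putting the new switch together with the existing ones, a jemalloc-enabled edge build could be driven like this (the version number is a placeholder):

```bash
# -a picks the allocator added in this change; glibc remains the default when -a is omitted
./release.sh -v edge -c x64 -o Linux -V stable -l full -s dynamic -a jemalloc -n 2.0.20.0
```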


@ -1,6 +1,6 @@
#!/bin/bash
#
# Generate rpm package for centos
set -e
# set -x
@ -60,7 +60,7 @@ ${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_di
# copy rpm package to output_dir, and modify package name, then clean temp dir
#${csudo} cp -rf RPMS/* ${output_dir}
cp_rpm_package ${pkg_dir}/RPMS
if [ "$verMode" == "cluster" ]; then
@ -74,7 +74,7 @@ fi
if [ "$verType" == "beta" ]; then
rpmname=${rpmname}-${verType}".rpm"
elif [ "$verType" == "stable" ]; then
rpmname=${rpmname}".rpm"
else
echo "unknow verType, nor stabel or beta"


@ -1,4 +1,5 @@
%define homepath /usr/local/taos
%define userlocalpath /usr/local
%define cfg_install_dir /etc/taos
%define __strip /bin/true
@ -12,22 +13,22 @@ URL: www.taosdata.com
AutoReqProv: no
#BuildRoot: %_topdir/BUILDROOT
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
#Prefix: /usr/local/taos
#BuildRequires:
#Requires:
%description
Big Data Platform Designed and Optimized for IoT
#"prep" Nothing needs to be done
#%prep
#%setup -q
#%setup -T
#"build" Nothing needs to be done
#%build
#%configure
#make %{?_smp_mflags}
@ -75,9 +76,53 @@ fi
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||:
cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples
if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
mkdir -p %{buildroot}%{userlocalpath}/bin
mkdir -p %{buildroot}%{userlocalpath}/lib
mkdir -p %{buildroot}%{userlocalpath}/lib/pkgconfig
mkdir -p %{buildroot}%{userlocalpath}/include
mkdir -p %{buildroot}%{userlocalpath}/include/jemalloc
mkdir -p %{buildroot}%{userlocalpath}/share
mkdir -p %{buildroot}%{userlocalpath}/share/doc
mkdir -p %{buildroot}%{userlocalpath}/share/doc/jemalloc
mkdir -p %{buildroot}%{userlocalpath}/share/man
mkdir -p %{buildroot}%{userlocalpath}/share/man/man3
cp %{_compiledir}/build/bin/jemalloc-config %{buildroot}%{userlocalpath}/bin/
if [ -f %{_compiledir}/build/bin/jemalloc.sh ]; then
cp %{_compiledir}/build/bin/jemalloc.sh %{buildroot}%{userlocalpath}/bin/
fi
if [ -f %{_compiledir}/build/bin/jeprof ]; then
cp %{_compiledir}/build/bin/jeprof %{buildroot}%{userlocalpath}/bin/
fi
if [ -f %{_compiledir}/build/include/jemalloc/jemalloc.h ]; then
cp %{_compiledir}/build/include/jemalloc/jemalloc.h %{buildroot}%{userlocalpath}/include/jemalloc/
fi
if [ -f %{_compiledir}/build/lib/libjemalloc.so.2 ]; then
cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/
ln -sf libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/libjemalloc.so
fi
if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{userlocalpath}/lib/
fi
if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{userlocalpath}/lib/
fi
if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then
cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{userlocalpath}/lib/pkgconfig/
fi
if [ -f %{_compiledir}/build/share/doc/jemalloc/jemalloc.html ]; then
cp %{_compiledir}/build/share/doc/jemalloc/jemalloc.html %{buildroot}%{userlocalpath}/share/doc/jemalloc/
fi
if [ -f %{_compiledir}/build/share/man/man3/jemalloc.3 ]; then
cp %{_compiledir}/build/share/man/man3/jemalloc.3 %{buildroot}%{userlocalpath}/share/man/man3/
fi
fi
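On the rpm side the same optional jemalloc files are staged under %{userlocalpath}; after makerpm.sh runs, their presence can be checked without installing the package (point the command at whichever .rpm the build emitted):

```bash
# list the files carried by the generated rpm and filter for jemalloc
rpm -qlp TDengine-*.rpm | grep jemalloc || echo "no jemalloc files in this package"
```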
#Scripts executed before installation
%pre
csudo=""
@ -103,7 +148,7 @@ fi
# if taos.cfg already softlink, remove it
if [ -f %{cfg_install_dir}/taos.cfg ]; then
${csudo} rm -f %{homepath}/cfg/taos.cfg || :
fi
# there can not libtaos.so*, otherwise ln -s error
${csudo} rm -f %{homepath}/driver/libtaos* || :
@ -116,18 +161,18 @@ if command -v sudo > /dev/null; then
fi
cd %{homepath}/script
${csudo} ./post.sh
# Scripts executed before uninstall
%preun
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo"
fi
# only remove package to call preun.sh, not but update(2)
if [ $1 -eq 0 ];then
#cd %{homepath}/script
#${csudo} ./preun.sh
if [ -f %{homepath}/script/preun.sh ]; then
cd %{homepath}/script
${csudo} ./preun.sh
@ -135,7 +180,7 @@ if [ $1 -eq 0 ];then
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
inc_link_dir="/usr/include"
data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
cfg_link_dir="/usr/local/taos/cfg"
@ -149,20 +194,20 @@ if [ $1 -eq 0 ];then
${csudo} rm -f ${inc_link_dir}/taos.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -f ${log_link_dir} || :
${csudo} rm -f ${data_link_dir} || :
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
fi
fi
# Scripts executed after uninstall
%postun
# clean build dir
%clean
csudo=""


@ -59,11 +59,11 @@ initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
service_mod=0
elif $(which service &> /dev/null); then
service_mod=1
service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then
initd_mod=1
elif $(which insserv &> /dev/null); then
initd_mod=2
elif $(which update-rc.d &> /dev/null); then
@ -71,7 +71,7 @@ elif $(which service &> /dev/null); then
else
service_mod=2
fi
else
service_mod=2
fi
@ -103,7 +103,7 @@ elif echo $osinfo | grep -qwi "fedora" ; then
os_type=2
else
echo " osinfo: ${osinfo}"
echo " This is an officially unverified linux system,"
echo " if there are any problems with the installation and operation, "
echo " please feel free to contact taosdata.com for support."
os_type=1
@ -138,7 +138,7 @@ do
echo "Usage: `basename $0` -v [server | client] -e [yes | no]"
exit 0
;;
?) #unknow option
echo "unkonw argument"
exit 1
;;
@ -157,9 +157,9 @@ function kill_process() {
function install_main_path() {
#create install main dir and all sub dir
${csudo} rm -rf ${install_main_dir} || :
${csudo} mkdir -p ${install_main_dir}
${csudo} mkdir -p ${install_main_dir}/cfg
${csudo} mkdir -p ${install_main_dir}/bin
${csudo} mkdir -p ${install_main_dir}/connector
${csudo} mkdir -p ${install_main_dir}/driver
${csudo} mkdir -p ${install_main_dir}/examples
@ -168,10 +168,10 @@ function install_main_path() {
if [ "$verMode" == "cluster" ]; then
${csudo} mkdir -p ${nginx_dir}
fi
if [[ -e ${script_dir}/email ]]; then
${csudo} cp ${script_dir}/email ${install_main_dir}/ ||:
fi
}
function install_bin() {
@ -207,29 +207,75 @@ function install_lib() {
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
#${csudo} rm -rf ${v15_java_app_dir} || :
${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
#if [ "$verMode" == "cluster" ]; then
# # Compatible with version 1.5
# ${csudo} mkdir -p ${v15_java_app_dir}
# ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar
# ${csudo} chmod 777 ${v15_java_app_dir} || :
#fi
${csudo} ldconfig
}
function install_jemalloc() {
jemalloc_dir=${script_dir}/jemalloc
if [ -d ${jemalloc_dir} ]; then
${csudo} /usr/bin/install -c -d /usr/local/bin
if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
fi
if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
fi
if [ -f ${jemalloc_dir}/bin/jeprof ]; then
${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
fi
if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc
${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
${csudo} /usr/bin/install -c -d /usr/local/lib
${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
${csudo} /usr/bin/install -c -d /usr/local/lib
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig
${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
fi
fi
if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc
${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
fi
if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
fi
fi
}
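After install_jemalloc has copied the bundled files into /usr/local, a quick way to confirm that the allocator is present and that taosd actually resolves it; this assumes taosd was built with the -ljemalloc link flags added in the CMake change:

```bash
# the shared object and its symlink should both exist, and taosd should list it as a dependency
ls -l /usr/local/lib/libjemalloc.so*
ldd "$(command -v taosd)" | grep jemalloc || echo "taosd is not linked against jemalloc"
```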
function install_header() {
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
@ -246,13 +292,13 @@ function add_newHostname_to_hosts() {
if [[ "$s" == "$localIp" ]]; then
return
fi
done
${csudo} echo "127.0.0.1 $1" >> /etc/hosts ||:
}
function set_hostname() {
echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:"
read newHostname
while true; do
if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then
break
@ -266,25 +312,25 @@ function set_hostname() {
if [[ $retval != 0 ]]; then
echo
echo "set hostname fail!"
return
fi
#echo -e -n "$(hostnamectl status --static)"
#echo -e -n "$(hostnamectl status --transient)"
#echo -e -n "$(hostnamectl status --pretty)"
#ubuntu/centos /etc/hostname
if [[ -e /etc/hostname ]]; then
${csudo} echo $newHostname > /etc/hostname ||:
fi
#debian: #HOSTNAME=yourname
if [[ -e /etc/sysconfig/network ]]; then
${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||:
fi
${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg
serverFqdn=$newHostname
if [[ -e /etc/hosts ]]; then
add_newHostname_to_hosts $newHostname
fi
@ -302,7 +348,7 @@ function is_correct_ipaddr() {
return 0
fi
done
return 1
}
@ -316,13 +362,13 @@ function set_ipAsFqdn() {
echo
echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}"
localFqdn="127.0.0.1"
# Write the local FQDN to configuration file
${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
serverFqdn=$localFqdn
echo
return
fi
echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:"
echo
echo -e -n "${GREEN}$iplist${NC}"
@ -331,15 +377,15 @@ function set_ipAsFqdn() {
echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:"
read localFqdn
while true; do
if [ ! -z "$localFqdn" ]; then
# Check if correct ip address
is_correct_ipaddr $localFqdn
retval=`echo $?`
if [[ $retval != 0 ]]; then
read -p "Please choose an IP from local IP list:" localFqdn
else
# Write the local FQDN to configuration file
${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
serverFqdn=$localFqdn
break
fi
@ -354,59 +400,59 @@ function local_fqdn_check() {
echo
echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}"
echo
if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then
echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}"
echo
while true
do
read -r -p "Set hostname now? [Y/n] " input
if [ ! -n "$input" ]; then
set_hostname
break
else
case $input in
[yY][eE][sS]|[yY])
set_hostname
break
;;
[nN][oO]|[nN])
set_ipAsFqdn
break
;;
*)
echo "Invalid input..."
;;
esac
fi
done
fi
}
function install_config() {
#${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
${csudo} mkdir -p ${cfg_install_dir}
[ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
${csudo} chmod 644 ${cfg_install_dir}/*
fi
${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
[ ! -z $1 ] && return 0 || : # only install client
if ((${update_flag}==1)); then
return 0
fi
if [ "$interactiveFqdn" == "no" ]; then
return 0
fi
local_fqdn_check
#FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
@ -424,8 +470,8 @@ function install_config() {
if [ ! -z "$firstEp" ]; then
# check the format of the firstEp
#if [[ $firstEp == $FQDN_PATTERN ]]; then
# Write the first FQDN to configuration file
${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg
break
#else
# read -p "Please enter the correct FQDN:port: " firstEp
@ -433,9 +479,9 @@ else
break
fi
done
# user email
#EMAIL_PATTERN='^[A-Za-z0-9\u4e00-\u9fa5]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$'
#EMAIL_PATTERN='^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$'
#EMAIL_PATTERN="^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$"
@ -446,31 +492,31 @@ function install_config() {
if [ ! -z "$emailAddr" ]; then
# check the format of the emailAddr
#if [[ "$emailAddr" =~ $EMAIL_PATTERN ]]; then
# Write the email address to temp file
email_file="${install_main_dir}/email"
${csudo} bash -c "echo $emailAddr > ${email_file}"
break
#else
# read -p "Please enter the correct email address: " emailAddr
#fi
else
break
fi
done
}
function install_log() {
${csudo} rm -rf ${log_dir} || :
${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
${csudo} ln -s ${log_dir} ${install_main_dir}/log
}
function install_data() {
${csudo} mkdir -p ${data_dir}
${csudo} ln -s ${data_dir} ${install_main_dir}/data
}
function install_connector() {
@ -485,26 +531,26 @@ function install_examples() {
function clean_service_on_sysvinit() {
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
#${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
if pidof taosd &> /dev/null; then
${csudo} service taosd stop || :
fi
if pidof tarbitrator &> /dev/null; then
${csudo} service tarbitratord stop || :
fi
if ((${initd_mod}==1)); then
if [ -e ${service_config_dir}/taosd ]; then
${csudo} chkconfig --del taosd || :
fi
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} chkconfig --del tarbitratord || :
fi
elif ((${initd_mod}==2)); then
if [ -e ${service_config_dir}/taosd ]; then
${csudo} insserv -r taosd || :
fi
if [ -e ${service_config_dir}/tarbitratord ]; then
@ -518,10 +564,10 @@ function clean_service_on_sysvinit() {
${csudo} update-rc.d -f tarbitratord remove || :
fi
fi
${csudo} rm -f ${service_config_dir}/taosd || :
${csudo} rm -f ${service_config_dir}/tarbitratord || :
if $(which init &> /dev/null); then
${csudo} init q || :
fi
@ -544,10 +590,10 @@ function install_service_on_sysvinit() {
${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
fi
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
#${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
if ((${initd_mod}==1)); then
${csudo} chkconfig --add taosd || :
${csudo} chkconfig --level 2345 taosd on || :
@ -572,7 +618,7 @@ function clean_service_on_systemd() {
fi
${csudo} systemctl disable taosd &> /dev/null || echo &> /dev/null
${csudo} rm -f ${taosd_service_config}
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
if systemctl is-active --quiet tarbitratord; then
echo "tarbitrator is running, stopping it..."
@ -580,7 +626,7 @@ function clean_service_on_systemd() {
fi
${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
${csudo} rm -f ${tarbitratord_service_config}
if [ "$verMode" == "cluster" ]; then
nginx_service_config="${service_config_dir}/nginxd.service"
if systemctl is-active --quiet nginxd; then
@ -588,8 +634,8 @@ function clean_service_on_systemd() {
${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null
${csudo} rm -f ${nginx_service_config}
fi
}
# taos:2345:respawn:/etc/init.d/taosd start
@ -621,7 +667,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}"
${csudo} systemctl enable taosd
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
@ -643,9 +689,9 @@ function install_service_on_systemd() {
${csudo} bash -c "echo >> ${tarbitratord_service_config}"
${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
#${csudo} systemctl enable tarbitratord
if [ "$verMode" == "cluster" ]; then
nginx_service_config="${service_config_dir}/nginxd.service"
${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}"
${csudo} bash -c "echo 'Description=Nginx For TDengine Service' >> ${nginx_service_config}"
@ -674,7 +720,7 @@ function install_service_on_systemd() {
${csudo} systemctl enable nginxd
fi
${csudo} systemctl start nginxd
fi
}
function install_service() {
@ -757,7 +803,7 @@ function update_TDengine() {
fi
sleep 1
fi
if [ "$verMode" == "cluster" ]; then
if pidof nginx &> /dev/null; then
if ((${service_mod}==0)); then
@ -770,12 +816,13 @@ function update_TDengine() {
sleep 1
fi
fi
install_main_path
install_log
install_header
install_lib
install_jemalloc
if [ "$pagMode" != "lite" ]; then if [ "$pagMode" != "lite" ]; then
install_connector install_connector
fi fi
@ -783,10 +830,10 @@ function update_TDengine() {
if [ -z $1 ]; then if [ -z $1 ]; then
install_bin install_bin
install_service install_service
install_config install_config
openresty_work=false openresty_work=false
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
# Check if openresty is installed # Check if openresty is installed
# Check if nginx is installed successfully # Check if nginx is installed successfully
if type curl &> /dev/null; then if type curl &> /dev/null; then
@ -797,7 +844,7 @@ function update_TDengine() {
echo -e "\033[44;31;5mNginx for TDengine does not work! Please try again!\033[0m" echo -e "\033[44;31;5mNginx for TDengine does not work! Please try again!\033[0m"
fi fi
fi fi
fi fi
#echo #echo
#echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" #echo -e "\033[44;32;1mTDengine is updated successfully!${NC}"
@ -816,7 +863,7 @@ function update_TDengine() {
else else
echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos -h $serverFqdn${NC} in shell${NC}" echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos -h $serverFqdn${NC} in shell${NC}"
fi fi
echo echo
echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" echo -e "\033[44;32;1mTDengine is updated successfully!${NC}"
else else
@ -839,14 +886,14 @@ function install_TDengine() {
tar -zxf taos.tar.gz tar -zxf taos.tar.gz
echo -e "${GREEN}Start to install TDengine...${NC}" echo -e "${GREEN}Start to install TDengine...${NC}"
install_main_path install_main_path
if [ -z $1 ]; then if [ -z $1 ]; then
install_data install_data
fi fi
install_log install_log
install_header install_header
install_lib install_lib
if [ "$pagMode" != "lite" ]; then if [ "$pagMode" != "lite" ]; then
@ -871,8 +918,8 @@ function install_TDengine() {
fi fi
fi fi
fi fi
install_config install_config
# Ask if to start the service # Ask if to start the service
#echo #echo
@ -885,36 +932,36 @@ function install_TDengine() {
echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}"
else else
echo -e "${GREEN_DARK}To start TDengine ${NC}: taosd${NC}" echo -e "${GREEN_DARK}To start TDengine ${NC}: taosd${NC}"
fi fi
#if [ ${openresty_work} = 'true' ]; then #if [ ${openresty_work} = 'true' ]; then
# echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" # echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
#else #else
# echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" # echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}"
#fi #fi
if [ ! -z "$firstEp" ]; then if [ ! -z "$firstEp" ]; then
tmpFqdn=${firstEp%%:*} tmpFqdn=${firstEp%%:*}
substr=":" substr=":"
if [[ $firstEp =~ $substr ]];then if [[ $firstEp =~ $substr ]];then
tmpPort=${firstEp#*:} tmpPort=${firstEp#*:}
else else
tmpPort="" tmpPort=""
fi fi
if [[ "$tmpPort" != "" ]];then if [[ "$tmpPort" != "" ]];then
echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}"
else else
echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}"
fi fi
echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
echo echo
elif [ ! -z "$serverFqdn" ]; then elif [ ! -z "$serverFqdn" ]; then
echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $serverFqdn${GREEN_DARK} to login into TDengine server${NC}" echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $serverFqdn${GREEN_DARK} to login into TDengine server${NC}"
echo echo
fi fi
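
The firstEp block above splits a host:port endpoint with plain bash parameter expansion, falling back to an empty port when no ':' is present. A tiny illustration on a made-up value (the endpoint is hypothetical; only the expansions mirror the installer):

    # Illustration: how the installer derives FQDN and port from firstEp.
    firstEp="node1.example.com:6030"   # hypothetical endpoint
    tmpFqdn=${firstEp%%:*}             # strip longest ":*" suffix -> node1.example.com
    tmpPort=${firstEp#*:}              # strip shortest "*:" prefix -> 6030
    # The script only uses the second expansion after its =~ ":" check,
    # because without a colon it would return the whole string unchanged.
    echo "$tmpFqdn $tmpPort"
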
echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" echo -e "\033[44;32;1mTDengine is installed successfully!${NC}"
echo echo
else # Only install client else # Only install client
install_bin install_bin
install_config install_config
@ -945,6 +992,6 @@ elif [ "$verType" == "client" ]; then
else else
install_TDengine client install_TDengine client
fi fi
else else
echo "please input correct verType" echo "please input correct verType"
fi fi

View File

@ -1,12 +1,12 @@
#!/bin/bash #!/bin/bash
# #
# This file is used to install TAOS time-series database on linux systems. The operating system # This file is used to install TAOS time-series database on linux systems. The operating system
# is required to use systemd to manage services at boot # is required to use systemd to manage services at boot
set -e set -e
# set -x # set -x
# -----------------------Variables definition--------------------- # -----------------------Variables definition
source_dir=$1 source_dir=$1
binary_dir=$2 binary_dir=$2
osType=$3 osType=$3
@ -71,9 +71,9 @@ if [ "$osType" != "Darwin" ]; then
service_mod=0 service_mod=0
elif $(which service &> /dev/null); then elif $(which service &> /dev/null); then
service_mod=1 service_mod=1
service_config_dir="/etc/init.d" service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then if $(which chkconfig &> /dev/null); then
initd_mod=1 initd_mod=1
elif $(which insserv &> /dev/null); then elif $(which insserv &> /dev/null); then
initd_mod=2 initd_mod=2
elif $(which update-rc.d &> /dev/null); then elif $(which update-rc.d &> /dev/null); then
@ -123,9 +123,9 @@ function kill_taosd() {
function install_main_path() { function install_main_path() {
#create install main dir and all sub dir #create install main dir and all sub dir
${csudo} rm -rf ${install_main_dir} || : ${csudo} rm -rf ${install_main_dir} || :
${csudo} mkdir -p ${install_main_dir} ${csudo} mkdir -p ${install_main_dir}
${csudo} mkdir -p ${install_main_dir}/cfg ${csudo} mkdir -p ${install_main_dir}/cfg
${csudo} mkdir -p ${install_main_dir}/bin ${csudo} mkdir -p ${install_main_dir}/bin
${csudo} mkdir -p ${install_main_dir}/connector ${csudo} mkdir -p ${install_main_dir}/connector
${csudo} mkdir -p ${install_main_dir}/driver ${csudo} mkdir -p ${install_main_dir}/driver
${csudo} mkdir -p ${install_main_dir}/examples ${csudo} mkdir -p ${install_main_dir}/examples
@ -176,6 +176,49 @@ function install_bin() {
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || : [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || :
fi fi
} }
function install_jemalloc() {
if [ "$osType" != "Darwin" ]; then
/usr/bin/install -c -d /usr/local/bin
if [ -f ${binary_dir}/build/bin/jemalloc-config ]; then
/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jemalloc-config /usr/local/bin
fi
if [ -f ${binary_dir}/build/bin/jemalloc.sh ]; then
/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jemalloc.sh /usr/local/bin
fi
if [ -f ${binary_dir}/build/bin/jeprof ]; then
/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jeprof /usr/local/bin
fi
if [ -f ${binary_dir}/build/include/jemalloc/jemalloc.h ]; then
/usr/bin/install -c -d /usr/local/include/jemalloc
/usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
fi
if [ -f ${binary_dir}/build/lib/libjemalloc.so.2 ]; then
/usr/bin/install -c -d /usr/local/lib
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.so.2 /usr/local/lib
ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
/usr/bin/install -c -d /usr/local/lib
if [ -f ${binary_dir}/build/lib/libjemalloc.a ]; then
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.a /usr/local/lib
fi
if [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ]; then
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib
fi
if [ -f ${binary_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
/usr/bin/install -c -d /usr/local/lib/pkgconfig
/usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
fi
fi
if [ -f ${binary_dir}/build/share/doc/jemalloc/jemalloc.html ]; then
/usr/bin/install -c -d /usr/local/share/doc/jemalloc
/usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
fi
if [ -f ${binary_dir}/build/share/man/man3/jemalloc.3 ]; then
/usr/bin/install -c -d /usr/local/share/man/man3
/usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3 /usr/local/share/man/man3
fi
fi
}
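
The install_jemalloc() function added above only copies artifacts that already exist under ${binary_dir}/build, i.e. when the tree was configured with the JEMALLOC_ENABLED option this commit introduces. A minimal post-install check, as a sketch (paths assume the default /usr/local prefix used above; the LD_PRELOAD run is illustrative and not something the installer performs):

    # Sketch: confirm the pieces copied by install_jemalloc() are in place.
    /usr/local/bin/jemalloc-config --version                    # bundled jemalloc version
    ls -l /usr/local/lib/libjemalloc.so.2 /usr/local/lib/libjemalloc.so
    # Smoke test: run any binary against the freshly installed allocator.
    LD_PRELOAD=/usr/local/lib/libjemalloc.so.2 ls >/dev/null && echo "jemalloc preload OK"
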
function install_lib() { function install_lib() {
# Remove links # Remove links
@ -183,12 +226,12 @@ function install_lib() {
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${lib64_link_dir}/libtaos.* || : ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
fi fi
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
${csudo} cp ${binary_dir}/build/lib/libtaos.so.${verNumber} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* ${csudo} cp ${binary_dir}/build/lib/libtaos.so.${verNumber} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so ${csudo} ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
if [ -d "${lib64_link_dir}" ]; then if [ -d "${lib64_link_dir}" ]; then
${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 ${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1
${csudo} ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so ${csudo} ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so
@ -198,7 +241,9 @@ function install_lib() {
${csudo} ln -sf ${install_main_dir}/driver/libtaos.1.dylib ${lib_link_dir}/libtaos.1.dylib ${csudo} ln -sf ${install_main_dir}/driver/libtaos.1.dylib ${lib_link_dir}/libtaos.1.dylib
${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib ${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi fi
install_jemalloc
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
${csudo} ldconfig ${csudo} ldconfig
fi fi
@ -206,26 +251,26 @@ function install_lib() {
function install_header() { function install_header() {
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
} }
function install_config() { function install_config() {
#${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
if [ ! -f ${cfg_install_dir}/taos.cfg ]; then if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
${csudo} mkdir -p ${cfg_install_dir} ${csudo} mkdir -p ${cfg_install_dir}
[ -f ${script_dir}/../cfg/taos.cfg ] && ${csudo} cp ${script_dir}/../cfg/taos.cfg ${cfg_install_dir} [ -f ${script_dir}/../cfg/taos.cfg ] && ${csudo} cp ${script_dir}/../cfg/taos.cfg ${cfg_install_dir}
${csudo} chmod 644 ${cfg_install_dir}/* ${csudo} chmod 644 ${cfg_install_dir}/*
fi fi
${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org ${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
} }
function install_log() { function install_log() {
${csudo} rm -rf ${log_dir} || : ${csudo} rm -rf ${log_dir} || :
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
@ -239,7 +284,7 @@ function install_log() {
function install_data() { function install_data() {
${csudo} mkdir -p ${data_dir} ${csudo} mkdir -p ${data_dir}
${csudo} ln -s ${data_dir} ${install_main_dir}/data ${csudo} ln -s ${data_dir} ${install_main_dir}/data
} }
function install_connector() { function install_connector() {
@ -254,8 +299,8 @@ function install_connector() {
echo "WARNING: go connector not found, please check if want to use it!" echo "WARNING: go connector not found, please check if want to use it!"
fi fi
${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector ${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector
${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null ${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null
} }
function install_examples() { function install_examples() {
@ -264,8 +309,8 @@ function install_examples() {
function clean_service_on_sysvinit() { function clean_service_on_sysvinit() {
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
#${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
if pidof taosd &> /dev/null; then if pidof taosd &> /dev/null; then
${csudo} service taosd stop || : ${csudo} service taosd stop || :
fi fi
@ -277,9 +322,9 @@ function clean_service_on_sysvinit() {
elif ((${initd_mod}==3)); then elif ((${initd_mod}==3)); then
${csudo} update-rc.d -f taosd remove || : ${csudo} update-rc.d -f taosd remove || :
fi fi
${csudo} rm -f ${service_config_dir}/taosd || : ${csudo} rm -f ${service_config_dir}/taosd || :
if $(which init &> /dev/null); then if $(which init &> /dev/null); then
${csudo} init q || : ${csudo} init q || :
fi fi
@ -298,10 +343,10 @@ function install_service_on_sysvinit() {
${csudo} cp -f ${script_dir}/../rpm/taosd ${install_main_dir}/init.d ${csudo} cp -f ${script_dir}/../rpm/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../rpm/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd ${csudo} cp ${script_dir}/../rpm/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
fi fi
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
#${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab" #${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
if ((${initd_mod}==1)); then if ((${initd_mod}==1)); then
${csudo} chkconfig --add taosd || : ${csudo} chkconfig --add taosd || :
${csudo} chkconfig --level 2345 taosd on || : ${csudo} chkconfig --level 2345 taosd on || :
@ -323,7 +368,7 @@ function clean_service_on_systemd() {
${csudo} systemctl disable taosd &> /dev/null || echo &> /dev/null ${csudo} systemctl disable taosd &> /dev/null || echo &> /dev/null
${csudo} rm -f ${taosd_service_config} ${csudo} rm -f ${taosd_service_config}
} }
# taos:2345:respawn:/etc/init.d/taosd start # taos:2345:respawn:/etc/init.d/taosd start
@ -383,7 +428,7 @@ function update_TDengine() {
sleep 1 sleep 1
fi fi
fi fi
install_main_path install_main_path
install_log install_log
@ -431,16 +476,16 @@ function install_TDengine() {
# Start to install # Start to install
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
echo -e "${GREEN}Start to install TDEngine...${NC}" echo -e "${GREEN}Start to install TDEngine...${NC}"
else else
echo -e "${GREEN}Start to install TDEngine Client ...${NC}" echo -e "${GREEN}Start to install TDEngine Client ...${NC}"
fi fi
install_main_path install_main_path
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
install_data install_data
fi fi
install_log install_log
install_header install_header
install_lib install_lib
install_connector install_connector
@ -452,7 +497,7 @@ function install_TDengine() {
install_service install_service
fi fi
install_config install_config
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
# Ask if to start the service # Ask if to start the service

View File

@ -30,12 +30,12 @@ else
install_dir="${release_dir}/TDengine-server-${version}" install_dir="${release_dir}/TDengine-server-${version}"
fi fi
# Directories and files. # Directories and files
if [ "$pagMode" == "lite" ]; then if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taosd strip ${build_dir}/bin/taosd
strip ${build_dir}/bin/taos strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh" bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
else else
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator\ bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator\
${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb" ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb"
fi fi
@ -73,10 +73,43 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taos
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
if [ -f ${build_dir}/bin/jemalloc-config ]; then
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/bin/jeprof ]; then
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
fi
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
fi
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
fi
fi
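
When the build tree contains jemalloc, the block above stages its binaries, headers, libraries, pkg-config file, docs and man page under ${install_dir}/jemalloc before the tarball is created. A quick sanity check of the staged layout, as a sketch to be run in the same script context where ${install_dir} is already set:

    # Sketch: show whatever jemalloc content was staged into the package.
    if [ -d "${install_dir}/jemalloc" ]; then
      ls -lR "${install_dir}/jemalloc"
    else
      echo "packaging without jemalloc (no jemalloc-config in the build tree)"
    fi
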
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >> remove_temp.sh sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >> remove_temp.sh
mv remove_temp.sh ${install_dir}/bin/remove.sh mv remove_temp.sh ${install_dir}/bin/remove.sh
mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
rm -rf ${install_dir}/nginxd/png rm -rf ${install_dir}/nginxd/png
@ -132,7 +165,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then
rm -rf ${examples_dir}/JDBC/taosdemo/target rm -rf ${examples_dir}/JDBC/taosdemo/target
fi fi
cp -r ${examples_dir}/JDBC ${install_dir}/examples cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples cp -r ${examples_dir}/matlab ${install_dir}/examples
cp -r ${examples_dir}/python ${install_dir}/examples cp -r ${examples_dir}/python ${install_dir}/examples
@ -142,7 +175,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/C# ${install_dir}/examples cp -r ${examples_dir}/C# ${install_dir}/examples
fi fi
# Copy driver # Copy driver
mkdir -p ${install_dir}/driver mkdir -p ${install_dir}/driver
cp ${lib_files} ${install_dir}/driver cp ${lib_files} ${install_dir}/driver
# Copy connector # Copy connector
@ -168,7 +201,7 @@ fi
# exit 1 # exit 1
cd ${release_dir} cd ${release_dir}
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${osType}-${cpuType} pkg_name=${install_dir}-${osType}-${cpuType}
@ -185,8 +218,8 @@ fi
if [ "$verType" == "beta" ]; then if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType} pkg_name=${pkg_name}-${verType}
elif [ "$verType" == "stable" ]; then elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name} pkg_name=${pkg_name}
else else
echo "unknow verType, nor stabel or beta" echo "unknow verType, nor stabel or beta"
exit 1 exit 1

View File

@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#ifndef TDENGINE_TSCLOCALMERGE_H #ifndef TDENGINE_TSCGLOBALMERGE_H
#define TDENGINE_TSCLOCALMERGE_H #define TDENGINE_TSCGLOBALMERGE_H
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
@ -24,7 +24,7 @@ extern "C" {
#include "qFill.h" #include "qFill.h"
#include "taosmsg.h" #include "taosmsg.h"
#include "tlosertree.h" #include "tlosertree.h"
#include "tsclient.h" #include "qExecutor.h"
#define MAX_NUM_OF_SUBQUERY_RETRY 3 #define MAX_NUM_OF_SUBQUERY_RETRY 3
@ -38,7 +38,7 @@ typedef struct SLocalDataSource {
tFilePage filePage; tFilePage filePage;
} SLocalDataSource; } SLocalDataSource;
typedef struct SLocalMerger { typedef struct SGlobalMerger {
SLocalDataSource **pLocalDataSrc; SLocalDataSource **pLocalDataSrc;
int32_t numOfBuffer; int32_t numOfBuffer;
int32_t numOfCompleted; int32_t numOfCompleted;
@ -48,20 +48,22 @@ typedef struct SLocalMerger {
tOrderDescriptor *pDesc; tOrderDescriptor *pDesc;
tExtMemBuffer **pExtMemBuffer; // disk-based buffer tExtMemBuffer **pExtMemBuffer; // disk-based buffer
char *buf; // temp buffer char *buf; // temp buffer
} SLocalMerger; } SGlobalMerger;
struct SSqlObj;
typedef struct SRetrieveSupport { typedef struct SRetrieveSupport {
tExtMemBuffer ** pExtMemBuffer; // for build loser tree tExtMemBuffer ** pExtMemBuffer; // for build loser tree
tOrderDescriptor *pOrderDescriptor; tOrderDescriptor *pOrderDescriptor;
int32_t subqueryIndex; // index of current vnode in vnode list int32_t subqueryIndex; // index of current vnode in vnode list
SSqlObj * pParentSql; struct SSqlObj *pParentSql;
tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to
uint32_t numOfRetry; // record the number of retry times uint32_t numOfRetry; // record the number of retry times
} SRetrieveSupport; } SRetrieveSupport;
int32_t tscLocalReducerEnvCreate(SQueryInfo* pQueryInfo, tExtMemBuffer ***pMemBuffer, int32_t numOfSub, tOrderDescriptor **pDesc, uint32_t nBufferSize, int64_t id); int32_t tscCreateGlobalMergerEnv(SQueryInfo* pQueryInfo, tExtMemBuffer ***pMemBuffer, int32_t numOfSub, tOrderDescriptor **pDesc, uint32_t nBufferSize, int64_t id);
void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, int32_t numOfVnodes); void tscDestroyGlobalMergerEnv(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, int32_t numOfVnodes);
int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, void *data, int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, void *data,
int32_t numOfRows, int32_t orderType); int32_t numOfRows, int32_t orderType);
@ -71,13 +73,13 @@ int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tF
/* /*
* create local reducer to launch the second-stage reduce process at client site * create local reducer to launch the second-stage reduce process at client site
*/ */
int32_t tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc, int32_t tscCreateGlobalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SQueryInfo *pQueryInfo, SLocalMerger **pMerger, int64_t id); SQueryInfo *pQueryInfo, SGlobalMerger **pMerger, int64_t id);
void tscDestroyLocalMerger(SLocalMerger* pLocalMerger); void tscDestroyGlobalMerger(SGlobalMerger* pMerger);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
#endif // TDENGINE_TSCLOCALMERGE_H #endif // TDENGINE_TSCGLOBALMERGE_H

View File

@ -20,13 +20,13 @@
extern "C" { extern "C" {
#endif #endif
#include "tsched.h"
#include "exception.h" #include "exception.h"
#include "os.h" #include "os.h"
#include "qExtbuffer.h" #include "qExtbuffer.h"
#include "taosdef.h" #include "taosdef.h"
#include "tbuffer.h" #include "tbuffer.h"
#include "tscLocalMerge.h" #include "tscGlobalmerge.h"
#include "tsched.h"
#include "tsclient.h" #include "tsclient.h"
#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \ #define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \
@ -63,7 +63,7 @@ typedef struct SJoinSupporter {
SArray* exprList; SArray* exprList;
SFieldInfo fieldsInfo; SFieldInfo fieldsInfo;
STagCond tagCond; STagCond tagCond;
SGroupbyExpr groupInfo; // group by info SGroupbyExpr groupInfo; // group by info
struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array
FILE* f; // temporary file in order to create TSBuf FILE* f; // temporary file in order to create TSBuf
char path[PATH_MAX]; // temporary file path, todo dynamic allocate memory char path[PATH_MAX]; // temporary file path, todo dynamic allocate memory
@ -111,7 +111,7 @@ void* tscDestroyUdfArrayList(SArray* pUdfList);
void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable, bool removeMeta); void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable, bool removeMeta);
int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock); int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock);
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap); int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBlockMap);
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, SName* pName, STableMeta* pTableMeta, int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, SName* pName, STableMeta* pTableMeta,
STableDataBlocks** dataBlocks, SArray* pBlockList); STableDataBlocks** dataBlocks, SArray* pBlockList);
@ -124,6 +124,8 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
*/ */
bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo); bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
bool tscIsTWAQuery(SQueryInfo* pQueryInfo); bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo);
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo); bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo);
bool tscGroupbyColumn(SQueryInfo* pQueryInfo); bool tscGroupbyColumn(SQueryInfo* pQueryInfo);
bool tscIsTopBotQuery(SQueryInfo* pQueryInfo); bool tscIsTopBotQuery(SQueryInfo* pQueryInfo);
@ -286,6 +288,7 @@ void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src);
SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, int32_t cmd); SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, int32_t cmd);
void registerSqlObj(SSqlObj* pSql); void registerSqlObj(SSqlObj* pSql);
void tscInitResForMerge(SSqlRes* pRes);
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t fp, void* param, int32_t cmd, SSqlObj* pPrevSql); SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t fp, void* param, int32_t cmd, SSqlObj* pPrevSql);
void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex); void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex);
@ -330,12 +333,15 @@ SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo);
int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr); int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr);
void tsCreateSQLFunctionCtx(SQueryInfo* pQueryInfo, SQLFunctionCtx* pCtx, SSchema* pSchema); void tsCreateSQLFunctionCtx(SQueryInfo* pQueryInfo, SQLFunctionCtx* pCtx, SSchema* pSchema);
void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, SExprInfo* pExprs, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pOperator, char* sql, void* addr, int32_t stage); void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pOperator, char* sql, void* addr, int32_t stage);
void* malloc_throw(size_t size); void* malloc_throw(size_t size);
void* calloc_throw(size_t nmemb, size_t size); void* calloc_throw(size_t nmemb, size_t size);
char* strdup_throw(const char* str); char* strdup_throw(const char* str);
bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src);
SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@ -1,93 +0,0 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_TSCHEMAUTIL_H
#define TDENGINE_TSCHEMAUTIL_H
#ifdef __cplusplus
extern "C" {
#endif
#include "taosmsg.h"
#include "tsclient.h"
#include "ttoken.h"
/**
* get the number of tags of this table
* @param pTableMeta
* @return
*/
int32_t tscGetNumOfTags(const STableMeta* pTableMeta);
/**
* get the number of columns of this table
* @param pTableMeta
* @return
*/
int32_t tscGetNumOfColumns(const STableMeta* pTableMeta);
/**
* get the basic info of this table
* @param pTableMeta
* @return
*/
STableComInfo tscGetTableInfo(const STableMeta* pTableMeta);
/**
* get the schema
* @param pTableMeta
* @return
*/
SSchema* tscGetTableSchema(const STableMeta* pTableMeta);
/**
* get the tag schema
* @param pMeta
* @return
*/
SSchema *tscGetTableTagSchema(const STableMeta *pMeta);
/**
* get the column schema according to the column index
* @param pMeta
* @param colIndex
* @return
*/
SSchema *tscGetTableColumnSchema(const STableMeta *pMeta, int32_t colIndex);
/**
* get the column schema according to the column id
* @param pTableMeta
* @param colId
* @return
*/
SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId);
/**
* create the table meta from the msg
* @param pTableMetaMsg
* @param size size of the table meta
* @return
*/
STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg);
bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src);
SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg);
#ifdef __cplusplus
}
#endif
#endif // TDENGINE_TSCHEMAUTIL_H

View File

@ -40,17 +40,9 @@ extern "C" {
// forward declaration // forward declaration
struct SSqlInfo; struct SSqlInfo;
struct SLocalMerger;
typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int32_t numOfRows); typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int32_t numOfRows);
typedef struct STableComInfo {
uint8_t numOfTags;
uint8_t precision;
int16_t numOfColumns;
int32_t rowSize;
} STableComInfo;
typedef struct SNewVgroupInfo { typedef struct SNewVgroupInfo {
int32_t vgId; int32_t vgId;
int8_t inUse; int8_t inUse;
@ -66,34 +58,6 @@ typedef struct CChildTableMeta {
uint64_t suid; // super table id uint64_t suid; // super table id
} CChildTableMeta; } CChildTableMeta;
typedef struct STableMeta {
int32_t vgId;
STableId id;
uint8_t tableType;
char sTableName[TSDB_TABLE_FNAME_LEN]; // super table name
uint64_t suid; // super table id
int16_t sversion;
int16_t tversion;
STableComInfo tableInfo;
SSchema schema[]; // if the table is TSDB_CHILD_TABLE, schema is acquired by super table meta info
} STableMeta;
typedef struct STableMetaInfo {
STableMeta *pTableMeta; // table meta, cached in client side and acquired by name
uint32_t tableMetaSize;
SVgroupsInfo *vgroupList;
SArray *pVgroupTables; // SArray<SVgroupTableInfo>
/*
* 1. keep the vgroup index during the multi-vnode super table projection query
* 2. keep the vgroup index for multi-vnode insertion
*/
int32_t vgroupIndex;
SName name;
char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql
SArray *tagColList; // SArray<SColumn*>, involved tag columns
} STableMetaInfo;
typedef struct SColumnIndex { typedef struct SColumnIndex {
int16_t tableIndex; int16_t tableIndex;
int16_t columnIndex; int16_t columnIndex;
@ -111,44 +75,6 @@ typedef struct SInternalField {
SExprInfo *pExpr; SExprInfo *pExpr;
} SInternalField; } SInternalField;
typedef struct SFieldInfo {
int16_t numOfOutput; // number of column in result
TAOS_FIELD* final;
SArray *internalField; // SArray<SInternalField>
} SFieldInfo;
typedef struct SCond {
uint64_t uid;
int32_t len; // length of tag query condition data
char * cond;
} SCond;
typedef struct SJoinNode {
uint64_t uid;
int16_t tagColId;
SArray* tsJoin;
SArray* tagJoin;
} SJoinNode;
typedef struct SJoinInfo {
bool hasJoin;
SJoinNode *joinTables[TSDB_MAX_JOIN_TABLE_NUM];
} SJoinInfo;
typedef struct STagCond {
// relation between tbname list and query condition, including : TK_AND or TK_OR
int16_t relType;
// tbname query condition, only support tbname query condition on one table
SCond tbnameCond;
// join condition, only support two tables join currently
SJoinInfo joinInfo;
// for different table, the query condition must be seperated
SArray *pCond;
} STagCond;
typedef struct SParamInfo { typedef struct SParamInfo {
int32_t idx; int32_t idx;
uint8_t type; uint8_t type;
@ -191,59 +117,6 @@ typedef struct STableDataBlocks {
SParamInfo *params; SParamInfo *params;
} STableDataBlocks; } STableDataBlocks;
typedef struct SQueryInfo {
int16_t command; // the command may be different for each subclause, so keep it seperately.
uint32_t type; // query/insert type
STimeWindow window; // the whole query time window
SInterval interval; // tumble time window
SSessionWindow sessionWindow; // session time window
SGroupbyExpr groupbyExpr; // groupby tags info
SArray * colList; // SArray<SColumn*>
SFieldInfo fieldsInfo;
SArray * exprList; // SArray<SExprInfo*>
SArray * exprList1; // final exprlist in case of arithmetic expression exists
SLimitVal limit;
SLimitVal slimit;
STagCond tagCond;
SOrderVal order;
int16_t fillType; // final result fill type
int16_t numOfTables;
STableMetaInfo **pTableMetaInfo;
struct STSBuf *tsBuf;
int64_t * fillVal; // default value for fill
char * msg; // pointer to the pCmd->payload to keep error message temporarily
int64_t clauseLimit; // limit for current sub clause
int64_t prjOffset; // offset value in the original sql expression, only applied at client side
int64_t vgroupLimit; // table limit in case of super table projection query + global order + limit
int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
int16_t resColumnId; // result column id
bool distinctTag; // distinct tag or not
int32_t round; // 0/1/....
int32_t bufLen;
char* buf;
SQInfo* pQInfo; // global merge operator
SQueryAttr* pQueryAttr; // query object
struct SQueryInfo *sibling; // sibling
SArray *pUpstream; // SArray<struct SQueryInfo>
struct SQueryInfo *pDownstream;
int32_t havingFieldNum;
bool stableQuery;
bool groupbyColumn;
bool simpleAgg;
bool arithmeticOnAgg;
bool projectionQuery;
bool hasFilter;
bool onlyTagQuery;
bool globalMerge; // need global merge
SArray *pUdfInfo; // user defined function information SArray<SUdfInfo>
} SQueryInfo;
typedef struct { typedef struct {
STableMeta *pTableMeta; STableMeta *pTableMeta;
SVgroupsInfo *pVgroupInfo; SVgroupsInfo *pVgroupInfo;
@ -257,9 +130,13 @@ typedef struct SInsertStatementParam {
int8_t schemaAttached; // denote if submit block is built with table schema or not int8_t schemaAttached; // denote if submit block is built with table schema or not
STagData tagData; // NOTE: pTagData->data is used as a variant length array STagData tagData; // NOTE: pTagData->data is used as a variant length array
int32_t batchSize; // for parameter ('?') binding and batch processing
int32_t numOfParams;
char msg[512]; // error message char msg[512]; // error message
char *sql; // current sql statement position
uint32_t insertType; // insert data from [file|sql statement| bound statement] uint32_t insertType; // insert data from [file|sql statement| bound statement]
uint64_t objectId; // sql object id
char *sql; // current sql statement position
} SInsertStatementParam; } SInsertStatementParam;
// TODO extract sql parser supporter // TODO extract sql parser supporter
@ -268,15 +145,10 @@ typedef struct {
uint8_t msgType; uint8_t msgType;
SInsertStatementParam insertParam; SInsertStatementParam insertParam;
char reserve1[3]; // fix bus error on arm32 char reserve1[3]; // fix bus error on arm32
int32_t count; // todo remove it
bool subCmd; bool subCmd;
union {
int32_t count;
};
char * curSql; // current sql, resume position of sql after parsing paused
char reserve2[3]; // fix bus error on arm32 char reserve2[3]; // fix bus error on arm32
int16_t numOfCols; int16_t numOfCols;
char reserve3[2]; // fix bus error on arm32 char reserve3[2]; // fix bus error on arm32
uint32_t allocSize; uint32_t allocSize;
@ -287,8 +159,6 @@ typedef struct {
SQueryInfo *pQueryInfo; SQueryInfo *pQueryInfo;
SQueryInfo *active; // current active query info SQueryInfo *active; // current active query info
int32_t batchSize; // for parameter ('?') binding and batch processing int32_t batchSize; // for parameter ('?') binding and batch processing
int32_t numOfParams;
STagData tagData; // NOTE: pTagData->data is used as a variant length array
int32_t resColumnId; int32_t resColumnId;
} SSqlCmd; } SSqlCmd;
@ -324,7 +194,7 @@ typedef struct {
TAOS_FIELD* final; TAOS_FIELD* final;
SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions
struct SLocalMerger *pLocalMerger; struct SGlobalMerger *pMerger;
} SSqlRes; } SSqlRes;
typedef struct { typedef struct {
@ -451,7 +321,7 @@ void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo);
void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock); void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock);
void handleDownstreamOperator(SSqlObj** pSqlList, int32_t numOfUpstream, SQueryInfo* px, SSqlRes* pOutput); void handleDownstreamOperator(SSqlObj** pSqlList, int32_t numOfUpstream, SQueryInfo* px, SSqlRes* pOutput);
void destroyTableNameList(SSqlCmd* pCmd); void destroyTableNameList(SInsertStatementParam* pInsertParam);
void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta); void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
@ -483,7 +353,7 @@ void waitForQueryRsp(void *param, TAOS_RES *tres, int code);
void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, __async_cb_func_t fp, void *param, const char *sqlstr, size_t sqlLen); void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, __async_cb_func_t fp, void *param, const char *sqlstr, size_t sqlLen);
void tscImportDataFromFile(SSqlObj *pSql); void tscImportDataFromFile(SSqlObj *pSql);
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen); struct SGlobalMerger* tscInitResObjForLocalQuery(int32_t numOfRes, int32_t rowLen, uint64_t id);
bool tscIsUpdateQuery(SSqlObj* pSql); bool tscIsUpdateQuery(SSqlObj* pSql);
char* tscGetSqlStr(SSqlObj* pSql); char* tscGetSqlStr(SSqlObj* pSql);
bool tscIsQueryWithLimit(SSqlObj* pSql); bool tscIsQueryWithLimit(SSqlObj* pSql);
@ -493,7 +363,7 @@ void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32
char *tscGetErrorMsgPayload(SSqlCmd *pCmd); char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql); int32_t tscInvalidOperationMsg(char *msg, const char *additionalInfo, const char *sql);
int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* sql); int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* sql);
int32_t tscValidateSqlInfo(SSqlObj *pSql, struct SSqlInfo *pInfo); int32_t tscValidateSqlInfo(SSqlObj *pSql, struct SSqlInfo *pInfo);

View File

@ -22,7 +22,7 @@
#include "tscSubquery.h" #include "tscSubquery.h"
#include "tscUtil.h" #include "tscUtil.h"
#include "tsched.h" #include "tsched.h"
#include "tschemautil.h" #include "qTableMeta.h"
#include "tsclient.h" #include "tsclient.h"
static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOfRows); static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOfRows);
@ -58,7 +58,6 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para
strntolower(pSql->sqlstr, sqlstr, (int32_t)sqlLen); strntolower(pSql->sqlstr, sqlstr, (int32_t)sqlLen);
tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr); tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
pCmd->curSql = pSql->sqlstr;
pCmd->resColumnId = TSDB_RES_COL_ID; pCmd->resColumnId = TSDB_RES_COL_ID;
int32_t code = tsParseSql(pSql, true); int32_t code = tsParseSql(pSql, true);
@ -221,7 +220,7 @@ void taos_fetch_rows_a(TAOS_RES *tres, __async_cb_func_t fp, void *param) {
tscResetForNextRetrieve(pRes); tscResetForNextRetrieve(pRes);
// handle the sub queries of join query // handle outer query based on the already retrieved nest query results.
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd); SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
if (pQueryInfo->pUpstream != NULL && taosArrayGetSize(pQueryInfo->pUpstream) > 0) { if (pQueryInfo->pUpstream != NULL && taosArrayGetSize(pQueryInfo->pUpstream) > 0) {
SSchedMsg schedMsg = {0}; SSchedMsg schedMsg = {0};

View File

@ -13,15 +13,19 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include "tscLocalMerge.h"
#include "tscSubquery.h"
#include "os.h" #include "os.h"
#include "texpr.h" #include "texpr.h"
#include "tlosertree.h" #include "tlosertree.h"
#include "tscGlobalmerge.h"
#include "tscSubquery.h"
#include "tscLog.h" #include "tscLog.h"
#include "tsclient.h"
#include "qUtil.h" #include "qUtil.h"
#define COLMODEL_GET_VAL(data, schema, rowId, colId) \
(data + (schema)->pFields[colId].offset * ((schema)->capacity) + (rowId) * (schema)->pFields[colId].field.bytes)
typedef struct SCompareParam { typedef struct SCompareParam {
SLocalDataSource **pLocalData; SLocalDataSource **pLocalData;
tOrderDescriptor * pDesc; tOrderDescriptor * pDesc;
@ -29,9 +33,18 @@ typedef struct SCompareParam {
int32_t groupOrderType; int32_t groupOrderType;
} SCompareParam; } SCompareParam;
bool needToMergeRv(SSDataBlock* pBlock, SArray* columnIndex, int32_t index, char **buf); static bool needToMerge(SSDataBlock* pBlock, SArray* columnIndexList, int32_t index, char **buf) {
int32_t ret = 0;
size_t size = taosArrayGetSize(columnIndexList);
if (size > 0) {
ret = compare_aRv(pBlock, columnIndexList, (int32_t) size, index, buf, TSDB_ORDER_ASC);
}
int32_t treeComparator(const void *pLeft, const void *pRight, void *param) { // if ret == 0, means the result belongs to the same group
return (ret == 0);
}
static int32_t treeComparator(const void *pLeft, const void *pRight, void *param) {
int32_t pLeftIdx = *(int32_t *)pLeft; int32_t pLeftIdx = *(int32_t *)pLeft;
int32_t pRightIdx = *(int32_t *)pRight; int32_t pRightIdx = *(int32_t *)pRight;
@ -57,16 +70,16 @@ int32_t treeComparator(const void *pLeft, const void *pRight, void *param) {
} }
} }
int32_t tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc, int32_t tscCreateGlobalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SQueryInfo* pQueryInfo, SLocalMerger **pMerger, int64_t id) { SQueryInfo* pQueryInfo, SGlobalMerger **pMerger, int64_t id) {
if (pMemBuffer == NULL) { if (pMemBuffer == NULL) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, numOfBuffer); tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
tscError("0x%"PRIx64" %p pMemBuffer is NULL", id, pMemBuffer); tscError("0x%"PRIx64" %p pMemBuffer is NULL", id, pMemBuffer);
return TSDB_CODE_TSC_APP_ERROR; return TSDB_CODE_TSC_APP_ERROR;
} }
if (pDesc->pColumnModel == NULL) { if (pDesc->pColumnModel == NULL) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, numOfBuffer); tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
tscError("0x%"PRIx64" no local buffer or intermediate result format model", id); tscError("0x%"PRIx64" no local buffer or intermediate result format model", id);
return TSDB_CODE_TSC_APP_ERROR; return TSDB_CODE_TSC_APP_ERROR;
} }
@ -83,7 +96,7 @@ int32_t tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tO
} }
if (numOfFlush == 0 || numOfBuffer == 0) { if (numOfFlush == 0 || numOfBuffer == 0) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, numOfBuffer); tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
tscDebug("0x%"PRIx64" no data to retrieve", id); tscDebug("0x%"PRIx64" no data to retrieve", id);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -92,15 +105,15 @@ int32_t tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tO
tscError("0x%"PRIx64" Invalid value of buffer capacity %d and page size %d ", id, pDesc->pColumnModel->capacity, tscError("0x%"PRIx64" Invalid value of buffer capacity %d and page size %d ", id, pDesc->pColumnModel->capacity,
pMemBuffer[0]->pageSize); pMemBuffer[0]->pageSize);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, numOfBuffer); tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
return TSDB_CODE_TSC_APP_ERROR; return TSDB_CODE_TSC_APP_ERROR;
} }
*pMerger = (SLocalMerger *) calloc(1, sizeof(SLocalMerger)); *pMerger = (SGlobalMerger *) calloc(1, sizeof(SGlobalMerger));
if ((*pMerger) == NULL) { if ((*pMerger) == NULL) {
tscError("0x%"PRIx64" failed to create local merge structure, out of memory", id); tscError("0x%"PRIx64" failed to create local merge structure, out of memory", id);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, numOfBuffer); tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
return TSDB_CODE_TSC_OUT_OF_MEMORY; return TSDB_CODE_TSC_OUT_OF_MEMORY;
} }
@ -160,7 +173,7 @@ int32_t tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tO
// no data actually, no need to merge result. // no data actually, no need to merge result.
if (idx == 0) { if (idx == 0) {
tscDebug("0x%"PRIx64" retrieved no data", id); tscDebug("0x%"PRIx64" retrieved no data", id);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, numOfBuffer); tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -198,7 +211,7 @@ int32_t tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tO
} }
// restore the limitation value at the last stage // restore the limitation value at the last stage
if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { if (pQueryInfo->orderProjectQuery) {
pQueryInfo->limit.limit = pQueryInfo->clauseLimit; pQueryInfo->limit.limit = pQueryInfo->clauseLimit;
pQueryInfo->limit.offset = pQueryInfo->prjOffset; pQueryInfo->limit.offset = pQueryInfo->prjOffset;
} }
@ -297,28 +310,28 @@ int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePa
return 0; return 0;
} }
void tscDestroyLocalMerger(SLocalMerger* pLocalMerger) { void tscDestroyGlobalMerger(SGlobalMerger* pMerger) {
if (pLocalMerger == NULL) { if (pMerger == NULL) {
return; return;
} }
for (int32_t i = 0; i < pLocalMerger->numOfBuffer; ++i) { for (int32_t i = 0; i < pMerger->numOfBuffer; ++i) {
tfree(pLocalMerger->pLocalDataSrc[i]); tfree(pMerger->pLocalDataSrc[i]);
} }
pLocalMerger->numOfBuffer = 0; pMerger->numOfBuffer = 0;
tscLocalReducerEnvDestroy(pLocalMerger->pExtMemBuffer, pLocalMerger->pDesc, pLocalMerger->numOfVnode); tscDestroyGlobalMergerEnv(pMerger->pExtMemBuffer, pMerger->pDesc, pMerger->numOfVnode);
pLocalMerger->numOfCompleted = 0; pMerger->numOfCompleted = 0;
if (pLocalMerger->pLoserTree) { if (pMerger->pLoserTree) {
tfree(pLocalMerger->pLoserTree->param); tfree(pMerger->pLoserTree->param);
tfree(pLocalMerger->pLoserTree); tfree(pMerger->pLoserTree);
} }
tfree(pLocalMerger->buf); tfree(pMerger->buf);
tfree(pLocalMerger->pLocalDataSrc); tfree(pMerger->pLocalDataSrc);
free(pLocalMerger); free(pMerger);
} }
static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SQueryInfo* pQueryInfo, SColumnModel *pModel) { static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SQueryInfo* pQueryInfo, SColumnModel *pModel) {
@ -329,7 +342,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SQueryInfo*
} }
// primary timestamp column is involved in final result // primary timestamp column is involved in final result
if (pQueryInfo->interval.interval != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { if (pQueryInfo->interval.interval != 0 || pQueryInfo->orderProjectQuery) {
numOfGroupByCols++; numOfGroupByCols++;
} }
@ -392,7 +405,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SQueryInfo*
} }
} }
int32_t tscLocalReducerEnvCreate(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBuffer, int32_t numOfSub, int32_t tscCreateGlobalMergerEnv(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBuffer, int32_t numOfSub,
tOrderDescriptor **pOrderDesc, uint32_t nBufferSizes, int64_t id) { tOrderDescriptor **pOrderDesc, uint32_t nBufferSizes, int64_t id) {
SSchema *pSchema = NULL; SSchema *pSchema = NULL;
SColumnModel *pModel = NULL; SColumnModel *pModel = NULL;
@ -456,7 +469,7 @@ int32_t tscLocalReducerEnvCreate(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBu
* @param pDesc * @param pDesc
* @param numOfVnodes * @param numOfVnodes
*/ */
void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, int32_t numOfVnodes) { void tscDestroyGlobalMergerEnv(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, int32_t numOfVnodes) {
tOrderDescDestroy(pDesc); tOrderDescDestroy(pDesc);
for (int32_t i = 0; i < numOfVnodes; ++i) { for (int32_t i = 0; i < numOfVnodes; ++i) {
pMemBuffer[i] = destoryExtMemBuffer(pMemBuffer[i]); pMemBuffer[i] = destoryExtMemBuffer(pMemBuffer[i]);
@ -467,12 +480,12 @@ void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDe
/** /**
* *
* @param pLocalMerge * @param pMerger
* @param pOneInterDataSrc * @param pOneInterDataSrc
* @param treeList * @param treeList
* @return the number of remain input source. if ret == 0, all data has been handled * @return the number of remain input source. if ret == 0, all data has been handled
*/ */
int32_t loadNewDataFromDiskFor(SLocalMerger *pLocalMerge, SLocalDataSource *pOneInterDataSrc, int32_t loadNewDataFromDiskFor(SGlobalMerger *pMerger, SLocalDataSource *pOneInterDataSrc,
bool *needAdjustLoserTree) { bool *needAdjustLoserTree) {
pOneInterDataSrc->rowIdx = 0; pOneInterDataSrc->rowIdx = 0;
pOneInterDataSrc->pageId += 1; pOneInterDataSrc->pageId += 1;
@ -489,17 +502,17 @@ int32_t loadNewDataFromDiskFor(SLocalMerger *pLocalMerge, SLocalDataSource *pOne
#endif #endif
*needAdjustLoserTree = true; *needAdjustLoserTree = true;
} else { } else {
pLocalMerge->numOfCompleted += 1; pMerger->numOfCompleted += 1;
pOneInterDataSrc->rowIdx = -1; pOneInterDataSrc->rowIdx = -1;
pOneInterDataSrc->pageId = -1; pOneInterDataSrc->pageId = -1;
*needAdjustLoserTree = true; *needAdjustLoserTree = true;
} }
return pLocalMerge->numOfBuffer; return pMerger->numOfBuffer;
} }
void adjustLoserTreeFromNewData(SLocalMerger *pLocalMerge, SLocalDataSource *pOneInterDataSrc, void adjustLoserTreeFromNewData(SGlobalMerger *pMerger, SLocalDataSource *pOneInterDataSrc,
SLoserTreeInfo *pTree) { SLoserTreeInfo *pTree) {
/* /*
* load a new data page into memory for intermediate dataset source, * load a new data page into memory for intermediate dataset source,
@ -507,7 +520,7 @@ void adjustLoserTreeFromNewData(SLocalMerger *pLocalMerge, SLocalDataSource *pOn
*/ */
bool needToAdjust = true; bool needToAdjust = true;
if (pOneInterDataSrc->filePage.num <= pOneInterDataSrc->rowIdx) { if (pOneInterDataSrc->filePage.num <= pOneInterDataSrc->rowIdx) {
loadNewDataFromDiskFor(pLocalMerge, pOneInterDataSrc, &needToAdjust); loadNewDataFromDiskFor(pMerger, pOneInterDataSrc, &needToAdjust);
} }
/* /*
@ -515,7 +528,7 @@ void adjustLoserTreeFromNewData(SLocalMerger *pLocalMerge, SLocalDataSource *pOn
* if the loser tree is rebuild completed, we do not need to adjust * if the loser tree is rebuild completed, we do not need to adjust
*/ */
if (needToAdjust) { if (needToAdjust) {
int32_t leafNodeIdx = pTree->pNode[0].index + pLocalMerge->numOfBuffer; int32_t leafNodeIdx = pTree->pNode[0].index + pMerger->numOfBuffer;
#ifdef _DEBUG_VIEW #ifdef _DEBUG_VIEW
printf("before adjust:\t"); printf("before adjust:\t");
@ -567,7 +580,7 @@ static void setTagValueForMultipleRows(SQLFunctionCtx* pCtx, int32_t numOfOutput
} }
} }
static void doExecuteFinalMergeRv(SOperatorInfo* pOperator, int32_t numOfExpr, SSDataBlock* pBlock) { static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSDataBlock* pBlock) {
SMultiwayMergeInfo* pInfo = pOperator->info; SMultiwayMergeInfo* pInfo = pOperator->info;
SQLFunctionCtx* pCtx = pInfo->binfo.pCtx; SQLFunctionCtx* pCtx = pInfo->binfo.pCtx;
@ -579,7 +592,7 @@ static void doExecuteFinalMergeRv(SOperatorInfo* pOperator, int32_t numOfExpr, S
for(int32_t i = 0; i < pBlock->info.rows; ++i) { for(int32_t i = 0; i < pBlock->info.rows; ++i) {
if (pInfo->hasPrev) { if (pInfo->hasPrev) {
if (needToMergeRv(pBlock, pInfo->orderColumnList, i, pInfo->prevRow)) { if (needToMerge(pBlock, pInfo->orderColumnList, i, pInfo->prevRow)) {
for (int32_t j = 0; j < numOfExpr; ++j) { for (int32_t j = 0; j < numOfExpr; ++j) {
pCtx[j].pInput = add[j] + pCtx[j].inputBytes * i; pCtx[j].pInput = add[j] + pCtx[j].inputBytes * i;
} }
@ -694,45 +707,27 @@ static void doExecuteFinalMergeRv(SOperatorInfo* pOperator, int32_t numOfExpr, S
tfree(add); tfree(add);
} }
bool needToMergeRv(SSDataBlock* pBlock, SArray* columnIndexList, int32_t index, char **buf) { static bool isAllSourcesCompleted(SGlobalMerger *pMerger) {
int32_t ret = 0; return (pMerger->numOfBuffer == pMerger->numOfCompleted);
size_t size = taosArrayGetSize(columnIndexList);
if (size > 0) {
ret = compare_aRv(pBlock, columnIndexList, (int32_t) size, index, buf, TSDB_ORDER_ASC);
}
// if ret == 0, means the result belongs to the same group
return (ret == 0);
} }
static bool isAllSourcesCompleted(SLocalMerger *pLocalMerge) { SGlobalMerger* tscInitResObjForLocalQuery(int32_t numOfRes, int32_t rowLen, uint64_t id) {
return (pLocalMerge->numOfBuffer == pLocalMerge->numOfCompleted); SGlobalMerger *pMerger = calloc(1, sizeof(SGlobalMerger));
} if (pMerger == NULL) {
tscDebug("0x%"PRIx64" free local reducer finished", id);
void tscInitResObjForLocalQuery(SSqlObj *pSql, int32_t numOfRes, int32_t rowLen) { return NULL;
SSqlRes *pRes = &pSql->res;
if (pRes->pLocalMerger != NULL) {
tscDestroyLocalMerger(pRes->pLocalMerger);
pRes->pLocalMerger = NULL;
tscDebug("0x%"PRIx64" free local reducer finished", pSql->self);
} }
pRes->qId = 1; // hack to pass the safety check in fetch_row function
pRes->numOfRows = 0;
pRes->row = 0;
pRes->rspType = 0; // used as a flag to denote if taos_retrieved() has been called yet
pRes->pLocalMerger = (SLocalMerger *)calloc(1, sizeof(SLocalMerger));
/* /*
* One more byte space is required, since the sprintf function needs one additional space to put '\0' at * One more byte space is required, since the sprintf function needs one additional space to put '\0' at
* the end of string * the end of string
*/ */
size_t size = numOfRes * rowLen + 1; size_t size = numOfRes * rowLen + 1;
pRes->pLocalMerger->buf = calloc(1, size); pMerger->buf = calloc(1, size);
pRes->data = pRes->pLocalMerger->buf; return pMerger;
} }
// todo remove it
int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) { int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) {
int32_t maxRowSize = MAX(rowSize, finalRowSize); int32_t maxRowSize = MAX(rowSize, finalRowSize);
char* pbuf = calloc(1, (size_t)(pOutput->num * maxRowSize)); char* pbuf = calloc(1, (size_t)(pOutput->num * maxRowSize));
@ -776,9 +771,6 @@ int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_
return offset; return offset;
} }
#define COLMODEL_GET_VAL(data, schema, rowId, colId) \
(data + (schema)->pFields[colId].offset * ((schema)->capacity) + (rowId) * (schema)->pFields[colId].field.bytes)
static void appendOneRowToDataBlock(SSDataBlock *pBlock, char *buf, SColumnModel *pModel, int32_t rowIndex, static void appendOneRowToDataBlock(SSDataBlock *pBlock, char *buf, SColumnModel *pModel, int32_t rowIndex,
int32_t maxRows) { int32_t maxRows) {
for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) { for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
@ -800,7 +792,7 @@ SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup) {
SMultiwayMergeInfo *pInfo = pOperator->info; SMultiwayMergeInfo *pInfo = pOperator->info;
SLocalMerger *pMerger = pInfo->pMerge; SGlobalMerger *pMerger = pInfo->pMerge;
SLoserTreeInfo *pTree = pMerger->pLoserTree; SLoserTreeInfo *pTree = pMerger->pLoserTree;
pInfo->binfo.pRes->info.rows = 0; pInfo->binfo.pRes->info.rows = 0;
@ -884,7 +876,7 @@ SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup) {
return (pInfo->binfo.pRes->info.rows > 0)? pInfo->binfo.pRes:NULL; return (pInfo->binfo.pRes->info.rows > 0)? pInfo->binfo.pRes:NULL;
} }
static bool isSameGroupRv(SArray* orderColumnList, SSDataBlock* pBlock, char** dataCols) { static bool isSameGroup(SArray* orderColumnList, SSDataBlock* pBlock, char** dataCols) {
int32_t numOfCols = (int32_t) taosArrayGetSize(orderColumnList); int32_t numOfCols = (int32_t) taosArrayGetSize(orderColumnList);
for (int32_t i = 0; i < numOfCols; ++i) { for (int32_t i = 0; i < numOfCols; ++i) {
SColIndex *pIndex = taosArrayGet(orderColumnList, i); SColIndex *pIndex = taosArrayGet(orderColumnList, i);
@ -937,7 +929,7 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
} }
} }
doExecuteFinalMergeRv(pOperator, pOperator->numOfOutput, pAggInfo->pExistBlock); doExecuteFinalMerge(pOperator, pOperator->numOfOutput, pAggInfo->pExistBlock);
savePrevOrderColumns(pAggInfo->currentGroupColData, pAggInfo->groupColumnList, pAggInfo->pExistBlock, 0, savePrevOrderColumns(pAggInfo->currentGroupColData, pAggInfo->groupColumnList, pAggInfo->pExistBlock, 0,
&pAggInfo->hasGroupColData); &pAggInfo->hasGroupColData);
@ -958,7 +950,7 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
} }
if (pAggInfo->hasGroupColData) { if (pAggInfo->hasGroupColData) {
bool sameGroup = isSameGroupRv(pAggInfo->groupColumnList, pBlock, pAggInfo->currentGroupColData); bool sameGroup = isSameGroup(pAggInfo->groupColumnList, pBlock, pAggInfo->currentGroupColData);
if (!sameGroup) { if (!sameGroup) {
*newgroup = true; *newgroup = true;
pAggInfo->hasDataBlockForNewGroup = true; pAggInfo->hasDataBlockForNewGroup = true;
@ -972,7 +964,7 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
setInputDataBlock(pOperator, pAggInfo->binfo.pCtx, pBlock, TSDB_ORDER_ASC); setInputDataBlock(pOperator, pAggInfo->binfo.pCtx, pBlock, TSDB_ORDER_ASC);
updateOutputBuf(&pAggInfo->binfo, &pAggInfo->bufCapacity, pBlock->info.rows * pAggInfo->resultRowFactor); updateOutputBuf(&pAggInfo->binfo, &pAggInfo->bufCapacity, pBlock->info.rows * pAggInfo->resultRowFactor);
doExecuteFinalMergeRv(pOperator, pOperator->numOfOutput, pBlock); doExecuteFinalMerge(pOperator, pOperator->numOfOutput, pBlock);
savePrevOrderColumns(pAggInfo->currentGroupColData, pAggInfo->groupColumnList, pBlock, 0, &pAggInfo->hasGroupColData); savePrevOrderColumns(pAggInfo->currentGroupColData, pAggInfo->groupColumnList, pBlock, 0, &pAggInfo->hasGroupColData);
handleData = true; handleData = true;
} }


@ -20,7 +20,7 @@
#include "tname.h" #include "tname.h"
#include "tscLog.h" #include "tscLog.h"
#include "tscUtil.h" #include "tscUtil.h"
#include "tschemautil.h" #include "qTableMeta.h"
#include "tsclient.h" #include "tsclient.h"
#include "taos.h" #include "taos.h"
#include "tscSubquery.h" #include "tscSubquery.h"
@ -71,7 +71,9 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
numOfRows = numOfRows + tscGetNumOfTags(pMeta); numOfRows = numOfRows + tscGetNumOfTags(pMeta);
} }
tscInitResObjForLocalQuery(pSql, totalNumOfRows, rowLen); pSql->res.pMerger = tscInitResObjForLocalQuery(totalNumOfRows, rowLen, pSql->self);
tscInitResForMerge(&pSql->res);
SSchema *pSchema = tscGetTableSchema(pMeta); SSchema *pSchema = tscGetTableSchema(pMeta);
for (int32_t i = 0; i < numOfRows; ++i) { for (int32_t i = 0; i < numOfRows; ++i) {
@ -433,7 +435,8 @@ static int32_t tscSCreateSetValueToResObj(SSqlObj *pSql, int32_t rowLen, const c
if (strlen(ddl) == 0) { if (strlen(ddl) == 0) {
} }
tscInitResObjForLocalQuery(pSql, numOfRows, rowLen); pSql->res.pMerger = tscInitResObjForLocalQuery(numOfRows, rowLen, pSql->self);
tscInitResForMerge(&pSql->res);
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 0); TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 0);
char* dst = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * numOfRows; char* dst = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * numOfRows;
@ -882,7 +885,8 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa
TAOS_FIELD f = tscCreateField((int8_t)type, columnName, (int16_t)valueLength); TAOS_FIELD f = tscCreateField((int8_t)type, columnName, (int16_t)valueLength);
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
tscInitResObjForLocalQuery(pSql, 1, (int32_t)valueLength); pSql->res.pMerger = tscInitResObjForLocalQuery(1, (int32_t)valueLength, pSql->self);
tscInitResForMerge(&pSql->res);
SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, 0); SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, 0);
pInfo->pExpr = taosArrayGetP(pQueryInfo->exprList, 0); pInfo->pExpr = taosArrayGetP(pQueryInfo->exprList, 0);


@ -23,7 +23,7 @@
#include "ttype.h" #include "ttype.h"
#include "hash.h" #include "hash.h"
#include "tscUtil.h" #include "tscUtil.h"
#include "tschemautil.h" #include "qTableMeta.h"
#include "tsclient.h" #include "tsclient.h"
#include "ttokendef.h" #include "ttokendef.h"
#include "taosdef.h" #include "taosdef.h"
@ -38,8 +38,9 @@ enum {
TSDB_USE_CLI_TS = 1, TSDB_USE_CLI_TS = 1,
}; };
static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows); static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t *numOfRows);
static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SSchema* pSchema, char* str, char** end); static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDataColInfo *pColInfo, SSchema *pSchema,
char *str, char **end);
static int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) { static int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) {
errno = 0; errno = 0;
@ -71,7 +72,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
} else { } else {
// strptime("2001-11-12 18:31:01", "%Y-%m-%d %H:%M:%S", &tm); // strptime("2001-11-12 18:31:01", "%Y-%m-%d %H:%M:%S", &tm);
if (taosParseTime(pToken->z, time, pToken->n, timePrec, tsDaylight) != TSDB_CODE_SUCCESS) { if (taosParseTime(pToken->z, time, pToken->n, timePrec, tsDaylight) != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(error, "invalid timestamp format", pToken->z); return tscInvalidOperationMsg(error, "invalid timestamp format", pToken->z);
} }
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
@ -103,7 +104,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
pTokenEnd += index; pTokenEnd += index;
if (valueToken.n < 2) { if (valueToken.n < 2) {
return tscInvalidSQLErrMsg(error, "value expected in timestamp", sToken.z); return tscInvalidOperationMsg(error, "value expected in timestamp", sToken.z);
} }
if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) { if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
@ -138,7 +139,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
char *endptr = NULL; char *endptr = NULL;
if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) { if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) {
return tscInvalidSQLErrMsg(msg, "invalid numeric data", pToken->z); return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z);
} }
switch (pSchema->type) { switch (pSchema->type) {
@ -161,7 +162,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
double dv = strtod(pToken->z, NULL); double dv = strtod(pToken->z, NULL);
*(uint8_t *)payload = (int8_t)((dv == 0) ? TSDB_FALSE : TSDB_TRUE); *(uint8_t *)payload = (int8_t)((dv == 0) ? TSDB_FALSE : TSDB_TRUE);
} else { } else {
return tscInvalidSQLErrMsg(msg, "invalid bool data", pToken->z); return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z);
} }
} }
break; break;
@ -173,9 +174,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else { } else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(msg, "invalid tinyint data", pToken->z); return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z);
} else if (!IS_VALID_TINYINT(iv)) { } else if (!IS_VALID_TINYINT(iv)) {
return tscInvalidSQLErrMsg(msg, "data overflow", pToken->z); return tscInvalidOperationMsg(msg, "data overflow", pToken->z);
} }
*((uint8_t *)payload) = (uint8_t)iv; *((uint8_t *)payload) = (uint8_t)iv;
@ -189,9 +190,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else { } else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(msg, "invalid unsigned tinyint data", pToken->z); return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z);
} else if (!IS_VALID_UTINYINT(iv)) { } else if (!IS_VALID_UTINYINT(iv)) {
return tscInvalidSQLErrMsg(msg, "unsigned tinyint data overflow", pToken->z); return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z);
} }
*((uint8_t *)payload) = (uint8_t)iv; *((uint8_t *)payload) = (uint8_t)iv;
@ -205,9 +206,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else { } else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(msg, "invalid smallint data", pToken->z); return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z);
} else if (!IS_VALID_SMALLINT(iv)) { } else if (!IS_VALID_SMALLINT(iv)) {
return tscInvalidSQLErrMsg(msg, "smallint data overflow", pToken->z); return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z);
} }
*((int16_t *)payload) = (int16_t)iv; *((int16_t *)payload) = (int16_t)iv;
@ -221,9 +222,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else { } else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(msg, "invalid unsigned smallint data", pToken->z); return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z);
} else if (!IS_VALID_USMALLINT(iv)) { } else if (!IS_VALID_USMALLINT(iv)) {
return tscInvalidSQLErrMsg(msg, "unsigned smallint data overflow", pToken->z); return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z);
} }
*((uint16_t *)payload) = (uint16_t)iv; *((uint16_t *)payload) = (uint16_t)iv;
@ -237,9 +238,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else { } else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(msg, "invalid int data", pToken->z); return tscInvalidOperationMsg(msg, "invalid int data", pToken->z);
} else if (!IS_VALID_INT(iv)) { } else if (!IS_VALID_INT(iv)) {
return tscInvalidSQLErrMsg(msg, "int data overflow", pToken->z); return tscInvalidOperationMsg(msg, "int data overflow", pToken->z);
} }
*((int32_t *)payload) = (int32_t)iv; *((int32_t *)payload) = (int32_t)iv;
@ -253,9 +254,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else { } else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(msg, "invalid unsigned int data", pToken->z); return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z);
} else if (!IS_VALID_UINT(iv)) { } else if (!IS_VALID_UINT(iv)) {
return tscInvalidSQLErrMsg(msg, "unsigned int data overflow", pToken->z); return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z);
} }
*((uint32_t *)payload) = (uint32_t)iv; *((uint32_t *)payload) = (uint32_t)iv;
@ -269,9 +270,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else { } else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(msg, "invalid bigint data", pToken->z); return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z);
} else if (!IS_VALID_BIGINT(iv)) { } else if (!IS_VALID_BIGINT(iv)) {
return tscInvalidSQLErrMsg(msg, "bigint data overflow", pToken->z); return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z);
} }
*((int64_t *)payload) = iv; *((int64_t *)payload) = iv;
@ -284,9 +285,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else { } else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(msg, "invalid unsigned bigint data", pToken->z); return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z);
} else if (!IS_VALID_UBIGINT((uint64_t)iv)) { } else if (!IS_VALID_UBIGINT((uint64_t)iv)) {
return tscInvalidSQLErrMsg(msg, "unsigned bigint data overflow", pToken->z); return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z);
} }
*((uint64_t *)payload) = iv; *((uint64_t *)payload) = iv;
@ -299,11 +300,11 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else { } else {
double dv; double dv;
if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
return tscInvalidSQLErrMsg(msg, "illegal float data", pToken->z); return tscInvalidOperationMsg(msg, "illegal float data", pToken->z);
} }
if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) || isnan(dv)) { if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) || isnan(dv)) {
return tscInvalidSQLErrMsg(msg, "illegal float data", pToken->z); return tscInvalidOperationMsg(msg, "illegal float data", pToken->z);
} }
// *((float *)payload) = (float)dv; // *((float *)payload) = (float)dv;
@ -317,11 +318,11 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else { } else {
double dv; double dv;
if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
return tscInvalidSQLErrMsg(msg, "illegal double data", pToken->z); return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
} }
if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) { if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) {
return tscInvalidSQLErrMsg(msg, "illegal double data", pToken->z); return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
} }
*((double *)payload) = dv; *((double *)payload) = dv;
@ -334,7 +335,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
setVardataNull(payload, TSDB_DATA_TYPE_BINARY); setVardataNull(payload, TSDB_DATA_TYPE_BINARY);
} else { // values that are too long return an invalid SQL error and are not truncated automatically } else { // values that are too long return an invalid SQL error and are not truncated automatically
if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { //todo refactor if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { //todo refactor
return tscInvalidSQLErrMsg(msg, "string data overflow", pToken->z); return tscInvalidOperationMsg(msg, "string data overflow", pToken->z);
} }
STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n); STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n);
@ -351,7 +352,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
if (!taosMbsToUcs4(pToken->z, pToken->n, varDataVal(payload), pSchema->bytes - VARSTR_HEADER_SIZE, &output)) { if (!taosMbsToUcs4(pToken->z, pToken->n, varDataVal(payload), pSchema->bytes - VARSTR_HEADER_SIZE, &output)) {
char buf[512] = {0}; char buf[512] = {0};
snprintf(buf, tListLen(buf), "%s", strerror(errno)); snprintf(buf, tListLen(buf), "%s", strerror(errno));
return tscInvalidSQLErrMsg(msg, buf, pToken->z); return tscInvalidOperationMsg(msg, buf, pToken->z);
} }
varDataSetLen(payload, output); varDataSetLen(payload, output);
@ -368,7 +369,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else { } else {
int64_t temp; int64_t temp;
if (tsParseTime(pToken, &temp, str, msg, timePrec) != TSDB_CODE_SUCCESS) { if (tsParseTime(pToken, &temp, str, msg, timePrec) != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(msg, "invalid timestamp", pToken->z); return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z);
} }
*((int64_t *)payload) = temp; *((int64_t *)payload) = temp;
@ -417,8 +418,8 @@ int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int16_t timePrec, int32_t *len, int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len,
char *tmpTokenBuf) { char *tmpTokenBuf, SInsertStatementParam* pInsertParam) {
int32_t index = 0; int32_t index = 0;
SStrToken sToken = {0}; SStrToken sToken = {0};
char *payload = pDataBlocks->pData + pDataBlocks->size; char *payload = pDataBlocks->pData + pDataBlocks->size;
@ -441,8 +442,8 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1
*str += index; *str += index;
if (sToken.type == TK_QUESTION) { if (sToken.type == TK_QUESTION) {
if (pCmd->insertParam.insertType != TSDB_QUERY_TYPE_STMT_INSERT) { if (pInsertParam->insertType != TSDB_QUERY_TYPE_STMT_INSERT) {
return tscSQLSyntaxErrMsg(pCmd->payload, "? only allowed in binding insertion", *str); return tscSQLSyntaxErrMsg(pInsertParam->msg, "? only allowed in binding insertion", *str);
} }
uint32_t offset = (uint32_t)(start - pDataBlocks->pData); uint32_t offset = (uint32_t)(start - pDataBlocks->pData);
@ -450,14 +451,14 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1
continue; continue;
} }
strcpy(pCmd->payload, "client out of memory"); strcpy(pInsertParam->msg, "client out of memory");
return TSDB_CODE_TSC_OUT_OF_MEMORY; return TSDB_CODE_TSC_OUT_OF_MEMORY;
} }
int16_t type = sToken.type; int16_t type = sToken.type;
if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL && if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL &&
type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || (sToken.n == 0) || (type == TK_RP)) { type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || (sToken.n == 0) || (type == TK_RP)) {
return tscSQLSyntaxErrMsg(pCmd->payload, "invalid data or symbol", sToken.z); return tscSQLSyntaxErrMsg(pInsertParam->msg, "invalid data or symbol", sToken.z);
} }
// Remove quotation marks // Remove quotation marks
@ -487,13 +488,13 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1
} }
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX); bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
int32_t ret = tsParseOneColumn(pSchema, &sToken, start, pCmd->payload, str, isPrimaryKey, timePrec); int32_t ret = tsParseOneColumn(pSchema, &sToken, start, pInsertParam->msg, str, isPrimaryKey, timePrec);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
return ret; return ret;
} }
if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) { if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) {
tscInvalidSQLErrMsg(pCmd->payload, "client time/server time can not be mixed up", sToken.z); tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z);
return TSDB_CODE_TSC_INVALID_TIME_STAMP; return TSDB_CODE_TSC_INVALID_TIME_STAMP;
} }
} }
@ -536,7 +537,8 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) {
} }
} }
int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSqlCmd* pCmd, int32_t* numOfRows, char *tmpTokenBuf) { int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SInsertStatementParam *pInsertParam,
int32_t* numOfRows, char *tmpTokenBuf) {
int32_t index = 0; int32_t index = 0;
int32_t code = 0; int32_t code = 0;
@ -559,7 +561,7 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSq
int32_t tSize; int32_t tSize;
code = tscAllocateMemIfNeed(pDataBlock, tinfo.rowSize, &tSize); code = tscAllocateMemIfNeed(pDataBlock, tinfo.rowSize, &tSize);
if (code != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client if (code != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client
strcpy(pCmd->payload, "client out of memory"); strcpy(pInsertParam->msg, "client out of memory");
return TSDB_CODE_TSC_OUT_OF_MEMORY; return TSDB_CODE_TSC_OUT_OF_MEMORY;
} }
@ -568,7 +570,7 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSq
} }
int32_t len = 0; int32_t len = 0;
code = tsParseOneRow(str, pDataBlock, pCmd, precision, &len, tmpTokenBuf); code = tsParseOneRow(str, pDataBlock, precision, &len, tmpTokenBuf, pInsertParam);
if (code != TSDB_CODE_SUCCESS) { // error message has been set in tsParseOneRow, return directly if (code != TSDB_CODE_SUCCESS) { // error message has been set in tsParseOneRow, return directly
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR; return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
} }
@ -578,7 +580,7 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSq
index = 0; index = 0;
sToken = tStrGetToken(*str, &index, false); sToken = tStrGetToken(*str, &index, false);
if (sToken.n == 0 || sToken.type != TK_RP) { if (sToken.n == 0 || sToken.type != TK_RP) {
tscSQLSyntaxErrMsg(pCmd->payload, ") expected", *str); tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", *str);
code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR; code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
return code; return code;
} }
@ -589,7 +591,7 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSq
} }
if ((*numOfRows) <= 0) { if ((*numOfRows) <= 0) {
strcpy(pCmd->payload, "no any data points"); strcpy(pInsertParam->msg, "no any data points");
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR; return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
} else { } else {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
@ -699,7 +701,7 @@ void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) {
dataBuf->prevTS = INT64_MIN; dataBuf->prevTS = INT64_MIN;
} }
static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) { static int32_t doParseInsertStatement(SInsertStatementParam *pInsertParam, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) {
STableComInfo tinfo = tscGetTableInfo(dataBuf->pTableMeta); STableComInfo tinfo = tscGetTableInfo(dataBuf->pTableMeta);
int32_t maxNumOfRows; int32_t maxNumOfRows;
@ -712,7 +714,7 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlock
char tmpTokenBuf[16*1024] = {0}; // used for deleting Escape character: \\, \', \" char tmpTokenBuf[16*1024] = {0}; // used for deleting Escape character: \\, \', \"
int32_t numOfRows = 0; int32_t numOfRows = 0;
code = tsParseValues(str, dataBuf, maxNumOfRows, pCmd, &numOfRows, tmpTokenBuf); code = tsParseValues(str, dataBuf, maxNumOfRows, pInsertParam, &numOfRows, tmpTokenBuf);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
return code; return code;
} }
@ -720,7 +722,7 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlock
for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) { for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) {
SParamInfo *param = dataBuf->params + i; SParamInfo *param = dataBuf->params + i;
if (param->idx == -1) { if (param->idx == -1) {
param->idx = pCmd->numOfParams++; param->idx = pInsertParam->numOfParams++;
param->offset -= sizeof(SSubmitBlk); param->offset -= sizeof(SSubmitBlk);
} }
} }
@ -728,7 +730,7 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlock
SSubmitBlk *pBlocks = (SSubmitBlk *)(dataBuf->pData); SSubmitBlk *pBlocks = (SSubmitBlk *)(dataBuf->pData);
code = tsSetBlockInfo(pBlocks, dataBuf->pTableMeta, numOfRows); code = tsSetBlockInfo(pBlocks, dataBuf->pTableMeta, numOfRows);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", *str); tscInvalidOperationMsg(pInsertParam->msg, "too many rows in sql, total number of rows should be less than 32767", *str);
return code; return code;
} }
@ -748,6 +750,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
SSqlCmd * pCmd = &pSql->cmd; SSqlCmd * pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
SInsertStatementParam* pInsertParam = &pCmd->insertParam;
char *sql = *sqlstr; char *sql = *sqlstr;
@ -784,7 +787,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
} }
if (numOfColList == 0 && (*boundColumn) != NULL) { if (numOfColList == 0 && (*boundColumn) != NULL) {
return TSDB_CODE_TSC_INVALID_OPERATION; return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
} }
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, TABLE_INDEX); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, TABLE_INDEX);
@ -805,8 +808,8 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
return code; return code;
} }
tNameExtractFullName(&pSTableMetaInfo->name, pCmd->tagData.name); tNameExtractFullName(&pSTableMetaInfo->name, pInsertParam->tagData.name);
pCmd->tagData.dataLen = 0; pInsertParam->tagData.dataLen = 0;
code = tscGetTableMeta(pSql, pSTableMetaInfo); code = tscGetTableMeta(pSql, pSTableMetaInfo);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
@ -814,7 +817,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
} }
if (!UTIL_TABLE_IS_SUPER_TABLE(pSTableMetaInfo)) { if (!UTIL_TABLE_IS_SUPER_TABLE(pSTableMetaInfo)) {
return tscInvalidSQLErrMsg(pCmd->payload, "create table only from super table is allowed", sToken.z); return tscInvalidOperationMsg(pInsertParam->msg, "create table only from super table is allowed", sToken.z);
} }
SSchema *pTagSchema = tscGetTableTagSchema(pSTableMetaInfo->pTableMeta); SSchema *pTagSchema = tscGetTableTagSchema(pSTableMetaInfo->pTableMeta);
@ -827,7 +830,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
sToken = tStrGetToken(sql, &index, false); sToken = tStrGetToken(sql, &index, false);
if (sToken.type != TK_TAGS && sToken.type != TK_LP) { if (sToken.type != TK_TAGS && sToken.type != TK_LP) {
tscDestroyBoundColumnInfo(&spd); tscDestroyBoundColumnInfo(&spd);
return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z); return tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword TAGS expected", sToken.z);
} }
// parse the bound tags column // parse the bound tags column
@ -837,7 +840,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
* tags(tagVal1, tagVal2, ..., tagValn) values(v1, v2,... vn); * tags(tagVal1, tagVal2, ..., tagValn) values(v1, v2,... vn);
*/ */
char* end = NULL; char* end = NULL;
code = parseBoundColumns(pCmd, &spd, pTagSchema, sql, &end); code = parseBoundColumns(pInsertParam, &spd, pTagSchema, sql, &end);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
tscDestroyBoundColumnInfo(&spd); tscDestroyBoundColumnInfo(&spd);
return code; return code;
@ -858,7 +861,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
if (sToken.type != TK_LP) { if (sToken.type != TK_LP) {
tscDestroyBoundColumnInfo(&spd); tscDestroyBoundColumnInfo(&spd);
return tscInvalidSQLErrMsg(pCmd->payload, "( is expected", sToken.z); return tscSQLSyntaxErrMsg(pInsertParam->msg, "( is expected", sToken.z);
} }
SKVRowBuilder kvRowBuilder = {0}; SKVRowBuilder kvRowBuilder = {0};
@ -877,7 +880,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
if (TK_ILLEGAL == sToken.type) { if (TK_ILLEGAL == sToken.type) {
tdDestroyKVRowBuilder(&kvRowBuilder); tdDestroyKVRowBuilder(&kvRowBuilder);
tscDestroyBoundColumnInfo(&spd); tscDestroyBoundColumnInfo(&spd);
return TSDB_CODE_TSC_INVALID_OPERATION; return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
} }
if (sToken.n == 0 || sToken.type == TK_RP) { if (sToken.n == 0 || sToken.type == TK_RP) {
@ -891,7 +894,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
} }
char tagVal[TSDB_MAX_TAGS_LEN]; char tagVal[TSDB_MAX_TAGS_LEN];
code = tsParseOneColumn(pSchema, &sToken, tagVal, pCmd->payload, &sql, false, tinfo.precision); code = tsParseOneColumn(pSchema, &sToken, tagVal, pInsertParam->msg, &sql, false, tinfo.precision);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
tdDestroyKVRowBuilder(&kvRowBuilder); tdDestroyKVRowBuilder(&kvRowBuilder);
tscDestroyBoundColumnInfo(&spd); tscDestroyBoundColumnInfo(&spd);
@ -906,29 +909,29 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder); SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder);
tdDestroyKVRowBuilder(&kvRowBuilder); tdDestroyKVRowBuilder(&kvRowBuilder);
if (row == NULL) { if (row == NULL) {
return tscInvalidSQLErrMsg(pCmd->payload, "tag value expected", NULL); return tscSQLSyntaxErrMsg(pInsertParam->msg, "tag value expected", NULL);
} }
tdSortKVRowByColIdx(row); tdSortKVRowByColIdx(row);
pCmd->tagData.dataLen = kvRowLen(row); pInsertParam->tagData.dataLen = kvRowLen(row);
if (pCmd->tagData.dataLen <= 0){ if (pInsertParam->tagData.dataLen <= 0){
return tscInvalidSQLErrMsg(pCmd->payload, "tag value expected", NULL); return tscSQLSyntaxErrMsg(pInsertParam->msg, "tag value expected", NULL);
} }
char* pTag = realloc(pCmd->tagData.data, pCmd->tagData.dataLen); char* pTag = realloc(pInsertParam->tagData.data, pInsertParam->tagData.dataLen);
if (pTag == NULL) { if (pTag == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY; return TSDB_CODE_TSC_OUT_OF_MEMORY;
} }
kvRowCpy(pTag, row); kvRowCpy(pTag, row);
free(row); free(row);
pCmd->tagData.data = pTag; pInsertParam->tagData.data = pTag;
index = 0; index = 0;
sToken = tStrGetToken(sql, &index, false); sToken = tStrGetToken(sql, &index, false);
sql += index; sql += index;
if (sToken.n == 0 || sToken.type != TK_RP) { if (sToken.n == 0 || sToken.type != TK_RP) {
return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z); return tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", sToken.z);
} }
/* parse columns after super table tags values. /* parse columns after super table tags values.
@ -941,7 +944,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
int numOfColsAfterTags = 0; int numOfColsAfterTags = 0;
if (sToken.type == TK_LP) { if (sToken.type == TK_LP) {
if (*boundColumn != NULL) { if (*boundColumn != NULL) {
return tscSQLSyntaxErrMsg(pCmd->payload, "bind columns again", sToken.z); return tscSQLSyntaxErrMsg(pInsertParam->msg, "bind columns again", sToken.z);
} else { } else {
*boundColumn = &sToken.z[0]; *boundColumn = &sToken.z[0];
} }
@ -959,7 +962,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
} }
if (numOfColsAfterTags == 0 && (*boundColumn) != NULL) { if (numOfColsAfterTags == 0 && (*boundColumn) != NULL) {
return TSDB_CODE_TSC_INVALID_OPERATION; return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
} }
sToken = tStrGetToken(sql, &index, false); sToken = tStrGetToken(sql, &index, false);
@ -968,7 +971,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
sql = sToken.z; sql = sToken.z;
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) { if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr); return tscInvalidOperationMsg(pInsertParam->msg, "invalid table name", *sqlstr);
} }
int32_t ret = tscSetTableFullName(&pTableMetaInfo->name, &tableToken, pSql); int32_t ret = tscSetTableFullName(&pTableMetaInfo->name, &tableToken, pSql);
@ -977,7 +980,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
} }
if (sql == NULL) { if (sql == NULL) {
return TSDB_CODE_TSC_INVALID_OPERATION; return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
} }
code = tscGetTableMetaEx(pSql, pTableMetaInfo, true); code = tscGetTableMetaEx(pSql, pTableMetaInfo, true);
@ -986,20 +989,18 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
} }
} else { } else {
sql = sToken.z; if (sToken.z == NULL) {
return tscSQLSyntaxErrMsg(pInsertParam->msg, "", sql);
if (sql == NULL) {
return TSDB_CODE_TSC_INVALID_OPERATION;
} }
sql = sToken.z;
code = tscGetTableMetaEx(pSql, pTableMetaInfo, false); code = tscGetTableMetaEx(pSql, pTableMetaInfo, false);
if (pCmd->curSql == NULL) { if (pInsertParam->sql == NULL) {
assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS); assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS);
} }
} }
*sqlstr = sql; *sqlstr = sql;
return code; return code;
} }
@ -1013,21 +1014,21 @@ int validateTableName(char *tblName, int len, SStrToken* psTblToken) {
return tscValidateName(psTblToken); return tscValidateName(psTblToken);
} }
static int32_t validateDataSource(SSqlCmd *pCmd, int32_t type, const char *sql) { static int32_t validateDataSource(SInsertStatementParam *pInsertParam, int32_t type, const char *sql) {
uint32_t *insertType = &pCmd->insertParam.insertType; uint32_t *insertType = &pInsertParam->insertType;
if (*insertType == TSDB_QUERY_TYPE_STMT_INSERT && type == TSDB_QUERY_TYPE_INSERT) { if (*insertType == TSDB_QUERY_TYPE_STMT_INSERT && type == TSDB_QUERY_TYPE_INSERT) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
if ((*insertType) != 0 && (*insertType) != type) { if ((*insertType) != 0 && (*insertType) != type) {
return tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mixed up", sql); return tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword VALUES and FILE are not allowed to mixed up", sql);
} }
*insertType = type; *insertType = type;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SSchema* pSchema, static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDataColInfo* pColInfo, SSchema* pSchema,
char* str, char **end) { char* str, char **end) {
pColInfo->numOfBound = 0; pColInfo->numOfBound = 0;
@ -1043,7 +1044,7 @@ static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SS
str += index; str += index;
if (sToken.type != TK_LP) { if (sToken.type != TK_LP) {
code = tscInvalidSQLErrMsg(pCmd->payload, "( is expected", sToken.z); code = tscSQLSyntaxErrMsg(pInsertParam->msg, "( is expected", sToken.z);
goto _clean; goto _clean;
} }
@ -1070,7 +1071,7 @@ static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SS
for (int32_t t = 0; t < pColInfo->numOfCols; ++t) { for (int32_t t = 0; t < pColInfo->numOfCols; ++t) {
if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) { if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) {
if (pColInfo->cols[t].hasVal == true) { if (pColInfo->cols[t].hasVal == true) {
code = tscInvalidSQLErrMsg(pCmd->payload, "duplicated column name", sToken.z); code = tscInvalidOperationMsg(pInsertParam->msg, "duplicated column name", sToken.z);
goto _clean; goto _clean;
} }
@ -1083,7 +1084,7 @@ static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SS
} }
if (!findColumnIndex) { if (!findColumnIndex) {
code = tscInvalidSQLErrMsg(pCmd->payload, "invalid column/tag name", sToken.z); code = tscInvalidOperationMsg(pInsertParam->msg, "invalid column/tag name", sToken.z);
goto _clean; goto _clean;
} }
} }
@ -1092,10 +1093,25 @@ static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SS
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
_clean: _clean:
pCmd->curSql = NULL; pInsertParam->sql = NULL;
return code; return code;
} }
static int32_t getFileFullPath(SStrToken* pToken, char* output) {
char path[PATH_MAX] = {0};
strncpy(path, pToken->z, pToken->n);
strdequote(path);
wordexp_t full_path;
if (wordexp(path, &full_path, 0) != 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
tstrncpy(output, full_path.we_wordv[0], PATH_MAX);
wordfree(&full_path);
return TSDB_CODE_SUCCESS;
}
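(Illustrative only, not part of this commit: a minimal standalone sketch of the POSIX wordexp() pattern the new getFileFullPath helper relies on; the "~/data.csv" path is made up and the program simply prints the expansion.)

#include <limits.h>
#include <stdio.h>
#include <wordexp.h>

int main(void) {
  char output[PATH_MAX] = {0};
  wordexp_t full_path;
  // expand ~ and environment variables the way the shell would
  if (wordexp("~/data.csv", &full_path, 0) != 0) {
    return 1;
  }
  // keep only the first expansion result, as getFileFullPath does
  snprintf(output, sizeof(output), "%s", full_path.we_wordv[0]);
  wordfree(&full_path);  // release the memory allocated by wordexp()
  printf("expanded path: %s\n", output);
  return 0;
}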
/** /**
* parse insert sql * parse insert sql
* @param pSql * @param pSql
@ -1103,7 +1119,9 @@ static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SS
*/ */
int tsParseInsertSql(SSqlObj *pSql) { int tsParseInsertSql(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
char* str = pCmd->curSql;
SInsertStatementParam* pInsertParam = &pCmd->insertParam;
char* str = pInsertParam->sql;
int32_t totalNum = 0; int32_t totalNum = 0;
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
@ -1118,21 +1136,17 @@ int tsParseInsertSql(SSqlObj *pSql) {
return code; return code;
} }
if ((code = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) { if (NULL == pInsertParam->pTableBlockHashList) {
return code; pInsertParam->pTableBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
} if (NULL == pInsertParam->pTableBlockHashList) {
if (NULL == pCmd->insertParam.pTableBlockHashList) {
pCmd->insertParam.pTableBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
if (NULL == pCmd->insertParam.pTableBlockHashList) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY; code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _clean; goto _clean;
} }
} else { } else {
str = pCmd->curSql; str = pInsertParam->sql;
} }
tscDebug("0x%"PRIx64" create data block list hashList:%p", pSql->self, pCmd->insertParam.pTableBlockHashList); tscDebug("0x%"PRIx64" create data block list hashList:%p", pSql->self, pInsertParam->pTableBlockHashList);
while (1) { while (1) {
int32_t index = 0; int32_t index = 0;
@ -1144,7 +1158,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
* if the data is from the data file, no data has been generated yet. So, there is no data to * if the data is from the data file, no data has been generated yet. So, there is no data to
* merge or submit, save the file path and parse the file in other routines. * merge or submit, save the file path and parse the file in other routines.
*/ */
if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) { if (TSDB_QUERY_HAS_TYPE(pInsertParam->insertType, TSDB_QUERY_TYPE_FILE_INSERT)) {
goto _clean; goto _clean;
} }
@ -1160,13 +1174,13 @@ int tsParseInsertSql(SSqlObj *pSql) {
} }
} }
pCmd->curSql = sToken.z; pInsertParam->sql = sToken.z;
char buf[TSDB_TABLE_FNAME_LEN]; char buf[TSDB_TABLE_FNAME_LEN];
SStrToken sTblToken; SStrToken sTblToken;
sTblToken.z = buf; sTblToken.z = buf;
// Check if the table name is available or not // Check if the table name is available or not
if (validateTableName(sToken.z, sToken.n, &sTblToken) != TSDB_CODE_SUCCESS) { if (validateTableName(sToken.z, sToken.n, &sTblToken) != TSDB_CODE_SUCCESS) {
code = tscInvalidSQLErrMsg(pCmd->payload, "table name invalid", sToken.z); code = tscInvalidOperationMsg(pInsertParam->msg, "table name invalid", sToken.z);
goto _clean; goto _clean;
} }
@ -1185,12 +1199,12 @@ int tsParseInsertSql(SSqlObj *pSql) {
} }
tscError("0x%"PRIx64" async insert parse error, code:%s", pSql->self, tstrerror(code)); tscError("0x%"PRIx64" async insert parse error, code:%s", pSql->self, tstrerror(code));
pCmd->curSql = NULL; pInsertParam->sql = NULL;
goto _clean; goto _clean;
} }
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
code = tscInvalidSQLErrMsg(pCmd->payload, "insert data into super table is not supported", NULL); code = tscInvalidOperationMsg(pInsertParam->msg, "insert data into super table is not supported", NULL);
goto _clean; goto _clean;
} }
@ -1199,58 +1213,49 @@ int tsParseInsertSql(SSqlObj *pSql) {
str += index; str += index;
if (sToken.n == 0 || (sToken.type != TK_FILE && sToken.type != TK_VALUES)) { if (sToken.n == 0 || (sToken.type != TK_FILE && sToken.type != TK_VALUES)) {
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE required", sToken.z); code = tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword VALUES or FILE required", sToken.z);
goto _clean; goto _clean;
} }
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
if (sToken.type == TK_FILE) { if (sToken.type == TK_FILE) {
if (validateDataSource(pCmd, TSDB_QUERY_TYPE_FILE_INSERT, sToken.z) != TSDB_CODE_SUCCESS) { if (validateDataSource(pInsertParam, TSDB_QUERY_TYPE_FILE_INSERT, sToken.z) != TSDB_CODE_SUCCESS) {
goto _clean; goto _clean;
} }
index = 0; index = 0;
sToken = tStrGetToken(str, &index, false); sToken = tStrGetToken(str, &index, false);
if (sToken.type != TK_STRING && sToken.type != TK_ID) { if (sToken.type != TK_STRING && sToken.type != TK_ID) {
code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z); code = tscSQLSyntaxErrMsg(pInsertParam->msg, "file path is required following keyword FILE", sToken.z);
goto _clean; goto _clean;
} }
str += index; str += index;
if (sToken.n == 0) { if (sToken.n == 0) {
code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z); code = tscSQLSyntaxErrMsg(pInsertParam->msg, "file path is required following keyword FILE", sToken.z);
goto _clean; goto _clean;
} }
strncpy(pCmd->payload, sToken.z, sToken.n); code = getFileFullPath(&sToken, pCmd->payload);
strdequote(pCmd->payload); if (code != TSDB_CODE_SUCCESS) {
tscInvalidOperationMsg(pInsertParam->msg, "invalid filename", sToken.z);
// todo refactor extract method
wordexp_t full_path;
if (wordexp(pCmd->payload, &full_path, 0) != 0) {
code = tscInvalidSQLErrMsg(pCmd->payload, "invalid filename", sToken.z);
goto _clean; goto _clean;
} }
tstrncpy(pCmd->payload, full_path.we_wordv[0], pCmd->allocSize);
wordfree(&full_path);
} else { } else {
if (bindedColumns == NULL) { if (bindedColumns == NULL) {
STableMeta *pTableMeta = pTableMetaInfo->pTableMeta; STableMeta *pTableMeta = pTableMetaInfo->pTableMeta;
if (validateDataSource(pInsertParam, TSDB_QUERY_TYPE_INSERT, sToken.z) != TSDB_CODE_SUCCESS) {
if (validateDataSource(pCmd, TSDB_QUERY_TYPE_INSERT, sToken.z) != TSDB_CODE_SUCCESS) {
goto _clean; goto _clean;
} }
STableDataBlocks *dataBuf = NULL; STableDataBlocks *dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pCmd->insertParam.pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE, int32_t ret = tscGetDataBlockFromList(pInsertParam->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta,
&dataBuf, NULL); &dataBuf, NULL);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
goto _clean; goto _clean;
} }
code = doParseInsertStatement(pCmd, &str, dataBuf, &totalNum); code = doParseInsertStatement(pInsertParam, &str, dataBuf, &totalNum);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
goto _clean; goto _clean;
} }
@ -1258,12 +1263,12 @@ int tsParseInsertSql(SSqlObj *pSql) {
// insert into tablename(col1, col2,..., coln) values(v1, v2,... vn); // insert into tablename(col1, col2,..., coln) values(v1, v2,... vn);
STableMeta *pTableMeta = tscGetTableMetaInfoFromCmd(pCmd, 0)->pTableMeta; STableMeta *pTableMeta = tscGetTableMetaInfoFromCmd(pCmd, 0)->pTableMeta;
if (validateDataSource(pCmd, TSDB_QUERY_TYPE_INSERT, sToken.z) != TSDB_CODE_SUCCESS) { if (validateDataSource(pInsertParam, TSDB_QUERY_TYPE_INSERT, sToken.z) != TSDB_CODE_SUCCESS) {
goto _clean; goto _clean;
} }
STableDataBlocks *dataBuf = NULL; STableDataBlocks *dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pCmd->insertParam.pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE, int32_t ret = tscGetDataBlockFromList(pInsertParam->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta,
&dataBuf, NULL); &dataBuf, NULL);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
@ -1271,22 +1276,22 @@ int tsParseInsertSql(SSqlObj *pSql) {
} }
SSchema *pSchema = tscGetTableSchema(pTableMeta); SSchema *pSchema = tscGetTableSchema(pTableMeta);
code = parseBoundColumns(pCmd, &dataBuf->boundColumnInfo, pSchema, bindedColumns, NULL); code = parseBoundColumns(pInsertParam, &dataBuf->boundColumnInfo, pSchema, bindedColumns, NULL);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
goto _clean; goto _clean;
} }
if (dataBuf->boundColumnInfo.cols[0].hasVal == false) { if (dataBuf->boundColumnInfo.cols[0].hasVal == false) {
code = tscInvalidSQLErrMsg(pCmd->payload, "primary timestamp column can not be null", NULL); code = tscInvalidOperationMsg(pInsertParam->msg, "primary timestamp column can not be null", NULL);
goto _clean; goto _clean;
} }
if (sToken.type != TK_VALUES) { if (sToken.type != TK_VALUES) {
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES is expected", sToken.z); code = tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword VALUES is expected", sToken.z);
goto _clean; goto _clean;
} }
code = doParseInsertStatement(pCmd, &str, dataBuf, &totalNum); code = doParseInsertStatement(pInsertParam, &str, dataBuf, &totalNum);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
goto _clean; goto _clean;
} }
@ -1295,13 +1300,13 @@ int tsParseInsertSql(SSqlObj *pSql) {
} }
// we need to keep the data blocks if there are parameters in the sql // we need to keep the data blocks if there are parameters in the sql
if (pCmd->numOfParams > 0) { if (pInsertParam->numOfParams > 0) {
goto _clean; goto _clean;
} }
// merge according to vgId // merge according to vgId
if (!TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_STMT_INSERT) && taosHashGetSize(pCmd->insertParam.pTableBlockHashList) > 0) { if (!TSDB_QUERY_HAS_TYPE(pInsertParam->insertType, TSDB_QUERY_TYPE_STMT_INSERT) && taosHashGetSize(pInsertParam->pTableBlockHashList) > 0) {
if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) { if ((code = tscMergeTableDataBlocks(pInsertParam, true)) != TSDB_CODE_SUCCESS) {
goto _clean; goto _clean;
} }
} }
@ -1310,7 +1315,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
goto _clean; goto _clean;
_clean: _clean:
pCmd->curSql = NULL; pInsertParam->sql = NULL;
return code; return code;
} }
@ -1325,18 +1330,19 @@ int tsInsertInitialCheck(SSqlObj *pSql) {
SStrToken sToken = tStrGetToken(pSql->sqlstr, &index, false); SStrToken sToken = tStrGetToken(pSql->sqlstr, &index, false);
assert(sToken.type == TK_INSERT || sToken.type == TK_IMPORT); assert(sToken.type == TK_INSERT || sToken.type == TK_IMPORT);
pCmd->count = 0; pCmd->count = 0;
pCmd->command = TSDB_SQL_INSERT; pCmd->command = TSDB_SQL_INSERT;
SInsertStatementParam* pInsertParam = &pCmd->insertParam;
SQueryInfo *pQueryInfo = tscGetQueryInfoS(pCmd); SQueryInfo *pQueryInfo = tscGetQueryInfoS(pCmd);
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT); TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT);
sToken = tStrGetToken(pSql->sqlstr, &index, false); sToken = tStrGetToken(pSql->sqlstr, &index, false);
if (sToken.type != TK_INTO) { if (sToken.type != TK_INTO) {
return tscInvalidSQLErrMsg(pCmd->payload, "keyword INTO is expected", sToken.z); return tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword INTO is expected", sToken.z);
} }
pCmd->curSql = sToken.z + sToken.n; pInsertParam->sql = sToken.z + sToken.n;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -1345,7 +1351,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
SSqlCmd* pCmd = &pSql->cmd; SSqlCmd* pCmd = &pSql->cmd;
if (!initial) { if (!initial) {
tscDebug("0x%"PRIx64" resume to parse sql: %s", pSql->self, pCmd->curSql); tscDebug("0x%"PRIx64" resume to parse sql: %s", pSql->self, pCmd->insertParam.sql);
} }
ret = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE); ret = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE);
@ -1355,12 +1361,11 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
if (tscIsInsertData(pSql->sqlstr)) { if (tscIsInsertData(pSql->sqlstr)) {
if (initial && ((ret = tsInsertInitialCheck(pSql)) != TSDB_CODE_SUCCESS)) { if (initial && ((ret = tsInsertInitialCheck(pSql)) != TSDB_CODE_SUCCESS)) {
strncpy(pCmd->payload, pCmd->insertParam.msg, TSDB_DEFAULT_PAYLOAD_SIZE);
return ret; return ret;
} }
ret = tsParseInsertSql(pSql); ret = tsParseInsertSql(pSql);
assert(ret == TSDB_CODE_SUCCESS || ret == TSDB_CODE_TSC_ACTION_IN_PROGRESS || ret == TSDB_CODE_TSC_SQL_SYNTAX_ERROR || ret == TSDB_CODE_TSC_INVALID_OPERATION);
if (pSql->parseRetry < 1 && (ret == TSDB_CODE_TSC_SQL_SYNTAX_ERROR || ret == TSDB_CODE_TSC_INVALID_OPERATION)) { if (pSql->parseRetry < 1 && (ret == TSDB_CODE_TSC_SQL_SYNTAX_ERROR || ret == TSDB_CODE_TSC_INVALID_OPERATION)) {
tscDebug("0x%"PRIx64 " parse insert sql statement failed, code:%s, clear meta cache and retry ", pSql->self, tstrerror(ret)); tscDebug("0x%"PRIx64 " parse insert sql statement failed, code:%s, clear meta cache and retry ", pSql->self, tstrerror(ret));
@ -1371,6 +1376,10 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
ret = tsParseInsertSql(pSql); ret = tsParseInsertSql(pSql);
} }
} }
if (ret != TSDB_CODE_SUCCESS) {
strncpy(pCmd->payload, pCmd->insertParam.msg, TSDB_DEFAULT_PAYLOAD_SIZE);
}
} else { } else {
SSqlInfo sqlInfo = qSqlParse(pSql->sqlstr); SSqlInfo sqlInfo = qSqlParse(pSql->sqlstr);
ret = tscValidateSqlInfo(pSql, &sqlInfo); ret = tscValidateSqlInfo(pSql, &sqlInfo);
@ -1395,29 +1404,25 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
return ret; return ret;
} }
static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlocks *pTableDataBlocks) { static int doPackSendDataBlock(SSqlObj* pSql, SInsertStatementParam *pInsertParam, STableMeta* pTableMeta, int32_t numOfRows, STableDataBlocks *pTableDataBlocks) {
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
SSqlCmd *pCmd = &pSql->cmd;
pSql->res.numOfRows = 0;
STableMeta *pTableMeta = tscGetTableMetaInfoFromCmd(pCmd, 0)->pTableMeta;
SSubmitBlk *pBlocks = (SSubmitBlk *)(pTableDataBlocks->pData); SSubmitBlk *pBlocks = (SSubmitBlk *)(pTableDataBlocks->pData);
code = tsSetBlockInfo(pBlocks, pTableMeta, numOfRows); code = tsSetBlockInfo(pBlocks, pTableMeta, numOfRows);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", NULL); return tscInvalidOperationMsg(pInsertParam->msg, "too many rows in sql, total number of rows should be less than 32767", NULL);
} }
if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) { if ((code = tscMergeTableDataBlocks(pInsertParam, true)) != TSDB_CODE_SUCCESS) {
return code; return code;
} }
STableDataBlocks *pDataBlock = taosArrayGetP(pCmd->insertParam.pDataBlocks, 0); STableDataBlocks *pDataBlock = taosArrayGetP(pInsertParam->pDataBlocks, 0);
if ((code = tscCopyDataBlockToPayload(pSql, pDataBlock)) != TSDB_CODE_SUCCESS) { if ((code = tscCopyDataBlockToPayload(pSql, pDataBlock)) != TSDB_CODE_SUCCESS) {
return code; return code;
} }
return tscBuildAndSendRequest(pSql, NULL); return TSDB_CODE_SUCCESS;
} }
typedef struct SImportFileSupport { typedef struct SImportFileSupport {
@ -1466,13 +1471,14 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
STableComInfo tinfo = tscGetTableInfo(pTableMeta); STableComInfo tinfo = tscGetTableInfo(pTableMeta);
destroyTableNameList(pCmd); SInsertStatementParam* pInsertParam = &pCmd->insertParam;
destroyTableNameList(pInsertParam);
pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pCmd->insertParam.pDataBlocks); pInsertParam->pDataBlocks = tscDestroyBlockArrayList(pInsertParam->pDataBlocks);
if (pCmd->insertParam.pTableBlockHashList == NULL) { if (pInsertParam->pTableBlockHashList == NULL) {
pCmd->insertParam.pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); pInsertParam->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
if (pCmd->insertParam.pTableBlockHashList == NULL) { if (pInsertParam->pTableBlockHashList == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY; code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error; goto _error;
} }
@ -1480,7 +1486,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
STableDataBlocks *pTableDataBlock = NULL; STableDataBlocks *pTableDataBlock = NULL;
int32_t ret = int32_t ret =
tscGetDataBlockFromList(pCmd->insertParam.pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk), tscGetDataBlockFromList(pInsertParam->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pTableDataBlock, NULL); tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pTableDataBlock, NULL);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
pParentSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY; pParentSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -1507,7 +1513,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
strtolower(line, line); strtolower(line, line);
int32_t len = 0; int32_t len = 0;
code = tsParseOneRow(&lineptr, pTableDataBlock, pCmd, tinfo.precision, &len, tokenBuf); code = tsParseOneRow(&lineptr, pTableDataBlock, tinfo.precision, &len, tokenBuf, pInsertParam);
if (code != TSDB_CODE_SUCCESS || pTableDataBlock->numOfParams > 0) { if (code != TSDB_CODE_SUCCESS || pTableDataBlock->numOfParams > 0) {
pSql->res.code = code; pSql->res.code = code;
break; break;
@ -1526,12 +1532,14 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
pParentSql->res.code = code; pParentSql->res.code = code;
if (code == TSDB_CODE_SUCCESS) { if (code == TSDB_CODE_SUCCESS) {
if (count > 0) { if (count > 0) {
code = doPackSendDataBlock(pSql, count, pTableDataBlock); pSql->res.numOfRows = 0;
if (code == TSDB_CODE_SUCCESS) { code = doPackSendDataBlock(pSql, pInsertParam, pTableMeta, count, pTableDataBlock);
return; if (code != TSDB_CODE_SUCCESS) {
} else {
goto _error; goto _error;
} }
tscBuildAndSendRequest(pSql, NULL);
return;
} else { } else {
taos_free_result(pSql); taos_free_result(pSql);
tfree(pSupporter); tfree(pSupporter);
@ -1562,7 +1570,8 @@ void tscImportDataFromFile(SSqlObj *pSql) {
return; return;
} }
assert(TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT) && strlen(pCmd->payload) != 0); SInsertStatementParam* pInsertParam = &pCmd->insertParam;
assert(TSDB_QUERY_HAS_TYPE(pInsertParam->insertType, TSDB_QUERY_TYPE_FILE_INSERT) && strlen(pCmd->payload) != 0);
pCmd->active = pCmd->pQueryInfo; pCmd->active = pCmd->pQueryInfo;
SImportFileSupport *pSupporter = calloc(1, sizeof(SImportFileSupport)); SImportFileSupport *pSupporter = calloc(1, sizeof(SImportFileSupport));


@ -48,6 +48,7 @@ typedef struct SMultiTbStmt {
bool nameSet; bool nameSet;
bool tagSet; bool tagSet;
uint64_t currentUid; uint64_t currentUid;
char *sqlstr;
uint32_t tbNum; uint32_t tbNum;
SStrToken tbname; SStrToken tbname;
SStrToken stbname; SStrToken stbname;
@ -1085,7 +1086,7 @@ static int insertStmtExecute(STscStmt* stmt) {
fillTablesColumnsNull(stmt->pSql); fillTablesColumnsNull(stmt->pSql);
int code = tscMergeTableDataBlocks(stmt->pSql, false); int code = tscMergeTableDataBlocks(&stmt->pSql->cmd.insertParam, false);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
return code; return code;
} }
@ -1185,7 +1186,7 @@ static int insertBatchStmtExecute(STscStmt* pStmt) {
fillTablesColumnsNull(pStmt->pSql); fillTablesColumnsNull(pStmt->pSql);
if ((code = tscMergeTableDataBlocks(pStmt->pSql, false)) != TSDB_CODE_SUCCESS) { if ((code = tscMergeTableDataBlocks(&pStmt->pSql->cmd.insertParam, false)) != TSDB_CODE_SUCCESS) {
return code; return code;
} }
@ -1203,7 +1204,6 @@ static int insertBatchStmtExecute(STscStmt* pStmt) {
return pStmt->pSql->res.code; return pStmt->pSql->res.code;
} }
int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
int32_t ret = TSDB_CODE_SUCCESS; int32_t ret = TSDB_CODE_SUCCESS;
@ -1213,7 +1213,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
} }
int32_t index = 0; int32_t index = 0;
SStrToken sToken = tStrGetToken(pCmd->curSql, &index, false); SStrToken sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n == 0) { if (sToken.n == 0) {
return TSDB_CODE_TSC_INVALID_OPERATION; return TSDB_CODE_TSC_INVALID_OPERATION;
} }
@ -1232,29 +1232,29 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
pStmt->mtb.tagSet = true; pStmt->mtb.tagSet = true;
sToken = tStrGetToken(pCmd->curSql, &index, false); sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n > 0 && sToken.type == TK_VALUES) { if (sToken.n > 0 && (sToken.type == TK_VALUES || sToken.type == TK_LP)) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
if (sToken.n <= 0 || sToken.type != TK_USING) { if (sToken.n <= 0 || sToken.type != TK_USING) {
return TSDB_CODE_TSC_INVALID_OPERATION; return tscSQLSyntaxErrMsg(pCmd->payload, "keywords USING is expected", sToken.z);
} }
sToken = tStrGetToken(pCmd->curSql, &index, false); sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || ((sToken.type != TK_ID) && (sToken.type != TK_STRING))) { if (sToken.n <= 0 || ((sToken.type != TK_ID) && (sToken.type != TK_STRING))) {
return TSDB_CODE_TSC_INVALID_OPERATION; return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z);
} }
pStmt->mtb.stbname = sToken; pStmt->mtb.stbname = sToken;
sToken = tStrGetToken(pCmd->curSql, &index, false); sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || sToken.type != TK_TAGS) { if (sToken.n <= 0 || sToken.type != TK_TAGS) {
return TSDB_CODE_TSC_INVALID_OPERATION; return tscSQLSyntaxErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
} }
sToken = tStrGetToken(pCmd->curSql, &index, false); sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || sToken.type != TK_LP) { if (sToken.n <= 0 || sToken.type != TK_LP) {
return TSDB_CODE_TSC_INVALID_OPERATION; return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
} }
pStmt->mtb.tags = taosArrayInit(4, sizeof(SStrToken)); pStmt->mtb.tags = taosArrayInit(4, sizeof(SStrToken));
@ -1262,7 +1262,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
int32_t loopCont = 1; int32_t loopCont = 1;
while (loopCont) { while (loopCont) {
sToken = tStrGetToken(pCmd->curSql, &index, false); sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0) { if (sToken.n <= 0) {
return TSDB_CODE_TSC_INVALID_OPERATION; return TSDB_CODE_TSC_INVALID_OPERATION;
} }
@ -1285,20 +1285,18 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
return TSDB_CODE_TSC_INVALID_OPERATION; return TSDB_CODE_TSC_INVALID_OPERATION;
} }
sToken = tStrGetToken(pCmd->curSql, &index, false); sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || sToken.type != TK_VALUES) { if (sToken.n <= 0 || (sToken.type != TK_VALUES && sToken.type != TK_LP)) {
return TSDB_CODE_TSC_INVALID_OPERATION; return TSDB_CODE_TSC_INVALID_OPERATION;
} }
pStmt->mtb.values = sToken; pStmt->mtb.values = sToken;
} }
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
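A short aside on what the TK_LP acceptance above enables: a bound insert may now carry an explicit column list after the table name or the TAGS(...) section before VALUES. The following is a minimal client-side sketch only; the super table st(ts TIMESTAMP, v INT) TAGS (gid INT), the child table name d1, and the trimmed error handling are illustrative assumptions, not part of this change.

#include <string.h>
#include <taos.h>

// Sketch: the statement shape the parser change above admits,
// "insert into ? using st tags(?) (ts, v) values(?, ?)".
static int demoBindInsert(TAOS *taos) {
  TAOS_STMT *stmt = taos_stmt_init(taos);
  const char *sql = "insert into ? using st tags(?) (ts, v) values(?, ?)";
  if (taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql)) != 0) return -1;

  int32_t gid = 1;
  TAOS_BIND tag;
  memset(&tag, 0, sizeof(tag));
  tag.buffer_type   = TSDB_DATA_TYPE_INT;
  tag.buffer        = &gid;
  tag.buffer_length = sizeof(gid);

  // bind the child table name and its tag values; the table is auto-created
  if (taos_stmt_set_tbname_tags(stmt, "d1", &tag) != 0) return -1;

  int64_t ts = 1622505600000LL;  // millisecond-precision timestamp
  int32_t v  = 42;
  TAOS_BIND cols[2];
  memset(cols, 0, sizeof(cols));
  cols[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
  cols[0].buffer        = &ts;
  cols[0].buffer_length = sizeof(ts);
  cols[1].buffer_type   = TSDB_DATA_TYPE_INT;
  cols[1].buffer        = &v;
  cols[1].buffer_length = sizeof(v);

  if (taos_stmt_bind_param(stmt, cols) != 0) return -1;
  if (taos_stmt_add_batch(stmt) != 0) return -1;
  if (taos_stmt_execute(stmt) != 0) return -1;
  return taos_stmt_close(stmt);
}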
int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAOS_BIND* tags) { int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAOS_BIND* tags) {
size_t tagNum = taosArrayGetSize(pStmt->mtb.tags); size_t tagNum = taosArrayGetSize(pStmt->mtb.tags);
size_t size = 1048576; size_t size = 1048576;
@ -1373,14 +1371,17 @@ int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAO
break; break;
} }
free(pSql->sqlstr); if (pStmt->mtb.sqlstr == NULL) {
pStmt->mtb.sqlstr = pSql->sqlstr;
} else {
tfree(pSql->sqlstr);
}
pSql->sqlstr = str; pSql->sqlstr = str;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
// interface functions // interface functions
@ -1468,7 +1469,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
if (tscIsInsertData(pSql->sqlstr)) { if (tscIsInsertData(pSql->sqlstr)) {
pStmt->isInsert = true; pStmt->isInsert = true;
pSql->cmd.numOfParams = 0; pSql->cmd.insertParam.numOfParams = 0;
pSql->cmd.batchSize = 0; pSql->cmd.batchSize = 0;
registerSqlObj(pSql); registerSqlObj(pSql);
@ -1561,11 +1562,10 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
} }
pStmt->mtb.nameSet = true; pStmt->mtb.nameSet = true;
pStmt->mtb.tagSet = true;
tscDebug("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr); tscDebug("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
pSql->cmd.numOfParams = 0; pSql->cmd.insertParam.numOfParams = 0;
pSql->cmd.batchSize = 0; pSql->cmd.batchSize = 0;
if (taosHashGetSize(pCmd->insertParam.pTableBlockHashList) > 0) { if (taosHashGetSize(pCmd->insertParam.pTableBlockHashList) > 0) {
@ -1634,6 +1634,7 @@ int taos_stmt_close(TAOS_STMT* stmt) {
taosHashCleanup(pStmt->pSql->cmd.insertParam.pTableBlockHashList); taosHashCleanup(pStmt->pSql->cmd.insertParam.pTableBlockHashList);
pStmt->pSql->cmd.insertParam.pTableBlockHashList = NULL; pStmt->pSql->cmd.insertParam.pTableBlockHashList = NULL;
taosArrayDestroy(pStmt->mtb.tags); taosArrayDestroy(pStmt->mtb.tags);
tfree(pStmt->mtb.sqlstr);
} }
} }
@ -1851,8 +1852,8 @@ int taos_stmt_num_params(TAOS_STMT *stmt, int *nums) {
if (pStmt->isInsert) { if (pStmt->isInsert) {
SSqlObj* pSql = pStmt->pSql; SSqlObj* pSql = pStmt->pSql;
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
*nums = pCmd->numOfParams; *nums = pCmd->insertParam.numOfParams;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} else { } else {
SNormalStmt* normal = &pStmt->normal; SNormalStmt* normal = &pStmt->normal;
@ -28,7 +28,7 @@
#include "tname.h" #include "tname.h"
#include "tscLog.h" #include "tscLog.h"
#include "tscUtil.h" #include "tscUtil.h"
#include "tschemautil.h" #include "qTableMeta.h"
#include "tsclient.h" #include "tsclient.h"
#include "tstrbuild.h" #include "tstrbuild.h"
#include "ttoken.h" #include "ttoken.h"
@ -91,6 +91,7 @@ static int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCm
static int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode); static int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode);
static int32_t parseIntervalOffset(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* offsetToken); static int32_t parseIntervalOffset(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* offsetToken);
static int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* pSliding); static int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* pSliding);
static int32_t validateStateWindowNode(SSqlCmd* pSql, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, bool isStable);
static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExprItem* pItem); static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExprItem* pItem);
@ -196,7 +197,7 @@ static bool validateDebugFlag(int32_t v) {
* is not needed in the final error message. * is not needed in the final error message.
*/ */
static int32_t invalidOperationMsg(char* dstBuffer, const char* errMsg) { static int32_t invalidOperationMsg(char* dstBuffer, const char* errMsg) {
return tscInvalidSQLErrMsg(dstBuffer, errMsg, NULL); return tscInvalidOperationMsg(dstBuffer, errMsg, NULL);
} }
static int setColumnFilterInfoForTimestamp(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tVariant* pVar) { static int setColumnFilterInfoForTimestamp(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tVariant* pVar) {
@ -993,6 +994,59 @@ int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pS
// The following part is used to check for the invalid query expression. // The following part is used to check for the invalid query expression.
return checkInvalidExprForTimeWindow(pCmd, pQueryInfo); return checkInvalidExprForTimeWindow(pCmd, pQueryInfo);
} }
static int32_t validateStateWindowNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, bool isStable) {
const char* msg1 = "invalid column name";
const char* msg3 = "not support state_window with group by ";
const char* msg4 = "function not support for super table query";
SStrToken *col = &(pSqlNode->windowstateVal.col) ;
if (col->z == NULL || col->n <= 0) {
return TSDB_CODE_SUCCESS;
}
if (pQueryInfo->colList == NULL) {
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
}
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pQueryInfo->groupbyExpr.numOfGroupCols = 1;
//TODO(dengyihao): check tag column
if (isStable) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (getColumnIndexByName(pCmd, col, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || index.columnIndex >= numOfCols) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
SGroupbyExpr* pGroupExpr = &pQueryInfo->groupbyExpr;
if (pGroupExpr->columnInfo == NULL) {
pGroupExpr->columnInfo = taosArrayInit(4, sizeof(SColIndex));
}
SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP || pSchema->type == TSDB_DATA_TYPE_FLOAT || pSchema->type == TSDB_DATA_TYPE_DOUBLE) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
tscColumnListInsert(pQueryInfo->colList, index.columnIndex, pTableMeta->id.uid, pSchema);
SColIndex colIndex = { .colIndex = index.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId };
taosArrayPush(pGroupExpr->columnInfo, &colIndex);
pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC;
pQueryInfo->stateWindow = true;
return TSDB_CODE_SUCCESS;
}
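For reference, the clause this new check guards looks like the following from the client side. The connection handling is omitted and the table/column names (a plain table d1001 with an INT status column) are purely illustrative assumptions.

#include <stdio.h>
#include <taos.h>

// state_window(status) splits the rows of a normal table into windows of
// consecutive identical status values; per the checks above it is rejected on
// super tables, on tag/timestamp/float/double columns, and together with GROUP BY.
static void demoStateWindow(TAOS *taos) {
  TAOS_RES *res = taos_query(taos,
      "select count(*), first(ts), last(ts) from d1001 state_window(status)");
  if (taos_errno(res) != 0) {
    printf("query failed: %s\n", taos_errstr(res));
  } else {
    TAOS_ROW row;
    while ((row = taos_fetch_row(res)) != NULL) {
      // each returned row summarizes one state window
      (void)row;
    }
  }
  taos_free_result(res);
}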
int32_t validateSessionNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode * pSqlNode) { int32_t validateSessionNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode * pSqlNode) {
const char* msg1 = "gap should be fixed time window"; const char* msg1 = "gap should be fixed time window";
@ -1027,11 +1081,17 @@ int32_t validateSessionNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode * pS
if (pQueryInfo->sessionWindow.gap != 0 && pQueryInfo->interval.interval != 0) { if (pQueryInfo->sessionWindow.gap != 0 && pQueryInfo->interval.interval != 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
} }
if (pQueryInfo->sessionWindow.gap == 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER; SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (getColumnIndexByName(pCmd, col, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { if (getColumnIndexByName(pCmd, col, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
} }
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pQueryInfo->sessionWindow.primaryColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; pQueryInfo->sessionWindow.primaryColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
@ -3167,6 +3227,9 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo)
return true; return true;
} }
} }
} else if (tscIsSessionWindowQuery(pQueryInfo)) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
return true;
} }
return false; return false;
@ -6472,7 +6535,7 @@ static int32_t doTagFunctionCheck(SQueryInfo* pQueryInfo) {
int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg1 = "functions/columns not allowed in group by query"; const char* msg1 = "functions/columns not allowed in group by query";
const char* msg2 = "projection query on columns not allowed"; const char* msg2 = "projection query on columns not allowed";
const char* msg3 = "group by not allowed on projection query"; const char* msg3 = "group by/session/state_window not allowed on projection query";
const char* msg4 = "retrieve tags not compatible with group by or interval query"; const char* msg4 = "retrieve tags not compatible with group by or interval query";
const char* msg5 = "functions can not be mixed up"; const char* msg5 = "functions can not be mixed up";
@ -6488,6 +6551,9 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
} }
if (tscIsProjectionQuery(pQueryInfo) && tscIsSessionWindowQuery(pQueryInfo)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) { if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
// check if all the tags prj columns belongs to the group by columns // check if all the tags prj columns belongs to the group by columns
@ -6931,7 +6997,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
if (!findColumnIndex) { if (!findColumnIndex) {
tdDestroyKVRowBuilder(&kvRowBuilder); tdDestroyKVRowBuilder(&kvRowBuilder);
return tscInvalidSQLErrMsg(pCmd->payload, "invalid tag name", sToken->z); return tscInvalidOperationMsg(pCmd->payload, "invalid tag name", sToken->z);
} }
} }
} else { } else {
@ -7074,6 +7140,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
return TSDB_CODE_TSC_INVALID_OPERATION; return TSDB_CODE_TSC_INVALID_OPERATION;
} }
if (isTimeWindowQuery(pQueryInfo) && (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) { if (isTimeWindowQuery(pQueryInfo) && (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
} }
@ -7747,6 +7814,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
const char* msg1 = "point interpolation query needs timestamp"; const char* msg1 = "point interpolation query needs timestamp";
const char* msg2 = "too many tables in from clause"; const char* msg2 = "too many tables in from clause";
const char* msg3 = "start(end) time of query range required or time range too large"; const char* msg3 = "start(end) time of query range required or time range too large";
const char* msg4 = "interval query not supported, since the result of sub query not include valid timestamp column";
const char* msg9 = "only tag query not compatible with normal column filter"; const char* msg9 = "only tag query not compatible with normal column filter";
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
@ -7788,6 +7856,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
return TSDB_CODE_TSC_INVALID_OPERATION; return TSDB_CODE_TSC_INVALID_OPERATION;
} }
// validate the query filter condition info
if (pSqlNode->pWhere != NULL) { if (pSqlNode->pWhere != NULL) {
if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) { if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION; return TSDB_CODE_TSC_INVALID_OPERATION;
@ -7799,6 +7868,30 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000; pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000;
} }
} }
// validate the interval info
if (validateIntervalNode(pSql, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
} else {
if (isTimeWindowQuery(pQueryInfo)) {
// check if the first column of the nested query result is a timestamp column
SColumn* pCol = taosArrayGetP(pQueryInfo->colList, 0);
if (pCol->info.type != TSDB_DATA_TYPE_TIMESTAMP) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
if (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
}
// set order by info
STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMeta)) !=
TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
} else { } else {
pQueryInfo->command = TSDB_SQL_SELECT; pQueryInfo->command = TSDB_SQL_SELECT;
@ -7855,7 +7948,10 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
TSDB_CODE_SUCCESS) { TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION; return TSDB_CODE_TSC_INVALID_OPERATION;
} }
// parse the window_state
if (validateStateWindowNode(pCmd, pQueryInfo, pSqlNode, isSTable) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
// set order by info // set order by info
if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMetaInfo->pTableMeta)) != if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMetaInfo->pTableMeta)) !=
TSDB_CODE_SUCCESS) { TSDB_CODE_SUCCESS) {
@ -7896,6 +7992,10 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
* transfer sql functions that need secondary merge into another format * transfer sql functions that need secondary merge into another format
* in dealing with super table queries such as: count/first/last * in dealing with super table queries such as: count/first/last
*/ */
if (validateSessionNode(pCmd, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (isSTable) { if (isSTable) {
tscTansformFuncForSTableQuery(pQueryInfo); tscTansformFuncForSTableQuery(pQueryInfo);
if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) { if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {
@ -7903,10 +8003,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
} }
} }
if (validateSessionNode(pCmd, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
// no result due to invalid query time range // no result due to invalid query time range
if (pQueryInfo->window.skey > pQueryInfo->window.ekey) { if (pQueryInfo->window.skey > pQueryInfo->window.ekey) {
pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
@ -7950,11 +8046,12 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
pQueryInfo->globalMerge = tscIsTwoStageSTableQuery(pCmd, pQueryInfo, 0); pQueryInfo->globalMerge = tscIsTwoStageSTableQuery(pCmd, pQueryInfo, 0);
pQueryInfo->arithmeticOnAgg = tsIsArithmeticQueryOnAggResult(pQueryInfo); pQueryInfo->arithmeticOnAgg = tsIsArithmeticQueryOnAggResult(pQueryInfo);
pQueryInfo->orderProjectQuery = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0);
SExprInfo** p = NULL; SExprInfo** p = NULL;
int32_t numOfExpr = 0; int32_t numOfExpr = 0;
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
code = createProjectionExpr(pQueryInfo, pTableMetaInfo, &p, &numOfExpr); code = createProjectionExpr(pQueryInfo, pTableMetaInfo, &p, &numOfExpr);
if (pQueryInfo->exprList1 == NULL) { if (pQueryInfo->exprList1 == NULL) {
pQueryInfo->exprList1 = taosArrayInit(4, POINTER_BYTES); pQueryInfo->exprList1 = taosArrayInit(4, POINTER_BYTES);
} }
@ -15,15 +15,15 @@
#include "os.h" #include "os.h"
#include "tcmdtype.h" #include "tcmdtype.h"
#include "tlockfree.h"
#include "trpc.h" #include "trpc.h"
#include "tscLocalMerge.h" #include "tscGlobalmerge.h"
#include "tscLog.h" #include "tscLog.h"
#include "tscProfile.h" #include "tscProfile.h"
#include "tscUtil.h" #include "tscUtil.h"
#include "tschemautil.h" #include "qTableMeta.h"
#include "tsclient.h" #include "tsclient.h"
#include "ttimer.h" #include "ttimer.h"
#include "tlockfree.h"
#include "qPlan.h" #include "qPlan.h"
int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0}; int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0};
@ -857,6 +857,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->simpleAgg = query.simpleAgg; pQueryMsg->simpleAgg = query.simpleAgg;
pQueryMsg->pointInterpQuery = query.pointInterpQuery; pQueryMsg->pointInterpQuery = query.pointInterpQuery;
pQueryMsg->needReverseScan = query.needReverseScan; pQueryMsg->needReverseScan = query.needReverseScan;
pQueryMsg->stateWindow = query.stateWindow;
pQueryMsg->numOfTags = htonl(numOfTags); pQueryMsg->numOfTags = htonl(numOfTags);
pQueryMsg->sqlstrLen = htonl(sqlLen); pQueryMsg->sqlstrLen = htonl(sqlLen);
@ -1656,7 +1657,7 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) {
return code; return code;
} }
if (pRes->pLocalMerger == NULL) { // no result from subquery, so abort here directly. if (pRes->pMerger == NULL) { // no result from subquery, so abort here directly.
(*pSql->fp)(pSql->param, pSql, pRes->numOfRows); (*pSql->fp)(pSql->param, pSql, pRes->numOfRows);
return code; return code;
} }
@ -1673,15 +1674,7 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) {
taosArrayPush(group, &tableKeyInfo); taosArrayPush(group, &tableKeyInfo);
taosArrayPush(tableGroupInfo.pGroupList, &group); taosArrayPush(tableGroupInfo.pGroupList, &group);
// todo remove it pQueryInfo->pQInfo = createQInfoFromQueryNode(pQueryInfo, &tableGroupInfo, NULL, NULL, pRes->pMerger, MERGE_STAGE);
SExprInfo* list = calloc(tscNumOfExprs(pQueryInfo), sizeof(SExprInfo));
for(int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) {
SExprInfo* pExprInfo = tscExprGet(pQueryInfo, i);
list[i] = *pExprInfo;
}
pQueryInfo->pQInfo = createQInfoFromQueryNode(pQueryInfo, list, &tableGroupInfo, NULL, NULL, pRes->pLocalMerger, MERGE_STAGE);
tfree(list);
} }
uint64_t localQueryId = 0; uint64_t localQueryId = 0;
@ -2534,8 +2527,8 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg); char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg);
// tag data exists // tag data exists
if (autocreate && pSql->cmd.tagData.dataLen != 0) { if (autocreate && pSql->cmd.insertParam.tagData.dataLen != 0) {
pMsg = serializeTagData(&pSql->cmd.tagData, pMsg); pMsg = serializeTagData(&pSql->cmd.insertParam.tagData, pMsg);
} }
pNew->cmd.payloadLen = (int32_t)(pMsg - (char*)pInfoMsg); pNew->cmd.payloadLen = (int32_t)(pMsg - (char*)pInfoMsg);
@ -627,7 +627,7 @@ static bool hasAdditionalErrorInfo(int32_t code, SSqlCmd *pCmd) {
char *z = NULL; char *z = NULL;
if (len > 0) { if (len > 0) {
z = strstr(pCmd->payload, "invalid SQL"); z = strstr(pCmd->payload, "invalid operation");
if (z == NULL) { if (z == NULL) {
z = strstr(pCmd->payload, "syntax error"); z = strstr(pCmd->payload, "syntax error");
} }
@ -13,7 +13,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include <tschemautil.h>
#include "os.h" #include "os.h"
#include "taosmsg.h" #include "taosmsg.h"
#include "tscLog.h" #include "tscLog.h"
@ -21,7 +21,7 @@
#include "tcompare.h" #include "tcompare.h"
#include "tscLog.h" #include "tscLog.h"
#include "tscSubquery.h" #include "tscSubquery.h"
#include "tschemautil.h" #include "qTableMeta.h"
#include "tsclient.h" #include "tsclient.h"
#include "qUdf.h" #include "qUdf.h"
#include "qUtil.h" #include "qUtil.h"
@ -2450,7 +2450,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
assert(pState->numOfSub > 0); assert(pState->numOfSub > 0);
int32_t ret = tscLocalReducerEnvCreate(pQueryInfo, &pMemoryBuf, pSql->subState.numOfSub, &pDesc, nBufferSize, pSql->self); int32_t ret = tscCreateGlobalMergerEnv(pQueryInfo, &pMemoryBuf, pSql->subState.numOfSub, &pDesc, nBufferSize, pSql->self);
if (ret != 0) { if (ret != 0) {
pRes->code = ret; pRes->code = ret;
tscAsyncResultOnError(pSql); tscAsyncResultOnError(pSql);
@ -2463,7 +2463,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
if (pSql->pSubs == NULL) { if (pSql->pSubs == NULL) {
tfree(pSql->pSubs); tfree(pSql->pSubs);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc,pState->numOfSub); tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc,pState->numOfSub);
tscAsyncResultOnError(pSql); tscAsyncResultOnError(pSql);
return ret; return ret;
@ -2530,13 +2530,13 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
tscError("0x%"PRIx64" failed to prepare subquery structure and launch subqueries", pSql->self); tscError("0x%"PRIx64" failed to prepare subquery structure and launch subqueries", pSql->self);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pState->numOfSub); tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc, pState->numOfSub);
doCleanupSubqueries(pSql, i); doCleanupSubqueries(pSql, i);
return pRes->code; // free all allocated resource return pRes->code; // free all allocated resource
} }
if (pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED) { if (pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED) {
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pState->numOfSub); tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc, pState->numOfSub);
doCleanupSubqueries(pSql, i); doCleanupSubqueries(pSql, i);
return pRes->code; return pRes->code;
} }
@ -2701,7 +2701,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
tstrerror(pParentSql->res.code)); tstrerror(pParentSql->res.code));
// release allocated resource // release allocated resource
tscLocalReducerEnvDestroy(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor, tscDestroyGlobalMergerEnv(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor,
pState->numOfSub); pState->numOfSub);
tscFreeRetrieveSup(pSql); tscFreeRetrieveSup(pSql);
@ -2776,7 +2776,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
SQueryInfo *pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd); SQueryInfo *pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd);
tscClearInterpInfo(pPQueryInfo); tscClearInterpInfo(pPQueryInfo);
code = tscCreateLocalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, pPQueryInfo, &pParentSql->res.pLocalMerger, pParentSql->self); code = tscCreateGlobalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, pPQueryInfo, &pParentSql->res.pMerger, pParentSql->self);
pParentSql->res.code = code; pParentSql->res.code = code;
if (code == TSDB_CODE_SUCCESS && trsupport->pExtMemBuffer == NULL) { if (code == TSDB_CODE_SUCCESS && trsupport->pExtMemBuffer == NULL) {
@ -3525,9 +3525,8 @@ static UNUSED_FUNC bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql) {
return hasData; return hasData;
} }
// todo remove pExprs void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pSourceOperator,
void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, SExprInfo* pExprs, STableGroupInfo* pTableGroupInfo, char* sql, void* merger, int32_t stage) {
SOperatorInfo* pSourceOperator, char* sql, void* merger, int32_t stage) {
assert(pQueryInfo != NULL); assert(pQueryInfo != NULL);
SQInfo *pQInfo = (SQInfo *)calloc(1, sizeof(SQInfo)); SQInfo *pQInfo = (SQInfo *)calloc(1, sizeof(SQInfo));
if (pQInfo == NULL) { if (pQInfo == NULL) {
@ -14,18 +14,18 @@
*/ */
#include "tscUtil.h" #include "tscUtil.h"
#include "tsched.h"
#include "hash.h" #include "hash.h"
#include "os.h" #include "os.h"
#include "taosmsg.h" #include "taosmsg.h"
#include "texpr.h" #include "texpr.h"
#include "tkey.h" #include "tkey.h"
#include "tmd5.h" #include "tmd5.h"
#include "tscLocalMerge.h" #include "tscGlobalmerge.h"
#include "tscLog.h" #include "tscLog.h"
#include "tscProfile.h" #include "tscProfile.h"
#include "tscSubquery.h" #include "tscSubquery.h"
#include "tschemautil.h" #include "tsched.h"
#include "qTableMeta.h"
#include "tsclient.h" #include "tsclient.h"
#include "ttimer.h" #include "ttimer.h"
#include "ttokendef.h" #include "ttokendef.h"
@ -456,6 +456,9 @@ bool tscIsTWAQuery(SQueryInfo* pQueryInfo) {
return false; return false;
} }
bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo) {
return pQueryInfo->sessionWindow.gap > 0;
}
bool tscNeedReverseScan(SQueryInfo* pQueryInfo) { bool tscNeedReverseScan(SQueryInfo* pQueryInfo) {
size_t numOfExprs = tscNumOfExprs(pQueryInfo); size_t numOfExprs = tscNumOfExprs(pQueryInfo);
@ -738,9 +741,10 @@ static SColumnInfo* extractColumnInfoFromResult(SArray* pTableCols) {
} }
typedef struct SDummyInputInfo { typedef struct SDummyInputInfo {
SSDataBlock *block; SSDataBlock *block;
SSqlObj *pSql; // refactor: remove it STableQueryInfo *pTableQueryInfo;
int32_t numOfFilterCols; SSqlObj *pSql; // refactor: remove it
int32_t numOfFilterCols;
SSingleColumnFilterInfo *pFilterInfo; SSingleColumnFilterInfo *pFilterInfo;
} SDummyInputInfo; } SDummyInputInfo;
@ -757,9 +761,10 @@ typedef struct SJoinOperatorInfo {
SRspResultInfo resultInfo; // todo refactor, add this info for each operator SRspResultInfo resultInfo; // todo refactor, add this info for each operator
} SJoinOperatorInfo; } SJoinOperatorInfo;
static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock) { static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols) {
int32_t offset = 0; int32_t offset = 0;
char* pData = pRes->data; char* pData = pRes->data;
for(int32_t i = 0; i < pBlock->info.numOfCols; ++i) { for(int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, i); SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, i);
if (pData != NULL) { if (pData != NULL) {
@ -771,6 +776,26 @@ static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock) {
offset += pColData->info.bytes; offset += pColData->info.bytes;
} }
// filter data if needed
if (numOfFilterCols > 0) {
doSetFilterColumnInfo(pFilterInfo, numOfFilterCols, pBlock);
int8_t* p = calloc(pBlock->info.rows, sizeof(int8_t));
bool all = doFilterDataBlock(pFilterInfo, numOfFilterCols, pBlock->info.rows, p);
if (!all) {
doCompactSDataBlock(pBlock, pBlock->info.rows, p);
}
tfree(p);
}
// todo refactor: extract method
// set the timestamp range of current result data block
SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, 0);
if (pColData->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
pBlock->info.window.skey = ((int64_t*)pColData->pData)[0];
pBlock->info.window.ekey = ((int64_t*)pColData->pData)[pBlock->info.rows-1];
}
pRes->numOfRows = 0; pRes->numOfRows = 0;
} }
@ -786,22 +811,13 @@ SSDataBlock* doGetDataBlock(void* param, bool* newgroup) {
SSqlRes* pRes = &pSql->res; SSqlRes* pRes = &pSql->res;
SSDataBlock* pBlock = pInput->block; SSDataBlock* pBlock = pInput->block;
if (pOperator->pRuntimeEnv != NULL) {
pOperator->pRuntimeEnv->current = pInput->pTableQueryInfo;
}
pBlock->info.rows = pRes->numOfRows; pBlock->info.rows = pRes->numOfRows;
if (pRes->numOfRows != 0) { if (pRes->numOfRows != 0) {
doSetupSDataBlock(pRes, pBlock); doSetupSDataBlock(pRes, pBlock, pInput->pFilterInfo, pInput->numOfFilterCols);
if (pInput->numOfFilterCols > 0) {
doSetFilterColumnInfo(pInput->pFilterInfo, pInput->numOfFilterCols, pBlock);
int8_t* p = calloc(pBlock->info.rows, sizeof(int8_t));
bool all = doFilterDataBlock(pInput->pFilterInfo, pInput->numOfFilterCols, pBlock->info.rows, p);
if (!all) {
doCompactSDataBlock(pBlock, pBlock->info.rows, p);
}
tfree(p);
}
*newgroup = false; *newgroup = false;
return pBlock; return pBlock;
} }
@ -816,11 +832,29 @@ SSDataBlock* doGetDataBlock(void* param, bool* newgroup) {
} }
pBlock->info.rows = pRes->numOfRows; pBlock->info.rows = pRes->numOfRows;
doSetupSDataBlock(pRes, pBlock); doSetupSDataBlock(pRes, pBlock, pInput->pFilterInfo, pInput->numOfFilterCols);
*newgroup = false; *newgroup = false;
return pBlock; return pBlock;
} }
static void fetchNextBlockIfCompleted(SOperatorInfo* pOperator, bool* newgroup) {
SJoinOperatorInfo* pJoinInfo = pOperator->info;
for (int32_t i = 0; i < pOperator->numOfUpstream; ++i) {
SJoinStatus* pStatus = &pJoinInfo->status[i];
if (pStatus->pBlock == NULL || pStatus->index >= pStatus->pBlock->info.rows) {
pStatus->pBlock = pOperator->upstream[i]->exec(pOperator->upstream[i], newgroup);
pStatus->index = 0;
if (pStatus->pBlock == NULL) {
pOperator->status = OP_EXEC_DONE;
pJoinInfo->resultInfo.total += pJoinInfo->pRes->info.rows;
break;
}
}
}
}
SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) { SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) {
SOperatorInfo *pOperator = (SOperatorInfo*) param; SOperatorInfo *pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) { if (pOperator->status == OP_EXEC_DONE) {
@ -833,19 +867,9 @@ SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) {
pJoinInfo->pRes->info.rows = 0; pJoinInfo->pRes->info.rows = 0;
while(1) { while(1) {
for (int32_t i = 0; i < pOperator->numOfUpstream; ++i) { fetchNextBlockIfCompleted(pOperator, newgroup);
SJoinStatus* pStatus = &pJoinInfo->status[i]; if (pOperator->status == OP_EXEC_DONE) {
if (pStatus->pBlock == NULL || pStatus->index >= pStatus->pBlock->info.rows) { return pJoinInfo->pRes;
pStatus->pBlock = pOperator->upstream[i]->exec(pOperator->upstream[i], newgroup);
pStatus->index = 0;
if (pStatus->pBlock == NULL) {
pOperator->status = OP_EXEC_DONE;
pJoinInfo->resultInfo.total += pJoinInfo->pRes->info.rows;
return pJoinInfo->pRes;
}
}
} }
SJoinStatus* st0 = &pJoinInfo->status[0]; SJoinStatus* st0 = &pJoinInfo->status[0];
@ -864,8 +888,12 @@ SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) {
if (ts[st->index] < ts0[st0->index]) { // less than the first if (ts[st->index] < ts0[st0->index]) { // less than the first
prefixEqual = false; prefixEqual = false;
if ((++(st->index)) >= st->pBlock->info.rows) { if ((++(st->index)) >= st->pBlock->info.rows) {
break; fetchNextBlockIfCompleted(pOperator, newgroup);
if (pOperator->status == OP_EXEC_DONE) {
return pJoinInfo->pRes;
}
} }
} else if (ts[st->index] > ts0[st0->index]) { // greater than the first; } else if (ts[st->index] > ts0[st0->index]) { // greater than the first;
if (prefixEqual == true) { if (prefixEqual == true) {
@ -873,12 +901,19 @@ SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) {
for (int32_t j = 0; j < i; ++j) { for (int32_t j = 0; j < i; ++j) {
SJoinStatus* stx = &pJoinInfo->status[j]; SJoinStatus* stx = &pJoinInfo->status[j];
if ((++(stx->index)) >= stx->pBlock->info.rows) { if ((++(stx->index)) >= stx->pBlock->info.rows) {
break;
fetchNextBlockIfCompleted(pOperator, newgroup);
if (pOperator->status == OP_EXEC_DONE) {
return pJoinInfo->pRes;
}
} }
} }
} else { } else {
if ((++(st0->index)) >= st0->pBlock->info.rows) { if ((++(st0->index)) >= st0->pBlock->info.rows) {
break; fetchNextBlockIfCompleted(pOperator, newgroup);
if (pOperator->status == OP_EXEC_DONE) {
return pJoinInfo->pRes;
}
} }
} }
} }
@ -973,11 +1008,14 @@ static void destroyDummyInputOperator(void* param, int32_t numOfOutput) {
// todo this operator serves as the adapter for Operator tree and SqlRes result, remove it later // todo this operator serves as the adapter for Operator tree and SqlRes result, remove it later
SOperatorInfo* createDummyInputOperator(SSqlObj* pSql, SSchema* pSchema, int32_t numOfCols, SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols) { SOperatorInfo* createDummyInputOperator(SSqlObj* pSql, SSchema* pSchema, int32_t numOfCols, SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols) {
assert(numOfCols > 0); assert(numOfCols > 0);
STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};
SDummyInputInfo* pInfo = calloc(1, sizeof(SDummyInputInfo)); SDummyInputInfo* pInfo = calloc(1, sizeof(SDummyInputInfo));
pInfo->pSql = pSql; pInfo->pSql = pSql;
pInfo->pFilterInfo = pFilterInfo; pInfo->pFilterInfo = pFilterInfo;
pInfo->numOfFilterCols = numOfFilterCols; pInfo->numOfFilterCols = numOfFilterCols;
pInfo->pTableQueryInfo = createTmpTableQueryInfo(win);
pInfo->block = calloc(numOfCols, sizeof(SSDataBlock)); pInfo->block = calloc(numOfCols, sizeof(SSDataBlock));
pInfo->block->info.numOfCols = numOfCols; pInfo->block->info.numOfCols = numOfCols;
@ -1063,12 +1101,31 @@ void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
pRes->completed = (pRes->numOfRows == 0); pRes->completed = (pRes->numOfRows == 0);
} }
static void createInputDataFilterInfo(SQueryInfo* px, int32_t numOfCol1, int32_t* numOfFilterCols, SSingleColumnFilterInfo** pFilterInfo) {
SColumnInfo* tableCols = calloc(numOfCol1, sizeof(SColumnInfo));
for(int32_t i = 0; i < numOfCol1; ++i) {
SColumn* pCol = taosArrayGetP(px->colList, i);
if (pCol->info.flist.numOfFilters > 0) {
(*numOfFilterCols) += 1;
}
tableCols[i] = pCol->info;
}
if ((*numOfFilterCols) > 0) {
doCreateFilterInfo(tableCols, numOfCol1, (*numOfFilterCols), pFilterInfo, 0);
}
tfree(tableCols);
}
void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQueryInfo* px, SSqlRes* pOutput) { void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQueryInfo* px, SSqlRes* pOutput) {
// handle the following query process // handle the following query process
if (px->pQInfo == NULL) { if (px->pQInfo == NULL) {
SColumnInfo* pColumnInfo = extractColumnInfoFromResult(px->colList); SColumnInfo* pColumnInfo = extractColumnInfoFromResult(px->colList);
SSchema* pSchema = tscGetTableSchema(px->pTableMetaInfo[0]->pTableMeta); STableMeta* pTableMeta = tscGetMetaInfo(px, 0)->pTableMeta;
SSchema* pSchema = tscGetTableSchema(pTableMeta);
STableGroupInfo tableGroupInfo = { STableGroupInfo tableGroupInfo = {
.numOfTables = 1, .numOfTables = 1,
@ -1085,23 +1142,11 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
taosArrayPush(tableGroupInfo.pGroupList, &group); taosArrayPush(tableGroupInfo.pGroupList, &group);
// if it is a join query, create join operator here // if it is a join query, create join operator here
int32_t numOfCol1 = px->pTableMetaInfo[0]->pTableMeta->tableInfo.numOfColumns; int32_t numOfCol1 = pTableMeta->tableInfo.numOfColumns;
int32_t numOfFilterCols = 0; int32_t numOfFilterCols = 0;
SColumnInfo* tableCols = calloc(numOfCol1, sizeof(SColumnInfo));
for(int32_t i = 0; i < numOfCol1; ++i) {
SColumn* pCol = taosArrayGetP(px->colList, i);
if (pCol->info.flist.numOfFilters > 0) {
numOfFilterCols += 1;
}
tableCols[i] = pCol->info;
}
SSingleColumnFilterInfo* pFilterInfo = NULL; SSingleColumnFilterInfo* pFilterInfo = NULL;
if (numOfFilterCols > 0) { createInputDataFilterInfo(px, numOfCol1, &numOfFilterCols, &pFilterInfo);
doCreateFilterInfo(tableCols, numOfCol1, numOfFilterCols, &pFilterInfo, 0);
}
SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilterInfo, numOfFilterCols); SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilterInfo, numOfFilterCols);
@ -1117,24 +1162,14 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
int32_t offset = pSourceOperator->numOfOutput; int32_t offset = pSourceOperator->numOfOutput;
for(int32_t i = 1; i < px->numOfTables; ++i) { for(int32_t i = 1; i < px->numOfTables; ++i) {
SSchema* pSchema1 = tscGetTableSchema(px->pTableMetaInfo[i]->pTableMeta); STableMeta* pTableMeta1 = tscGetMetaInfo(px, i)->pTableMeta;
int32_t n = px->pTableMetaInfo[i]->pTableMeta->tableInfo.numOfColumns;
SSchema* pSchema1 = tscGetTableSchema(pTableMeta1);
int32_t n = pTableMeta1->tableInfo.numOfColumns;
int32_t numOfFilterCols1 = 0; int32_t numOfFilterCols1 = 0;
SColumnInfo* tableCols1 = calloc(numOfCol1, sizeof(SColumnInfo));
for(int32_t j = 0; j < numOfCol1; ++j) {
SColumn* pCol = taosArrayGetP(px->colList, j);
if (pCol->info.flist.numOfFilters > 0) {
numOfFilterCols += 1;
}
tableCols1[j] = pCol->info;
}
SSingleColumnFilterInfo* pFilterInfo1 = NULL; SSingleColumnFilterInfo* pFilterInfo1 = NULL;
if (numOfFilterCols1 > 0) { createInputDataFilterInfo(px, numOfCol1, &numOfFilterCols1, &pFilterInfo1);
doCreateFilterInfo(tableCols1, numOfCol1, numOfFilterCols1, &pFilterInfo1, 0);
}
p[i] = createDummyInputOperator(pSqlObjList[i], pSchema1, n, pFilterInfo1, numOfFilterCols1); p[i] = createDummyInputOperator(pSqlObjList[i], pSchema1, n, pFilterInfo1, numOfFilterCols1);
memcpy(&schema[offset], pSchema1, n * sizeof(SSchema)); memcpy(&schema[offset], pSchema1, n * sizeof(SSchema));
@ -1149,11 +1184,25 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
memcpy(schema, pSchema, numOfCol1*sizeof(SSchema)); memcpy(schema, pSchema, numOfCol1*sizeof(SSchema));
} }
SExprInfo* exprInfo = NULL; // update the exprinfo
px->pQInfo = createQInfoFromQueryNode(px, exprInfo, &tableGroupInfo, pSourceOperator, NULL, NULL, MASTER_SCAN); int32_t numOfOutput = (int32_t)tscNumOfExprs(px);
for(int32_t i = 0; i < numOfOutput; ++i) {
SExprInfo* pex = taosArrayGetP(px->exprList, i);
int32_t colId = pex->base.colInfo.colId;
for(int32_t j = 0; j < pSourceOperator->numOfOutput; ++j) {
if (colId == schema[j].colId) {
pex->base.colInfo.colIndex = j;
break;
}
}
}
px->pQInfo = createQInfoFromQueryNode(px, &tableGroupInfo, pSourceOperator, NULL, NULL, MASTER_SCAN);
tfree(pColumnInfo); tfree(pColumnInfo);
tfree(schema); tfree(schema);
tfree(exprInfo);
// set the pRuntimeEnv for pSourceOperator
pSourceOperator->pRuntimeEnv = &px->pQInfo->runtimeEnv;
} }
uint64_t qId = 0; uint64_t qId = 0;
@ -1236,31 +1285,34 @@ void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) {
pCmd->active = NULL; pCmd->active = NULL;
} }
void destroyTableNameList(SSqlCmd* pCmd) { void destroyTableNameList(SInsertStatementParam* pInsertParam) {
if (pCmd->insertParam.numOfTables == 0) { if (pInsertParam->numOfTables == 0) {
assert(pCmd->insertParam.pTableNameList == NULL); assert(pInsertParam->pTableNameList == NULL);
return; return;
} }
for(int32_t i = 0; i < pCmd->insertParam.numOfTables; ++i) { for(int32_t i = 0; i < pInsertParam->numOfTables; ++i) {
tfree(pCmd->insertParam.pTableNameList[i]); tfree(pInsertParam->pTableNameList[i]);
} }
pCmd->insertParam.numOfTables = 0; pInsertParam->numOfTables = 0;
tfree(pCmd->insertParam.pTableNameList); tfree(pInsertParam->pTableNameList);
} }
void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) { void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) {
pCmd->command = 0; pCmd->command = 0;
pCmd->numOfCols = 0; pCmd->numOfCols = 0;
pCmd->count = 0; pCmd->count = 0;
pCmd->curSql = NULL;
pCmd->msgType = 0; pCmd->msgType = 0;
destroyTableNameList(pCmd); pCmd->insertParam.sql = NULL;
destroyTableNameList(&pCmd->insertParam);
pCmd->insertParam.pTableBlockHashList = tscDestroyBlockHashTable(pCmd->insertParam.pTableBlockHashList, clearCachedMeta); pCmd->insertParam.pTableBlockHashList = tscDestroyBlockHashTable(pCmd->insertParam.pTableBlockHashList, clearCachedMeta);
pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pCmd->insertParam.pDataBlocks); pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pCmd->insertParam.pDataBlocks);
tfree(pCmd->insertParam.tagData.data);
pCmd->insertParam.tagData.dataLen = 0;
tscFreeQueryInfo(pCmd, clearCachedMeta); tscFreeQueryInfo(pCmd, clearCachedMeta);
if (pCmd->pTableMetaMap != NULL) { if (pCmd->pTableMetaMap != NULL) {
@ -1279,8 +1331,8 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) {
void tscFreeSqlResult(SSqlObj* pSql) { void tscFreeSqlResult(SSqlObj* pSql) {
SSqlRes* pRes = &pSql->res; SSqlRes* pRes = &pSql->res;
tscDestroyLocalMerger(pRes->pLocalMerger); tscDestroyGlobalMerger(pRes->pMerger);
pRes->pLocalMerger = NULL; pRes->pMerger = NULL;
tscDestroyResPointerInfo(pRes); tscDestroyResPointerInfo(pRes);
memset(&pSql->res, 0, sizeof(SSqlRes)); memset(&pSql->res, 0, sizeof(SSqlRes));
@ -1373,9 +1425,6 @@ void tscFreeSqlObj(SSqlObj* pSql) {
tscFreeSqlResult(pSql); tscFreeSqlResult(pSql);
tscResetSqlCmd(pCmd, false); tscResetSqlCmd(pCmd, false);
tfree(pCmd->tagData.data);
pCmd->tagData.dataLen = 0;
memset(pCmd->payload, 0, (size_t)pCmd->allocSize); memset(pCmd->payload, 0, (size_t)pCmd->allocSize);
tfree(pCmd->payload); tfree(pCmd->payload);
pCmd->allocSize = 0; pCmd->allocSize = 0;
@ -1717,37 +1766,36 @@ static int32_t getRowExpandSize(STableMeta* pTableMeta) {
return result; return result;
} }
static void extractTableNameList(SSqlCmd* pCmd, bool freeBlockMap) { static void extractTableNameList(SInsertStatementParam *pInsertParam, bool freeBlockMap) {
pCmd->insertParam.numOfTables = (int32_t) taosHashGetSize(pCmd->insertParam.pTableBlockHashList); pInsertParam->numOfTables = (int32_t) taosHashGetSize(pInsertParam->pTableBlockHashList);
if (pCmd->insertParam.pTableNameList == NULL) { if (pInsertParam->pTableNameList == NULL) {
pCmd->insertParam.pTableNameList = calloc(pCmd->insertParam.numOfTables, POINTER_BYTES); pInsertParam->pTableNameList = calloc(pInsertParam->numOfTables, POINTER_BYTES);
} else { } else {
memset(pCmd->insertParam.pTableNameList, 0, pCmd->insertParam.numOfTables * POINTER_BYTES); memset(pInsertParam->pTableNameList, 0, pInsertParam->numOfTables * POINTER_BYTES);
} }
STableDataBlocks **p1 = taosHashIterate(pCmd->insertParam.pTableBlockHashList, NULL); STableDataBlocks **p1 = taosHashIterate(pInsertParam->pTableBlockHashList, NULL);
int32_t i = 0; int32_t i = 0;
while(p1) { while(p1) {
STableDataBlocks* pBlocks = *p1; STableDataBlocks* pBlocks = *p1;
tfree(pCmd->insertParam.pTableNameList[i]); tfree(pInsertParam->pTableNameList[i]);
pCmd->insertParam.pTableNameList[i++] = tNameDup(&pBlocks->tableName); pInsertParam->pTableNameList[i++] = tNameDup(&pBlocks->tableName);
p1 = taosHashIterate(pCmd->insertParam.pTableBlockHashList, p1); p1 = taosHashIterate(pInsertParam->pTableBlockHashList, p1);
} }
if (freeBlockMap) { if (freeBlockMap) {
pCmd->insertParam.pTableBlockHashList = tscDestroyBlockHashTable(pCmd->insertParam.pTableBlockHashList, false); pInsertParam->pTableBlockHashList = tscDestroyBlockHashTable(pInsertParam->pTableBlockHashList, false);
} }
} }
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) { int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBlockMap) {
const int INSERT_HEAD_SIZE = sizeof(SMsgDesc) + sizeof(SSubmitMsg); const int INSERT_HEAD_SIZE = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
SSqlCmd* pCmd = &pSql->cmd;
void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
SArray* pVnodeDataBlockList = taosArrayInit(8, POINTER_BYTES); SArray* pVnodeDataBlockList = taosArrayInit(8, POINTER_BYTES);
STableDataBlocks** p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, NULL); STableDataBlocks** p = taosHashIterate(pInsertParam->pTableBlockHashList, NULL);
STableDataBlocks* pOneTableBlock = *p; STableDataBlocks* pOneTableBlock = *p;
while(pOneTableBlock) { while(pOneTableBlock) {
@ -1760,7 +1808,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE, int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList); INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pSql->self, ret); tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pInsertParam->objectId, ret);
taosHashCleanup(pVnodeDataBlockHashList); taosHashCleanup(pVnodeDataBlockHashList);
tscDestroyBlockArrayList(pVnodeDataBlockList); tscDestroyBlockArrayList(pVnodeDataBlockList);
return ret; return ret;
@ -1778,7 +1826,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
dataBuf->pData = tmp; dataBuf->pData = tmp;
memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size); memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size);
} else { // failed to allocate memory, free already allocated memory and return error code } else { // failed to allocate memory, free already allocated memory and return error code
tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pSql->self, dataBuf->nAllocSize); tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pInsertParam->objectId, dataBuf->nAllocSize);
taosHashCleanup(pVnodeDataBlockHashList); taosHashCleanup(pVnodeDataBlockHashList);
tscDestroyBlockArrayList(pVnodeDataBlockList); tscDestroyBlockArrayList(pVnodeDataBlockList);
@ -1791,7 +1839,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
tscSortRemoveDataBlockDupRows(pOneTableBlock); tscSortRemoveDataBlockDupRows(pOneTableBlock);
char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1); char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql->self, tNameGetTableName(&pOneTableBlock->tableName), tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName),
pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey)); pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
@ -1803,7 +1851,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
pBlocks->schemaLen = 0; pBlocks->schemaLen = 0;
// erase the empty space reserved for binary data // erase the empty space reserved for binary data
int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pCmd->insertParam.schemaAttached); int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pInsertParam->schemaAttached);
assert(finalLen <= len); assert(finalLen <= len);
dataBuf->size += (finalLen + sizeof(SSubmitBlk)); dataBuf->size += (finalLen + sizeof(SSubmitBlk));
@ -1815,10 +1863,10 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
pBlocks->numOfRows = 0; pBlocks->numOfRows = 0;
}else { }else {
tscDebug("0x%"PRIx64" table %s data block is empty", pSql->self, pOneTableBlock->tableName.tname); tscDebug("0x%"PRIx64" table %s data block is empty", pInsertParam->objectId, pOneTableBlock->tableName.tname);
} }
p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, p); p = taosHashIterate(pInsertParam->pTableBlockHashList, p);
if (p == NULL) { if (p == NULL) {
break; break;
} }
@ -1826,10 +1874,10 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
pOneTableBlock = *p; pOneTableBlock = *p;
} }
extractTableNameList(pCmd, freeBlockMap); extractTableNameList(pInsertParam, freeBlockMap);
// free the table data blocks; // free the table data blocks;
pCmd->insertParam.pDataBlocks = pVnodeDataBlockList; pInsertParam->pDataBlocks = pVnodeDataBlockList;
taosHashCleanup(pVnodeDataBlockHashList); taosHashCleanup(pVnodeDataBlockHashList);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
@ -3158,6 +3206,15 @@ void tscResetForNextRetrieve(SSqlRes* pRes) {
pRes->numOfRows = 0; pRes->numOfRows = 0;
} }
void tscInitResForMerge(SSqlRes* pRes) {
pRes->qId = 1; // hack to pass the safety check in fetch_row function
pRes->rspType = 0; // used as a flag to denote if taos_retrieved() has been called yet
tscResetForNextRetrieve(pRes);
assert(pRes->pMerger != NULL);
pRes->data = pRes->pMerger->buf;
}
void registerSqlObj(SSqlObj* pSql) { void registerSqlObj(SSqlObj* pSql) {
taosAcquireRef(tscRefId, pSql->pTscObj->rid); taosAcquireRef(tscRefId, pSql->pTscObj->rid);
pSql->self = taosAddRef(tscObjRef, pSql); pSql->self = taosAddRef(tscObjRef, pSql);
@ -3179,14 +3236,6 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in
SSqlCmd* pCmd = &pNew->cmd; SSqlCmd* pCmd = &pNew->cmd;
pCmd->command = cmd; pCmd->command = cmd;
int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData);
if (code != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" new subquery failed, unable to malloc tag data, tableIndex:%d", pSql->self, 0);
free(pNew);
return NULL;
}
if (tscAddQueryInfo(pCmd) != TSDB_CODE_SUCCESS) { if (tscAddQueryInfo(pCmd) != TSDB_CODE_SUCCESS) {
#ifdef __APPLE__ #ifdef __APPLE__
// to satisfy later tsem_destroy in taos_free_result // to satisfy later tsem_destroy in taos_free_result
@ -3284,8 +3333,6 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pnCmd->insertParam.numOfTables = 0; pnCmd->insertParam.numOfTables = 0;
pnCmd->insertParam.pTableNameList = NULL; pnCmd->insertParam.pTableNameList = NULL;
pnCmd->insertParam.pTableBlockHashList = NULL; pnCmd->insertParam.pTableBlockHashList = NULL;
pnCmd->tagData.data = NULL;
pnCmd->tagData.dataLen = 0;
if (tscAddQueryInfo(pnCmd) != TSDB_CODE_SUCCESS) { if (tscAddQueryInfo(pnCmd) != TSDB_CODE_SUCCESS) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -3456,7 +3503,11 @@ void doExecuteQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
tscHandleMasterSTableQuery(pSql); tscHandleMasterSTableQuery(pSql);
tscUnlockByThread(&pSql->squeryLock); tscUnlockByThread(&pSql->squeryLock);
} else if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) { } else if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) {
tscHandleMultivnodeInsert(pSql); if (TSDB_QUERY_HAS_TYPE(pSql->cmd.insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) {
tscImportDataFromFile(pSql);
} else {
tscHandleMultivnodeInsert(pSql);
}
} else if (pSql->cmd.command > TSDB_SQL_LOCAL) { } else if (pSql->cmd.command > TSDB_SQL_LOCAL) {
tscProcessLocalCmd(pSql); tscProcessLocalCmd(pSql);
} else { // send request to server directly } else { // send request to server directly
@ -3674,10 +3725,10 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR; return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
} }
int32_t tscInvalidSQLErrMsg(char* msg, const char* additionalInfo, const char* sql) { int32_t tscInvalidOperationMsg(char* msg, const char* additionalInfo, const char* sql) {
const char* msgFormat1 = "invalid SQL: %s"; const char* msgFormat1 = "invalid operation: %s";
const char* msgFormat2 = "invalid SQL: \'%s\' (%s)"; const char* msgFormat2 = "invalid operation: \'%s\' (%s)";
const char* msgFormat3 = "invalid SQL: \'%s\'"; const char* msgFormat3 = "invalid operation: \'%s\'";
const int32_t BACKWARD_CHAR_STEP = 0; const int32_t BACKWARD_CHAR_STEP = 0;
@ -4281,11 +4332,13 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
pQueryAttr->simpleAgg = isSimpleAggregate(pQueryInfo); pQueryAttr->simpleAgg = isSimpleAggregate(pQueryInfo);
pQueryAttr->needReverseScan = tscNeedReverseScan(pQueryInfo); pQueryAttr->needReverseScan = tscNeedReverseScan(pQueryInfo);
pQueryAttr->stableQuery = QUERY_IS_STABLE_QUERY(pQueryInfo->type); pQueryAttr->stableQuery = QUERY_IS_STABLE_QUERY(pQueryInfo->type);
pQueryAttr->groupbyColumn = tscGroupbyColumn(pQueryInfo); pQueryAttr->groupbyColumn = (!pQueryInfo->stateWindow) && tscGroupbyColumn(pQueryInfo);
pQueryAttr->queryBlockDist = isBlockDistQuery(pQueryInfo); pQueryAttr->queryBlockDist = isBlockDistQuery(pQueryInfo);
pQueryAttr->pointInterpQuery = tscIsPointInterpQuery(pQueryInfo); pQueryAttr->pointInterpQuery = tscIsPointInterpQuery(pQueryInfo);
pQueryAttr->timeWindowInterpo = timeWindowInterpoRequired(pQueryInfo); pQueryAttr->timeWindowInterpo = timeWindowInterpoRequired(pQueryInfo);
pQueryAttr->distinctTag = pQueryInfo->distinctTag; pQueryAttr->distinctTag = pQueryInfo->distinctTag;
pQueryAttr->sw = pQueryInfo->sessionWindow;
pQueryAttr->stateWindow = pQueryInfo->stateWindow;
pQueryAttr->numOfCols = numOfCols; pQueryAttr->numOfCols = numOfCols;
pQueryAttr->numOfOutput = numOfOutput; pQueryAttr->numOfOutput = numOfOutput;
@ -4293,7 +4346,6 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
pQueryAttr->slimit = pQueryInfo->slimit; pQueryAttr->slimit = pQueryInfo->slimit;
pQueryAttr->order = pQueryInfo->order; pQueryAttr->order = pQueryInfo->order;
pQueryAttr->fillType = pQueryInfo->fillType; pQueryAttr->fillType = pQueryInfo->fillType;
pQueryAttr->groupbyColumn = tscGroupbyColumn(pQueryInfo);
pQueryAttr->havingNum = pQueryInfo->havingFieldNum; pQueryAttr->havingNum = pQueryInfo->havingFieldNum;
pQueryAttr->pUdfInfo = pQueryInfo->pUdfInfo; pQueryAttr->pUdfInfo = pQueryInfo->pUdfInfo;
@ -4484,3 +4536,38 @@ int tscTransferTableNameList(SSqlObj *pSql, const char *pNameList, int32_t lengt
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src) {
assert(pExisted != NULL && src != NULL);
if (pExisted->numOfEps != src->numOfEps) {
return false;
}
for(int32_t i = 0; i < pExisted->numOfEps; ++i) {
if (pExisted->ep[i].port != src->epAddr[i].port) {
return false;
}
if (strncmp(pExisted->ep[i].fqdn, src->epAddr[i].fqdn, tListLen(pExisted->ep[i].fqdn)) != 0) {
return false;
}
}
return true;
}
SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) {
assert(pVgroupMsg != NULL);
SNewVgroupInfo info = {0};
info.numOfEps = pVgroupMsg->numOfEps;
info.vgId = pVgroupMsg->vgId;
info.inUse = 0; // 0 is the default value of inUse in case of multiple replicas
assert(info.numOfEps >= 1 && info.vgId >= 1);
for(int32_t i = 0; i < pVgroupMsg->numOfEps; ++i) {
tstrncpy(info.ep[i].fqdn, pVgroupMsg->epAddr[i].fqdn, TSDB_FQDN_LEN);
info.ep[i].port = pVgroupMsg->epAddr[i].port;
}
return info;
}
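
For reference, a minimal sketch of how these two helpers are typically combined when refreshing a client-side vgroup cache. The updateVgroupCache wrapper and the hash table keyed by vgId are assumptions for illustration; only vgroupInfoIdentical and createNewVgroupInfo come from the change above, and taosHashGet/taosHashPut are the hash-table APIs already used elsewhere in this code base.

// Hypothetical cache-refresh helper built on the two functions above.
static void updateVgroupCache(SHashObj *pVgroupCache, SVgroupMsg *pVgroupMsg) {
  SNewVgroupInfo *pExisted = taosHashGet(pVgroupCache, &pVgroupMsg->vgId, sizeof(pVgroupMsg->vgId));
  if (pExisted != NULL && vgroupInfoIdentical(pExisted, pVgroupMsg)) {
    return;  // endpoints unchanged: keep the cached entry and its inUse index
  }

  SNewVgroupInfo info = createNewVgroupInfo(pVgroupMsg);
  taosHashPut(pVgroupCache, &info.vgId, sizeof(info.vgId), &info, sizeof(info));
}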


@ -16,13 +16,13 @@
*/ */
package com.taosdata.jdbc; package com.taosdata.jdbc;
import com.taosdata.jdbc.utils.TaosInfo;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import java.sql.SQLException; import java.sql.SQLException;
import java.sql.SQLWarning; import java.sql.SQLWarning;
import java.util.List; import java.util.List;
import com.taosdata.jdbc.utils.TaosInfo;
/** /**
* JNI connector * JNI connector
*/ */
@ -276,23 +276,14 @@ public class TSDBJNIConnector {
private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes); private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes);
public long prepareStmt(String sql) throws SQLException { public long prepareStmt(String sql) throws SQLException {
Long stmt = 0L; Long stmt = prepareStmtImp(sql.getBytes(), this.taos);
try { if (stmt == TSDBConstants.JNI_TDENGINE_ERROR) {
stmt = prepareStmtImp(sql.getBytes(), this.taos); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_SQL);
} catch (Exception e) { } else if (stmt == TSDBConstants.JNI_CONNECTION_NULL) {
e.printStackTrace();
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING);
}
if (stmt == TSDBConstants.JNI_CONNECTION_NULL) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
} } else if (stmt == TSDBConstants.JNI_SQL_NULL) {
if (stmt == TSDBConstants.JNI_SQL_NULL) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL);
} } else if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) {
if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY);
} }


@ -10,8 +10,15 @@ INCLUDE_DIRECTORIES(${TD_ENTERPRISE_DIR}/src/inc)
INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(src SRC) AUX_SOURCE_DIRECTORY(src SRC)
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc")
ELSE ()
SET(LINK_JEMALLOC "")
ENDIF ()
ADD_EXECUTABLE(taosd ${SRC}) ADD_EXECUTABLE(taosd ${SRC})
TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lua lz4 balance sync) TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lua lz4 balance sync ${LINK_JEMALLOC})
IF (TD_SOMODE_STATIC) IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosd taos_static) TARGET_LINK_LIBRARIES(taosd taos_static)


@ -476,6 +476,7 @@ typedef struct {
bool simpleAgg; bool simpleAgg;
bool pointInterpQuery; // point interpolation query bool pointInterpQuery; // point interpolation query
bool needReverseScan; // need reverse scan bool needReverseScan; // need reverse scan
bool stateWindow; // state window flag
STimeWindow window; STimeWindow window;
int32_t numOfTables; int32_t numOfTables;


@ -141,73 +141,74 @@
#define TK_VARIABLE 122 #define TK_VARIABLE 122
#define TK_INTERVAL 123 #define TK_INTERVAL 123
#define TK_SESSION 124 #define TK_SESSION 124
#define TK_FILL 125 #define TK_STATE_WINDOW 125
#define TK_SLIDING 126 #define TK_FILL 126
#define TK_ORDER 127 #define TK_SLIDING 127
#define TK_BY 128 #define TK_ORDER 128
#define TK_ASC 129 #define TK_BY 129
#define TK_DESC 130 #define TK_ASC 130
#define TK_GROUP 131 #define TK_DESC 131
#define TK_HAVING 132 #define TK_GROUP 132
#define TK_LIMIT 133 #define TK_HAVING 133
#define TK_OFFSET 134 #define TK_LIMIT 134
#define TK_SLIMIT 135 #define TK_OFFSET 135
#define TK_SOFFSET 136 #define TK_SLIMIT 136
#define TK_WHERE 137 #define TK_SOFFSET 137
#define TK_NOW 138 #define TK_WHERE 138
#define TK_RESET 139 #define TK_NOW 139
#define TK_QUERY 140 #define TK_RESET 140
#define TK_SYNCDB 141 #define TK_QUERY 141
#define TK_ADD 142 #define TK_SYNCDB 142
#define TK_COLUMN 143 #define TK_ADD 143
#define TK_TAG 144 #define TK_COLUMN 144
#define TK_CHANGE 145 #define TK_TAG 145
#define TK_SET 146 #define TK_CHANGE 146
#define TK_KILL 147 #define TK_SET 147
#define TK_CONNECTION 148 #define TK_KILL 148
#define TK_STREAM 149 #define TK_CONNECTION 149
#define TK_COLON 150 #define TK_STREAM 150
#define TK_ABORT 151 #define TK_COLON 151
#define TK_AFTER 152 #define TK_ABORT 152
#define TK_ATTACH 153 #define TK_AFTER 153
#define TK_BEFORE 154 #define TK_ATTACH 154
#define TK_BEGIN 155 #define TK_BEFORE 155
#define TK_CASCADE 156 #define TK_BEGIN 156
#define TK_CLUSTER 157 #define TK_CASCADE 157
#define TK_CONFLICT 158 #define TK_CLUSTER 158
#define TK_COPY 159 #define TK_CONFLICT 159
#define TK_DEFERRED 160 #define TK_COPY 160
#define TK_DELIMITERS 161 #define TK_DEFERRED 161
#define TK_DETACH 162 #define TK_DELIMITERS 162
#define TK_EACH 163 #define TK_DETACH 163
#define TK_END 164 #define TK_EACH 164
#define TK_EXPLAIN 165 #define TK_END 165
#define TK_FAIL 166 #define TK_EXPLAIN 166
#define TK_FOR 167 #define TK_FAIL 167
#define TK_IGNORE 168 #define TK_FOR 168
#define TK_IMMEDIATE 169 #define TK_IGNORE 169
#define TK_INITIALLY 170 #define TK_IMMEDIATE 170
#define TK_INSTEAD 171 #define TK_INITIALLY 171
#define TK_MATCH 172 #define TK_INSTEAD 172
#define TK_KEY 173 #define TK_MATCH 173
#define TK_OF 174 #define TK_KEY 174
#define TK_RAISE 175 #define TK_OF 175
#define TK_REPLACE 176 #define TK_RAISE 176
#define TK_RESTRICT 177 #define TK_REPLACE 177
#define TK_ROW 178 #define TK_RESTRICT 178
#define TK_STATEMENT 179 #define TK_ROW 179
#define TK_TRIGGER 180 #define TK_STATEMENT 180
#define TK_VIEW 181 #define TK_TRIGGER 181
#define TK_SEMI 182 #define TK_VIEW 182
#define TK_NONE 183 #define TK_SEMI 183
#define TK_PREV 184 #define TK_NONE 184
#define TK_LINEAR 185 #define TK_PREV 185
#define TK_IMPORT 186 #define TK_LINEAR 186
#define TK_TBNAME 187 #define TK_IMPORT 187
#define TK_JOIN 188 #define TK_TBNAME 188
#define TK_INSERT 189 #define TK_JOIN 189
#define TK_INTO 190 #define TK_INSERT 190
#define TK_VALUES 191 #define TK_INTO 191
#define TK_VALUES 192
#define TK_SPACE 300 #define TK_SPACE 300
#define TK_COMMENT 301 #define TK_COMMENT 301


@ -11,10 +11,17 @@ IF (TD_LINUX)
LIST(REMOVE_ITEM SRC ./src/shellDarwin.c) LIST(REMOVE_ITEM SRC ./src/shellDarwin.c)
ADD_EXECUTABLE(shell ${SRC}) ADD_EXECUTABLE(shell ${SRC})
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc")
ELSE ()
SET(LINK_JEMALLOC "")
ENDIF ()
IF (TD_SOMODE_STATIC) IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(shell taos_static lua) TARGET_LINK_LIBRARIES(shell taos_static lua ${LINK_JEMALLOC})
ELSE () ELSE ()
TARGET_LINK_LIBRARIES(shell taos lua) TARGET_LINK_LIBRARIES(shell taos lua ${LINK_JEMALLOC})
ENDIF () ENDIF ()
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos) SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)


@ -55,14 +55,21 @@ ENDIF ()
MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER}) MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER})
ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}") ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}")
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc")
ELSE ()
SET(LINK_JEMALLOC "")
ENDIF ()
IF (TD_LINUX) IF (TD_LINUX)
AUX_SOURCE_DIRECTORY(. SRC) AUX_SOURCE_DIRECTORY(. SRC)
ADD_EXECUTABLE(taosdemo ${SRC}) ADD_EXECUTABLE(taosdemo ${SRC})
IF (TD_SOMODE_STATIC) IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua) TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua ${LINK_JEMALLOC})
ELSE () ELSE ()
TARGET_LINK_LIBRARIES(taosdemo taos cJson) TARGET_LINK_LIBRARIES(taosdemo taos cJson ${LINK_JEMALLOC})
ENDIF () ENDIF ()
ELSEIF (TD_WINDOWS) ELSEIF (TD_WINDOWS)
AUX_SOURCE_DIRECTORY(. SRC) AUX_SOURCE_DIRECTORY(. SRC)
@ -71,7 +78,7 @@ ELSEIF (TD_WINDOWS)
IF (TD_SOMODE_STATIC) IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua) TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua)
ELSE () ELSE ()
TARGET_LINK_LIBRARIES(taosdemo taos cJson lua}) TARGET_LINK_LIBRARIES(taosdemo taos cJson lua)
ENDIF () ENDIF ()
ELSEIF (TD_DARWIN) ELSEIF (TD_DARWIN)
# missing a few dependencies, such as <argp.h> # missing a few dependencies, such as <argp.h>


@ -1063,6 +1063,13 @@ static SDbCfg mnodeGetAlterDbOption(SDbObj *pDb, SAlterDbMsg *pAlter) {
newCfg.partitions = partitions; newCfg.partitions = partitions;
} }
// the community version can only change daysToKeep,
// while the enterprise version can change all daysToKeep options
#ifndef _STORAGE
newCfg.daysToKeep1 = newCfg.daysToKeep;
newCfg.daysToKeep2 = newCfg.daysToKeep;
#endif
return newCfg; return newCfg;
} }


@ -22,6 +22,10 @@
extern "C" { extern "C" {
#endif #endif
#ifdef TD_JEMALLOC_ENABLED
#include <jemalloc/jemalloc.h>
#endif
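
As a side note on what the TD_JEMALLOC_ENABLED guard enables, a hedged sketch of dumping allocator statistics when jemalloc is linked in. The dumpAllocStats helper is illustrative only and not part of this patch; malloc_stats_print is jemalloc's stock reporting entry point.

#include <stdio.h>
#ifdef TD_JEMALLOC_ENABLED
#include <jemalloc/jemalloc.h>
#endif

// Illustrative helper: print a human-readable allocator report when built with jemalloc,
// otherwise fall back to a short notice.
static void dumpAllocStats(void) {
#ifdef TD_JEMALLOC_ENABLED
  malloc_stats_print(NULL, NULL, NULL);  // writes the report to stderr by default
#else
  fprintf(stderr, "jemalloc is not enabled in this build\n");
#endif
}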
typedef enum { typedef enum {
TAOS_ALLOC_MODE_DEFAULT = 0, TAOS_ALLOC_MODE_DEFAULT = 0,
TAOS_ALLOC_MODE_RANDOM_FAIL = 1, TAOS_ALLOC_MODE_RANDOM_FAIL = 1,


@ -22,6 +22,7 @@
#include "qFill.h" #include "qFill.h"
#include "qResultbuf.h" #include "qResultbuf.h"
#include "qSqlparser.h" #include "qSqlparser.h"
#include "qTableMeta.h"
#include "qTsbuf.h" #include "qTsbuf.h"
#include "query.h" #include "query.h"
#include "taosdef.h" #include "taosdef.h"
@ -71,14 +72,6 @@ typedef struct SResultRowPool {
SArray* pData; // SArray<void*> SArray* pData; // SArray<void*>
} SResultRowPool; } SResultRowPool;
typedef struct SGroupbyExpr {
int16_t tableIndex;
SArray* columnInfo; // SArray<SColIndex>, group by columns information
int16_t numOfGroupCols; // todo remove it
int16_t orderIndex; // order by column index
int16_t orderType; // order by type: asc/desc
} SGroupbyExpr;
typedef struct SResultRow { typedef struct SResultRow {
int32_t pageId; // pageId & rowId is the position of current result in disk-based output buffer int32_t pageId; // pageId & rowId is the position of current result in disk-based output buffer
int32_t offset:29; // row index in buffer page int32_t offset:29; // row index in buffer page
@ -197,6 +190,7 @@ typedef struct SQueryAttr {
bool pointInterpQuery; // point interpolation query bool pointInterpQuery; // point interpolation query
bool needReverseScan; // need reverse scan bool needReverseScan; // need reverse scan
bool distinctTag; // distinct tag query bool distinctTag; // distinct tag query
bool stateWindow; // state window query on child/normal table
int32_t interBufSize; // intermediate buffer size int32_t interBufSize; // intermediate buffer size
int32_t havingNum; // having expr number int32_t havingNum; // having expr number
@ -217,7 +211,7 @@ typedef struct SQueryAttr {
int32_t intermediateResultRowSize; // intermediate result row size, in case of top-k query. int32_t intermediateResultRowSize; // intermediate result row size, in case of top-k query.
int32_t maxTableColumnWidth; int32_t maxTableColumnWidth;
int32_t tagLen; // tag value length of current query int32_t tagLen; // tag value length of current query
SGroupbyExpr* pGroupbyExpr; SGroupbyExpr *pGroupbyExpr;
SExprInfo* pExpr1; SExprInfo* pExpr1;
SExprInfo* pExpr2; SExprInfo* pExpr2;
@ -306,6 +300,7 @@ enum OPERATOR_TYPE_E {
OP_Filter = 19, OP_Filter = 19,
OP_Distinct = 20, OP_Distinct = 20,
OP_Join = 21, OP_Join = 21,
OP_StateWindow = 22,
}; };
typedef struct SOperatorInfo { typedef struct SOperatorInfo {
@ -471,6 +466,16 @@ typedef struct SSWindowOperatorInfo {
int32_t start; // start row index int32_t start; // start row index
} SSWindowOperatorInfo; } SSWindowOperatorInfo;
typedef struct SStateWindowOperatorInfo {
SOptrBasicInfo binfo;
STimeWindow curWindow; // current time window
int32_t numOfRows; // number of rows
int32_t colIndex; // index of the column that defines the state window
int32_t start; // start row index of the current window in the data block
char* prevData; // previous data
} SStateWindowOperatorInfo;
typedef struct SDistinctOperatorInfo { typedef struct SDistinctOperatorInfo {
SHashObj *pSet; SHashObj *pSet;
SSDataBlock *pRes; SSDataBlock *pRes;
@ -479,10 +484,10 @@ typedef struct SDistinctOperatorInfo {
int64_t outputCapacity; int64_t outputCapacity;
} SDistinctOperatorInfo; } SDistinctOperatorInfo;
struct SLocalMerger; struct SGlobalMerger;
typedef struct SMultiwayMergeInfo { typedef struct SMultiwayMergeInfo {
struct SLocalMerger *pMerge; struct SGlobalMerger *pMerge;
SOptrBasicInfo binfo; SOptrBasicInfo binfo;
int32_t bufCapacity; int32_t bufCapacity;
int64_t seed; int64_t seed;
@ -522,6 +527,7 @@ SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRu
SOperatorInfo* createMultiwaySortOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput, SOperatorInfo* createMultiwaySortOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput,
int32_t numOfRows, void* merger, bool groupMix); int32_t numOfRows, void* merger, bool groupMix);
SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* param, SArray* pUdfInfo); SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* param, SArray* pUdfInfo);
SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* merger); SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* merger);
SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr,
int32_t numOfOutput, SColumnInfo* pCols, int32_t numOfFilter); int32_t numOfOutput, SColumnInfo* pCols, int32_t numOfFilter);
@ -565,6 +571,8 @@ int32_t createFilterInfo(SQueryAttr* pQueryAttr, uint64_t qId);
void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters); void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters);
STableQueryInfo *createTableQueryInfo(SQueryAttr* pQueryAttr, void* pTable, bool groupbyColumn, STimeWindow win, void* buf); STableQueryInfo *createTableQueryInfo(SQueryAttr* pQueryAttr, void* pTable, bool groupbyColumn, STimeWindow win, void* buf);
STableQueryInfo* createTmpTableQueryInfo(STimeWindow win);
int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, void *pQueryMsg); int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, void *pQueryMsg);
bool isQueryKilled(SQInfo *pQInfo); bool isQueryKilled(SQInfo *pQInfo);


@ -16,6 +16,8 @@
#ifndef TDENGINE_QPLAN_H #ifndef TDENGINE_QPLAN_H
#define TDENGINE_QPLAN_H #define TDENGINE_QPLAN_H
#include "qExecutor.h"
struct SQueryInfo; struct SQueryInfo;
typedef struct SQueryNodeBasicInfo { typedef struct SQueryNodeBasicInfo {


@ -89,6 +89,10 @@ typedef struct SSessionWindowVal {
SStrToken gap; SStrToken gap;
} SSessionWindowVal; } SSessionWindowVal;
typedef struct SWindowStateVal {
SStrToken col;
} SWindowStateVal;
struct SRelationInfo; struct SRelationInfo;
typedef struct SSqlNode { typedef struct SSqlNode {
@ -100,6 +104,7 @@ typedef struct SSqlNode {
SArray *fillType; // fill type[optional], SArray<tVariantListItem> SArray *fillType; // fill type[optional], SArray<tVariantListItem>
SIntervalVal interval; // (interval, interval_offset) [optional] SIntervalVal interval; // (interval, interval_offset) [optional]
SSessionWindowVal sessionVal; // session window [optional] SSessionWindowVal sessionVal; // session window [optional]
SWindowStateVal windowstateVal; // window_state(col) [optional]
SStrToken sliding; // sliding window [optional] SStrToken sliding; // sliding window [optional]
SLimitVal limit; // limit offset [optional] SLimitVal limit; // limit offset [optional]
SLimitVal slimit; // group limit offset [optional] SLimitVal slimit; // group limit offset [optional]
@ -142,7 +147,7 @@ typedef struct SCreateTableSql {
} colInfo; } colInfo;
SArray *childTableInfo; // SArray<SCreatedTableInfo> SArray *childTableInfo; // SArray<SCreatedTableInfo>
SSqlNode *pSelect; SSqlNode *pSelect;
} SCreateTableSql; } SCreateTableSql;
typedef struct SAlterTableInfo { typedef struct SAlterTableInfo {
@ -268,7 +273,6 @@ SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int
SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t sortOrder); SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t sortOrder);
SRelationInfo *setTableNameList(SRelationInfo* pFromInfo, SStrToken *pName, SStrToken* pAlias); SRelationInfo *setTableNameList(SRelationInfo* pFromInfo, SStrToken *pName, SStrToken* pAlias);
//SRelationInfo *setSubquery(SRelationInfo* pFromInfo, SRelElementPair* p);
void *destroyRelationInfo(SRelationInfo* pFromInfo); void *destroyRelationInfo(SRelationInfo* pFromInfo);
SRelationInfo *addSubqueryElem(SRelationInfo* pRelationInfo, SArray* pSub, SStrToken* pAlias); SRelationInfo *addSubqueryElem(SRelationInfo* pRelationInfo, SArray* pSub, SStrToken* pAlias);
@ -286,7 +290,7 @@ SArray *tSqlExprListAppend(SArray *pList, tSqlExpr *pNode, SStrToken *pDistinc
void tSqlExprListDestroy(SArray *pList); void tSqlExprListDestroy(SArray *pList);
SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelationInfo *pFrom, tSqlExpr *pWhere, SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelationInfo *pFrom, tSqlExpr *pWhere,
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *ps, SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *ps, SWindowStateVal *pw,
SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit, tSqlExpr *pHaving); SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit, tSqlExpr *pHaving);
int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right); int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right);

src/query/inc/qTableMeta.h (new file, 206 lines)

@ -0,0 +1,206 @@
#ifndef TDENGINE_QTABLEUTIL_H
#define TDENGINE_QTABLEUTIL_H
#include "tsdb.h" //todo tsdb should not be here
#include "qSqlparser.h"
typedef struct SFieldInfo {
int16_t numOfOutput; // number of column in result
TAOS_FIELD* final;
SArray *internalField; // SArray<SInternalField>
} SFieldInfo;
typedef struct SCond {
uint64_t uid;
int32_t len; // length of tag query condition data
char * cond;
} SCond;
typedef struct SJoinNode {
uint64_t uid;
int16_t tagColId;
SArray* tsJoin;
SArray* tagJoin;
} SJoinNode;
typedef struct SJoinInfo {
bool hasJoin;
SJoinNode *joinTables[TSDB_MAX_JOIN_TABLE_NUM];
} SJoinInfo;
typedef struct STagCond {
// relation between the tbname list and the query condition: TK_AND or TK_OR
int16_t relType;
// tbname query condition; only a tbname condition on a single table is supported
SCond tbnameCond;
// join condition; only two-table joins are supported currently
SJoinInfo joinInfo;
// for different tables, the query conditions must be separated
SArray *pCond;
} STagCond;
typedef struct SGroupbyExpr {
int16_t tableIndex;
SArray* columnInfo; // SArray<SColIndex>, group by columns information
int16_t numOfGroupCols; // todo remove it
int16_t orderIndex; // order by column index
int16_t orderType; // order by type: asc/desc
} SGroupbyExpr;
typedef struct STableComInfo {
uint8_t numOfTags;
uint8_t precision;
int16_t numOfColumns;
int32_t rowSize;
} STableComInfo;
typedef struct STableMeta {
int32_t vgId;
STableId id;
uint8_t tableType;
char sTableName[TSDB_TABLE_FNAME_LEN]; // super table name
uint64_t suid; // super table id
int16_t sversion;
int16_t tversion;
STableComInfo tableInfo;
SSchema schema[]; // if the table is TSDB_CHILD_TABLE, schema is acquired by super table meta info
} STableMeta;
typedef struct STableMetaInfo {
STableMeta *pTableMeta; // table meta, cached in client side and acquired by name
uint32_t tableMetaSize;
SVgroupsInfo *vgroupList;
SArray *pVgroupTables; // SArray<SVgroupTableInfo>
/*
* 1. keep the vgroup index during the multi-vnode super table projection query
* 2. keep the vgroup index for multi-vnode insertion
*/
int32_t vgroupIndex;
SName name;
char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql
SArray *tagColList; // SArray<SColumn*>, involved tag columns
} STableMetaInfo;
struct SQInfo; // global merge operator
struct SQueryAttr; // query object
typedef struct SQueryInfo {
int16_t command; // the command may be different for each subclause, so keep it separately.
uint32_t type; // query/insert type
STimeWindow window; // the whole query time window
SInterval interval; // tumble time window
SSessionWindow sessionWindow; // session time window
SGroupbyExpr groupbyExpr; // groupby tags info
SArray * colList; // SArray<SColumn*>
SFieldInfo fieldsInfo;
SArray * exprList; // SArray<SExprInfo*>
SArray * exprList1; // final exprlist in case of arithmetic expression exists
SLimitVal limit;
SLimitVal slimit;
STagCond tagCond;
SOrderVal order;
int16_t fillType; // final result fill type
int16_t numOfTables;
STableMetaInfo **pTableMetaInfo;
struct STSBuf *tsBuf;
int64_t * fillVal; // default value for fill
char * msg; // pointer to the pCmd->payload to keep error message temporarily
int64_t clauseLimit; // limit for current sub clause
int64_t prjOffset; // offset value in the original sql expression, only applied at client side
int64_t vgroupLimit; // table limit in case of super table projection query + global order + limit
int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
int16_t resColumnId; // result column id
bool distinctTag; // distinct tag or not
int32_t round; // 0/1/....
int32_t bufLen;
char* buf;
SArray *pUdfInfo;
struct SQInfo *pQInfo; // global merge operator
struct SQueryAttr *pQueryAttr; // query object
struct SQueryInfo *sibling; // sibling
SArray *pUpstream; // SArray<struct SQueryInfo>
struct SQueryInfo *pDownstream;
int32_t havingFieldNum;
bool stableQuery;
bool groupbyColumn;
bool simpleAgg;
bool arithmeticOnAgg;
bool projectionQuery;
bool hasFilter;
bool onlyTagQuery;
bool orderProjectQuery;
bool stateWindow;
bool globalMerge;
} SQueryInfo;
/**
* get the number of tags of this table
* @param pTableMeta
* @return
*/
int32_t tscGetNumOfTags(const STableMeta* pTableMeta);
/**
* get the number of columns of this table
* @param pTableMeta
* @return
*/
int32_t tscGetNumOfColumns(const STableMeta* pTableMeta);
/**
* get the basic info of this table
* @param pTableMeta
* @return
*/
STableComInfo tscGetTableInfo(const STableMeta* pTableMeta);
/**
* get the schema
* @param pTableMeta
* @return
*/
SSchema* tscGetTableSchema(const STableMeta* pTableMeta);
/**
* get the tag schema
* @param pMeta
* @return
*/
SSchema *tscGetTableTagSchema(const STableMeta *pMeta);
/**
* get the column schema according to the column index
* @param pMeta
* @param colIndex
* @return
*/
SSchema *tscGetTableColumnSchema(const STableMeta *pMeta, int32_t colIndex);
/**
* get the column schema according to the column id
* @param pTableMeta
* @param colId
* @return
*/
SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId);
/**
* create the table meta from the msg
* @param pTableMetaMsg
* @param size size of the table meta
* @return
*/
STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg);
#endif // TDENGINE_QTABLEUTIL_H
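
To show how the accessors declared above fit together, here is a hedged sketch that walks the column layout of a cached table meta. The printSchema helper is illustrative; it assumes SSchema exposes name, type and bytes, as it does elsewhere in the code base, while only the STableMeta layout and the tsc* accessors come from this new header.

#include <stdio.h>
#include "qTableMeta.h"

// Illustrative only: dump the column layout of a table using the accessors above.
static void printSchema(const STableMeta* pTableMeta) {
  int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
  SSchema* pSchema = tscGetTableSchema(pTableMeta);

  for (int32_t i = 0; i < numOfCols; ++i) {
    printf("col %d: name=%s type=%d bytes=%d\n", i, pSchema[i].name, pSchema[i].type, pSchema[i].bytes);
  }
}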


@ -463,8 +463,8 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). {
//////////////////////// The SELECT statement ///////////////////////////////// //////////////////////// The SELECT statement /////////////////////////////////
%type select {SSqlNode*} %type select {SSqlNode*}
%destructor select {destroySqlNode($$);} %destructor select {destroySqlNode($$);}
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). { select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) windowstate_option(D) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). {
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &S, F, &L, &G, N); A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &D, &S, F, &L, &G, N);
} }
select(A) ::= LP select(B) RP. {A = B;} select(A) ::= LP select(B) RP. {A = B;}
@ -482,7 +482,7 @@ cmd ::= union(X). { setSqlInfo(pInfo, X, NULL, TSDB_SQL_SELECT); }
// select client_version() // select client_version()
// select server_state() // select server_state()
select(A) ::= SELECT(T) selcollist(W). { select(A) ::= SELECT(T) selcollist(W). {
A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
} }
// selcollist is a list of expressions that are to become the return // selcollist is a list of expressions that are to become the return
@ -565,6 +565,11 @@ session_option(X) ::= SESSION LP ids(V) cpxName(Z) COMMA tmvar(Y) RP. {
X.col = V; X.col = V;
X.gap = Y; X.gap = Y;
} }
%type windowstate_option {SWindowStateVal}
windowstate_option(X) ::= . {X.col.n = 0;}
windowstate_option(X) ::= STATE_WINDOW LP ids(V) RP. {
X.col = V;
}
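
To make the effect of the new windowstate_option rule concrete, a hedged sketch of issuing a STATE_WINDOW query through the C client. The database name power, the child table d1001 and its status column are made-up examples; only the STATE_WINDOW(col) syntax itself comes from the grammar rule above.

#include <stdio.h>
#include "taos.h"

// Illustrative only: aggregate over runs of identical values in the (hypothetical) status column.
int main(void) {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "power", 0);
  if (taos == NULL) {
    return 1;
  }

  TAOS_RES *res = taos_query(taos, "select count(*), first(ts), last(ts) from d1001 state_window(status)");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  }

  taos_free_result(res);
  taos_close(taos);
  return 0;
}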
%type fill_opt {SArray*} %type fill_opt {SArray*}
%destructor fill_opt {taosArrayDestroy($$);} %destructor fill_opt {taosArrayDestroy($$);}


@ -2518,7 +2518,6 @@ static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) {
tmp += POINTER_BYTES * pCtx->param[0].i64; tmp += POINTER_BYTES * pCtx->param[0].i64;
size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen; size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen;
// assert(pCtx->param[0].i64 > 0);
for (int32_t i = 0; i < pCtx->param[0].i64; ++i) { for (int32_t i = 0; i < pCtx->param[0].i64; ++i) {
pTopBotInfo->res[i] = (tValuePair*) tmp; pTopBotInfo->res[i] = (tValuePair*) tmp;
@ -2527,7 +2526,6 @@ static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) {
} }
} }
bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const char *maxval) { bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const char *maxval) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
if (pResInfo == NULL) { if (pResInfo == NULL) {
@ -2606,13 +2604,14 @@ static void top_function(SQLFunctionCtx *pCtx) {
for (int32_t i = 0; i < pCtx->size; ++i) { for (int32_t i = 0; i < pCtx->size; ++i) {
char *data = GET_INPUT_DATA(pCtx, i); char *data = GET_INPUT_DATA(pCtx, i);
TSKEY ts = GET_TS_DATA(pCtx, i);
if (pCtx->hasNull && isNull(data, pCtx->inputType)) { if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
continue; continue;
} }
notNullElems++; notNullElems++;
// NOTE: Set the default timestamp if it is missing [todo refactor]
TSKEY ts = (pCtx->ptsList != NULL)? GET_TS_DATA(pCtx, i):0;
do_top_function_add(pRes, (int32_t)pCtx->param[0].i64, data, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0); do_top_function_add(pRes, (int32_t)pCtx->param[0].i64, data, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0);
} }
@ -2685,13 +2684,13 @@ static void bottom_function(SQLFunctionCtx *pCtx) {
for (int32_t i = 0; i < pCtx->size; ++i) { for (int32_t i = 0; i < pCtx->size; ++i) {
char *data = GET_INPUT_DATA(pCtx, i); char *data = GET_INPUT_DATA(pCtx, i);
TSKEY ts = GET_TS_DATA(pCtx, i);
if (pCtx->hasNull && isNull(data, pCtx->inputType)) { if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
continue; continue;
} }
notNullElems++; notNullElems++;
// NOTE: Set the default timestamp if it is missing [todo refactor]
TSKEY ts = (pCtx->ptsList != NULL)? GET_TS_DATA(pCtx, i):0;
do_bottom_function_add(pRes, (int32_t)pCtx->param[0].i64, data, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0); do_bottom_function_add(pRes, (int32_t)pCtx->param[0].i64, data, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0);
} }
@ -2769,7 +2768,7 @@ static void top_bottom_func_finalizer(SQLFunctionCtx *pCtx) {
if (pCtx->param[1].i64 == PRIMARYKEY_TIMESTAMP_COL_INDEX) { if (pCtx->param[1].i64 == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
__compar_fn_t comparator = (pCtx->param[2].i64 == TSDB_ORDER_ASC) ? resAscComparFn : resDescComparFn; __compar_fn_t comparator = (pCtx->param[2].i64 == TSDB_ORDER_ASC) ? resAscComparFn : resDescComparFn;
qsort(tvp, (size_t)pResInfo->numOfRes, POINTER_BYTES, comparator); qsort(tvp, (size_t)pResInfo->numOfRes, POINTER_BYTES, comparator);
} else if (pCtx->param[1].i64 > PRIMARYKEY_TIMESTAMP_COL_INDEX) { } else /*if (pCtx->param[1].i64 > PRIMARYKEY_TIMESTAMP_COL_INDEX)*/ {
__compar_fn_t comparator = (pCtx->param[2].i64 == TSDB_ORDER_ASC) ? resDataAscComparFn : resDataDescComparFn; __compar_fn_t comparator = (pCtx->param[2].i64 == TSDB_ORDER_ASC) ? resDataAscComparFn : resDataDescComparFn;
qsort(tvp, (size_t)pResInfo->numOfRes, POINTER_BYTES, comparator); qsort(tvp, (size_t)pResInfo->numOfRes, POINTER_BYTES, comparator);
} }
@ -3324,8 +3323,12 @@ static void col_project_function(SQLFunctionCtx *pCtx) {
if (pCtx->numOfParams == 2) { if (pCtx->numOfParams == 2) {
return; return;
} }
if (pCtx->param[0].i64 == 1) {
SET_VAL(pCtx, pCtx->size, 1);
} else {
INC_INIT_VAL(pCtx, pCtx->size);
}
INC_INIT_VAL(pCtx, pCtx->size);
char *pData = GET_INPUT_DATA_LIST(pCtx); char *pData = GET_INPUT_DATA_LIST(pCtx);
if (pCtx->order == TSDB_ORDER_ASC) { if (pCtx->order == TSDB_ORDER_ASC) {


@ -190,12 +190,16 @@ static void destroySFillOperatorInfo(void* param, int32_t numOfOutput);
static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput); static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput);
static void destroyArithOperatorInfo(void* param, int32_t numOfOutput); static void destroyArithOperatorInfo(void* param, int32_t numOfOutput);
static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput); static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput);
static void destroySWindowOperatorInfo(void* param, int32_t numOfOutput);
static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput);
static void destroyAggOperatorInfo(void* param, int32_t numOfOutput);
static void destroyOperatorInfo(SOperatorInfo* pOperator); static void destroyOperatorInfo(SOperatorInfo* pOperator);
static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo, int32_t orderType, SSDataBlock* pBlock); static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo, int32_t orderType, SSDataBlock* pBlock);
static int32_t getGroupbyColumnIndex(SGroupbyExpr *pGroupbyExpr, SSDataBlock* pDataBlock); static int32_t getGroupbyColumnIndex(SGroupbyExpr *pGroupbyExpr, SSDataBlock* pDataBlock);
static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SGroupbyOperatorInfo *pInfo, int32_t numOfCols, char *pData, int16_t type, int16_t bytes, int32_t groupIndex); static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *binf, int32_t numOfCols, char *pData, int16_t type, int16_t bytes, int32_t groupIndex);
static void initCtxOutputBuffer(SQLFunctionCtx* pCtx, int32_t size); static void initCtxOutputBuffer(SQLFunctionCtx* pCtx, int32_t size);
static void getAlignQueryTimeWindow(SQueryAttr *pQueryAttr, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *win); static void getAlignQueryTimeWindow(SQueryAttr *pQueryAttr, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *win);
@ -805,7 +809,6 @@ static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx
if (pCtx[k].preAggVals.isSet && forwardStep < numOfTotal) { if (pCtx[k].preAggVals.isSet && forwardStep < numOfTotal) {
pCtx[k].preAggVals.isSet = false; pCtx[k].preAggVals.isSet = false;
} }
if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
// aAggs[functionId].xFunction(&pCtx[k]); // aAggs[functionId].xFunction(&pCtx[k]);
if (functionId < 0) { // load the script and exec, pRuntimeEnv->pUdfInfo if (functionId < 0) { // load the script and exec, pRuntimeEnv->pUdfInfo
@ -1034,7 +1037,13 @@ static void doSetInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx,
uint32_t status = aAggs[pCtx[i].functionId].status; uint32_t status = aAggs[pCtx[i].functionId].status;
if ((status & (TSDB_FUNCSTATE_SELECTIVITY | TSDB_FUNCSTATE_NEED_TS)) != 0) { if ((status & (TSDB_FUNCSTATE_SELECTIVITY | TSDB_FUNCSTATE_NEED_TS)) != 0) {
SColumnInfoData* tsInfo = taosArrayGet(pBlock->pDataBlock, 0); SColumnInfoData* tsInfo = taosArrayGet(pBlock->pDataBlock, 0);
pCtx[i].ptsList = (int64_t*) tsInfo->pData; // In case of the top/bottom query against the nested query result, which has no timestamp column,
// don't set the ptsList attribute.
if (tsInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
pCtx[i].ptsList = (int64_t*) tsInfo->pData;
} else {
pCtx[i].ptsList = NULL;
}
} }
} else if (TSDB_COL_IS_UD_COL(pCol->flag) && (pOperator->pRuntimeEnv->scanFlag == MERGE_STAGE)) { } else if (TSDB_COL_IS_UD_COL(pCol->flag) && (pOperator->pRuntimeEnv->scanFlag == MERGE_STAGE)) {
SColIndex* pColIndex = &pOperator->pExpr[i].base.colInfo; SColIndex* pColIndex = &pOperator->pExpr[i].base.colInfo;
@ -1400,7 +1409,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
} }
int32_t ret = int32_t ret =
setGroupResultOutputBuf(pRuntimeEnv, pInfo, pOperator->numOfOutput, val, type, bytes, item->groupIndex); setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, val, type, bytes, item->groupIndex);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR); longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
} }
@ -1442,12 +1451,16 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf
pInfo->start = j; pInfo->start = j;
} else if (tsList[j] - pInfo->prevTs <= gap) { } else if (tsList[j] - pInfo->prevTs <= gap) {
pInfo->curWindow.ekey = tsList[j]; pInfo->curWindow.ekey = tsList[j];
pInfo->prevTs = tsList[j]; //pInfo->prevTs = tsList[j];
pInfo->numOfRows += 1; pInfo->numOfRows += 1;
pInfo->start = j; if (j == 0 && pInfo->start != 0) {
pInfo->numOfRows = 1;
pInfo->start = 0;
}
} else { // start a new session window } else { // start a new session window
SResultRow* pResult = NULL; SResultRow* pResult = NULL;
pInfo->curWindow.ekey = pInfo->curWindow.skey;
int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan, int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan,
&pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput, &pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput,
pBInfo->rowCellInfoOffset); pBInfo->rowCellInfoOffset);
@ -1468,6 +1481,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf
SResultRow* pResult = NULL; SResultRow* pResult = NULL;
pInfo->curWindow.ekey = pInfo->curWindow.skey;
int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan, int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan,
&pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput, &pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput,
pBInfo->rowCellInfoOffset); pBInfo->rowCellInfoOffset);
@ -1495,12 +1509,12 @@ static void setResultRowKey(SResultRow* pResultRow, char* pData, int16_t type) {
} }
} }
static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SGroupbyOperatorInfo *pInfo, int32_t numOfCols, char *pData, int16_t type, int16_t bytes, int32_t groupIndex) { static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *binfo, int32_t numOfCols, char *pData, int16_t type, int16_t bytes, int32_t groupIndex) {
SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf; SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf;
int32_t *rowCellInfoOffset = pInfo->binfo.rowCellInfoOffset; int32_t *rowCellInfoOffset = binfo->rowCellInfoOffset;
SResultRowInfo *pResultRowInfo = &pInfo->binfo.resultRowInfo; SResultRowInfo *pResultRowInfo = &binfo->resultRowInfo;
SQLFunctionCtx *pCtx = pInfo->binfo.pCtx; SQLFunctionCtx *pCtx = binfo->pCtx;
// not assign result buffer yet, add new result buffer, TODO remove it // not assign result buffer yet, add new result buffer, TODO remove it
char* d = pData; char* d = pData;
@ -1822,7 +1836,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
case OP_TimeWindow: { case OP_TimeWindow: {
pRuntimeEnv->proot = pRuntimeEnv->proot =
createTimeIntervalOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); createTimeIntervalOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType;
if (opType != OP_DummyInput && opType != OP_Join) {
setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
}
break; break;
} }
case OP_Groupby: { case OP_Groupby: {
@ -1868,6 +1885,11 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
} }
break; break;
} }
case OP_StateWindow: {
pRuntimeEnv->proot = createStatewindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
break;
}
case OP_Limit: { case OP_Limit: {
pRuntimeEnv->proot = createLimitOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot); pRuntimeEnv->proot = createLimitOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot);
@ -2212,6 +2234,8 @@ static bool onlyFirstQuery(SQueryAttr *pQueryAttr) { return onlyOneQueryType(pQu
static bool onlyLastQuery(SQueryAttr *pQueryAttr) { return onlyOneQueryType(pQueryAttr, TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST); } static bool onlyLastQuery(SQueryAttr *pQueryAttr) { return onlyOneQueryType(pQueryAttr, TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST); }
static bool notContainSessionOrStateWindow(SQueryAttr *pQueryAttr) { return !(pQueryAttr->sw.gap > 0 || pQueryAttr->stateWindow); }
static int32_t updateBlockLoadStatus(SQueryAttr *pQuery, int32_t status) { static int32_t updateBlockLoadStatus(SQueryAttr *pQuery, int32_t status) {
bool hasFirstLastFunc = false; bool hasFirstLastFunc = false;
bool hasOtherFunc = false; bool hasOtherFunc = false;
@ -2315,7 +2339,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo
} }
pQueryAttr->order.order = TSDB_ORDER_ASC; pQueryAttr->order.order = TSDB_ORDER_ASC;
} else if (onlyLastQuery(pQueryAttr)) { } else if (onlyLastQuery(pQueryAttr) && notContainSessionOrStateWindow(pQueryAttr)) {
if (QUERY_IS_ASC_QUERY(pQueryAttr)) { if (QUERY_IS_ASC_QUERY(pQueryAttr)) {
qDebug(msg, pQInfo, "only-last", pQueryAttr->order.order, TSDB_ORDER_DESC, pQueryAttr->window.skey, qDebug(msg, pQInfo, "only-last", pQueryAttr->order.order, TSDB_ORDER_DESC, pQueryAttr->window.skey,
pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey);
@ -3331,7 +3355,7 @@ void finalizeQueryResult(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SResult
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
int32_t numOfOutput = pOperator->numOfOutput; int32_t numOfOutput = pOperator->numOfOutput;
if (pQueryAttr->groupbyColumn || QUERY_IS_INTERVAL_QUERY(pQueryAttr) || pQueryAttr->sw.gap > 0) { if (pQueryAttr->groupbyColumn || QUERY_IS_INTERVAL_QUERY(pQueryAttr) || pQueryAttr->sw.gap > 0 || pQueryAttr->stateWindow) {
// for each group result, call the finalize function for each column // for each group result, call the finalize function for each column
if (pQueryAttr->groupbyColumn) { if (pQueryAttr->groupbyColumn) {
closeAllResultRows(pResultRowInfo); closeAllResultRows(pResultRowInfo);
@ -3407,6 +3431,25 @@ STableQueryInfo *createTableQueryInfo(SQueryAttr* pQueryAttr, void* pTable, bool
return pTableQueryInfo; return pTableQueryInfo;
} }
STableQueryInfo* createTmpTableQueryInfo(STimeWindow win) {
STableQueryInfo* pTableQueryInfo = calloc(1, sizeof(STableQueryInfo));
pTableQueryInfo->win = win;
pTableQueryInfo->lastKey = win.skey;
pTableQueryInfo->pTable = NULL;
pTableQueryInfo->cur.vgroupIndex = -1;
// use a larger initial size for interval/groupby queries
int32_t initialSize = 16;
int32_t code = initResultRowInfo(&pTableQueryInfo->resInfo, initialSize, TSDB_DATA_TYPE_INT);
if (code != TSDB_CODE_SUCCESS) {
tfree(pTableQueryInfo); // avoid leaking the half-initialized object on failure
return NULL;
}
return pTableQueryInfo;
}
void destroyTableQueryInfoImpl(STableQueryInfo *pTableQueryInfo) { void destroyTableQueryInfoImpl(STableQueryInfo *pTableQueryInfo) {
if (pTableQueryInfo == NULL) { if (pTableQueryInfo == NULL) {
return; return;
@ -4335,6 +4378,10 @@ static void updateTableIdInfo(STableQueryInfo* pTableQueryInfo, SSDataBlock* pBl
int32_t step = GET_FORWARD_DIRECTION_FACTOR(order); int32_t step = GET_FORWARD_DIRECTION_FACTOR(order);
pTableQueryInfo->lastKey = ((order == TSDB_ORDER_ASC)? pBlock->info.window.ekey:pBlock->info.window.skey) + step; pTableQueryInfo->lastKey = ((order == TSDB_ORDER_ASC)? pBlock->info.window.ekey:pBlock->info.window.skey) + step;
if (pTableQueryInfo->pTable == NULL) {
return;
}
STableIdInfo tidInfo = createTableIdInfo(pTableQueryInfo); STableIdInfo tidInfo = createTableIdInfo(pTableQueryInfo);
STableIdInfo *idinfo = taosHashGet(pTableIdInfo, &tidInfo.tid, sizeof(tidInfo.tid)); STableIdInfo *idinfo = taosHashGet(pTableIdInfo, &tidInfo.tid, sizeof(tidInfo.tid));
if (idinfo != NULL) { if (idinfo != NULL) {
@ -4636,6 +4683,12 @@ void setTableScanFilterOperatorInfo(STableScanInfo* pTableScanInfo, SOperatorInf
} else if (pDownstream->operatorType == OP_SessionWindow) { } else if (pDownstream->operatorType == OP_SessionWindow) {
SSWindowOperatorInfo* pInfo = pDownstream->info; SSWindowOperatorInfo* pInfo = pDownstream->info;
pTableScanInfo->pCtx = pInfo->binfo.pCtx;
pTableScanInfo->pResultRowInfo = &pInfo->binfo.resultRowInfo;
pTableScanInfo->rowCellInfoOffset = pInfo->binfo.rowCellInfoOffset;
} else if (pDownstream->operatorType == OP_StateWindow) {
SStateWindowOperatorInfo* pInfo = pDownstream->info;
pTableScanInfo->pCtx = pInfo->binfo.pCtx; pTableScanInfo->pCtx = pInfo->binfo.pCtx;
pTableScanInfo->pResultRowInfo = &pInfo->binfo.resultRowInfo; pTableScanInfo->pResultRowInfo = &pInfo->binfo.resultRowInfo;
pTableScanInfo->rowCellInfoOffset = pInfo->binfo.rowCellInfoOffset; pTableScanInfo->rowCellInfoOffset = pInfo->binfo.rowCellInfoOffset;
@ -4747,7 +4800,6 @@ static void destroyGlobalAggOperatorInfo(void* param, int32_t numOfOutput) {
tfree(pInfo->prevRow); tfree(pInfo->prevRow);
tfree(pInfo->currentGroupColData); tfree(pInfo->currentGroupColData);
} }
static void destroySlimitOperatorInfo(void* param, int32_t numOfOutput) { static void destroySlimitOperatorInfo(void* param, int32_t numOfOutput) {
SSLimitOperatorInfo *pInfo = (SSLimitOperatorInfo*) param; SSLimitOperatorInfo *pInfo = (SSLimitOperatorInfo*) param;
taosArrayDestroy(pInfo->orderColumnList); taosArrayDestroy(pInfo->orderColumnList);
@ -5008,8 +5060,7 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) {
updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows); updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows);
arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput); arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
if (pTableQueryInfo != NULL) {
if (pTableQueryInfo != NULL) { // TODO refactor
updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order); updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order);
} }
@ -5052,8 +5103,7 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) {
updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows); updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows);
arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput); arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
if (pTableQueryInfo != NULL) {
if (pTableQueryInfo != NULL) { // TODO refactor
updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order); updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order);
} }
@ -5254,13 +5304,83 @@ static SSDataBlock* doSTableIntervalAgg(void* param, bool* newgroup) {
return pIntervalInfo->pRes; return pIntervalInfo->pRes;
} }
static SSDataBlock* doSessionWindowAgg(void* param, bool* newgroup) {
static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorInfo *pInfo, SSDataBlock *pSDataBlock) {
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
STableQueryInfo* item = pRuntimeEnv->current;
SColumnInfoData* pColInfoData = taosArrayGet(pSDataBlock->pDataBlock, pInfo->colIndex);
SOptrBasicInfo* pBInfo = &pInfo->binfo;
bool masterScan = IS_MASTER_SCAN(pRuntimeEnv);
int16_t bytes = pColInfoData->info.bytes;
int16_t type = pColInfoData->info.type;
SColumnInfoData* pTsColInfoData = taosArrayGet(pSDataBlock->pDataBlock, 0);
TSKEY* tsList = (TSKEY*)pTsColInfoData->pData;
pInfo->numOfRows = 0;
for (int32_t j = 0; j < pSDataBlock->info.rows; ++j) {
char* val = ((char*)pColInfoData->pData) + bytes * j;
if (isNull(val, type)) {
continue;
}
if (pInfo->prevData == NULL) {
pInfo->prevData = malloc(bytes);
memcpy(pInfo->prevData, val, bytes);
pInfo->numOfRows = 1;
pInfo->curWindow.skey = tsList[j];
pInfo->curWindow.ekey = tsList[j];
pInfo->start = j;
} else if (memcmp(pInfo->prevData, val, bytes) == 0) {
pInfo->curWindow.ekey = tsList[j];
pInfo->numOfRows += 1;
//pInfo->start = j;
if (j == 0 && pInfo->start != 0) {
pInfo->numOfRows = 1;
pInfo->start = 0;
}
} else {
SResultRow* pResult = NULL;
pInfo->curWindow.ekey = pInfo->curWindow.skey;
int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan,
&pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput,
pBInfo->rowCellInfoOffset);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
}
doApplyFunctions(pRuntimeEnv, pBInfo->pCtx, &pInfo->curWindow, pInfo->start, pInfo->numOfRows, tsList,
pSDataBlock->info.rows, pOperator->numOfOutput);
pInfo->curWindow.skey = tsList[j];
pInfo->curWindow.ekey = tsList[j];
memcpy(pInfo->prevData, val, bytes);
pInfo->numOfRows = 1;
pInfo->start = j;
}
}
SResultRow* pResult = NULL;
pInfo->curWindow.ekey = pInfo->curWindow.skey;
int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan,
&pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput,
pBInfo->rowCellInfoOffset);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
}
doApplyFunctions(pRuntimeEnv, pBInfo->pCtx, &pInfo->curWindow, pInfo->start, pInfo->numOfRows, tsList,
pSDataBlock->info.rows, pOperator->numOfOutput);
}
static SSDataBlock* doStateWindowAgg(void *param, bool* newgroup) {
SOperatorInfo* pOperator = (SOperatorInfo*) param; SOperatorInfo* pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) { if (pOperator->status == OP_EXEC_DONE) {
return NULL; return NULL;
} }
SSWindowOperatorInfo* pWindowInfo = pOperator->info; SStateWindowOperatorInfo* pWindowInfo = pOperator->info;
SOptrBasicInfo* pBInfo = &pWindowInfo->binfo; SOptrBasicInfo* pBInfo = &pWindowInfo->binfo;
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv; SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
@ -5277,6 +5397,62 @@ static SSDataBlock* doSessionWindowAgg(void* param, bool* newgroup) {
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
int32_t order = pQueryAttr->order.order; int32_t order = pQueryAttr->order.order;
STimeWindow win = pQueryAttr->window; STimeWindow win = pQueryAttr->window;
SOperatorInfo* upstream = pOperator->upstream[0];
while (1) {
SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
if (pBlock == NULL) {
break;
}
setInputDataBlock(pOperator, pBInfo->pCtx, pBlock, pQueryAttr->order.order);
if (pWindowInfo->colIndex == -1) {
pWindowInfo->colIndex = getGroupbyColumnIndex(pRuntimeEnv->pQueryAttr->pGroupbyExpr, pBlock);
}
doStateWindowAggImpl(pOperator, pWindowInfo, pBlock);
}
// restore the value
pQueryAttr->order.order = order;
pQueryAttr->window = win;
pOperator->status = OP_RES_TO_RETURN;
closeAllResultRows(&pBInfo->resultRowInfo);
setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
finalizeQueryResult(pOperator, pBInfo->pCtx, &pBInfo->resultRowInfo, pBInfo->rowCellInfoOffset);
initGroupResInfo(&pRuntimeEnv->groupResInfo, &pBInfo->resultRowInfo);
toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pBInfo->pRes);
if (pBInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) {
pOperator->status = OP_EXEC_DONE;
}
return pBInfo->pRes->info.rows == 0? NULL:pBInfo->pRes;
}
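
Because the window bookkeeping in doStateWindowAggImpl is interleaved with the operator machinery, here is a stand-alone, hedged sketch of the same splitting idea: extend the current window while the state value repeats and close it when the value changes. The emitWindow callback and the int state column are simplifications; the real code goes through setWindowOutputBufByKey/doApplyFunctions and handles arbitrary column types and NULLs.

#include <inttypes.h>
#include <stdio.h>

typedef int64_t TSKEY;

// Simplified stand-in for closing a window and applying the aggregate functions to it.
static void emitWindow(TSKEY skey, TSKEY ekey, int state, int numOfRows) {
  printf("window [%" PRId64 ", %" PRId64 "] state=%d rows=%d\n", skey, ekey, state, numOfRows);
}

// One pass over the block: extend the window while the state repeats, close it on a change,
// and flush the last open window at the end (mirroring the trailing setWindowOutputBufByKey call above).
static void stateWindowSplit(const TSKEY* tsList, const int* state, int numOfRows) {
  if (numOfRows <= 0) {
    return;
  }

  int start = 0;
  for (int i = 1; i < numOfRows; ++i) {
    if (state[i] != state[start]) {
      emitWindow(tsList[start], tsList[i - 1], state[start], i - start);
      start = i;
    }
  }

  emitWindow(tsList[start], tsList[numOfRows - 1], state[start], numOfRows - start);
}

int main(void) {
  TSKEY ts[] = {1, 2, 3, 4, 5, 6};
  int st[] = {0, 0, 1, 1, 1, 0};
  stateWindowSplit(ts, st, 6);  // -> [1,2] state=0, [3,5] state=1, [6,6] state=0
  return 0;
}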
static SSDataBlock* doSessionWindowAgg(void* param, bool* newgroup) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
SSWindowOperatorInfo* pWindowInfo = pOperator->info;
SOptrBasicInfo* pBInfo = &pWindowInfo->binfo;
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
if (pOperator->status == OP_RES_TO_RETURN) {
toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pBInfo->pRes);
if (pBInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) {
pOperator->status = OP_EXEC_DONE;
}
return pBInfo->pRes;
}
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
//pQueryAttr->order.order = TSDB_ORDER_ASC;
int32_t order = pQueryAttr->order.order;
STimeWindow win = pQueryAttr->window;
SOperatorInfo* upstream = pOperator->upstream[0]; SOperatorInfo* upstream = pOperator->upstream[0];
@ -5512,7 +5688,7 @@ SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOpera
pOperator->pRuntimeEnv = pRuntimeEnv; pOperator->pRuntimeEnv = pRuntimeEnv;
pOperator->exec = doAggregate; pOperator->exec = doAggregate;
pOperator->cleanup = destroyBasicOperatorInfo; pOperator->cleanup = destroyAggOperatorInfo;
appendUpstream(pOperator, upstream); appendUpstream(pOperator, upstream);
return pOperator; return pOperator;
@ -5532,6 +5708,19 @@ static void destroyBasicOperatorInfo(void* param, int32_t numOfOutput) {
SOptrBasicInfo* pInfo = (SOptrBasicInfo*) param; SOptrBasicInfo* pInfo = (SOptrBasicInfo*) param;
doDestroyBasicInfo(pInfo, numOfOutput); doDestroyBasicInfo(pInfo, numOfOutput);
} }
static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput) {
SStateWindowOperatorInfo* pInfo = (SStateWindowOperatorInfo*) param;
doDestroyBasicInfo(&pInfo->binfo, numOfOutput);
tfree(pInfo->prevData);
}
static void destroyAggOperatorInfo(void* param, int32_t numOfOutput) {
SAggOperatorInfo* pInfo = (SAggOperatorInfo*) param;
doDestroyBasicInfo(&pInfo->binfo, numOfOutput);
}
static void destroySWindowOperatorInfo(void* param, int32_t numOfOutput) {
SSWindowOperatorInfo* pInfo = (SSWindowOperatorInfo*) param;
doDestroyBasicInfo(&pInfo->binfo, numOfOutput);
}
static void destroySFillOperatorInfo(void* param, int32_t numOfOutput) { static void destroySFillOperatorInfo(void* param, int32_t numOfOutput) {
SFillOperatorInfo* pInfo = (SFillOperatorInfo*) param; SFillOperatorInfo* pInfo = (SFillOperatorInfo*) param;
@ -5586,7 +5775,7 @@ SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SO
pOperator->pRuntimeEnv = pRuntimeEnv; pOperator->pRuntimeEnv = pRuntimeEnv;
pOperator->exec = doSTableAggregate; pOperator->exec = doSTableAggregate;
pOperator->cleanup = destroyBasicOperatorInfo; pOperator->cleanup = destroyAggOperatorInfo;
appendUpstream(pOperator, upstream); appendUpstream(pOperator, upstream);
return pOperator; return pOperator;
@ -5712,7 +5901,29 @@ SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOp
appendUpstream(pOperator, upstream); appendUpstream(pOperator, upstream);
return pOperator; return pOperator;
} }
SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
SStateWindowOperatorInfo* pInfo = calloc(1, sizeof(SStateWindowOperatorInfo));
pInfo->colIndex = -1;
pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset);
pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT);
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
pOperator->name = "StateWindowOperator";
pOperator->operatorType = OP_StateWindow;
pOperator->blockingOptr = true;
pOperator->status = OP_IN_EXECUTING;
pOperator->pExpr = pExpr;
pOperator->numOfOutput = numOfOutput;
pOperator->info = pInfo;
pOperator->pRuntimeEnv = pRuntimeEnv;
pOperator->exec = doStateWindowAgg;
pOperator->cleanup = destroyStateWindowOperatorInfo;
appendUpstream(pOperator, upstream);
return pOperator;
}
SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
SSWindowOperatorInfo* pInfo = calloc(1, sizeof(SSWindowOperatorInfo)); SSWindowOperatorInfo* pInfo = calloc(1, sizeof(SSWindowOperatorInfo));
@ -5732,7 +5943,7 @@ SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato
pOperator->info = pInfo; pOperator->info = pInfo;
pOperator->pRuntimeEnv = pRuntimeEnv; pOperator->pRuntimeEnv = pRuntimeEnv;
pOperator->exec = doSessionWindowAgg; pOperator->exec = doSessionWindowAgg;
pOperator->cleanup = destroyBasicOperatorInfo; pOperator->cleanup = destroySWindowOperatorInfo;
appendUpstream(pOperator, upstream); appendUpstream(pOperator, upstream);
return pOperator; return pOperator;
@ -7183,6 +7394,8 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S
pQueryAttr->simpleAgg = pQueryMsg->simpleAgg; pQueryAttr->simpleAgg = pQueryMsg->simpleAgg;
pQueryAttr->pointInterpQuery = pQueryMsg->pointInterpQuery; pQueryAttr->pointInterpQuery = pQueryMsg->pointInterpQuery;
pQueryAttr->needReverseScan = pQueryMsg->needReverseScan; pQueryAttr->needReverseScan = pQueryMsg->needReverseScan;
pQueryAttr->stateWindow = pQueryMsg->stateWindow;
pQueryAttr->vgId = vgId;
pQueryAttr->tableCols = calloc(numOfCols, sizeof(SSingleColumnFilterInfo)); pQueryAttr->tableCols = calloc(numOfCols, sizeof(SSingleColumnFilterInfo));
if (pQueryAttr->tableCols == NULL) { if (pQueryAttr->tableCols == NULL) {

View File

@ -1,5 +1,5 @@
#include "os.h" #include "os.h"
#include "tschemautil.h" #include "qTableMeta.h"
#include "qPlan.h" #include "qPlan.h"
#include "qExecutor.h" #include "qExecutor.h"
#include "qUtil.h" #include "qUtil.h"
@ -592,6 +592,14 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
op = OP_SessionWindow; op = OP_SessionWindow;
taosArrayPush(plan, &op); taosArrayPush(plan, &op);
if (pQueryAttr->pExpr2 != NULL) {
op = OP_Arithmetic;
taosArrayPush(plan, &op);
}
} else if (pQueryAttr->stateWindow) {
op = OP_StateWindow;
taosArrayPush(plan, &op);
if (pQueryAttr->pExpr2 != NULL) { if (pQueryAttr->pExpr2 != NULL) {
op = OP_Arithmetic; op = OP_Arithmetic;
taosArrayPush(plan, &op); taosArrayPush(plan, &op);

View File

@ -726,9 +726,9 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
* extract the select info out of sql string * extract the select info out of sql string
*/ */
SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelationInfo *pFrom, tSqlExpr *pWhere, SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelationInfo *pFrom, tSqlExpr *pWhere,
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *pSession, SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval,
SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *psLimit, SSessionWindowVal *pSession, SWindowStateVal *pWindowStateVal, SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit,
tSqlExpr *pHaving) { SLimitVal *psLimit, tSqlExpr *pHaving) {
assert(pSelNodeList != NULL); assert(pSelNodeList != NULL);
SSqlNode *pSqlNode = calloc(1, sizeof(SSqlNode)); SSqlNode *pSqlNode = calloc(1, sizeof(SSqlNode));
@ -779,6 +779,12 @@ SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelat
TPARSER_SET_NONE_TOKEN(pSqlNode->sessionVal.col); TPARSER_SET_NONE_TOKEN(pSqlNode->sessionVal.col);
} }
if (pWindowStateVal != NULL) {
pSqlNode->windowstateVal = *pWindowStateVal;
} else {
TPARSER_SET_NONE_TOKEN(pSqlNode->windowstateVal.col);
}
return pSqlNode; return pSqlNode;
} }

View File

@ -1,47 +1,32 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "os.h" #include "os.h"
#include "taosmsg.h" #include "taosmsg.h"
#include "tschemautil.h" #include "qTableMeta.h"
#include "ttokendef.h" #include "ttokendef.h"
#include "taosdef.h" #include "taosdef.h"
#include "tutil.h" #include "tutil.h"
#include "tsclient.h"
int32_t tscGetNumOfTags(const STableMeta* pTableMeta) { int32_t tscGetNumOfTags(const STableMeta* pTableMeta) {
assert(pTableMeta != NULL); assert(pTableMeta != NULL);
STableComInfo tinfo = tscGetTableInfo(pTableMeta); STableComInfo tinfo = tscGetTableInfo(pTableMeta);
if (pTableMeta->tableType == TSDB_NORMAL_TABLE) { if (pTableMeta->tableType == TSDB_NORMAL_TABLE) {
assert(tinfo.numOfTags == 0); assert(tinfo.numOfTags == 0);
return 0; return 0;
} }
if (pTableMeta->tableType == TSDB_SUPER_TABLE || pTableMeta->tableType == TSDB_CHILD_TABLE) { if (pTableMeta->tableType == TSDB_SUPER_TABLE || pTableMeta->tableType == TSDB_CHILD_TABLE) {
return tinfo.numOfTags; return tinfo.numOfTags;
} }
assert(tinfo.numOfTags == 0); assert(tinfo.numOfTags == 0);
return 0; return 0;
} }
int32_t tscGetNumOfColumns(const STableMeta* pTableMeta) { int32_t tscGetNumOfColumns(const STableMeta* pTableMeta) {
assert(pTableMeta != NULL); assert(pTableMeta != NULL);
// table created according to super table, use data from super table // table created according to super table, use data from super table
STableComInfo tinfo = tscGetTableInfo(pTableMeta); STableComInfo tinfo = tscGetTableInfo(pTableMeta);
return tinfo.numOfColumns; return tinfo.numOfColumns;
@ -54,10 +39,10 @@ SSchema *tscGetTableSchema(const STableMeta *pTableMeta) {
SSchema* tscGetTableTagSchema(const STableMeta* pTableMeta) { SSchema* tscGetTableTagSchema(const STableMeta* pTableMeta) {
assert(pTableMeta != NULL && (pTableMeta->tableType == TSDB_SUPER_TABLE || pTableMeta->tableType == TSDB_CHILD_TABLE)); assert(pTableMeta != NULL && (pTableMeta->tableType == TSDB_SUPER_TABLE || pTableMeta->tableType == TSDB_CHILD_TABLE));
STableComInfo tinfo = tscGetTableInfo(pTableMeta); STableComInfo tinfo = tscGetTableInfo(pTableMeta);
assert(tinfo.numOfTags > 0); assert(tinfo.numOfTags > 0);
return tscGetTableColumnSchema(pTableMeta, tinfo.numOfColumns); return tscGetTableColumnSchema(pTableMeta, tinfo.numOfColumns);
} }
@ -68,7 +53,7 @@ STableComInfo tscGetTableInfo(const STableMeta* pTableMeta) {
SSchema* tscGetTableColumnSchema(const STableMeta* pTableMeta, int32_t colIndex) { SSchema* tscGetTableColumnSchema(const STableMeta* pTableMeta, int32_t colIndex) {
assert(pTableMeta != NULL); assert(pTableMeta != NULL);
SSchema* pSchema = (SSchema*) pTableMeta->schema; SSchema* pSchema = (SSchema*) pTableMeta->schema;
return &pSchema[colIndex]; return &pSchema[colIndex];
} }
@ -88,7 +73,7 @@ SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId) {
STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg) { STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg) {
assert(pTableMetaMsg != NULL && pTableMetaMsg->numOfColumns >= 2 && pTableMetaMsg->numOfTags >= 0); assert(pTableMetaMsg != NULL && pTableMetaMsg->numOfColumns >= 2 && pTableMetaMsg->numOfTags >= 0);
int32_t schemaSize = (pTableMetaMsg->numOfColumns + pTableMetaMsg->numOfTags) * sizeof(SSchema); int32_t schemaSize = (pTableMetaMsg->numOfColumns + pTableMetaMsg->numOfTags) * sizeof(SSchema);
STableMeta* pTableMeta = calloc(1, sizeof(STableMeta) + schemaSize); STableMeta* pTableMeta = calloc(1, sizeof(STableMeta) + schemaSize);
@ -97,11 +82,11 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg) {
pTableMeta->suid = pTableMetaMsg->suid; pTableMeta->suid = pTableMetaMsg->suid;
pTableMeta->tableInfo = (STableComInfo) { pTableMeta->tableInfo = (STableComInfo) {
.numOfTags = pTableMetaMsg->numOfTags, .numOfTags = pTableMetaMsg->numOfTags,
.precision = pTableMetaMsg->precision, .precision = pTableMetaMsg->precision,
.numOfColumns = pTableMetaMsg->numOfColumns, .numOfColumns = pTableMetaMsg->numOfColumns,
}; };
pTableMeta->id.tid = pTableMetaMsg->tid; pTableMeta->id.tid = pTableMetaMsg->tid;
pTableMeta->id.uid = pTableMetaMsg->uid; pTableMeta->id.uid = pTableMetaMsg->uid;
@ -109,69 +94,14 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg) {
pTableMeta->tversion = pTableMetaMsg->tversion; pTableMeta->tversion = pTableMetaMsg->tversion;
tstrncpy(pTableMeta->sTableName, pTableMetaMsg->sTableName, TSDB_TABLE_FNAME_LEN); tstrncpy(pTableMeta->sTableName, pTableMetaMsg->sTableName, TSDB_TABLE_FNAME_LEN);
memcpy(pTableMeta->schema, pTableMetaMsg->schema, schemaSize); memcpy(pTableMeta->schema, pTableMetaMsg->schema, schemaSize);
int32_t numOfTotalCols = pTableMeta->tableInfo.numOfColumns; int32_t numOfTotalCols = pTableMeta->tableInfo.numOfColumns;
for(int32_t i = 0; i < numOfTotalCols; ++i) { for(int32_t i = 0; i < numOfTotalCols; ++i) {
pTableMeta->tableInfo.rowSize += pTableMeta->schema[i].bytes; pTableMeta->tableInfo.rowSize += pTableMeta->schema[i].bytes;
} }
return pTableMeta; return pTableMeta;
} }
bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src) {
assert(pExisted != NULL && src != NULL);
if (pExisted->numOfEps != src->numOfEps) {
return false;
}
for(int32_t i = 0; i < pExisted->numOfEps; ++i) {
if (pExisted->ep[i].port != src->epAddr[i].port) {
return false;
}
if (strncmp(pExisted->ep[i].fqdn, src->epAddr[i].fqdn, tListLen(pExisted->ep[i].fqdn)) != 0) {
return false;
}
}
return true;
}
SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) {
assert(pVgroupMsg != NULL);
SNewVgroupInfo info = {0};
info.numOfEps = pVgroupMsg->numOfEps;
info.vgId = pVgroupMsg->vgId;
info.inUse = 0; // 0 is the default value of inUse in case of multiple replica
assert(info.numOfEps >= 1 && info.vgId >= 1);
for(int32_t i = 0; i < pVgroupMsg->numOfEps; ++i) {
tstrncpy(info.ep[i].fqdn, pVgroupMsg->epAddr[i].fqdn, TSDB_FQDN_LEN);
info.ep[i].port = pVgroupMsg->epAddr[i].port;
}
return info;
}
// todo refactor
UNUSED_FUNC static FORCE_INLINE char* skipSegments(char* input, char delim, int32_t num) {
for (int32_t i = 0; i < num; ++i) {
while (*input != 0 && *input++ != delim) {
};
}
return input;
}
UNUSED_FUNC static FORCE_INLINE size_t copy(char* dst, const char* src, char delimiter) {
size_t len = 0;
while (*src != delimiter && *src != 0) {
*dst++ = *src++;
len++;
}
return len;
}

File diff suppressed because it is too large

View File

@ -141,6 +141,7 @@ static SKeyword keywordTable[] = {
{"VARIABLE", TK_VARIABLE}, {"VARIABLE", TK_VARIABLE},
{"INTERVAL", TK_INTERVAL}, {"INTERVAL", TK_INTERVAL},
{"SESSION", TK_SESSION}, {"SESSION", TK_SESSION},
{"STATE_WINDOW", TK_STATE_WINDOW},
{"FILL", TK_FILL}, {"FILL", TK_FILL},
{"SLIDING", TK_SLIDING}, {"SLIDING", TK_SLIDING},
{"ORDER", TK_ORDER}, {"ORDER", TK_ORDER},

4
tests/Jenkinsfile vendored
View File

@ -29,8 +29,8 @@ pipeline {
agent none agent none
environment{ environment{
WK = '/var/lib/jenkins/workspace/TDinternal' WK = '/data/lib/jenkins/workspace/TDinternal'
WKC= '/var/lib/jenkins/workspace/TDinternal/community' WKC= '/data/lib/jenkins/workspace/TDinternal/community'
} }
stages { stages {

View File

@ -6,9 +6,9 @@ events {
} }
http { http {
lua_package_path '$prefix/lua/?.lua;$prefix/rest/?.lua;/blah/?.lua;;'; lua_package_path '$prefix/lua/?.lua;$prefix/rest/?.lua;$prefix/rest/?/init.lua;;';
lua_package_cpath "$prefix/so/?.so;;"; lua_package_cpath "$prefix/so/?.so;;";
lua_code_cache off; lua_code_cache on;
server { server {
listen 7000; listen 7000;
server_name restapi; server_name restapi;

View File

@ -0,0 +1,10 @@
local config = {
host = "127.0.0.1",
port = 6030,
database = "",
user = "root",
password = "taosdata",
max_packet_size = 1024 * 1024 ,
connection_pool_size = 64
}
return config

View File

@ -0,0 +1,72 @@
local _M = {}
local driver = require "luaconnector51"
local water_mark = 0
local occupied = 0
local connection_pool = {}
function _M.new(o,config)
o = o or {}
o.connection_pool = connection_pool
o.water_mark = water_mark
o.occupied = occupied
if #connection_pool == 0 then
for i = 1, config.connection_pool_size do
local res = driver.connect(config)
if res.code ~= 0 then
ngx.log(ngx.ERR, "connect--- failed:"..res.error)
return nil
else
local object = {obj = res.conn, state = 0}
table.insert(o.connection_pool,i, object)
ngx.log(ngx.INFO, "add connection, now pool size:"..#(o.connection_pool))
end
end
end
return setmetatable(o, { __index = _M })
end
function _M:get_connection()
local connection_obj
for i = 1, #connection_pool do
connection_obj = connection_pool[i]
if connection_obj.state == 0 then
connection_obj.state = 1
occupied = occupied +1
if occupied > water_mark then
water_mark = occupied
end
return connection_obj["obj"]
end
end
ngx.log(ngx.ERR,"ALERT! NO FREE CONNECTION.")
return nil
end
function _M:get_water_mark()
return water_mark
end
function _M:release_connection(conn)
local connection_obj
for i = 1, #connection_pool do
connection_obj = connection_pool[i]
if connection_obj["obj"] == conn then
connection_obj["state"] = 0
occupied = occupied -1
return
end
end
end
return _M

View File

@ -1,26 +1,11 @@
local driver = require "luaconnector51" local driver = require "luaconnector51"
local cjson = require "cjson" local cjson = require "cjson"
local Pool = require "tdpool"
local config = require "config"
ngx.say("start time:"..os.time()) ngx.say("start time:"..os.time())
local pool = Pool.new(Pool,config)
local config = { local conn = pool:get_connection()
host = "127.0.0.1",
port = 6030,
database = "",
user = "root",
password = "taosdata",
max_packet_size = 1024 * 1024
}
local conn
local res = driver.connect(config)
if res.code ~=0 then
ngx.say("connect--- failed: "..res.error)
return
else
conn = res.conn
ngx.say("connect--- pass.")
end
local res = driver.query(conn,"drop database if exists nginx") local res = driver.query(conn,"drop database if exists nginx")
if res.code ~=0 then if res.code ~=0 then
@ -51,7 +36,7 @@ else
ngx.say("create table--- pass.") ngx.say("create table--- pass.")
end end
res = driver.query(conn,"insert into m1 values ('2019-09-01 00:00:00.001',0,'robotspace'), ('2019-09-01 00:00:00.002',1,'Hilink'),('2019-09-01 00:00:00.003',2,'Harmony')") res = driver.query(conn,"insert into m1 values ('2019-09-01 00:00:00.001', 0, 'robotspace'), ('2019-09-01 00:00:00.002',1,'Hilink'),('2019-09-01 00:00:00.003',2,'Harmony')")
if res.code ~=0 then if res.code ~=0 then
ngx.say("insert records failed: "..res.error) ngx.say("insert records failed: "..res.error)
return return
@ -77,7 +62,29 @@ else
end end
end end
driver.close(conn)
ngx.say("end time:"..os.time())
--ngx.log(ngx.ERR,"in test file.")
local flag = false
function query_callback(res)
if res.code ~=0 then
ngx.say("async_query_callback--- failed:"..res.error)
else
if(res.affected == 3) then
ngx.say("async_query_callback, insert records--- pass")
else
ngx.say("async_query_callback, insert records---failed: expect 3 affected records, actually affected "..res.affected)
end
end
flag = true
end
driver.query_a(conn,"insert into m1 values ('2019-09-01 00:00:00.001', 3, 'robotspace'),('2019-09-01 00:00:00.006', 4, 'Hilink'),('2019-09-01 00:00:00.007', 6, 'Harmony')", query_callback)
while not flag do
-- ngx.say("i am here once...")
ngx.sleep(0.001) -- time unit is second
end
ngx.say("pool water_mark:"..pool:get_water_mark())
pool:release_connection(conn)
ngx.say("end time:"..os.time())

View File

@ -1,2 +1,8 @@
gcc -std=c99 lua_connector.c -fPIC -shared -o luaconnector.so -Wall -ltaos lua_header_installed=`apt-cache policy liblua5.3-dev|grep Installed|grep none > /dev/null; echo $?`
if [ "$lua_header_installed" = "0" ]; then
echo "If need, please input root password to install liblua5.3-dev for build the connector.."
sudo apt install -y liblua5.3-dev
fi
gcc -std=c99 lua_connector.c -fPIC -shared -o luaconnector.so -Wall -ltaos -I/usr/include/lua5.3

View File

@ -13,6 +13,11 @@ struct cb_param{
void * stream; void * stream;
}; };
struct async_query_callback_param{
lua_State* state;
int callback;
};
static int l_connect(lua_State *L){ static int l_connect(lua_State *L){
TAOS * taos=NULL; TAOS * taos=NULL;
const char* host; const char* host;
@ -23,7 +28,7 @@ static int l_connect(lua_State *L){
luaL_checktype(L, 1, LUA_TTABLE); luaL_checktype(L, 1, LUA_TTABLE);
lua_getfield(L,-1,"host"); lua_getfield(L, 1,"host");
if (lua_isstring(L,-1)){ if (lua_isstring(L,-1)){
host = lua_tostring(L, -1); host = lua_tostring(L, -1);
// printf("host = %s\n", host); // printf("host = %s\n", host);
@ -178,6 +183,58 @@ static int l_query(lua_State *L){
return 1; return 1;
} }
void async_query_callback(void *param, TAOS_RES *result, int code){
struct async_query_callback_param* p = (struct async_query_callback_param*) param;
//printf("\nin c,numfields:%d\n", numFields);
//printf("\nin c, code:%d\n", code);
lua_State *L = p->state;
lua_rawgeti(L, LUA_REGISTRYINDEX, p->callback);
lua_newtable(L);
int table_index = lua_gettop(L);
if( code < 0){
printf("failed, reason:%s\n", taos_errstr(result));
lua_pushinteger(L, -1);
lua_setfield(L, table_index, "code");
lua_pushstring(L,"something is wrong");// taos_errstr(taos));
lua_setfield(L, table_index, "error");
}else{
//printf("success to async query.\n");
const int affectRows = taos_affected_rows(result);
//printf(" affect rows:%d\r\n", affectRows);
lua_pushinteger(L, 0);
lua_setfield(L, table_index, "code");
lua_pushinteger(L, affectRows);
lua_setfield(L, table_index, "affected");
}
lua_call(L, 1, 0);
}
static int l_async_query(lua_State *L){
int r = luaL_ref(L, LUA_REGISTRYINDEX);
TAOS * taos = (TAOS*)lua_topointer(L,1);
const char * sqlstr = lua_tostring(L,2);
// int stime = luaL_checknumber(L,3);
lua_newtable(L);
int table_index = lua_gettop(L);
struct async_query_callback_param *p = malloc(sizeof(struct async_query_callback_param));
p->state = L;
p->callback=r;
// printf("r:%d, L:%d\n",r,L);
taos_query_a(taos,sqlstr,async_query_callback,p);
lua_pushnumber(L, 0);
lua_setfield(L, table_index, "code");
lua_pushstring(L, "ok");
lua_setfield(L, table_index, "error");
return 1;
}
void stream_cb(void *param, TAOS_RES *result, TAOS_ROW row){ void stream_cb(void *param, TAOS_RES *result, TAOS_ROW row){
struct cb_param* p = (struct cb_param*) param; struct cb_param* p = (struct cb_param*) param;
TAOS_FIELD *fields = taos_fetch_fields(result); TAOS_FIELD *fields = taos_fetch_fields(result);
@ -308,6 +365,7 @@ static int l_close(lua_State *L){
static const struct luaL_Reg lib[] = { static const struct luaL_Reg lib[] = {
{"connect", l_connect}, {"connect", l_connect},
{"query", l_query}, {"query", l_query},
{"query_a",l_async_query},
{"close", l_close}, {"close", l_close},
{"open_stream", l_open_stream}, {"open_stream", l_open_stream},
{"close_stream", l_close_stream}, {"close_stream", l_close_stream},

View File

@ -13,6 +13,11 @@ struct cb_param{
void * stream; void * stream;
}; };
struct async_query_callback_param{
lua_State* state;
int callback;
};
static int l_connect(lua_State *L){ static int l_connect(lua_State *L){
TAOS * taos=NULL; TAOS * taos=NULL;
const char* host; const char* host;
@ -56,6 +61,7 @@ static int l_connect(lua_State *L){
lua_settop(L,0); lua_settop(L,0);
taos_init(); taos_init();
lua_newtable(L); lua_newtable(L);
int table_index = lua_gettop(L); int table_index = lua_gettop(L);
@ -177,6 +183,58 @@ static int l_query(lua_State *L){
return 1; return 1;
} }
void async_query_callback(void *param, TAOS_RES *result, int code){
struct async_query_callback_param* p = (struct async_query_callback_param*) param;
//printf("\nin c,numfields:%d\n", numFields);
//printf("\nin c, code:%d\n", code);
lua_State *L = p->state;
lua_rawgeti(L, LUA_REGISTRYINDEX, p->callback);
lua_newtable(L);
int table_index = lua_gettop(L);
if( code < 0){
printf("failed, reason:%s\n", taos_errstr(result));
lua_pushinteger(L, -1);
lua_setfield(L, table_index, "code");
lua_pushstring(L,"something is wrong");// taos_errstr(taos));
lua_setfield(L, table_index, "error");
}else{
//printf("success to async query.\n");
const int affectRows = taos_affected_rows(result);
//printf(" affect rows:%d\r\n", affectRows);
lua_pushinteger(L, 0);
lua_setfield(L, table_index, "code");
lua_pushinteger(L, affectRows);
lua_setfield(L, table_index, "affected");
}
lua_call(L, 1, 0);
}
static int l_async_query(lua_State *L){
int r = luaL_ref(L, LUA_REGISTRYINDEX);
TAOS * taos = (TAOS*)lua_topointer(L,1);
const char * sqlstr = lua_tostring(L,2);
// int stime = luaL_checknumber(L,3);
lua_newtable(L);
int table_index = lua_gettop(L);
struct async_query_callback_param *p = malloc(sizeof(struct async_query_callback_param));
p->state = L;
p->callback=r;
// printf("r:%d, L:%d\n",r,L);
taos_query_a(taos,sqlstr,async_query_callback,p);
lua_pushnumber(L, 0);
lua_setfield(L, table_index, "code");
lua_pushstring(L, "ok");
lua_setfield(L, table_index, "error");
return 1;
}
void stream_cb(void *param, TAOS_RES *result, TAOS_ROW row){ void stream_cb(void *param, TAOS_RES *result, TAOS_ROW row){
struct cb_param* p = (struct cb_param*) param; struct cb_param* p = (struct cb_param*) param;
TAOS_FIELD *fields = taos_fetch_fields(result); TAOS_FIELD *fields = taos_fetch_fields(result);
@ -307,6 +365,7 @@ static int l_close(lua_State *L){
static const struct luaL_Reg lib[] = { static const struct luaL_Reg lib[] = {
{"connect", l_connect}, {"connect", l_connect},
{"query", l_query}, {"query", l_query},
{"query_a",l_async_query},
{"close", l_close}, {"close", l_close},
{"open_stream", l_open_stream}, {"open_stream", l_open_stream},
{"close_stream", l_close_stream}, {"close_stream", l_close_stream},

View File

@ -110,7 +110,25 @@ else
end end
end end
function callback(t) function async_query_callback(res)
if res.code ~=0 then
print("async_query_callback--- failed:"..res.error)
return
else
if(res.affected == 3) then
print("async_query_callback, insert records--- pass")
else
print("async_query_callback, insert records---failed: expect 3 affected records, actually affected "..res.affected)
end
end
end
driver.query_a(conn,"INSERT INTO therm1 VALUES ('2019-09-01 00:00:00.005', 100),('2019-09-01 00:00:00.006', 101),('2019-09-01 00:00:00.007', 102)", async_query_callback)
function stream_callback(t)
print("------------------------") print("------------------------")
print("continuous query result:") print("continuous query result:")
for key, value in pairs(t) do for key, value in pairs(t) do
@ -119,7 +137,7 @@ function callback(t)
end end
local stream local stream
res = driver.open_stream(conn,"SELECT COUNT(*) as count, AVG(degree) as avg, MAX(degree) as max, MIN(degree) as min FROM thermometer interval(2s) sliding(2s);)",0,callback) res = driver.open_stream(conn,"SELECT COUNT(*) as count, AVG(degree) as avg, MAX(degree) as max, MIN(degree) as min FROM thermometer interval(2s) sliding(2s);)",0, stream_callback)
if res.code ~=0 then if res.code ~=0 then
print("open stream--- failed:"..res.error) print("open stream--- failed:"..res.error)
return return
@ -146,4 +164,5 @@ while loop_index < 30 do
end end
driver.close_stream(stream) driver.close_stream(stream)
driver.close(conn) driver.close(conn)

View File

@ -29,8 +29,8 @@ pipeline {
agent none agent none
environment{ environment{
WK = '/var/lib/jenkins/workspace/TDinternal' WK = '/data/lib/jenkins/workspace/TDinternal'
WKC= '/var/lib/jenkins/workspace/TDinternal/community' WKC= '/data/lib/jenkins/workspace/TDinternal/community'
} }
stages { stages {

View File

@ -0,0 +1,73 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from shutil import which
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def isLuaInstalled(self):
if not which('lua'):
tdLog.exit("Lua not found!")
return False
else:
return True
def run(self):
tdSql.prepare()
# tdLog.info("Check if Lua installed")
# if not self.isLuaInstalled():
# sys.exit(1)
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
targetPath = buildPath + "/../tests/examples/lua"
tdLog.info(targetPath)
currentPath = os.getcwd()
os.chdir(targetPath)
os.system('./build.sh')
os.system('lua test.lua')
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
#tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -334,5 +334,6 @@ python3 ./test.py -f tag_lite/alter_tag.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
python3 test.py -f insert/insert_before_use_db.py
#======================p4-end=============== #======================p4-end===============

View File

@ -82,6 +82,8 @@ class TDTestCase:
tdSql.execute("import into tbx file \'%s\'"%(self.csvfile)) tdSql.execute("import into tbx file \'%s\'"%(self.csvfile))
tdSql.query('select * from tbx') tdSql.query('select * from tbx')
tdSql.checkRows(self.rows) tdSql.checkRows(self.rows)
#TD-4447 import the same csv twice
tdSql.execute("import into tbx file \'%s\'"%(self.csvfile))
def stop(self): def stop(self):
self.destroyCSVFile() self.destroyCSVFile()

View File

@ -0,0 +1,39 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.error('insert into tb values (now + 10m, 10)')
tdSql.prepare()
tdSql.error('insert into tb values (now + 10m, 10)')
tdSql.execute('drop database db')
tdSql.error('insert into tb values (now + 10m, 10)')
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -35,3 +35,5 @@ python3.8 ./test.py $1 -s && sleep 1
python3.8 ./test.py $1 -f client/client.py python3.8 ./test.py $1 -f client/client.py
python3.8 ./test.py $1 -s && sleep 1 python3.8 ./test.py $1 -s && sleep 1
# connector
python3.8 ./test.py $1 -f connector/lua.py

File diff suppressed because it is too large

View File

@ -24,6 +24,9 @@ sql drop database if exists $db
sql create database $db keep 36500 sql create database $db keep 36500
sql use $db sql use $db
print =====================================> td-4481
sql create database $db
print =====================================> test case for twa in single block print =====================================> test case for twa in single block
sql create table t1 (ts timestamp, k float); sql create table t1 (ts timestamp, k float);

View File

@ -18,6 +18,7 @@ system general/parser/gendata.sh
sql create table tbx (ts TIMESTAMP, collect_area NCHAR(12), device_id BINARY(16), imsi BINARY(16), imei BINARY(16), mdn BINARY(10), net_type BINARY(4), mno NCHAR(4), province NCHAR(10), city NCHAR(16), alarm BINARY(2)) sql create table tbx (ts TIMESTAMP, collect_area NCHAR(12), device_id BINARY(16), imsi BINARY(16), imei BINARY(16), mdn BINARY(10), net_type BINARY(4), mno NCHAR(4), province NCHAR(10), city NCHAR(16), alarm BINARY(2))
print ====== create tables success, starting import data print ====== create tables success, starting import data
sql import into tbx file '~/data.sql'
sql import into tbx file '~/data.sql' sql import into tbx file '~/data.sql'
sql select count(*) from tbx sql select count(*) from tbx

View File

@ -9,7 +9,7 @@ sql connect
print ======================== dnode1 start print ======================== dnode1 start
$dbPrefix = nest_query $dbPrefix = nest_db
$tbPrefix = nest_tb $tbPrefix = nest_tb
$mtPrefix = nest_mt $mtPrefix = nest_mt
$tbNum = 10 $tbNum = 10
@ -17,7 +17,6 @@ $rowNum = 10000
$totalNum = $tbNum * $rowNum $totalNum = $tbNum * $rowNum
print =============== nestquery.sim print =============== nestquery.sim
$i = 0 $i = 0
$db = $dbPrefix . $i $db = $dbPrefix . $i
$mt = $mtPrefix . $i $mt = $mtPrefix . $i

View File

@ -64,4 +64,5 @@ run general/parser/slimit_alter_tags.sim
run general/parser/udf.sim run general/parser/udf.sim
run general/parser/udf_dll.sim run general/parser/udf_dll.sim
run general/parser/udf_dll_stable.sim run general/parser/udf_dll_stable.sim
run general/parser/nestquery.sim