[td-255] merge develop.

commit d9f403ad3d

@@ -10,3 +10,6 @@
[submodule "tests/examples/rust"]
path = tests/examples/rust
url = https://github.com/songtianyi/tdengine-rust-bindings.git
[submodule "deps/jemalloc"]
path = deps/jemalloc
url = https://github.com/jemalloc/jemalloc

@@ -59,6 +59,11 @@ IF (TD_LINUX_64)
MESSAGE(STATUS "linux64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ADD_DEFINITIONS(-DUSE_LIBICONV)

IF (JEMALLOC_ENABLED)
ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
ENDIF ()

ENDIF ()

IF (TD_LINUX_32)

@@ -72,3 +72,8 @@ IF (${RANDOM_NETWORK_FAIL} MATCHES "true")
SET(TD_RANDOM_NETWORK_FAIL TRUE)
MESSAGE(STATUS "build with random-network-fail enabled")
ENDIF ()

IF (${JEMALLOC_ENABLED} MATCHES "true")
SET(TD_JEMALLOC_ENABLED TRUE)
MESSAGE(STATUS "build with jemalloc enabled")
ENDIF ()

@@ -19,3 +19,16 @@ ENDIF ()
IF (TD_DARWIN AND TD_MQTT)
ADD_SUBDIRECTORY(MQTT-C)
ENDIF ()

IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
MESSAGE("setup deps/jemalloc, current source dir:" ${CMAKE_CURRENT_SOURCE_DIR})
MESSAGE("binary dir:" ${CMAKE_BINARY_DIR})
include(ExternalProject)
ExternalProject_Add(jemalloc
PREFIX "jemalloc"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/
BUILD_COMMAND ${MAKE}
)
ENDIF ()

@@ -0,0 +1 @@
Subproject commit ea6b3e973b477b8061e0076bb257dbd7f3faa756

@@ -107,9 +107,9 @@ TDengine uses a master-slave model for synchronization; compared with the popular RAFT consensus

1. The application performs a basic validity check on the write request; if it passes, it stamps a version number (version, monotonically increasing) on the revised request packet
1. The application performs a basic validity check on the write request; if it passes, it stamps a version number (version, monotonically increasing) on the request packet
2. The application wraps the version-stamped write request in a WAL Head and writes it into the WAL (Write Ahead Log)
3. The application calls the API syncForwardToPeer; if multiple vnode B is in slave state, the sync module sends the data packet containing the WAL Head to vnode B in a Forward message; otherwise it does not forward.
3. The application calls the API syncForwardToPeer; if vnode B is in slave state, the sync module sends the data packet containing the WAL Head to vnode B in a Forward message; otherwise it does not forward.
4. After vnode B receives the Forward message, it invokes the callback writeToCache and hands the packet to the application for processing
5. After vnode B's application completes the write successfully, it must call syncAckForward to notify the sync module that the write succeeded.
6. If quorum is greater than 1, vnode B must wait for the application's acknowledgment; after receiving it, vnode B sends a Forward Response message to node A.
@@ -219,7 +219,7 @@ The Arbitrator program tarbitrator.c lives in the same directory as the replication module; when building the entire system

Differences:

- The election process differs: in Raft, when any node is a candidate it proactively sends vote requests to the other nodes; if more than half answer Yes, that candidate becomes the Leader and starts a new term. In TDengine's implementation, a node coming online, going offline, or changing role triggers status messages that propagate around the node group, and the election process is triggered only after the status within the group has become stable and consistent. Because the status is stable and consistent, each node makes the same decision based on the same status information; once a node meets the conditions for becoming master, it sets itself as master automatically, without needing approval from the other nodes. In TDengine, whenever a node detects that another node's role, or its own, has changed, it is broadcasting to the other nodes in the group. Raft has no such mechanism and therefore needs voting to resolve this.
- The election process differs: in Raft, when any node is a candidate it proactively sends vote requests to the other nodes; if more than half answer Yes, that candidate becomes the Leader and starts a new term. In TDengine's implementation, a node coming online, going offline, or changing role triggers status messages that propagate within the node group, and the election process is triggered only after the status within the group has become stable and consistent. Because the status is stable and consistent, each node makes the same decision based on the same status information; once a node meets the conditions for becoming master, it sets itself as master automatically, without needing approval from the other nodes. In TDengine, whenever a node detects that another node's role, or its own, has changed, it broadcasts to the other nodes in the group. Raft has no such mechanism and therefore needs voting to resolve this.
- For a WAL record, Raft uses term + index as the unique identifier, while TDengine uses only version (similar to index). In TDengine's implementation, using only version is entirely feasible, because TDengine's election mechanism has no notion of term.

If the whole virtual node group goes down and is restarted, but not all virtual nodes come back online, TDengine will not elect a master, because a node that has not come online may hold the data with the highest version. Under the RAFT protocol, a Leader is elected as long as more than half of the nodes are online.

@@ -343,7 +343,7 @@ TDengine uses a data-driven approach to write cached data to disk for persistence

For collected data there is usually a retention period, determined by the system configuration parameter keep. Data files older than the configured number of days are deleted automatically by the system to free storage space.

Given the two parameters days and keep, the total number of data files in a vnode is keep/days. The total number of data files should be neither too large nor too small; somewhere between 10 and 100 is appropriate. Based on this principle, a reasonable days can be chosen. In the current version the parameter keep can be modified, but once set, the parameter days cannot be changed.
Given the two parameters days and keep, the total number of data files in a vnode in a typical working state is `ceil(keep/days) + 1`. The total number of data files should be neither too large nor too small; somewhere between 10 and 100 is appropriate. Based on this principle, a reasonable days can be chosen. In the current version the parameter keep can be modified, but once set, the parameter days cannot be changed.
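As a worked instance of the formula above (the database name and parameter values are illustrative, not taken from this change): with keep=3650 and days=10, a vnode in steady state holds ceil(3650/10) + 1 = 366 data files.

```mysql
-- Illustrative only: retain data for 3650 days, with 10 days of data per file,
-- giving roughly ceil(3650 / 10) + 1 = 366 data files per vnode in steady state.
CREATE DATABASE demo KEEP 3650 DAYS 10;
```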

Within each data file, a table's data is stored block by block. A table can have one or more data-file blocks. Inside a block, data is stored column-wise and occupies a contiguous region of storage, which greatly improves read speed. The block size is governed by the system parameter maxRows (the maximum number of records per block), with a default of 4096. This value should be neither too large nor too small: if it is too large, locating the data for a specific time range takes longer and read speed suffers; if it is too small, the data-block index becomes too large, compression efficiency drops, and read speed also suffers.

@@ -516,7 +516,7 @@ conn.close()
- The _TDengineCursor_ class

See help(taos.TDengineCursor) in Python.
This class corresponds to the write and query operations performed by the client. In multi-threaded client scenarios, a cursor instance must be kept exclusive to a single thread and cannot be shared cross-thread; otherwise the returned results may be wrong.
This class corresponds to the write and query operations performed by the client. In multi-threaded client scenarios, a cursor instance must be kept exclusive to a single thread and must not be shared across threads; otherwise the returned results may be wrong.

- The _connect_ method

@@ -37,7 +37,7 @@ taos> DESCRIBE meters;
- Epoch Time: a timestamp can also be a long integer, representing the number of milliseconds since 1970-01-01 08:00:00.000
- Times can be added and subtracted; for example, now-2h means the query time shifted back by 2 hours (the most recent 2 hours). The time unit following the number can be u (microseconds), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), or w (weeks). For example, `select * from t1 where ts > now-2w and ts <= now-1w` queries exactly one week of data from two weeks ago. When specifying the time window (interval) of a down-sampling operation, the time units n (natural month) and y (natural year) can also be used.
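A small illustrative query for the down-sampling units mentioned above (the meters table and its current column are the documentation's running example; the aggregation itself is assumed, not part of this change):

```mysql
-- Illustrative only: average the current column per natural month (unit n).
SELECT AVG(current) FROM meters INTERVAL(1n);
```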

TDengine's default timestamp precision is milliseconds, but microsecond precision can be enabled by changing the configuration parameter enableMicrosecond.
TDengine's default timestamp precision is milliseconds, but microsecond precision can be enabled through the PRECISION parameter passed when executing CREATE DATABASE.
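A minimal sketch of that CREATE DATABASE option (the database name is hypothetical; only the PRECISION keyword comes from the sentence above):

```mysql
-- Illustrative only: store timestamps in this database with microsecond precision.
CREATE DATABASE demo PRECISION 'us';
```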

In TDengine, the following 10 data types can be used in the data model of a regular table.

@@ -400,6 +400,7 @@ TDengine's default timestamp precision is milliseconds, but microsecond precision can be enabled by changing the configuration parameter enableMicrosecond
tb2_name (tb2_field1_name, ...) [USING stb2_name TAGS (tag_value2, ...)] VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
Using automatic table creation, insert multiple records column-wise into tables tb1_name and tb2_name at the same time.
Note: the `(tb1_field1_name, ...)` part can be omitted, in which case the write uses full-column mode, that is, the data supplied in the VALUES part must explicitly provide a value for every column of the table. A full-column write is much faster than a write to specified columns, so writing all columns is recommended whenever possible; empty columns can be filled with NULL.
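A concrete sketch of this multi-table, auto-create insert (the subtable names, tag values, and row values are hypothetical; the meters super table is the documentation's running example):

```mysql
-- Illustrative only: create two subtables of meters on the fly and insert one row into each.
INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32)
            d21002 USING meters TAGS ('Beijing.Haidian', 3) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33);
```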
Starting with version 2.0.20.5, the subtable's column names no longer have to follow the subtable name; they can instead be placed between TAGS and VALUES, for example:
```mysql
INSERT INTO tb1_name [USING stb1_name TAGS (tag_value1, ...)] (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;

@@ -423,9 +424,9 @@ Query OK, 1 row(s) in set (0.001029s)
taos> SHOW TABLES;
Query OK, 0 row(s) in set (0.000946s)

taos> INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2);
taos> INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');

DB error: invalid SQL: keyword VALUES or FILE required
DB error: invalid SQL: 'a' (invalid timestamp) (0.039494s)

taos> SHOW TABLES;
table_name | created_time | columns | stable_name |

@@ -24,7 +24,7 @@ echo "compile_dir: ${compile_dir}"
echo "pkg_dir: ${pkg_dir}"

if [ -d ${pkg_dir} ]; then
rm -rf ${pkg_dir}
rm -rf ${pkg_dir}
fi
mkdir -p ${pkg_dir}
cd ${pkg_dir}

@@ -67,7 +67,41 @@ fi
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector ||:
cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||:

if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
install_user_local_path="/usr/local"
mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/
if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then
cp ${compile_dir}/build/bin/jemalloc.sh ${pkg_dir}${install_user_local_path}/bin/
fi
if [ -f ${compile_dir}/build/bin/jeprof ]; then
cp ${compile_dir}/build/bin/jeprof ${pkg_dir}${install_user_local_path}/bin/
fi
if [ -f ${compile_dir}/build/include/jemalloc/jemalloc.h ]; then
cp ${compile_dir}/build/include/jemalloc/jemalloc.h ${pkg_dir}${install_user_local_path}/include/jemalloc/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.so.2 ]; then
cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/
ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/
fi
if [ -f ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ]; then
cp ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ${pkg_dir}${install_user_local_path}/share/doc/jemalloc/
fi
if [ -f ${compile_dir}/build/share/man/man3/jemalloc.3 ]; then
cp ${compile_dir}/build/share/man/man3/jemalloc.3 ${pkg_dir}${install_user_local_path}/share/man/man3/
fi
fi

cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/
chmod 755 ${pkg_dir}/DEBIAN/*

@@ -22,11 +22,12 @@ cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
pagMode=full # [full | lite]
soMode=dynamic # [static | dynamic]
allocator=glibc # [glibc | jemalloc]
dbName=taos # [taos | power]
verNumber=""
verNumberComp="2.0.0.0"

while getopts "hv:V:c:o:l:s:d:n:m:" arg
while getopts "hv:V:c:o:l:s:d:a:n:m:" arg
do
case $arg in
v)

@@ -53,6 +54,10 @@ do
#echo "dbName=$OPTARG"
dbName=$(echo $OPTARG)
;;
a)
#echo "allocator=$OPTARG"
allocator=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)

@@ -71,6 +76,7 @@ do
echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] "
echo " -V [stable | beta] "
echo " -l [full | lite] "
echo " -a [glibc | jemalloc] "
echo " -s [static | dynamic] "
echo " -d [taos | power] "
echo " -n [version number] "

@@ -84,7 +90,7 @@ do
esac
done

echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} verNumber=${verNumber} verNumberComp=${verNumberComp}"
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp}"

curr_dir=$(pwd)

@@ -180,12 +186,18 @@ else
fi
cd ${compile_dir}

if [[ "$allocator" == "jemalloc" ]]; then
allocator_macro="-DJEMALLOC_ENABLED=true"
else
allocator_macro=""
fi

# check support cpu type
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then
if [ "$verMode" != "cluster" ]; then
cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode}
cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} ${allocator_macro}
else
cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp}
cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} ${allocator_macro}
fi
else
echo "input cpuType=${cpuType} error!!!"

@@ -1,4 +1,5 @@
%define homepath /usr/local/taos
%define userlocalpath /usr/local
%define cfg_install_dir /etc/taos
%define __strip /bin/true

@ -75,9 +76,53 @@ fi
|
|||
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
|
||||
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
|
||||
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
|
||||
cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector ||:
|
||||
cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||:
|
||||
cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples
|
||||
|
||||
|
||||
if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
|
||||
mkdir -p %{buildroot}%{userlocalpath}/bin
|
||||
mkdir -p %{buildroot}%{userlocalpath}/lib
|
||||
mkdir -p %{buildroot}%{userlocalpath}/lib/pkgconfig
|
||||
mkdir -p %{buildroot}%{userlocalpath}/include
|
||||
mkdir -p %{buildroot}%{userlocalpath}/include/jemalloc
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share/doc
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share/doc/jemalloc
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share/man
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share/man/man3
|
||||
|
||||
cp %{_compiledir}/build/bin/jemalloc-config %{buildroot}%{userlocalpath}/bin/
|
||||
if [ -f %{_compiledir}/build/bin/jemalloc.sh ]; then
|
||||
cp %{_compiledir}/build/bin/jemalloc.sh %{buildroot}%{userlocalpath}/bin/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/bin/jeprof ]; then
|
||||
cp %{_compiledir}/build/bin/jeprof %{buildroot}%{userlocalpath}/bin/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/include/jemalloc/jemalloc.h ]; then
|
||||
cp %{_compiledir}/build/include/jemalloc/jemalloc.h %{buildroot}%{userlocalpath}/include/jemalloc/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/lib/libjemalloc.so.2 ]; then
|
||||
cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/
|
||||
ln -sf libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/libjemalloc.so
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
|
||||
cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{userlocalpath}/lib/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
|
||||
cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{userlocalpath}/lib/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then
|
||||
cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{userlocalpath}/lib/pkgconfig/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/share/doc/jemalloc/jemalloc.html ]; then
|
||||
cp %{_compiledir}/build/share/doc/jemalloc/jemalloc.html %{buildroot}%{userlocalpath}/share/doc/jemalloc/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/share/man/man3/jemalloc.3 ]; then
|
||||
cp %{_compiledir}/build/share/man/man3/jemalloc.3 %{buildroot}%{userlocalpath}/share/man/man3/
|
||||
fi
|
||||
fi
|
||||
|
||||
#Scripts executed before installation
|
||||
%pre
|
||||
csudo=""
|
||||
|
|
|
@ -217,7 +217,7 @@ function install_lib() {
|
|||
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
|
||||
fi
|
||||
|
||||
#if [ "$verMode" == "cluster" ]; then
|
||||
#if [ "$verMode" == "cluster" ]; then
|
||||
# # Compatible with version 1.5
|
||||
# ${csudo} mkdir -p ${v15_java_app_dir}
|
||||
# ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar
|
||||
|
@ -227,6 +227,52 @@ function install_lib() {
|
|||
${csudo} ldconfig
|
||||
}
|
||||
|
||||
function install_jemalloc() {
|
||||
jemalloc_dir=${script_dir}/jemalloc
|
||||
|
||||
if [ -d ${jemalloc_dir} ]; then
|
||||
${csudo} /usr/bin/install -c -d /usr/local/bin
|
||||
|
||||
if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
|
||||
${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
|
||||
${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/bin/jeprof ]; then
|
||||
${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
|
||||
${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc
|
||||
${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
|
||||
${csudo} /usr/bin/install -c -d /usr/local/lib
|
||||
${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
|
||||
${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
|
||||
${csudo} /usr/bin/install -c -d /usr/local/lib
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
|
||||
${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig
|
||||
${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
|
||||
fi
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
|
||||
${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc
|
||||
${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
|
||||
${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
|
||||
${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function install_header() {
|
||||
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
|
||||
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
|
||||
|
@ -252,7 +298,7 @@ function add_newHostname_to_hosts() {
|
|||
|
||||
function set_hostname() {
|
||||
echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:"
|
||||
read newHostname
|
||||
read newHostname
|
||||
while true; do
|
||||
if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then
|
||||
break
|
||||
|
@ -360,27 +406,27 @@ function local_fqdn_check() {
|
|||
|
||||
while true
|
||||
do
|
||||
read -r -p "Set hostname now? [Y/n] " input
|
||||
if [ ! -n "$input" ]; then
|
||||
set_hostname
|
||||
break
|
||||
else
|
||||
case $input in
|
||||
[yY][eE][sS]|[yY])
|
||||
set_hostname
|
||||
break
|
||||
;;
|
||||
read -r -p "Set hostname now? [Y/n] " input
|
||||
if [ ! -n "$input" ]; then
|
||||
set_hostname
|
||||
break
|
||||
else
|
||||
case $input in
|
||||
[yY][eE][sS]|[yY])
|
||||
set_hostname
|
||||
break
|
||||
;;
|
||||
|
||||
[nN][oO]|[nN])
|
||||
set_ipAsFqdn
|
||||
break
|
||||
;;
|
||||
[nN][oO]|[nN])
|
||||
set_ipAsFqdn
|
||||
break
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Invalid input..."
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
*)
|
||||
echo "Invalid input..."
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
@ -589,7 +635,7 @@ function clean_service_on_systemd() {
|
|||
fi
|
||||
${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null
|
||||
${csudo} rm -f ${nginx_service_config}
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# taos:2345:respawn:/etc/init.d/taosd start
|
||||
|
@ -674,7 +720,7 @@ function install_service_on_systemd() {
|
|||
${csudo} systemctl enable nginxd
|
||||
fi
|
||||
${csudo} systemctl start nginxd
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function install_service() {
|
||||
|
@ -776,6 +822,7 @@ function update_TDengine() {
|
|||
install_log
|
||||
install_header
|
||||
install_lib
|
||||
install_jemalloc
|
||||
if [ "$pagMode" != "lite" ]; then
|
||||
install_connector
|
||||
fi
|
||||
|
@ -786,7 +833,7 @@ function update_TDengine() {
|
|||
install_config
|
||||
|
||||
openresty_work=false
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
# Check if openresty is installed
|
||||
# Check if nginx is installed successfully
|
||||
if type curl &> /dev/null; then
|
||||
|
@ -797,7 +844,7 @@ function update_TDengine() {
|
|||
echo -e "\033[44;31;5mNginx for TDengine does not work! Please try again!\033[0m"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
#echo
|
||||
#echo -e "\033[44;32;1mTDengine is updated successfully!${NC}"
|
||||
|
@ -840,7 +887,7 @@ function install_TDengine() {
|
|||
|
||||
echo -e "${GREEN}Start to install TDengine...${NC}"
|
||||
|
||||
install_main_path
|
||||
install_main_path
|
||||
|
||||
if [ -z $1 ]; then
|
||||
install_data
|
||||
|
@ -894,23 +941,23 @@ function install_TDengine() {
|
|||
#fi
|
||||
|
||||
if [ ! -z "$firstEp" ]; then
|
||||
tmpFqdn=${firstEp%%:*}
|
||||
substr=":"
|
||||
if [[ $firstEp =~ $substr ]];then
|
||||
tmpPort=${firstEp#*:}
|
||||
else
|
||||
tmpPort=""
|
||||
fi
|
||||
if [[ "$tmpPort" != "" ]];then
|
||||
echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}"
|
||||
fi
|
||||
echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
|
||||
echo
|
||||
tmpFqdn=${firstEp%%:*}
|
||||
substr=":"
|
||||
if [[ $firstEp =~ $substr ]];then
|
||||
tmpPort=${firstEp#*:}
|
||||
else
|
||||
tmpPort=""
|
||||
fi
|
||||
if [[ "$tmpPort" != "" ]];then
|
||||
echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}"
|
||||
fi
|
||||
echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
|
||||
echo
|
||||
elif [ ! -z "$serverFqdn" ]; then
|
||||
echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $serverFqdn${GREEN_DARK} to login into TDengine server${NC}"
|
||||
echo
|
||||
echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $serverFqdn${GREEN_DARK} to login into TDengine server${NC}"
|
||||
echo
|
||||
fi
|
||||
|
||||
echo -e "\033[44;32;1mTDengine is installed successfully!${NC}"
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
set -e
|
||||
# set -x
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
# -----------------------Variables definition
|
||||
source_dir=$1
|
||||
binary_dir=$2
|
||||
osType=$3
|
||||
|
@ -176,6 +176,49 @@ function install_bin() {
|
|||
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || :
|
||||
fi
|
||||
}
|
||||
function install_jemalloc() {
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
/usr/bin/install -c -d /usr/local/bin
|
||||
|
||||
if [ -f ${binary_dir}/build/bin/jemalloc-config ]; then
|
||||
/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jemalloc-config /usr/local/bin
|
||||
fi
|
||||
if [ -f ${binary_dir}/build/bin/jemalloc.sh ]; then
|
||||
/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jemalloc.sh /usr/local/bin
|
||||
fi
|
||||
if [ -f ${binary_dir}/build/bin/jeprof ]; then
|
||||
/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jeprof /usr/local/bin
|
||||
fi
|
||||
if [ -f ${binary_dir}/build/include/jemalloc/jemalloc.h ]; then
|
||||
/usr/bin/install -c -d /usr/local/include/jemalloc
|
||||
/usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
|
||||
fi
|
||||
if [ -f ${binary_dir}/build/lib/libjemalloc.so.2 ]; then
|
||||
/usr/bin/install -c -d /usr/local/lib
|
||||
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.so.2 /usr/local/lib
|
||||
ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
|
||||
/usr/bin/install -c -d /usr/local/lib
|
||||
if [ -f ${binary_dir}/build/lib/libjemalloc.a ]; then
|
||||
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ]; then
|
||||
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${binary_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
|
||||
/usr/bin/install -c -d /usr/local/lib/pkgconfig
|
||||
/usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
|
||||
fi
|
||||
fi
|
||||
if [ -f ${binary_dir}/build/share/doc/jemalloc/jemalloc.html ]; then
|
||||
/usr/bin/install -c -d /usr/local/share/doc/jemalloc
|
||||
/usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
|
||||
fi
|
||||
if [ -f ${binary_dir}/build/share/man/man3/jemalloc.3 ]; then
|
||||
/usr/bin/install -c -d /usr/local/share/man/man3
|
||||
/usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3 /usr/local/share/man/man3
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function install_lib() {
|
||||
# Remove links
|
||||
|
@ -199,6 +242,8 @@ function install_lib() {
|
|||
${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
|
||||
fi
|
||||
|
||||
install_jemalloc
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo} ldconfig
|
||||
fi
|
||||
|
@ -431,13 +476,13 @@ function install_TDengine() {
|
|||
# Start to install
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
echo -e "${GREEN}Start to install TDEngine...${NC}"
|
||||
else
|
||||
echo -e "${GREEN}Start to install TDEngine Client ...${NC}"
|
||||
else
|
||||
echo -e "${GREEN}Start to install TDEngine Client ...${NC}"
|
||||
fi
|
||||
|
||||
install_main_path
|
||||
install_main_path
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
install_data
|
||||
fi
|
||||
install_log
|
||||
|
|
|
@ -30,7 +30,7 @@ else
|
|||
install_dir="${release_dir}/TDengine-server-${version}"
|
||||
fi
|
||||
|
||||
# Directories and files.
|
||||
# Directories and files
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
strip ${build_dir}/bin/taosd
|
||||
strip ${build_dir}/bin/taos
|
||||
|
@ -73,6 +73,39 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taos
|
|||
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
|
||||
|
||||
if [ -f ${build_dir}/bin/jemalloc-config ]; then
|
||||
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
|
||||
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
|
||||
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
|
||||
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
|
||||
fi
|
||||
if [ -f ${build_dir}/bin/jeprof ]; then
|
||||
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
|
||||
fi
|
||||
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
|
||||
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
|
||||
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
|
||||
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
|
||||
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
|
||||
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
|
||||
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
|
||||
fi
|
||||
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
|
||||
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
|
||||
fi
|
||||
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
|
||||
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >> remove_temp.sh
|
||||
mv remove_temp.sh ${install_dir}/bin/remove.sh
|
||||
|
|
|
@ -13,8 +13,8 @@
|
|||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef TDENGINE_TSCLOCALMERGE_H
|
||||
#define TDENGINE_TSCLOCALMERGE_H
|
||||
#ifndef TDENGINE_TSCGLOBALMERGE_H
|
||||
#define TDENGINE_TSCGLOBALMERGE_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
|
@ -24,7 +24,7 @@ extern "C" {
|
|||
#include "qFill.h"
|
||||
#include "taosmsg.h"
|
||||
#include "tlosertree.h"
|
||||
#include "tsclient.h"
|
||||
#include "qExecutor.h"
|
||||
|
||||
#define MAX_NUM_OF_SUBQUERY_RETRY 3
|
||||
|
||||
|
@ -38,7 +38,7 @@ typedef struct SLocalDataSource {
|
|||
tFilePage filePage;
|
||||
} SLocalDataSource;
|
||||
|
||||
typedef struct SLocalMerger {
|
||||
typedef struct SGlobalMerger {
|
||||
SLocalDataSource **pLocalDataSrc;
|
||||
int32_t numOfBuffer;
|
||||
int32_t numOfCompleted;
|
||||
|
@ -48,20 +48,22 @@ typedef struct SLocalMerger {
|
|||
tOrderDescriptor *pDesc;
|
||||
tExtMemBuffer **pExtMemBuffer; // disk-based buffer
|
||||
char *buf; // temp buffer
|
||||
} SLocalMerger;
|
||||
} SGlobalMerger;
|
||||
|
||||
struct SSqlObj;
|
||||
|
||||
typedef struct SRetrieveSupport {
|
||||
tExtMemBuffer ** pExtMemBuffer; // for build loser tree
|
||||
tOrderDescriptor *pOrderDescriptor;
|
||||
int32_t subqueryIndex; // index of current vnode in vnode list
|
||||
SSqlObj * pParentSql;
|
||||
struct SSqlObj *pParentSql;
|
||||
tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to
|
||||
uint32_t numOfRetry; // record the number of retry times
|
||||
} SRetrieveSupport;
|
||||
|
||||
int32_t tscLocalReducerEnvCreate(SQueryInfo* pQueryInfo, tExtMemBuffer ***pMemBuffer, int32_t numOfSub, tOrderDescriptor **pDesc, uint32_t nBufferSize, int64_t id);
|
||||
int32_t tscCreateGlobalMergerEnv(SQueryInfo* pQueryInfo, tExtMemBuffer ***pMemBuffer, int32_t numOfSub, tOrderDescriptor **pDesc, uint32_t nBufferSize, int64_t id);
|
||||
|
||||
void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, int32_t numOfVnodes);
|
||||
void tscDestroyGlobalMergerEnv(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, int32_t numOfVnodes);
|
||||
|
||||
int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, void *data,
|
||||
int32_t numOfRows, int32_t orderType);
|
||||
|
@ -71,13 +73,13 @@ int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tF
|
|||
/*
|
||||
* create local reducer to launch the second-stage reduce process at client site
|
||||
*/
|
||||
int32_t tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
|
||||
SQueryInfo *pQueryInfo, SLocalMerger **pMerger, int64_t id);
|
||||
int32_t tscCreateGlobalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
|
||||
SQueryInfo *pQueryInfo, SGlobalMerger **pMerger, int64_t id);
|
||||
|
||||
void tscDestroyLocalMerger(SLocalMerger* pLocalMerger);
|
||||
void tscDestroyGlobalMerger(SGlobalMerger* pMerger);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // TDENGINE_TSCLOCALMERGE_H
|
||||
#endif // TDENGINE_TSCGLOBALMERGE_H
|
|
@ -20,13 +20,13 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "tsched.h"
|
||||
#include "exception.h"
|
||||
#include "os.h"
|
||||
#include "qExtbuffer.h"
|
||||
#include "taosdef.h"
|
||||
#include "tbuffer.h"
|
||||
#include "tscLocalMerge.h"
|
||||
#include "tscGlobalmerge.h"
|
||||
#include "tsched.h"
|
||||
#include "tsclient.h"
|
||||
|
||||
#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \
|
||||
|
@ -63,7 +63,7 @@ typedef struct SJoinSupporter {
|
|||
SArray* exprList;
|
||||
SFieldInfo fieldsInfo;
|
||||
STagCond tagCond;
|
||||
SGroupbyExpr groupInfo; // group by info
|
||||
SGroupbyExpr groupInfo; // group by info
|
||||
struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array
|
||||
FILE* f; // temporary file in order to create TSBuf
|
||||
char path[PATH_MAX]; // temporary file path, todo dynamic allocate memory
|
||||
|
@ -111,7 +111,7 @@ void* tscDestroyUdfArrayList(SArray* pUdfList);
|
|||
void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable, bool removeMeta);
|
||||
|
||||
int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock);
|
||||
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap);
|
||||
int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBlockMap);
|
||||
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, SName* pName, STableMeta* pTableMeta,
|
||||
STableDataBlocks** dataBlocks, SArray* pBlockList);
|
||||
|
||||
|
@ -124,6 +124,8 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
|
|||
*/
|
||||
bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
|
||||
bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
|
||||
bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo);
|
||||
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
|
||||
bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo);
|
||||
bool tscGroupbyColumn(SQueryInfo* pQueryInfo);
|
||||
bool tscIsTopBotQuery(SQueryInfo* pQueryInfo);
|
||||
|
@ -286,6 +288,7 @@ void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src);
|
|||
SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, int32_t cmd);
|
||||
|
||||
void registerSqlObj(SSqlObj* pSql);
|
||||
void tscInitResForMerge(SSqlRes* pRes);
|
||||
|
||||
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t fp, void* param, int32_t cmd, SSqlObj* pPrevSql);
|
||||
void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex);
|
||||
|
@ -330,12 +333,15 @@ SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo);
|
|||
int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr);
|
||||
|
||||
void tsCreateSQLFunctionCtx(SQueryInfo* pQueryInfo, SQLFunctionCtx* pCtx, SSchema* pSchema);
|
||||
void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, SExprInfo* pExprs, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pOperator, char* sql, void* addr, int32_t stage);
|
||||
void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pOperator, char* sql, void* addr, int32_t stage);
|
||||
|
||||
void* malloc_throw(size_t size);
|
||||
void* calloc_throw(size_t nmemb, size_t size);
|
||||
char* strdup_throw(const char* str);
|
||||
|
||||
bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src);
|
||||
SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -1,93 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef TDENGINE_TSCHEMAUTIL_H
|
||||
#define TDENGINE_TSCHEMAUTIL_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "taosmsg.h"
|
||||
#include "tsclient.h"
|
||||
#include "ttoken.h"
|
||||
|
||||
/**
|
||||
* get the number of tags of this table
|
||||
* @param pTableMeta
|
||||
* @return
|
||||
*/
|
||||
int32_t tscGetNumOfTags(const STableMeta* pTableMeta);
|
||||
|
||||
/**
|
||||
* get the number of columns of this table
|
||||
* @param pTableMeta
|
||||
* @return
|
||||
*/
|
||||
int32_t tscGetNumOfColumns(const STableMeta* pTableMeta);
|
||||
|
||||
/**
|
||||
* get the basic info of this table
|
||||
* @param pTableMeta
|
||||
* @return
|
||||
*/
|
||||
STableComInfo tscGetTableInfo(const STableMeta* pTableMeta);
|
||||
|
||||
/**
|
||||
* get the schema
|
||||
* @param pTableMeta
|
||||
* @return
|
||||
*/
|
||||
SSchema* tscGetTableSchema(const STableMeta* pTableMeta);
|
||||
|
||||
/**
|
||||
* get the tag schema
|
||||
* @param pMeta
|
||||
* @return
|
||||
*/
|
||||
SSchema *tscGetTableTagSchema(const STableMeta *pMeta);
|
||||
|
||||
/**
|
||||
* get the column schema according to the column index
|
||||
* @param pMeta
|
||||
* @param colIndex
|
||||
* @return
|
||||
*/
|
||||
SSchema *tscGetTableColumnSchema(const STableMeta *pMeta, int32_t colIndex);
|
||||
|
||||
/**
|
||||
* get the column schema according to the column id
|
||||
* @param pTableMeta
|
||||
* @param colId
|
||||
* @return
|
||||
*/
|
||||
SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId);
|
||||
|
||||
/**
|
||||
* create the table meta from the msg
|
||||
* @param pTableMetaMsg
|
||||
* @param size size of the table meta
|
||||
* @return
|
||||
*/
|
||||
STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg);
|
||||
|
||||
bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src);
|
||||
SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // TDENGINE_TSCHEMAUTIL_H
|
|
@ -40,17 +40,9 @@ extern "C" {
|
|||
|
||||
// forward declaration
|
||||
struct SSqlInfo;
|
||||
struct SLocalMerger;
|
||||
|
||||
typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int32_t numOfRows);
|
||||
|
||||
typedef struct STableComInfo {
|
||||
uint8_t numOfTags;
|
||||
uint8_t precision;
|
||||
int16_t numOfColumns;
|
||||
int32_t rowSize;
|
||||
} STableComInfo;
|
||||
|
||||
typedef struct SNewVgroupInfo {
|
||||
int32_t vgId;
|
||||
int8_t inUse;
|
||||
|
@ -66,34 +58,6 @@ typedef struct CChildTableMeta {
|
|||
uint64_t suid; // super table id
|
||||
} CChildTableMeta;
|
||||
|
||||
typedef struct STableMeta {
|
||||
int32_t vgId;
|
||||
STableId id;
|
||||
uint8_t tableType;
|
||||
char sTableName[TSDB_TABLE_FNAME_LEN]; // super table name
|
||||
uint64_t suid; // super table id
|
||||
int16_t sversion;
|
||||
int16_t tversion;
|
||||
STableComInfo tableInfo;
|
||||
SSchema schema[]; // if the table is TSDB_CHILD_TABLE, schema is acquired by super table meta info
|
||||
} STableMeta;
|
||||
|
||||
typedef struct STableMetaInfo {
|
||||
STableMeta *pTableMeta; // table meta, cached in client side and acquired by name
|
||||
uint32_t tableMetaSize;
|
||||
SVgroupsInfo *vgroupList;
|
||||
SArray *pVgroupTables; // SArray<SVgroupTableInfo>
|
||||
|
||||
/*
|
||||
* 1. keep the vgroup index during the multi-vnode super table projection query
|
||||
* 2. keep the vgroup index for multi-vnode insertion
|
||||
*/
|
||||
int32_t vgroupIndex;
|
||||
SName name;
|
||||
char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql
|
||||
SArray *tagColList; // SArray<SColumn*>, involved tag columns
|
||||
} STableMetaInfo;
|
||||
|
||||
typedef struct SColumnIndex {
|
||||
int16_t tableIndex;
|
||||
int16_t columnIndex;
|
||||
|
@ -111,44 +75,6 @@ typedef struct SInternalField {
|
|||
SExprInfo *pExpr;
|
||||
} SInternalField;
|
||||
|
||||
typedef struct SFieldInfo {
|
||||
int16_t numOfOutput; // number of column in result
|
||||
TAOS_FIELD* final;
|
||||
SArray *internalField; // SArray<SInternalField>
|
||||
} SFieldInfo;
|
||||
|
||||
typedef struct SCond {
|
||||
uint64_t uid;
|
||||
int32_t len; // length of tag query condition data
|
||||
char * cond;
|
||||
} SCond;
|
||||
|
||||
typedef struct SJoinNode {
|
||||
uint64_t uid;
|
||||
int16_t tagColId;
|
||||
SArray* tsJoin;
|
||||
SArray* tagJoin;
|
||||
} SJoinNode;
|
||||
|
||||
typedef struct SJoinInfo {
|
||||
bool hasJoin;
|
||||
SJoinNode *joinTables[TSDB_MAX_JOIN_TABLE_NUM];
|
||||
} SJoinInfo;
|
||||
|
||||
typedef struct STagCond {
|
||||
// relation between tbname list and query condition, including : TK_AND or TK_OR
|
||||
int16_t relType;
|
||||
|
||||
// tbname query condition, only support tbname query condition on one table
|
||||
SCond tbnameCond;
|
||||
|
||||
// join condition, only support two tables join currently
|
||||
SJoinInfo joinInfo;
|
||||
|
||||
// for different table, the query condition must be seperated
|
||||
SArray *pCond;
|
||||
} STagCond;
|
||||
|
||||
typedef struct SParamInfo {
|
||||
int32_t idx;
|
||||
uint8_t type;
|
||||
|
@ -191,59 +117,6 @@ typedef struct STableDataBlocks {
|
|||
SParamInfo *params;
|
||||
} STableDataBlocks;
|
||||
|
||||
typedef struct SQueryInfo {
|
||||
int16_t command; // the command may be different for each subclause, so keep it seperately.
|
||||
uint32_t type; // query/insert type
|
||||
STimeWindow window; // the whole query time window
|
||||
|
||||
SInterval interval; // tumble time window
|
||||
SSessionWindow sessionWindow; // session time window
|
||||
|
||||
SGroupbyExpr groupbyExpr; // groupby tags info
|
||||
SArray * colList; // SArray<SColumn*>
|
||||
SFieldInfo fieldsInfo;
|
||||
SArray * exprList; // SArray<SExprInfo*>
|
||||
SArray * exprList1; // final exprlist in case of arithmetic expression exists
|
||||
SLimitVal limit;
|
||||
SLimitVal slimit;
|
||||
STagCond tagCond;
|
||||
|
||||
SOrderVal order;
|
||||
int16_t fillType; // final result fill type
|
||||
int16_t numOfTables;
|
||||
STableMetaInfo **pTableMetaInfo;
|
||||
struct STSBuf *tsBuf;
|
||||
int64_t * fillVal; // default value for fill
|
||||
char * msg; // pointer to the pCmd->payload to keep error message temporarily
|
||||
int64_t clauseLimit; // limit for current sub clause
|
||||
|
||||
int64_t prjOffset; // offset value in the original sql expression, only applied at client side
|
||||
int64_t vgroupLimit; // table limit in case of super table projection query + global order + limit
|
||||
|
||||
int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
|
||||
int16_t resColumnId; // result column id
|
||||
bool distinctTag; // distinct tag or not
|
||||
int32_t round; // 0/1/....
|
||||
int32_t bufLen;
|
||||
char* buf;
|
||||
SQInfo* pQInfo; // global merge operator
|
||||
SQueryAttr* pQueryAttr; // query object
|
||||
|
||||
struct SQueryInfo *sibling; // sibling
|
||||
SArray *pUpstream; // SArray<struct SQueryInfo>
|
||||
struct SQueryInfo *pDownstream;
|
||||
int32_t havingFieldNum;
|
||||
bool stableQuery;
|
||||
bool groupbyColumn;
|
||||
bool simpleAgg;
|
||||
bool arithmeticOnAgg;
|
||||
bool projectionQuery;
|
||||
bool hasFilter;
|
||||
bool onlyTagQuery;
|
||||
bool globalMerge; // need global merge
|
||||
SArray *pUdfInfo; // user defined function information SArray<SUdfInfo>
|
||||
} SQueryInfo;
|
||||
|
||||
typedef struct {
|
||||
STableMeta *pTableMeta;
|
||||
SVgroupsInfo *pVgroupInfo;
|
||||
|
@ -257,9 +130,13 @@ typedef struct SInsertStatementParam {
|
|||
int8_t schemaAttached; // denote if submit block is built with table schema or not
|
||||
STagData tagData; // NOTE: pTagData->data is used as a variant length array
|
||||
|
||||
int32_t batchSize; // for parameter ('?') binding and batch processing
|
||||
int32_t numOfParams;
|
||||
|
||||
char msg[512]; // error message
|
||||
char *sql; // current sql statement position
|
||||
uint32_t insertType; // insert data from [file|sql statement| bound statement]
|
||||
uint64_t objectId; // sql object id
|
||||
char *sql; // current sql statement position
|
||||
} SInsertStatementParam;
|
||||
|
||||
// TODO extract sql parser supporter
|
||||
|
@ -268,15 +145,10 @@ typedef struct {
|
|||
uint8_t msgType;
|
||||
SInsertStatementParam insertParam;
|
||||
char reserve1[3]; // fix bus error on arm32
|
||||
int32_t count; // todo remove it
|
||||
bool subCmd;
|
||||
|
||||
union {
|
||||
int32_t count;
|
||||
};
|
||||
|
||||
char * curSql; // current sql, resume position of sql after parsing paused
|
||||
char reserve2[3]; // fix bus error on arm32
|
||||
|
||||
int16_t numOfCols;
|
||||
char reserve3[2]; // fix bus error on arm32
|
||||
uint32_t allocSize;
|
||||
|
@ -287,8 +159,6 @@ typedef struct {
|
|||
SQueryInfo *pQueryInfo;
|
||||
SQueryInfo *active; // current active query info
|
||||
int32_t batchSize; // for parameter ('?') binding and batch processing
|
||||
int32_t numOfParams;
|
||||
STagData tagData; // NOTE: pTagData->data is used as a variant length array
|
||||
int32_t resColumnId;
|
||||
} SSqlCmd;
|
||||
|
||||
|
@ -324,7 +194,7 @@ typedef struct {
|
|||
|
||||
TAOS_FIELD* final;
|
||||
SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions
|
||||
struct SLocalMerger *pLocalMerger;
|
||||
struct SGlobalMerger *pMerger;
|
||||
} SSqlRes;
|
||||
|
||||
typedef struct {
|
||||
|
@ -451,7 +321,7 @@ void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo);
|
|||
void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock);
|
||||
|
||||
void handleDownstreamOperator(SSqlObj** pSqlList, int32_t numOfUpstream, SQueryInfo* px, SSqlRes* pOutput);
|
||||
void destroyTableNameList(SSqlCmd* pCmd);
|
||||
void destroyTableNameList(SInsertStatementParam* pInsertParam);
|
||||
|
||||
void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
|
||||
|
||||
|
@ -483,7 +353,7 @@ void waitForQueryRsp(void *param, TAOS_RES *tres, int code);
|
|||
void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, __async_cb_func_t fp, void *param, const char *sqlstr, size_t sqlLen);
|
||||
|
||||
void tscImportDataFromFile(SSqlObj *pSql);
|
||||
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen);
|
||||
struct SGlobalMerger* tscInitResObjForLocalQuery(int32_t numOfRes, int32_t rowLen, uint64_t id);
|
||||
bool tscIsUpdateQuery(SSqlObj* pSql);
|
||||
char* tscGetSqlStr(SSqlObj* pSql);
|
||||
bool tscIsQueryWithLimit(SSqlObj* pSql);
|
||||
|
@ -493,7 +363,7 @@ void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32
|
|||
|
||||
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
|
||||
|
||||
int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql);
|
||||
int32_t tscInvalidOperationMsg(char *msg, const char *additionalInfo, const char *sql);
|
||||
int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* sql);
|
||||
|
||||
int32_t tscValidateSqlInfo(SSqlObj *pSql, struct SSqlInfo *pInfo);
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
#include "tscSubquery.h"
|
||||
#include "tscUtil.h"
|
||||
#include "tsched.h"
|
||||
#include "tschemautil.h"
|
||||
#include "qTableMeta.h"
|
||||
#include "tsclient.h"
|
||||
|
||||
static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOfRows);
|
||||
|
@ -58,7 +58,6 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para
|
|||
strntolower(pSql->sqlstr, sqlstr, (int32_t)sqlLen);
|
||||
|
||||
tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
|
||||
pCmd->curSql = pSql->sqlstr;
|
||||
pCmd->resColumnId = TSDB_RES_COL_ID;
|
||||
|
||||
int32_t code = tsParseSql(pSql, true);
|
||||
|
@ -221,7 +220,7 @@ void taos_fetch_rows_a(TAOS_RES *tres, __async_cb_func_t fp, void *param) {
|
|||
|
||||
tscResetForNextRetrieve(pRes);
|
||||
|
||||
// handle the sub queries of join query
|
||||
// handle outer query based on the already retrieved nest query results.
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
|
||||
if (pQueryInfo->pUpstream != NULL && taosArrayGetSize(pQueryInfo->pUpstream) > 0) {
|
||||
SSchedMsg schedMsg = {0};
|
||||
|
|
|
@ -13,15 +13,19 @@
|
|||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "tscLocalMerge.h"
|
||||
#include "tscSubquery.h"
|
||||
#include "os.h"
|
||||
#include "texpr.h"
|
||||
#include "tlosertree.h"
|
||||
|
||||
#include "tscGlobalmerge.h"
|
||||
#include "tscSubquery.h"
|
||||
#include "tscLog.h"
|
||||
#include "tsclient.h"
|
||||
#include "qUtil.h"
|
||||
|
||||
#define COLMODEL_GET_VAL(data, schema, rowId, colId) \
|
||||
(data + (schema)->pFields[colId].offset * ((schema)->capacity) + (rowId) * (schema)->pFields[colId].field.bytes)
|
||||
|
||||
|
||||
typedef struct SCompareParam {
|
||||
SLocalDataSource **pLocalData;
|
||||
tOrderDescriptor * pDesc;
|
||||
|
@ -29,9 +33,18 @@ typedef struct SCompareParam {
|
|||
int32_t groupOrderType;
|
||||
} SCompareParam;
|
||||
|
||||
bool needToMergeRv(SSDataBlock* pBlock, SArray* columnIndex, int32_t index, char **buf);
|
||||
static bool needToMerge(SSDataBlock* pBlock, SArray* columnIndexList, int32_t index, char **buf) {
|
||||
int32_t ret = 0;
|
||||
size_t size = taosArrayGetSize(columnIndexList);
|
||||
if (size > 0) {
|
||||
ret = compare_aRv(pBlock, columnIndexList, (int32_t) size, index, buf, TSDB_ORDER_ASC);
|
||||
}
|
||||
|
||||
int32_t treeComparator(const void *pLeft, const void *pRight, void *param) {
|
||||
// if ret == 0, means the result belongs to the same group
|
||||
return (ret == 0);
|
||||
}
|
||||
|
||||
static int32_t treeComparator(const void *pLeft, const void *pRight, void *param) {
|
||||
int32_t pLeftIdx = *(int32_t *)pLeft;
|
||||
int32_t pRightIdx = *(int32_t *)pRight;
|
||||
|
||||
|
@ -57,16 +70,16 @@ int32_t treeComparator(const void *pLeft, const void *pRight, void *param) {
|
|||
}
|
||||
}
|
||||
|
||||
int32_t tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
|
||||
SQueryInfo* pQueryInfo, SLocalMerger **pMerger, int64_t id) {
|
||||
int32_t tscCreateGlobalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
|
||||
SQueryInfo* pQueryInfo, SGlobalMerger **pMerger, int64_t id) {
|
||||
if (pMemBuffer == NULL) {
|
||||
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, numOfBuffer);
|
||||
tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
|
||||
tscError("0x%"PRIx64" %p pMemBuffer is NULL", id, pMemBuffer);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
}
|
||||
|
||||
if (pDesc->pColumnModel == NULL) {
|
||||
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, numOfBuffer);
|
||||
tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
|
||||
tscError("0x%"PRIx64" no local buffer or intermediate result format model", id);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
}
|
||||
|
@ -83,7 +96,7 @@ int32_t tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tO
|
|||
}
|
||||
|
||||
if (numOfFlush == 0 || numOfBuffer == 0) {
|
||||
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, numOfBuffer);
|
||||
tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
|
||||
tscDebug("0x%"PRIx64" no data to retrieve", id);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -92,15 +105,15 @@ int32_t tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tO
|
|||
tscError("0x%"PRIx64" Invalid value of buffer capacity %d and page size %d ", id, pDesc->pColumnModel->capacity,
|
||||
pMemBuffer[0]->pageSize);
|
||||
|
||||
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, numOfBuffer);
|
||||
tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
}
|
||||
|
||||
*pMerger = (SLocalMerger *) calloc(1, sizeof(SLocalMerger));
|
||||
*pMerger = (SGlobalMerger *) calloc(1, sizeof(SGlobalMerger));
|
||||
if ((*pMerger) == NULL) {
|
||||
tscError("0x%"PRIx64" failed to create local merge structure, out of memory", id);
|
||||
|
||||
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, numOfBuffer);
|
||||
tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
|
@ -160,7 +173,7 @@ int32_t tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tO
|
|||
// no data actually, no need to merge result.
|
||||
if (idx == 0) {
|
||||
tscDebug("0x%"PRIx64" retrieved no data", id);
|
||||
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, numOfBuffer);
|
||||
tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -198,7 +211,7 @@ int32_t tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tO
|
|||
}
|
||||
|
||||
// restore the limitation value at the last stage
|
||||
if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
|
||||
if (pQueryInfo->orderProjectQuery) {
|
||||
pQueryInfo->limit.limit = pQueryInfo->clauseLimit;
|
||||
pQueryInfo->limit.offset = pQueryInfo->prjOffset;
|
||||
}
|
||||
|
@ -297,28 +310,28 @@ int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePa
|
|||
return 0;
|
||||
}
|
||||
|
||||
void tscDestroyLocalMerger(SLocalMerger* pLocalMerger) {
|
||||
if (pLocalMerger == NULL) {
|
||||
void tscDestroyGlobalMerger(SGlobalMerger* pMerger) {
|
||||
if (pMerger == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < pLocalMerger->numOfBuffer; ++i) {
|
||||
tfree(pLocalMerger->pLocalDataSrc[i]);
|
||||
for (int32_t i = 0; i < pMerger->numOfBuffer; ++i) {
|
||||
tfree(pMerger->pLocalDataSrc[i]);
|
||||
}
|
||||
|
||||
pLocalMerger->numOfBuffer = 0;
|
||||
tscLocalReducerEnvDestroy(pLocalMerger->pExtMemBuffer, pLocalMerger->pDesc, pLocalMerger->numOfVnode);
|
||||
pMerger->numOfBuffer = 0;
|
||||
tscDestroyGlobalMergerEnv(pMerger->pExtMemBuffer, pMerger->pDesc, pMerger->numOfVnode);
|
||||
|
||||
pLocalMerger->numOfCompleted = 0;
|
||||
pMerger->numOfCompleted = 0;
|
||||
|
||||
if (pLocalMerger->pLoserTree) {
|
||||
tfree(pLocalMerger->pLoserTree->param);
|
||||
tfree(pLocalMerger->pLoserTree);
|
||||
if (pMerger->pLoserTree) {
|
||||
tfree(pMerger->pLoserTree->param);
|
||||
tfree(pMerger->pLoserTree);
|
||||
}
|
||||
|
||||
tfree(pLocalMerger->buf);
|
||||
tfree(pLocalMerger->pLocalDataSrc);
|
||||
free(pLocalMerger);
|
||||
tfree(pMerger->buf);
|
||||
tfree(pMerger->pLocalDataSrc);
|
||||
free(pMerger);
|
||||
}
|
||||
|
||||
static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SQueryInfo* pQueryInfo, SColumnModel *pModel) {
|
||||
|
@ -329,7 +342,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SQueryInfo*
|
|||
}
|
||||
|
||||
// primary timestamp column is involved in final result
|
||||
if (pQueryInfo->interval.interval != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
|
||||
if (pQueryInfo->interval.interval != 0 || pQueryInfo->orderProjectQuery) {
|
||||
numOfGroupByCols++;
|
||||
}
|
||||
|
||||
|
@ -392,7 +405,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SQueryInfo*
|
|||
}
|
||||
}
|
||||
|
||||
int32_t tscLocalReducerEnvCreate(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBuffer, int32_t numOfSub,
|
||||
int32_t tscCreateGlobalMergerEnv(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBuffer, int32_t numOfSub,
|
||||
tOrderDescriptor **pOrderDesc, uint32_t nBufferSizes, int64_t id) {
|
||||
SSchema *pSchema = NULL;
|
||||
SColumnModel *pModel = NULL;
|
||||
|
@@ -456,7 +469,7 @@ int32_t tscLocalReducerEnvCreate(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBu
  * @param pDesc
  * @param numOfVnodes
  */
-void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, int32_t numOfVnodes) {
+void tscDestroyGlobalMergerEnv(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, int32_t numOfVnodes) {
   tOrderDescDestroy(pDesc);
   for (int32_t i = 0; i < numOfVnodes; ++i) {
     pMemBuffer[i] = destoryExtMemBuffer(pMemBuffer[i]);
@@ -467,12 +480,12 @@ void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDe

 /**
  *
- * @param pLocalMerge
+ * @param pMerger
  * @param pOneInterDataSrc
  * @param treeList
  * @return the number of remain input source. if ret == 0, all data has been handled
  */
-int32_t loadNewDataFromDiskFor(SLocalMerger *pLocalMerge, SLocalDataSource *pOneInterDataSrc,
+int32_t loadNewDataFromDiskFor(SGlobalMerger *pMerger, SLocalDataSource *pOneInterDataSrc,
                                bool *needAdjustLoserTree) {
   pOneInterDataSrc->rowIdx = 0;
   pOneInterDataSrc->pageId += 1;
@@ -489,17 +502,17 @@ int32_t loadNewDataFromDiskFor(SLocalMerger *pLocalMerge, SLocalDataSource *pOne
 #endif
     *needAdjustLoserTree = true;
   } else {
-    pLocalMerge->numOfCompleted += 1;
+    pMerger->numOfCompleted += 1;

     pOneInterDataSrc->rowIdx = -1;
     pOneInterDataSrc->pageId = -1;
     *needAdjustLoserTree = true;
   }

-  return pLocalMerge->numOfBuffer;
+  return pMerger->numOfBuffer;
 }

-void adjustLoserTreeFromNewData(SLocalMerger *pLocalMerge, SLocalDataSource *pOneInterDataSrc,
+void adjustLoserTreeFromNewData(SGlobalMerger *pMerger, SLocalDataSource *pOneInterDataSrc,
                                 SLoserTreeInfo *pTree) {
   /*
    * load a new data page into memory for intermediate dataset source,
@@ -507,7 +520,7 @@ void adjustLoserTreeFromNewData(SLocalMerger *pLocalMerge, SLocalDataSource *pOn
    */
   bool needToAdjust = true;
   if (pOneInterDataSrc->filePage.num <= pOneInterDataSrc->rowIdx) {
-    loadNewDataFromDiskFor(pLocalMerge, pOneInterDataSrc, &needToAdjust);
+    loadNewDataFromDiskFor(pMerger, pOneInterDataSrc, &needToAdjust);
   }

   /*
@@ -515,7 +528,7 @@ void adjustLoserTreeFromNewData(SLocalMerger *pLocalMerge, SLocalDataSource *pOn
    * if the loser tree is rebuild completed, we do not need to adjust
    */
   if (needToAdjust) {
    int32_t leafNodeIdx = pTree->pNode[0].index + pLocalMerge->numOfBuffer;
-    int32_t leafNodeIdx = pTree->pNode[0].index + pLocalMerge->numOfBuffer;
+    int32_t leafNodeIdx = pTree->pNode[0].index + pMerger->numOfBuffer;

 #ifdef _DEBUG_VIEW
     printf("before adjust:\t");
@@ -567,7 +580,7 @@ static void setTagValueForMultipleRows(SQLFunctionCtx* pCtx, int32_t numOfOutput
   }
 }

-static void doExecuteFinalMergeRv(SOperatorInfo* pOperator, int32_t numOfExpr, SSDataBlock* pBlock) {
+static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSDataBlock* pBlock) {
   SMultiwayMergeInfo* pInfo = pOperator->info;
   SQLFunctionCtx* pCtx = pInfo->binfo.pCtx;

@@ -579,7 +592,7 @@ static void doExecuteFinalMergeRv(SOperatorInfo* pOperator, int32_t numOfExpr, S

   for(int32_t i = 0; i < pBlock->info.rows; ++i) {
     if (pInfo->hasPrev) {
-      if (needToMergeRv(pBlock, pInfo->orderColumnList, i, pInfo->prevRow)) {
+      if (needToMerge(pBlock, pInfo->orderColumnList, i, pInfo->prevRow)) {
         for (int32_t j = 0; j < numOfExpr; ++j) {
           pCtx[j].pInput = add[j] + pCtx[j].inputBytes * i;
         }
@@ -694,45 +707,27 @@ static void doExecuteFinalMergeRv(SOperatorInfo* pOperator, int32_t numOfExpr, S
   tfree(add);
 }

-bool needToMergeRv(SSDataBlock* pBlock, SArray* columnIndexList, int32_t index, char **buf) {
-  int32_t ret = 0;
-  size_t size = taosArrayGetSize(columnIndexList);
-  if (size > 0) {
-    ret = compare_aRv(pBlock, columnIndexList, (int32_t) size, index, buf, TSDB_ORDER_ASC);
-  }
-
-  // if ret == 0, means the result belongs to the same group
-  return (ret == 0);
+static bool isAllSourcesCompleted(SGlobalMerger *pMerger) {
+  return (pMerger->numOfBuffer == pMerger->numOfCompleted);
 }

-static bool isAllSourcesCompleted(SLocalMerger *pLocalMerge) {
-  return (pLocalMerge->numOfBuffer == pLocalMerge->numOfCompleted);
-}
-
-void tscInitResObjForLocalQuery(SSqlObj *pSql, int32_t numOfRes, int32_t rowLen) {
-  SSqlRes *pRes = &pSql->res;
-  if (pRes->pLocalMerger != NULL) {
-    tscDestroyLocalMerger(pRes->pLocalMerger);
-    pRes->pLocalMerger = NULL;
-    tscDebug("0x%"PRIx64" free local reducer finished", pSql->self);
+SGlobalMerger* tscInitResObjForLocalQuery(int32_t numOfRes, int32_t rowLen, uint64_t id) {
+  SGlobalMerger *pMerger = calloc(1, sizeof(SGlobalMerger));
+  if (pMerger == NULL) {
+    tscDebug("0x%"PRIx64" free local reducer finished", id);
+    return NULL;
   }

-  pRes->qId = 1; // hack to pass the safety check in fetch_row function
-  pRes->numOfRows = 0;
-  pRes->row = 0;
-
-  pRes->rspType = 0; // used as a flag to denote if taos_retrieved() has been called yet
-  pRes->pLocalMerger = (SLocalMerger *)calloc(1, sizeof(SLocalMerger));
-
   /*
    * One more byte space is required, since the sprintf function needs one additional space to put '\0' at
    * the end of string
    */
   size_t size = numOfRes * rowLen + 1;
-  pRes->pLocalMerger->buf = calloc(1, size);
-  pRes->data = pRes->pLocalMerger->buf;
+  pMerger->buf = calloc(1, size);
+  return pMerger;
 }

 // todo remove it
 int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) {
   int32_t maxRowSize = MAX(rowSize, finalRowSize);
   char* pbuf = calloc(1, (size_t)(pOutput->num * maxRowSize));
@@ -776,9 +771,6 @@ int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_
   return offset;
 }

-#define COLMODEL_GET_VAL(data, schema, rowId, colId) \
-  (data + (schema)->pFields[colId].offset * ((schema)->capacity) + (rowId) * (schema)->pFields[colId].field.bytes)
-
 static void appendOneRowToDataBlock(SSDataBlock *pBlock, char *buf, SColumnModel *pModel, int32_t rowIndex,
                                     int32_t maxRows) {
   for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
@@ -800,7 +792,7 @@ SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup) {

   SMultiwayMergeInfo *pInfo = pOperator->info;

-  SLocalMerger *pMerger = pInfo->pMerge;
+  SGlobalMerger *pMerger = pInfo->pMerge;
   SLoserTreeInfo *pTree = pMerger->pLoserTree;

   pInfo->binfo.pRes->info.rows = 0;
@@ -884,7 +876,7 @@ SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup) {
   return (pInfo->binfo.pRes->info.rows > 0)? pInfo->binfo.pRes:NULL;
 }

-static bool isSameGroupRv(SArray* orderColumnList, SSDataBlock* pBlock, char** dataCols) {
+static bool isSameGroup(SArray* orderColumnList, SSDataBlock* pBlock, char** dataCols) {
   int32_t numOfCols = (int32_t) taosArrayGetSize(orderColumnList);
   for (int32_t i = 0; i < numOfCols; ++i) {
     SColIndex *pIndex = taosArrayGet(orderColumnList, i);
@@ -937,7 +929,7 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
       }
     }

-    doExecuteFinalMergeRv(pOperator, pOperator->numOfOutput, pAggInfo->pExistBlock);
+    doExecuteFinalMerge(pOperator, pOperator->numOfOutput, pAggInfo->pExistBlock);

     savePrevOrderColumns(pAggInfo->currentGroupColData, pAggInfo->groupColumnList, pAggInfo->pExistBlock, 0,
                          &pAggInfo->hasGroupColData);
@@ -958,7 +950,7 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
     }

     if (pAggInfo->hasGroupColData) {
-      bool sameGroup = isSameGroupRv(pAggInfo->groupColumnList, pBlock, pAggInfo->currentGroupColData);
+      bool sameGroup = isSameGroup(pAggInfo->groupColumnList, pBlock, pAggInfo->currentGroupColData);
       if (!sameGroup) {
         *newgroup = true;
         pAggInfo->hasDataBlockForNewGroup = true;
@@ -972,7 +964,7 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
     setInputDataBlock(pOperator, pAggInfo->binfo.pCtx, pBlock, TSDB_ORDER_ASC);
     updateOutputBuf(&pAggInfo->binfo, &pAggInfo->bufCapacity, pBlock->info.rows * pAggInfo->resultRowFactor);

-    doExecuteFinalMergeRv(pOperator, pOperator->numOfOutput, pBlock);
+    doExecuteFinalMerge(pOperator, pOperator->numOfOutput, pBlock);
     savePrevOrderColumns(pAggInfo->currentGroupColData, pAggInfo->groupColumnList, pBlock, 0, &pAggInfo->hasGroupColData);
     handleData = true;
   }

@@ -20,7 +20,7 @@
 #include "tname.h"
 #include "tscLog.h"
 #include "tscUtil.h"
-#include "tschemautil.h"
+#include "qTableMeta.h"
 #include "tsclient.h"
 #include "taos.h"
 #include "tscSubquery.h"
@@ -71,7 +71,9 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
     numOfRows = numOfRows + tscGetNumOfTags(pMeta);
   }

-  tscInitResObjForLocalQuery(pSql, totalNumOfRows, rowLen);
+  pSql->res.pMerger = tscInitResObjForLocalQuery(totalNumOfRows, rowLen, pSql->self);
+  tscInitResForMerge(&pSql->res);

   SSchema *pSchema = tscGetTableSchema(pMeta);

   for (int32_t i = 0; i < numOfRows; ++i) {
@@ -433,7 +435,8 @@ static int32_t tscSCreateSetValueToResObj(SSqlObj *pSql, int32_t rowLen, const c
   if (strlen(ddl) == 0) {

   }
-  tscInitResObjForLocalQuery(pSql, numOfRows, rowLen);
+  pSql->res.pMerger = tscInitResObjForLocalQuery(numOfRows, rowLen, pSql->self);
+  tscInitResForMerge(&pSql->res);

   TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 0);
   char* dst = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * numOfRows;
@@ -882,7 +885,8 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa
   TAOS_FIELD f = tscCreateField((int8_t)type, columnName, (int16_t)valueLength);
   tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);

-  tscInitResObjForLocalQuery(pSql, 1, (int32_t)valueLength);
+  pSql->res.pMerger = tscInitResObjForLocalQuery(1, (int32_t)valueLength, pSql->self);
+  tscInitResForMerge(&pSql->res);

   SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, 0);
   pInfo->pExpr = taosArrayGetP(pQueryInfo->exprList, 0);

@@ -23,7 +23,7 @@
 #include "ttype.h"
 #include "hash.h"
 #include "tscUtil.h"
-#include "tschemautil.h"
+#include "qTableMeta.h"
 #include "tsclient.h"
 #include "ttokendef.h"
 #include "taosdef.h"
@@ -38,8 +38,9 @@ enum {
   TSDB_USE_CLI_TS = 1,
 };

-static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows);
-static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SSchema* pSchema, char* str, char** end);
+static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t *numOfRows);
+static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDataColInfo *pColInfo, SSchema *pSchema,
+                                 char *str, char **end);

 static int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) {
   errno = 0;
@ -71,7 +72,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
|
|||
} else {
|
||||
// strptime("2001-11-12 18:31:01", "%Y-%m-%d %H:%M:%S", &tm);
|
||||
if (taosParseTime(pToken->z, time, pToken->n, timePrec, tsDaylight) != TSDB_CODE_SUCCESS) {
|
||||
return tscInvalidSQLErrMsg(error, "invalid timestamp format", pToken->z);
|
||||
return tscInvalidOperationMsg(error, "invalid timestamp format", pToken->z);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -103,7 +104,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
|
|||
pTokenEnd += index;
|
||||
|
||||
if (valueToken.n < 2) {
|
||||
return tscInvalidSQLErrMsg(error, "value expected in timestamp", sToken.z);
|
||||
return tscInvalidOperationMsg(error, "value expected in timestamp", sToken.z);
|
||||
}
|
||||
|
||||
if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
|
||||
|
@ -138,7 +139,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
|
|||
char *endptr = NULL;
|
||||
|
||||
if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) {
|
||||
return tscInvalidSQLErrMsg(msg, "invalid numeric data", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z);
|
||||
}
|
||||
|
||||
switch (pSchema->type) {
|
||||
|
@ -161,7 +162,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
|
|||
double dv = strtod(pToken->z, NULL);
|
||||
*(uint8_t *)payload = (int8_t)((dv == 0) ? TSDB_FALSE : TSDB_TRUE);
|
||||
} else {
|
||||
return tscInvalidSQLErrMsg(msg, "invalid bool data", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
@ -173,9 +174,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
|
|||
} else {
|
||||
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return tscInvalidSQLErrMsg(msg, "invalid tinyint data", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z);
|
||||
} else if (!IS_VALID_TINYINT(iv)) {
|
||||
return tscInvalidSQLErrMsg(msg, "data overflow", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "data overflow", pToken->z);
|
||||
}
|
||||
|
||||
*((uint8_t *)payload) = (uint8_t)iv;
|
||||
|
@ -189,9 +190,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
|
|||
} else {
|
||||
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return tscInvalidSQLErrMsg(msg, "invalid unsigned tinyint data", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z);
|
||||
} else if (!IS_VALID_UTINYINT(iv)) {
|
||||
return tscInvalidSQLErrMsg(msg, "unsigned tinyint data overflow", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z);
|
||||
}
|
||||
|
||||
*((uint8_t *)payload) = (uint8_t)iv;
|
||||
|
@ -205,9 +206,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
|
|||
} else {
|
||||
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return tscInvalidSQLErrMsg(msg, "invalid smallint data", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z);
|
||||
} else if (!IS_VALID_SMALLINT(iv)) {
|
||||
return tscInvalidSQLErrMsg(msg, "smallint data overflow", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z);
|
||||
}
|
||||
|
||||
*((int16_t *)payload) = (int16_t)iv;
|
||||
|
@ -221,9 +222,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
|
|||
} else {
|
||||
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return tscInvalidSQLErrMsg(msg, "invalid unsigned smallint data", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z);
|
||||
} else if (!IS_VALID_USMALLINT(iv)) {
|
||||
return tscInvalidSQLErrMsg(msg, "unsigned smallint data overflow", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z);
|
||||
}
|
||||
|
||||
*((uint16_t *)payload) = (uint16_t)iv;
|
||||
|
@ -237,9 +238,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
|
|||
} else {
|
||||
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return tscInvalidSQLErrMsg(msg, "invalid int data", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "invalid int data", pToken->z);
|
||||
} else if (!IS_VALID_INT(iv)) {
|
||||
return tscInvalidSQLErrMsg(msg, "int data overflow", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "int data overflow", pToken->z);
|
||||
}
|
||||
|
||||
*((int32_t *)payload) = (int32_t)iv;
|
||||
|
@ -253,9 +254,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
|
|||
} else {
|
||||
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return tscInvalidSQLErrMsg(msg, "invalid unsigned int data", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z);
|
||||
} else if (!IS_VALID_UINT(iv)) {
|
||||
return tscInvalidSQLErrMsg(msg, "unsigned int data overflow", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z);
|
||||
}
|
||||
|
||||
*((uint32_t *)payload) = (uint32_t)iv;
|
||||
|
@ -269,9 +270,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
|
|||
} else {
|
||||
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return tscInvalidSQLErrMsg(msg, "invalid bigint data", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z);
|
||||
} else if (!IS_VALID_BIGINT(iv)) {
|
||||
return tscInvalidSQLErrMsg(msg, "bigint data overflow", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z);
|
||||
}
|
||||
|
||||
*((int64_t *)payload) = iv;
|
||||
|
@ -284,9 +285,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
|
|||
} else {
|
||||
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return tscInvalidSQLErrMsg(msg, "invalid unsigned bigint data", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z);
|
||||
} else if (!IS_VALID_UBIGINT((uint64_t)iv)) {
|
||||
return tscInvalidSQLErrMsg(msg, "unsigned bigint data overflow", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z);
|
||||
}
|
||||
|
||||
*((uint64_t *)payload) = iv;
|
||||
|
@ -299,11 +300,11 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
|
|||
} else {
|
||||
double dv;
|
||||
if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
|
||||
return tscInvalidSQLErrMsg(msg, "illegal float data", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "illegal float data", pToken->z);
|
||||
}
|
||||
|
||||
if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) || isnan(dv)) {
|
||||
return tscInvalidSQLErrMsg(msg, "illegal float data", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "illegal float data", pToken->z);
|
||||
}
|
||||
|
||||
// *((float *)payload) = (float)dv;
|
||||
|
@ -317,11 +318,11 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
|
|||
} else {
|
||||
double dv;
|
||||
if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
|
||||
return tscInvalidSQLErrMsg(msg, "illegal double data", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
|
||||
}
|
||||
|
||||
if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) {
|
||||
return tscInvalidSQLErrMsg(msg, "illegal double data", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
|
||||
}
|
||||
|
||||
*((double *)payload) = dv;
|
||||
|
@ -334,7 +335,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
|
|||
setVardataNull(payload, TSDB_DATA_TYPE_BINARY);
|
||||
} else { // too long values will return invalid sql, not be truncated automatically
|
||||
if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { //todo refactor
|
||||
return tscInvalidSQLErrMsg(msg, "string data overflow", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "string data overflow", pToken->z);
|
||||
}
|
||||
|
||||
STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n);
|
||||
|
@ -351,7 +352,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
|
|||
if (!taosMbsToUcs4(pToken->z, pToken->n, varDataVal(payload), pSchema->bytes - VARSTR_HEADER_SIZE, &output)) {
|
||||
char buf[512] = {0};
|
||||
snprintf(buf, tListLen(buf), "%s", strerror(errno));
|
||||
return tscInvalidSQLErrMsg(msg, buf, pToken->z);
|
||||
return tscInvalidOperationMsg(msg, buf, pToken->z);
|
||||
}
|
||||
|
||||
varDataSetLen(payload, output);
|
||||
|
@ -368,7 +369,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
|
|||
} else {
|
||||
int64_t temp;
|
||||
if (tsParseTime(pToken, &temp, str, msg, timePrec) != TSDB_CODE_SUCCESS) {
|
||||
return tscInvalidSQLErrMsg(msg, "invalid timestamp", pToken->z);
|
||||
return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z);
|
||||
}
|
||||
|
||||
*((int64_t *)payload) = temp;
|
||||
|
@ -417,8 +418,8 @@ int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int16_t timePrec, int32_t *len,
|
||||
char *tmpTokenBuf) {
|
||||
int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len,
|
||||
char *tmpTokenBuf, SInsertStatementParam* pInsertParam) {
|
||||
int32_t index = 0;
|
||||
SStrToken sToken = {0};
|
||||
char *payload = pDataBlocks->pData + pDataBlocks->size;
|
||||
|
@ -441,8 +442,8 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1
|
|||
*str += index;
|
||||
|
||||
if (sToken.type == TK_QUESTION) {
|
||||
if (pCmd->insertParam.insertType != TSDB_QUERY_TYPE_STMT_INSERT) {
|
||||
return tscSQLSyntaxErrMsg(pCmd->payload, "? only allowed in binding insertion", *str);
|
||||
if (pInsertParam->insertType != TSDB_QUERY_TYPE_STMT_INSERT) {
|
||||
return tscSQLSyntaxErrMsg(pInsertParam->msg, "? only allowed in binding insertion", *str);
|
||||
}
|
||||
|
||||
uint32_t offset = (uint32_t)(start - pDataBlocks->pData);
|
||||
|
@ -450,14 +451,14 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1
|
|||
continue;
|
||||
}
|
||||
|
||||
strcpy(pCmd->payload, "client out of memory");
|
||||
strcpy(pInsertParam->msg, "client out of memory");
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
int16_t type = sToken.type;
|
||||
if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL &&
|
||||
type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || (sToken.n == 0) || (type == TK_RP)) {
|
||||
return tscSQLSyntaxErrMsg(pCmd->payload, "invalid data or symbol", sToken.z);
|
||||
return tscSQLSyntaxErrMsg(pInsertParam->msg, "invalid data or symbol", sToken.z);
|
||||
}
|
||||
|
||||
// Remove quotation marks
|
||||
|
@ -487,13 +488,13 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1
|
|||
}
|
||||
|
||||
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
|
||||
int32_t ret = tsParseOneColumn(pSchema, &sToken, start, pCmd->payload, str, isPrimaryKey, timePrec);
|
||||
int32_t ret = tsParseOneColumn(pSchema, &sToken, start, pInsertParam->msg, str, isPrimaryKey, timePrec);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) {
|
||||
tscInvalidSQLErrMsg(pCmd->payload, "client time/server time can not be mixed up", sToken.z);
|
||||
tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z);
|
||||
return TSDB_CODE_TSC_INVALID_TIME_STAMP;
|
||||
}
|
||||
}
|
||||
|
@ -536,7 +537,8 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) {
|
|||
}
|
||||
}
|
||||
|
||||
int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSqlCmd* pCmd, int32_t* numOfRows, char *tmpTokenBuf) {
|
||||
int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SInsertStatementParam *pInsertParam,
|
||||
int32_t* numOfRows, char *tmpTokenBuf) {
|
||||
int32_t index = 0;
|
||||
int32_t code = 0;
|
||||
|
||||
|
@ -559,7 +561,7 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSq
|
|||
int32_t tSize;
|
||||
code = tscAllocateMemIfNeed(pDataBlock, tinfo.rowSize, &tSize);
|
||||
if (code != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client
|
||||
strcpy(pCmd->payload, "client out of memory");
|
||||
strcpy(pInsertParam->msg, "client out of memory");
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
|
@ -568,7 +570,7 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSq
|
|||
}
|
||||
|
||||
int32_t len = 0;
|
||||
code = tsParseOneRow(str, pDataBlock, pCmd, precision, &len, tmpTokenBuf);
|
||||
code = tsParseOneRow(str, pDataBlock, precision, &len, tmpTokenBuf, pInsertParam);
|
||||
if (code != TSDB_CODE_SUCCESS) { // error message has been set in tsParseOneRow, return directly
|
||||
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||
}
|
||||
|
@ -578,7 +580,7 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSq
|
|||
index = 0;
|
||||
sToken = tStrGetToken(*str, &index, false);
|
||||
if (sToken.n == 0 || sToken.type != TK_RP) {
|
||||
tscSQLSyntaxErrMsg(pCmd->payload, ") expected", *str);
|
||||
tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", *str);
|
||||
code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||
return code;
|
||||
}
|
||||
|
@ -589,7 +591,7 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSq
|
|||
}
|
||||
|
||||
if ((*numOfRows) <= 0) {
|
||||
strcpy(pCmd->payload, "no any data points");
|
||||
strcpy(pInsertParam->msg, "no any data points");
|
||||
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||
} else {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -699,7 +701,7 @@ void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) {
|
|||
dataBuf->prevTS = INT64_MIN;
|
||||
}
|
||||
|
||||
static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) {
|
||||
static int32_t doParseInsertStatement(SInsertStatementParam *pInsertParam, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) {
|
||||
STableComInfo tinfo = tscGetTableInfo(dataBuf->pTableMeta);
|
||||
|
||||
int32_t maxNumOfRows;
|
||||
|
@ -712,7 +714,7 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlock
|
|||
char tmpTokenBuf[16*1024] = {0}; // used for deleting Escape character: \\, \', \"
|
||||
|
||||
int32_t numOfRows = 0;
|
||||
code = tsParseValues(str, dataBuf, maxNumOfRows, pCmd, &numOfRows, tmpTokenBuf);
|
||||
code = tsParseValues(str, dataBuf, maxNumOfRows, pInsertParam, &numOfRows, tmpTokenBuf);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
@ -720,7 +722,7 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlock
|
|||
for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) {
|
||||
SParamInfo *param = dataBuf->params + i;
|
||||
if (param->idx == -1) {
|
||||
param->idx = pCmd->numOfParams++;
|
||||
param->idx = pInsertParam->numOfParams++;
|
||||
param->offset -= sizeof(SSubmitBlk);
|
||||
}
|
||||
}
|
||||
|
@ -728,7 +730,7 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlock
|
|||
SSubmitBlk *pBlocks = (SSubmitBlk *)(dataBuf->pData);
|
||||
code = tsSetBlockInfo(pBlocks, dataBuf->pTableMeta, numOfRows);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", *str);
|
||||
tscInvalidOperationMsg(pInsertParam->msg, "too many rows in sql, total number of rows should be less than 32767", *str);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -748,6 +750,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
|
||||
SSqlCmd * pCmd = &pSql->cmd;
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
|
||||
SInsertStatementParam* pInsertParam = &pCmd->insertParam;
|
||||
|
||||
char *sql = *sqlstr;
|
||||
|
||||
|
@ -784,7 +787,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
}
|
||||
|
||||
if (numOfColList == 0 && (*boundColumn) != NULL) {
|
||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||
}
|
||||
|
||||
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, TABLE_INDEX);
|
||||
|
@ -805,8 +808,8 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
return code;
|
||||
}
|
||||
|
||||
tNameExtractFullName(&pSTableMetaInfo->name, pCmd->tagData.name);
|
||||
pCmd->tagData.dataLen = 0;
|
||||
tNameExtractFullName(&pSTableMetaInfo->name, pInsertParam->tagData.name);
|
||||
pInsertParam->tagData.dataLen = 0;
|
||||
|
||||
code = tscGetTableMeta(pSql, pSTableMetaInfo);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -814,7 +817,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
}
|
||||
|
||||
if (!UTIL_TABLE_IS_SUPER_TABLE(pSTableMetaInfo)) {
|
||||
return tscInvalidSQLErrMsg(pCmd->payload, "create table only from super table is allowed", sToken.z);
|
||||
return tscInvalidOperationMsg(pInsertParam->msg, "create table only from super table is allowed", sToken.z);
|
||||
}
|
||||
|
||||
SSchema *pTagSchema = tscGetTableTagSchema(pSTableMetaInfo->pTableMeta);
|
||||
|
@ -827,7 +830,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
sToken = tStrGetToken(sql, &index, false);
|
||||
if (sToken.type != TK_TAGS && sToken.type != TK_LP) {
|
||||
tscDestroyBoundColumnInfo(&spd);
|
||||
return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
|
||||
return tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword TAGS expected", sToken.z);
|
||||
}
|
||||
|
||||
// parse the bound tags column
|
||||
|
@ -837,7 +840,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
* tags(tagVal1, tagVal2, ..., tagValn) values(v1, v2,... vn);
|
||||
*/
|
||||
char* end = NULL;
|
||||
code = parseBoundColumns(pCmd, &spd, pTagSchema, sql, &end);
|
||||
code = parseBoundColumns(pInsertParam, &spd, pTagSchema, sql, &end);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscDestroyBoundColumnInfo(&spd);
|
||||
return code;
|
||||
|
@ -858,7 +861,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
|
||||
if (sToken.type != TK_LP) {
|
||||
tscDestroyBoundColumnInfo(&spd);
|
||||
return tscInvalidSQLErrMsg(pCmd->payload, "( is expected", sToken.z);
|
||||
return tscSQLSyntaxErrMsg(pInsertParam->msg, "( is expected", sToken.z);
|
||||
}
|
||||
|
||||
SKVRowBuilder kvRowBuilder = {0};
|
||||
|
@ -877,7 +880,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
if (TK_ILLEGAL == sToken.type) {
|
||||
tdDestroyKVRowBuilder(&kvRowBuilder);
|
||||
tscDestroyBoundColumnInfo(&spd);
|
||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||
}
|
||||
|
||||
if (sToken.n == 0 || sToken.type == TK_RP) {
|
||||
|
@ -891,7 +894,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
}
|
||||
|
||||
char tagVal[TSDB_MAX_TAGS_LEN];
|
||||
code = tsParseOneColumn(pSchema, &sToken, tagVal, pCmd->payload, &sql, false, tinfo.precision);
|
||||
code = tsParseOneColumn(pSchema, &sToken, tagVal, pInsertParam->msg, &sql, false, tinfo.precision);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tdDestroyKVRowBuilder(&kvRowBuilder);
|
||||
tscDestroyBoundColumnInfo(&spd);
|
||||
|
@ -906,29 +909,29 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder);
|
||||
tdDestroyKVRowBuilder(&kvRowBuilder);
|
||||
if (row == NULL) {
|
||||
return tscInvalidSQLErrMsg(pCmd->payload, "tag value expected", NULL);
|
||||
return tscSQLSyntaxErrMsg(pInsertParam->msg, "tag value expected", NULL);
|
||||
}
|
||||
tdSortKVRowByColIdx(row);
|
||||
|
||||
pCmd->tagData.dataLen = kvRowLen(row);
|
||||
if (pCmd->tagData.dataLen <= 0){
|
||||
return tscInvalidSQLErrMsg(pCmd->payload, "tag value expected", NULL);
|
||||
pInsertParam->tagData.dataLen = kvRowLen(row);
|
||||
if (pInsertParam->tagData.dataLen <= 0){
|
||||
return tscSQLSyntaxErrMsg(pInsertParam->msg, "tag value expected", NULL);
|
||||
}
|
||||
|
||||
char* pTag = realloc(pCmd->tagData.data, pCmd->tagData.dataLen);
|
||||
char* pTag = realloc(pInsertParam->tagData.data, pInsertParam->tagData.dataLen);
|
||||
if (pTag == NULL) {
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
kvRowCpy(pTag, row);
|
||||
free(row);
|
||||
pCmd->tagData.data = pTag;
|
||||
pInsertParam->tagData.data = pTag;
|
||||
|
||||
index = 0;
|
||||
sToken = tStrGetToken(sql, &index, false);
|
||||
sql += index;
|
||||
if (sToken.n == 0 || sToken.type != TK_RP) {
|
||||
return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
|
||||
return tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", sToken.z);
|
||||
}
|
||||
|
||||
/* parse columns after super table tags values.
|
||||
|
@ -941,7 +944,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
int numOfColsAfterTags = 0;
|
||||
if (sToken.type == TK_LP) {
|
||||
if (*boundColumn != NULL) {
|
||||
return tscSQLSyntaxErrMsg(pCmd->payload, "bind columns again", sToken.z);
|
||||
return tscSQLSyntaxErrMsg(pInsertParam->msg, "bind columns again", sToken.z);
|
||||
} else {
|
||||
*boundColumn = &sToken.z[0];
|
||||
}
|
||||
|
@ -959,7 +962,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
}
|
||||
|
||||
if (numOfColsAfterTags == 0 && (*boundColumn) != NULL) {
|
||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||
}
|
||||
|
||||
sToken = tStrGetToken(sql, &index, false);
|
||||
|
@ -968,7 +971,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
sql = sToken.z;
|
||||
|
||||
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
|
||||
return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr);
|
||||
return tscInvalidOperationMsg(pInsertParam->msg, "invalid table name", *sqlstr);
|
||||
}
|
||||
|
||||
int32_t ret = tscSetTableFullName(&pTableMetaInfo->name, &tableToken, pSql);
|
||||
|
@ -977,7 +980,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
}
|
||||
|
||||
if (sql == NULL) {
|
||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||
}
|
||||
|
||||
code = tscGetTableMetaEx(pSql, pTableMetaInfo, true);
|
||||
|
@ -986,20 +989,18 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
}
|
||||
|
||||
} else {
|
||||
sql = sToken.z;
|
||||
|
||||
if (sql == NULL) {
|
||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
if (sToken.z == NULL) {
|
||||
return tscSQLSyntaxErrMsg(pInsertParam->msg, "", sql);
|
||||
}
|
||||
|
||||
sql = sToken.z;
|
||||
code = tscGetTableMetaEx(pSql, pTableMetaInfo, false);
|
||||
if (pCmd->curSql == NULL) {
|
||||
if (pInsertParam->sql == NULL) {
|
||||
assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS);
|
||||
}
|
||||
}
|
||||
|
||||
*sqlstr = sql;
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -1013,21 +1014,21 @@ int validateTableName(char *tblName, int len, SStrToken* psTblToken) {
|
|||
return tscValidateName(psTblToken);
|
||||
}
|
||||
|
||||
static int32_t validateDataSource(SSqlCmd *pCmd, int32_t type, const char *sql) {
|
||||
uint32_t *insertType = &pCmd->insertParam.insertType;
|
||||
static int32_t validateDataSource(SInsertStatementParam *pInsertParam, int32_t type, const char *sql) {
|
||||
uint32_t *insertType = &pInsertParam->insertType;
|
||||
if (*insertType == TSDB_QUERY_TYPE_STMT_INSERT && type == TSDB_QUERY_TYPE_INSERT) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if ((*insertType) != 0 && (*insertType) != type) {
|
||||
return tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mixed up", sql);
|
||||
return tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword VALUES and FILE are not allowed to mixed up", sql);
|
||||
}
|
||||
|
||||
*insertType = type;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SSchema* pSchema,
|
||||
static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDataColInfo* pColInfo, SSchema* pSchema,
|
||||
char* str, char **end) {
|
||||
pColInfo->numOfBound = 0;
|
||||
|
||||
|
@ -1043,7 +1044,7 @@ static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SS
|
|||
str += index;
|
||||
|
||||
if (sToken.type != TK_LP) {
|
||||
code = tscInvalidSQLErrMsg(pCmd->payload, "( is expected", sToken.z);
|
||||
code = tscSQLSyntaxErrMsg(pInsertParam->msg, "( is expected", sToken.z);
|
||||
goto _clean;
|
||||
}
|
||||
|
||||
|
@ -1070,7 +1071,7 @@ static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SS
|
|||
for (int32_t t = 0; t < pColInfo->numOfCols; ++t) {
|
||||
if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) {
|
||||
if (pColInfo->cols[t].hasVal == true) {
|
||||
code = tscInvalidSQLErrMsg(pCmd->payload, "duplicated column name", sToken.z);
|
||||
code = tscInvalidOperationMsg(pInsertParam->msg, "duplicated column name", sToken.z);
|
||||
goto _clean;
|
||||
}
|
||||
|
||||
|
@ -1083,7 +1084,7 @@ static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SS
|
|||
}
|
||||
|
||||
if (!findColumnIndex) {
|
||||
code = tscInvalidSQLErrMsg(pCmd->payload, "invalid column/tag name", sToken.z);
|
||||
code = tscInvalidOperationMsg(pInsertParam->msg, "invalid column/tag name", sToken.z);
|
||||
goto _clean;
|
||||
}
|
||||
}
|
||||
|
@@ -1092,10 +1093,25 @@ static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SS
   return TSDB_CODE_SUCCESS;

 _clean:
-  pCmd->curSql = NULL;
+  pInsertParam->sql = NULL;
   return code;
 }

+static int32_t getFileFullPath(SStrToken* pToken, char* output) {
+  char path[PATH_MAX] = {0};
+  strncpy(path, pToken->z, pToken->n);
+  strdequote(path);
+
+  wordexp_t full_path;
+  if (wordexp(path, &full_path, 0) != 0) {
+    return TSDB_CODE_TSC_INVALID_OPERATION;
+  }
+
+  tstrncpy(output, full_path.we_wordv[0], PATH_MAX);
+  wordfree(&full_path);
+  return TSDB_CODE_SUCCESS;
+}
+
 /**
  * parse insert sql
  * @param pSql
@ -1103,7 +1119,9 @@ static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SS
|
|||
*/
|
||||
int tsParseInsertSql(SSqlObj *pSql) {
|
||||
SSqlCmd *pCmd = &pSql->cmd;
|
||||
char* str = pCmd->curSql;
|
||||
|
||||
SInsertStatementParam* pInsertParam = &pCmd->insertParam;
|
||||
char* str = pInsertParam->sql;
|
||||
|
||||
int32_t totalNum = 0;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
@ -1118,21 +1136,17 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
|||
return code;
|
||||
}
|
||||
|
||||
if ((code = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
if (NULL == pCmd->insertParam.pTableBlockHashList) {
|
||||
pCmd->insertParam.pTableBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
|
||||
if (NULL == pCmd->insertParam.pTableBlockHashList) {
|
||||
if (NULL == pInsertParam->pTableBlockHashList) {
|
||||
pInsertParam->pTableBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
|
||||
if (NULL == pInsertParam->pTableBlockHashList) {
|
||||
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
goto _clean;
|
||||
}
|
||||
} else {
|
||||
str = pCmd->curSql;
|
||||
str = pInsertParam->sql;
|
||||
}
|
||||
|
||||
tscDebug("0x%"PRIx64" create data block list hashList:%p", pSql->self, pCmd->insertParam.pTableBlockHashList);
|
||||
tscDebug("0x%"PRIx64" create data block list hashList:%p", pSql->self, pInsertParam->pTableBlockHashList);
|
||||
|
||||
while (1) {
|
||||
int32_t index = 0;
|
||||
|
@ -1144,7 +1158,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
|||
* if the data is from the data file, no data has been generated yet. So, there no data to
|
||||
* merge or submit, save the file path and parse the file in other routines.
|
||||
*/
|
||||
if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) {
|
||||
if (TSDB_QUERY_HAS_TYPE(pInsertParam->insertType, TSDB_QUERY_TYPE_FILE_INSERT)) {
|
||||
goto _clean;
|
||||
}
|
||||
|
||||
|
@ -1160,13 +1174,13 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
|||
}
|
||||
}
|
||||
|
||||
pCmd->curSql = sToken.z;
|
||||
pInsertParam->sql = sToken.z;
|
||||
char buf[TSDB_TABLE_FNAME_LEN];
|
||||
SStrToken sTblToken;
|
||||
sTblToken.z = buf;
|
||||
// Check if the table name available or not
|
||||
if (validateTableName(sToken.z, sToken.n, &sTblToken) != TSDB_CODE_SUCCESS) {
|
||||
code = tscInvalidSQLErrMsg(pCmd->payload, "table name invalid", sToken.z);
|
||||
code = tscInvalidOperationMsg(pInsertParam->msg, "table name invalid", sToken.z);
|
||||
goto _clean;
|
||||
}
|
||||
|
||||
|
@ -1185,12 +1199,12 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
|||
}
|
||||
|
||||
tscError("0x%"PRIx64" async insert parse error, code:%s", pSql->self, tstrerror(code));
|
||||
pCmd->curSql = NULL;
|
||||
pInsertParam->sql = NULL;
|
||||
goto _clean;
|
||||
}
|
||||
|
||||
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
|
||||
code = tscInvalidSQLErrMsg(pCmd->payload, "insert data into super table is not supported", NULL);
|
||||
code = tscInvalidOperationMsg(pInsertParam->msg, "insert data into super table is not supported", NULL);
|
||||
goto _clean;
|
||||
}
|
||||
|
||||
|
@ -1199,58 +1213,49 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
|||
str += index;
|
||||
|
||||
if (sToken.n == 0 || (sToken.type != TK_FILE && sToken.type != TK_VALUES)) {
|
||||
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE required", sToken.z);
|
||||
code = tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword VALUES or FILE required", sToken.z);
|
||||
goto _clean;
|
||||
}
|
||||
|
||||
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
|
||||
if (sToken.type == TK_FILE) {
|
||||
if (validateDataSource(pCmd, TSDB_QUERY_TYPE_FILE_INSERT, sToken.z) != TSDB_CODE_SUCCESS) {
|
||||
if (validateDataSource(pInsertParam, TSDB_QUERY_TYPE_FILE_INSERT, sToken.z) != TSDB_CODE_SUCCESS) {
|
||||
goto _clean;
|
||||
}
|
||||
|
||||
index = 0;
|
||||
sToken = tStrGetToken(str, &index, false);
|
||||
if (sToken.type != TK_STRING && sToken.type != TK_ID) {
|
||||
code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z);
|
||||
code = tscSQLSyntaxErrMsg(pInsertParam->msg, "file path is required following keyword FILE", sToken.z);
|
||||
goto _clean;
|
||||
}
|
||||
str += index;
|
||||
if (sToken.n == 0) {
|
||||
code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z);
|
||||
code = tscSQLSyntaxErrMsg(pInsertParam->msg, "file path is required following keyword FILE", sToken.z);
|
||||
goto _clean;
|
||||
}
|
||||
|
||||
strncpy(pCmd->payload, sToken.z, sToken.n);
|
||||
strdequote(pCmd->payload);
|
||||
|
||||
// todo refactor extract method
|
||||
wordexp_t full_path;
|
||||
if (wordexp(pCmd->payload, &full_path, 0) != 0) {
|
||||
code = tscInvalidSQLErrMsg(pCmd->payload, "invalid filename", sToken.z);
|
||||
code = getFileFullPath(&sToken, pCmd->payload);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscInvalidOperationMsg(pInsertParam->msg, "invalid filename", sToken.z);
|
||||
goto _clean;
|
||||
}
|
||||
|
||||
tstrncpy(pCmd->payload, full_path.we_wordv[0], pCmd->allocSize);
|
||||
wordfree(&full_path);
|
||||
|
||||
} else {
|
||||
if (bindedColumns == NULL) {
|
||||
STableMeta *pTableMeta = pTableMetaInfo->pTableMeta;
|
||||
|
||||
if (validateDataSource(pCmd, TSDB_QUERY_TYPE_INSERT, sToken.z) != TSDB_CODE_SUCCESS) {
|
||||
if (validateDataSource(pInsertParam, TSDB_QUERY_TYPE_INSERT, sToken.z) != TSDB_CODE_SUCCESS) {
|
||||
goto _clean;
|
||||
}
|
||||
|
||||
STableDataBlocks *dataBuf = NULL;
|
||||
int32_t ret = tscGetDataBlockFromList(pCmd->insertParam.pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
|
||||
int32_t ret = tscGetDataBlockFromList(pInsertParam->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
|
||||
sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta,
|
||||
&dataBuf, NULL);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
goto _clean;
|
||||
}
|
||||
|
||||
code = doParseInsertStatement(pCmd, &str, dataBuf, &totalNum);
|
||||
code = doParseInsertStatement(pInsertParam, &str, dataBuf, &totalNum);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _clean;
|
||||
}
|
||||
|
@ -1258,12 +1263,12 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
|||
// insert into tablename(col1, col2,..., coln) values(v1, v2,... vn);
|
||||
STableMeta *pTableMeta = tscGetTableMetaInfoFromCmd(pCmd, 0)->pTableMeta;
|
||||
|
||||
if (validateDataSource(pCmd, TSDB_QUERY_TYPE_INSERT, sToken.z) != TSDB_CODE_SUCCESS) {
|
||||
if (validateDataSource(pInsertParam, TSDB_QUERY_TYPE_INSERT, sToken.z) != TSDB_CODE_SUCCESS) {
|
||||
goto _clean;
|
||||
}
|
||||
|
||||
STableDataBlocks *dataBuf = NULL;
|
||||
int32_t ret = tscGetDataBlockFromList(pCmd->insertParam.pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
|
||||
int32_t ret = tscGetDataBlockFromList(pInsertParam->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
|
||||
sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta,
|
||||
&dataBuf, NULL);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
|
@ -1271,22 +1276,22 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
|||
}
|
||||
|
||||
SSchema *pSchema = tscGetTableSchema(pTableMeta);
|
||||
code = parseBoundColumns(pCmd, &dataBuf->boundColumnInfo, pSchema, bindedColumns, NULL);
|
||||
code = parseBoundColumns(pInsertParam, &dataBuf->boundColumnInfo, pSchema, bindedColumns, NULL);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _clean;
|
||||
}
|
||||
|
||||
if (dataBuf->boundColumnInfo.cols[0].hasVal == false) {
|
||||
code = tscInvalidSQLErrMsg(pCmd->payload, "primary timestamp column can not be null", NULL);
|
||||
code = tscInvalidOperationMsg(pInsertParam->msg, "primary timestamp column can not be null", NULL);
|
||||
goto _clean;
|
||||
}
|
||||
|
||||
if (sToken.type != TK_VALUES) {
|
||||
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES is expected", sToken.z);
|
||||
code = tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword VALUES is expected", sToken.z);
|
||||
goto _clean;
|
||||
}
|
||||
|
||||
code = doParseInsertStatement(pCmd, &str, dataBuf, &totalNum);
|
||||
code = doParseInsertStatement(pInsertParam, &str, dataBuf, &totalNum);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _clean;
|
||||
}
|
||||
|
@ -1295,13 +1300,13 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
|||
}
|
||||
|
||||
// we need to keep the data blocks if there are parameters in the sql
|
||||
if (pCmd->numOfParams > 0) {
|
||||
if (pInsertParam->numOfParams > 0) {
|
||||
goto _clean;
|
||||
}
|
||||
|
||||
// merge according to vgId
|
||||
if (!TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_STMT_INSERT) && taosHashGetSize(pCmd->insertParam.pTableBlockHashList) > 0) {
|
||||
if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) {
|
||||
if (!TSDB_QUERY_HAS_TYPE(pInsertParam->insertType, TSDB_QUERY_TYPE_STMT_INSERT) && taosHashGetSize(pInsertParam->pTableBlockHashList) > 0) {
|
||||
if ((code = tscMergeTableDataBlocks(pInsertParam, true)) != TSDB_CODE_SUCCESS) {
|
||||
goto _clean;
|
||||
}
|
||||
}
|
||||
|
@ -1310,7 +1315,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
|||
goto _clean;
|
||||
|
||||
_clean:
|
||||
pCmd->curSql = NULL;
|
||||
pInsertParam->sql = NULL;
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -1325,18 +1330,19 @@ int tsInsertInitialCheck(SSqlObj *pSql) {
|
|||
SStrToken sToken = tStrGetToken(pSql->sqlstr, &index, false);
|
||||
assert(sToken.type == TK_INSERT || sToken.type == TK_IMPORT);
|
||||
|
||||
pCmd->count = 0;
|
||||
pCmd->count = 0;
|
||||
pCmd->command = TSDB_SQL_INSERT;
|
||||
SInsertStatementParam* pInsertParam = &pCmd->insertParam;
|
||||
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoS(pCmd);
|
||||
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT);
|
||||
|
||||
sToken = tStrGetToken(pSql->sqlstr, &index, false);
|
||||
if (sToken.type != TK_INTO) {
|
||||
return tscInvalidSQLErrMsg(pCmd->payload, "keyword INTO is expected", sToken.z);
|
||||
return tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword INTO is expected", sToken.z);
|
||||
}
|
||||
|
||||
pCmd->curSql = sToken.z + sToken.n;
|
||||
pInsertParam->sql = sToken.z + sToken.n;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -1345,7 +1351,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
|
|||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
|
||||
if (!initial) {
|
||||
tscDebug("0x%"PRIx64" resume to parse sql: %s", pSql->self, pCmd->curSql);
|
||||
tscDebug("0x%"PRIx64" resume to parse sql: %s", pSql->self, pCmd->insertParam.sql);
|
||||
}
|
||||
|
||||
ret = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE);
|
||||
|
@ -1355,12 +1361,11 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
|
|||
|
||||
if (tscIsInsertData(pSql->sqlstr)) {
|
||||
if (initial && ((ret = tsInsertInitialCheck(pSql)) != TSDB_CODE_SUCCESS)) {
|
||||
strncpy(pCmd->payload, pCmd->insertParam.msg, TSDB_DEFAULT_PAYLOAD_SIZE);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = tsParseInsertSql(pSql);
|
||||
assert(ret == TSDB_CODE_SUCCESS || ret == TSDB_CODE_TSC_ACTION_IN_PROGRESS || ret == TSDB_CODE_TSC_SQL_SYNTAX_ERROR || ret == TSDB_CODE_TSC_INVALID_OPERATION);
|
||||
|
||||
if (pSql->parseRetry < 1 && (ret == TSDB_CODE_TSC_SQL_SYNTAX_ERROR || ret == TSDB_CODE_TSC_INVALID_OPERATION)) {
|
||||
tscDebug("0x%"PRIx64 " parse insert sql statement failed, code:%s, clear meta cache and retry ", pSql->self, tstrerror(ret));
|
||||
|
||||
|
@ -1371,6 +1376,10 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
|
|||
ret = tsParseInsertSql(pSql);
|
||||
}
|
||||
}
|
||||
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
strncpy(pCmd->payload, pCmd->insertParam.msg, TSDB_DEFAULT_PAYLOAD_SIZE);
|
||||
}
|
||||
} else {
|
||||
SSqlInfo sqlInfo = qSqlParse(pSql->sqlstr);
|
||||
ret = tscValidateSqlInfo(pSql, &sqlInfo);
|
||||
|
@ -1395,29 +1404,25 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlocks *pTableDataBlocks) {
|
||||
static int doPackSendDataBlock(SSqlObj* pSql, SInsertStatementParam *pInsertParam, STableMeta* pTableMeta, int32_t numOfRows, STableDataBlocks *pTableDataBlocks) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SSqlCmd *pCmd = &pSql->cmd;
|
||||
pSql->res.numOfRows = 0;
|
||||
|
||||
STableMeta *pTableMeta = tscGetTableMetaInfoFromCmd(pCmd, 0)->pTableMeta;
|
||||
|
||||
SSubmitBlk *pBlocks = (SSubmitBlk *)(pTableDataBlocks->pData);
|
||||
code = tsSetBlockInfo(pBlocks, pTableMeta, numOfRows);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", NULL);
|
||||
return tscInvalidOperationMsg(pInsertParam->msg, "too many rows in sql, total number of rows should be less than 32767", NULL);
|
||||
}
|
||||
|
||||
if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) {
|
||||
if ((code = tscMergeTableDataBlocks(pInsertParam, true)) != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
STableDataBlocks *pDataBlock = taosArrayGetP(pCmd->insertParam.pDataBlocks, 0);
|
||||
STableDataBlocks *pDataBlock = taosArrayGetP(pInsertParam->pDataBlocks, 0);
|
||||
if ((code = tscCopyDataBlockToPayload(pSql, pDataBlock)) != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
return tscBuildAndSendRequest(pSql, NULL);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
typedef struct SImportFileSupport {
|
||||
|
@ -1466,13 +1471,14 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
|
|||
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
|
||||
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
|
||||
|
||||
destroyTableNameList(pCmd);
|
||||
SInsertStatementParam* pInsertParam = &pCmd->insertParam;
|
||||
destroyTableNameList(pInsertParam);
|
||||
|
||||
pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pCmd->insertParam.pDataBlocks);
|
||||
pInsertParam->pDataBlocks = tscDestroyBlockArrayList(pInsertParam->pDataBlocks);
|
||||
|
||||
if (pCmd->insertParam.pTableBlockHashList == NULL) {
|
||||
pCmd->insertParam.pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
|
||||
if (pCmd->insertParam.pTableBlockHashList == NULL) {
|
||||
if (pInsertParam->pTableBlockHashList == NULL) {
|
||||
pInsertParam->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
|
||||
if (pInsertParam->pTableBlockHashList == NULL) {
|
||||
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
goto _error;
|
||||
}
|
||||
|
@ -1480,7 +1486,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
|
|||
|
||||
STableDataBlocks *pTableDataBlock = NULL;
|
||||
int32_t ret =
|
||||
tscGetDataBlockFromList(pCmd->insertParam.pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
|
||||
tscGetDataBlockFromList(pInsertParam->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
|
||||
tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pTableDataBlock, NULL);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
pParentSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
|
@ -1507,7 +1513,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
|
|||
strtolower(line, line);
|
||||
|
||||
int32_t len = 0;
|
||||
code = tsParseOneRow(&lineptr, pTableDataBlock, pCmd, tinfo.precision, &len, tokenBuf);
|
||||
code = tsParseOneRow(&lineptr, pTableDataBlock, tinfo.precision, &len, tokenBuf, pInsertParam);
|
||||
if (code != TSDB_CODE_SUCCESS || pTableDataBlock->numOfParams > 0) {
|
||||
pSql->res.code = code;
|
||||
break;
|
||||
|
@ -1526,12 +1532,14 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
|
|||
pParentSql->res.code = code;
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
if (count > 0) {
|
||||
code = doPackSendDataBlock(pSql, count, pTableDataBlock);
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
return;
|
||||
} else {
|
||||
pSql->res.numOfRows = 0;
|
||||
code = doPackSendDataBlock(pSql, pInsertParam, pTableMeta, count, pTableDataBlock);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _error;
|
||||
}
|
||||
|
||||
tscBuildAndSendRequest(pSql, NULL);
|
||||
return;
|
||||
} else {
|
||||
taos_free_result(pSql);
|
||||
tfree(pSupporter);
|
||||
|
@ -1562,7 +1570,8 @@ void tscImportDataFromFile(SSqlObj *pSql) {
|
|||
return;
|
||||
}
|
||||
|
||||
assert(TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT) && strlen(pCmd->payload) != 0);
|
||||
SInsertStatementParam* pInsertParam = &pCmd->insertParam;
|
||||
assert(TSDB_QUERY_HAS_TYPE(pInsertParam->insertType, TSDB_QUERY_TYPE_FILE_INSERT) && strlen(pCmd->payload) != 0);
|
||||
pCmd->active = pCmd->pQueryInfo;
|
||||
|
||||
SImportFileSupport *pSupporter = calloc(1, sizeof(SImportFileSupport));
|
||||
|
|
|
@@ -48,6 +48,7 @@ typedef struct SMultiTbStmt {
bool nameSet;
bool tagSet;
uint64_t currentUid;
char *sqlstr;
uint32_t tbNum;
SStrToken tbname;
SStrToken stbname;

@@ -1085,7 +1086,7 @@ static int insertStmtExecute(STscStmt* stmt) {
fillTablesColumnsNull(stmt->pSql);
int code = tscMergeTableDataBlocks(stmt->pSql, false);
int code = tscMergeTableDataBlocks(&stmt->pSql->cmd.insertParam, false);
if (code != TSDB_CODE_SUCCESS) {
return code;
}

@@ -1185,7 +1186,7 @@ static int insertBatchStmtExecute(STscStmt* pStmt) {
fillTablesColumnsNull(pStmt->pSql);
if ((code = tscMergeTableDataBlocks(pStmt->pSql, false)) != TSDB_CODE_SUCCESS) {
if ((code = tscMergeTableDataBlocks(&pStmt->pSql->cmd.insertParam, false)) != TSDB_CODE_SUCCESS) {
return code;
}

@@ -1203,7 +1204,6 @@ static int insertBatchStmtExecute(STscStmt* pStmt) {
return pStmt->pSql->res.code;
}
int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
SSqlCmd *pCmd = &pSql->cmd;
int32_t ret = TSDB_CODE_SUCCESS;

@@ -1213,7 +1213,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
}
int32_t index = 0;
SStrToken sToken = tStrGetToken(pCmd->curSql, &index, false);
SStrToken sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n == 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
@@ -1232,29 +1232,29 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
pStmt->mtb.tagSet = true;
sToken = tStrGetToken(pCmd->curSql, &index, false);
if (sToken.n > 0 && sToken.type == TK_VALUES) {
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n > 0 && (sToken.type == TK_VALUES || sToken.type == TK_LP)) {
return TSDB_CODE_SUCCESS;
}
if (sToken.n <= 0 || sToken.type != TK_USING) {
return TSDB_CODE_TSC_INVALID_OPERATION;
return tscSQLSyntaxErrMsg(pCmd->payload, "keywords USING is expected", sToken.z);
}
sToken = tStrGetToken(pCmd->curSql, &index, false);
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || ((sToken.type != TK_ID) && (sToken.type != TK_STRING))) {
return TSDB_CODE_TSC_INVALID_OPERATION;
return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z);
}
pStmt->mtb.stbname = sToken;
sToken = tStrGetToken(pCmd->curSql, &index, false);
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || sToken.type != TK_TAGS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
return tscSQLSyntaxErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
}
sToken = tStrGetToken(pCmd->curSql, &index, false);
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || sToken.type != TK_LP) {
return TSDB_CODE_TSC_INVALID_OPERATION;
return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
}
pStmt->mtb.tags = taosArrayInit(4, sizeof(SStrToken));

@@ -1262,7 +1262,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
int32_t loopCont = 1;
while (loopCont) {
sToken = tStrGetToken(pCmd->curSql, &index, false);
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}

@@ -1285,20 +1285,18 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
sToken = tStrGetToken(pCmd->curSql, &index, false);
if (sToken.n <= 0 || sToken.type != TK_VALUES) {
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || (sToken.type != TK_VALUES && sToken.type != TK_LP)) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
pStmt->mtb.values = sToken;
}
return TSDB_CODE_SUCCESS;
}
int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAOS_BIND* tags) {
size_t tagNum = taosArrayGetSize(pStmt->mtb.tags);
size_t size = 1048576;
@@ -1373,14 +1371,17 @@ int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAO
break;
}
free(pSql->sqlstr);
if (pStmt->mtb.sqlstr == NULL) {
pStmt->mtb.sqlstr = pSql->sqlstr;
} else {
tfree(pSql->sqlstr);
}
pSql->sqlstr = str;
return TSDB_CODE_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
// interface functions

@@ -1468,7 +1469,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
if (tscIsInsertData(pSql->sqlstr)) {
pStmt->isInsert = true;
pSql->cmd.numOfParams = 0;
pSql->cmd.insertParam.numOfParams = 0;
pSql->cmd.batchSize = 0;
registerSqlObj(pSql);

@@ -1561,11 +1562,10 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
}
pStmt->mtb.nameSet = true;
pStmt->mtb.tagSet = true;
tscDebug("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
pSql->cmd.numOfParams = 0;
pSql->cmd.insertParam.numOfParams = 0;
pSql->cmd.batchSize = 0;
if (taosHashGetSize(pCmd->insertParam.pTableBlockHashList) > 0) {

@@ -1634,6 +1634,7 @@ int taos_stmt_close(TAOS_STMT* stmt) {
taosHashCleanup(pStmt->pSql->cmd.insertParam.pTableBlockHashList);
pStmt->pSql->cmd.insertParam.pTableBlockHashList = NULL;
taosArrayDestroy(pStmt->mtb.tags);
tfree(pStmt->mtb.sqlstr);
}
}

@@ -1851,8 +1852,8 @@ int taos_stmt_num_params(TAOS_STMT *stmt, int *nums) {
if (pStmt->isInsert) {
SSqlObj* pSql = pStmt->pSql;
SSqlCmd *pCmd = &pSql->cmd;
*nums = pCmd->numOfParams;
SSqlCmd *pCmd = &pSql->cmd;
*nums = pCmd->insertParam.numOfParams;
return TSDB_CODE_SUCCESS;
} else {
SNormalStmt* normal = &pStmt->normal;

@@ -28,7 +28,7 @@
#include "tname.h"
#include "tscLog.h"
#include "tscUtil.h"
#include "tschemautil.h"
#include "qTableMeta.h"
#include "tsclient.h"
#include "tstrbuild.h"
#include "ttoken.h"

@@ -91,6 +91,7 @@ static int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCm
static int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode);
static int32_t parseIntervalOffset(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* offsetToken);
static int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* pSliding);
static int32_t validateStateWindowNode(SSqlCmd* pSql, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, bool isStable);
static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExprItem* pItem);

@@ -196,7 +197,7 @@ static bool validateDebugFlag(int32_t v) {
* is not needed in the final error message.
*/
static int32_t invalidOperationMsg(char* dstBuffer, const char* errMsg) {
return tscInvalidSQLErrMsg(dstBuffer, errMsg, NULL);
return tscInvalidOperationMsg(dstBuffer, errMsg, NULL);
}
static int setColumnFilterInfoForTimestamp(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tVariant* pVar) {
@@ -993,6 +994,59 @@ int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pS
// The following part is used to check for the invalid query expression.
return checkInvalidExprForTimeWindow(pCmd, pQueryInfo);
}
static int32_t validateStateWindowNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, bool isStable) {
const char* msg1 = "invalid column name";
const char* msg3 = "not support state_window with group by ";
const char* msg4 = "function not support for super table query";
SStrToken *col = &(pSqlNode->windowstateVal.col) ;
if (col->z == NULL || col->n <= 0) {
return TSDB_CODE_SUCCESS;
}
if (pQueryInfo->colList == NULL) {
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
}
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pQueryInfo->groupbyExpr.numOfGroupCols = 1;
//TODO(dengyihao): check tag column
if (isStable) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (getColumnIndexByName(pCmd, col, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || index.columnIndex >= numOfCols) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
SGroupbyExpr* pGroupExpr = &pQueryInfo->groupbyExpr;
if (pGroupExpr->columnInfo == NULL) {
pGroupExpr->columnInfo = taosArrayInit(4, sizeof(SColIndex));
}
SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP || pSchema->type == TSDB_DATA_TYPE_FLOAT || pSchema->type == TSDB_DATA_TYPE_DOUBLE) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
tscColumnListInsert(pQueryInfo->colList, index.columnIndex, pTableMeta->id.uid, pSchema);
SColIndex colIndex = { .colIndex = index.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId };
taosArrayPush(pGroupExpr->columnInfo, &colIndex);
pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC;
pQueryInfo->stateWindow = true;
return TSDB_CODE_SUCCESS;
}
int32_t validateSessionNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode * pSqlNode) {
const char* msg1 = "gap should be fixed time window";

@@ -1027,11 +1081,17 @@ int32_t validateSessionNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode * pS
if (pQueryInfo->sessionWindow.gap != 0 && pQueryInfo->interval.interval != 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (pQueryInfo->sessionWindow.gap == 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (getColumnIndexByName(pCmd, col, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pQueryInfo->sessionWindow.primaryColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;

@@ -3167,6 +3227,9 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo)
return true;
}
}
} else if (tscIsSessionWindowQuery(pQueryInfo)) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
return true;
}
return false;
@@ -6472,7 +6535,7 @@ static int32_t doTagFunctionCheck(SQueryInfo* pQueryInfo) {
int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg1 = "functions/columns not allowed in group by query";
const char* msg2 = "projection query on columns not allowed";
const char* msg3 = "group by not allowed on projection query";
const char* msg3 = "group by/session/state_window not allowed on projection query";
const char* msg4 = "retrieve tags not compatible with group by or interval query";
const char* msg5 = "functions can not be mixed up";

@@ -6488,6 +6551,9 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
}
}
if (tscIsProjectionQuery(pQueryInfo) && tscIsSessionWindowQuery(pQueryInfo)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
// check if all the tags prj columns belongs to the group by columns

@@ -6931,7 +6997,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
if (!findColumnIndex) {
tdDestroyKVRowBuilder(&kvRowBuilder);
return tscInvalidSQLErrMsg(pCmd->payload, "invalid tag name", sToken->z);
return tscInvalidOperationMsg(pCmd->payload, "invalid tag name", sToken->z);
}
}
} else {

@@ -7074,6 +7140,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (isTimeWindowQuery(pQueryInfo) && (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}

@@ -7747,6 +7814,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
const char* msg1 = "point interpolation query needs timestamp";
const char* msg2 = "too many tables in from clause";
const char* msg3 = "start(end) time of query range required or time range too large";
const char* msg4 = "interval query not supported, since the result of sub query not include valid timestamp column";
const char* msg9 = "only tag query not compatible with normal column filter";
int32_t code = TSDB_CODE_SUCCESS;

@@ -7788,6 +7856,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
return TSDB_CODE_TSC_INVALID_OPERATION;
}
// validate the query filter condition info
if (pSqlNode->pWhere != NULL) {
if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
@@ -7799,6 +7868,30 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000;
}
}
// validate the interval info
if (validateIntervalNode(pSql, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
} else {
if (isTimeWindowQuery(pQueryInfo)) {
// check if the first column of the nest query result is timestamp column
SColumn* pCol = taosArrayGetP(pQueryInfo->colList, 0);
if (pCol->info.type != TSDB_DATA_TYPE_TIMESTAMP) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
if (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
}
// set order by info
STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMeta)) !=
TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
} else {
pQueryInfo->command = TSDB_SQL_SELECT;

@@ -7855,7 +7948,10 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
// parse the window_state
if (validateStateWindowNode(pCmd, pQueryInfo, pSqlNode, isSTable) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
// set order by info
if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMetaInfo->pTableMeta)) !=
TSDB_CODE_SUCCESS) {

@@ -7896,6 +7992,10 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
* transfer sql functions that need secondary merge into another format
* in dealing with super table queries such as: count/first/last
*/
if (validateSessionNode(pCmd, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (isSTable) {
tscTansformFuncForSTableQuery(pQueryInfo);
if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {

@@ -7903,10 +8003,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
}
}
if (validateSessionNode(pCmd, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
// no result due to invalid query time range
if (pQueryInfo->window.skey > pQueryInfo->window.ekey) {
pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;

@@ -7950,11 +8046,12 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
pQueryInfo->globalMerge = tscIsTwoStageSTableQuery(pCmd, pQueryInfo, 0);
pQueryInfo->arithmeticOnAgg = tsIsArithmeticQueryOnAggResult(pQueryInfo);
pQueryInfo->orderProjectQuery = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0);
SExprInfo** p = NULL;
int32_t numOfExpr = 0;
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
code = createProjectionExpr(pQueryInfo, pTableMetaInfo, &p, &numOfExpr);
if (pQueryInfo->exprList1 == NULL) {
pQueryInfo->exprList1 = taosArrayInit(4, POINTER_BYTES);
}

@@ -15,15 +15,15 @@
#include "os.h"
#include "tcmdtype.h"
#include "tlockfree.h"
#include "trpc.h"
#include "tscLocalMerge.h"
#include "tscGlobalmerge.h"
#include "tscLog.h"
#include "tscProfile.h"
#include "tscUtil.h"
#include "tschemautil.h"
#include "qTableMeta.h"
#include "tsclient.h"
#include "ttimer.h"
#include "tlockfree.h"
#include "qPlan.h"
int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0};

@@ -857,6 +857,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->simpleAgg = query.simpleAgg;
pQueryMsg->pointInterpQuery = query.pointInterpQuery;
pQueryMsg->needReverseScan = query.needReverseScan;
pQueryMsg->stateWindow = query.stateWindow;
pQueryMsg->numOfTags = htonl(numOfTags);
pQueryMsg->sqlstrLen = htonl(sqlLen);

@@ -1656,7 +1657,7 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) {
return code;
}
if (pRes->pLocalMerger == NULL) { // no result from subquery, so abort here directly.
if (pRes->pMerger == NULL) { // no result from subquery, so abort here directly.
(*pSql->fp)(pSql->param, pSql, pRes->numOfRows);
return code;
}

@@ -1673,15 +1674,7 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) {
taosArrayPush(group, &tableKeyInfo);
taosArrayPush(tableGroupInfo.pGroupList, &group);
// todo remove it
SExprInfo* list = calloc(tscNumOfExprs(pQueryInfo), sizeof(SExprInfo));
for(int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) {
SExprInfo* pExprInfo = tscExprGet(pQueryInfo, i);
list[i] = *pExprInfo;
}
pQueryInfo->pQInfo = createQInfoFromQueryNode(pQueryInfo, list, &tableGroupInfo, NULL, NULL, pRes->pLocalMerger, MERGE_STAGE);
tfree(list);
pQueryInfo->pQInfo = createQInfoFromQueryNode(pQueryInfo, &tableGroupInfo, NULL, NULL, pRes->pMerger, MERGE_STAGE);
}
uint64_t localQueryId = 0;

@@ -2534,8 +2527,8 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg);
// tag data exists
if (autocreate && pSql->cmd.tagData.dataLen != 0) {
pMsg = serializeTagData(&pSql->cmd.tagData, pMsg);
if (autocreate && pSql->cmd.insertParam.tagData.dataLen != 0) {
pMsg = serializeTagData(&pSql->cmd.insertParam.tagData, pMsg);
}
pNew->cmd.payloadLen = (int32_t)(pMsg - (char*)pInfoMsg);

@@ -627,7 +627,7 @@ static bool hasAdditionalErrorInfo(int32_t code, SSqlCmd *pCmd) {
char *z = NULL;
if (len > 0) {
z = strstr(pCmd->payload, "invalid SQL");
z = strstr(pCmd->payload, "invalid operation");
if (z == NULL) {
z = strstr(pCmd->payload, "syntax error");
}

@@ -13,7 +13,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <tschemautil.h>
#include "os.h"
#include "taosmsg.h"
#include "tscLog.h"

@@ -21,7 +21,7 @@
#include "tcompare.h"
#include "tscLog.h"
#include "tscSubquery.h"
#include "tschemautil.h"
#include "qTableMeta.h"
#include "tsclient.h"
#include "qUdf.h"
#include "qUtil.h"

@@ -2450,7 +2450,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
assert(pState->numOfSub > 0);
int32_t ret = tscLocalReducerEnvCreate(pQueryInfo, &pMemoryBuf, pSql->subState.numOfSub, &pDesc, nBufferSize, pSql->self);
int32_t ret = tscCreateGlobalMergerEnv(pQueryInfo, &pMemoryBuf, pSql->subState.numOfSub, &pDesc, nBufferSize, pSql->self);
if (ret != 0) {
pRes->code = ret;
tscAsyncResultOnError(pSql);

@@ -2463,7 +2463,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
if (pSql->pSubs == NULL) {
tfree(pSql->pSubs);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc,pState->numOfSub);
tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc,pState->numOfSub);
tscAsyncResultOnError(pSql);
return ret;

@@ -2530,13 +2530,13 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
tscError("0x%"PRIx64" failed to prepare subquery structure and launch subqueries", pSql->self);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pState->numOfSub);
tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc, pState->numOfSub);
doCleanupSubqueries(pSql, i);
return pRes->code; // free all allocated resource
}
if (pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED) {
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pState->numOfSub);
tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc, pState->numOfSub);
doCleanupSubqueries(pSql, i);
return pRes->code;
}

@@ -2701,7 +2701,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
tstrerror(pParentSql->res.code));
// release allocated resource
tscLocalReducerEnvDestroy(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor,
tscDestroyGlobalMergerEnv(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor,
pState->numOfSub);
tscFreeRetrieveSup(pSql);

@@ -2776,7 +2776,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
SQueryInfo *pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd);
tscClearInterpInfo(pPQueryInfo);
code = tscCreateLocalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, pPQueryInfo, &pParentSql->res.pLocalMerger, pParentSql->self);
code = tscCreateGlobalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, pPQueryInfo, &pParentSql->res.pMerger, pParentSql->self);
pParentSql->res.code = code;
if (code == TSDB_CODE_SUCCESS && trsupport->pExtMemBuffer == NULL) {

@@ -3525,9 +3525,8 @@ static UNUSED_FUNC bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql) {
return hasData;
}
// todo remove pExprs
void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, SExprInfo* pExprs, STableGroupInfo* pTableGroupInfo,
SOperatorInfo* pSourceOperator, char* sql, void* merger, int32_t stage) {
void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pSourceOperator,
char* sql, void* merger, int32_t stage) {
assert(pQueryInfo != NULL);
SQInfo *pQInfo = (SQInfo *)calloc(1, sizeof(SQInfo));
if (pQInfo == NULL) {

@@ -14,18 +14,18 @@
*/
#include "tscUtil.h"
#include "tsched.h"
#include "hash.h"
#include "os.h"
#include "taosmsg.h"
#include "texpr.h"
#include "tkey.h"
#include "tmd5.h"
#include "tscLocalMerge.h"
#include "tscGlobalmerge.h"
#include "tscLog.h"
#include "tscProfile.h"
#include "tscSubquery.h"
#include "tschemautil.h"
#include "tsched.h"
#include "qTableMeta.h"
#include "tsclient.h"
#include "ttimer.h"
#include "ttokendef.h"

@@ -456,6 +456,9 @@ bool tscIsTWAQuery(SQueryInfo* pQueryInfo) {
return false;
}
bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo) {
return pQueryInfo->sessionWindow.gap > 0;
}
bool tscNeedReverseScan(SQueryInfo* pQueryInfo) {
size_t numOfExprs = tscNumOfExprs(pQueryInfo);

@@ -738,9 +741,10 @@ static SColumnInfo* extractColumnInfoFromResult(SArray* pTableCols) {
}
typedef struct SDummyInputInfo {
SSDataBlock *block;
SSqlObj *pSql; // refactor: remove it
int32_t numOfFilterCols;
SSDataBlock *block;
STableQueryInfo *pTableQueryInfo;
SSqlObj *pSql; // refactor: remove it
int32_t numOfFilterCols;
SSingleColumnFilterInfo *pFilterInfo;
} SDummyInputInfo;

@@ -757,9 +761,10 @@ typedef struct SJoinOperatorInfo {
SRspResultInfo resultInfo; // todo refactor, add this info for each operator
} SJoinOperatorInfo;
static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock) {
static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols) {
int32_t offset = 0;
char* pData = pRes->data;
for(int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, i);
if (pData != NULL) {

@@ -771,6 +776,26 @@ static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock) {
offset += pColData->info.bytes;
}
// filter data if needed
if (numOfFilterCols > 0) {
doSetFilterColumnInfo(pFilterInfo, numOfFilterCols, pBlock);
int8_t* p = calloc(pBlock->info.rows, sizeof(int8_t));
bool all = doFilterDataBlock(pFilterInfo, numOfFilterCols, pBlock->info.rows, p);
if (!all) {
doCompactSDataBlock(pBlock, pBlock->info.rows, p);
}
tfree(p);
}
// todo refactor: extract method
// set the timestamp range of current result data block
SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, 0);
if (pColData->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
pBlock->info.window.skey = ((int64_t*)pColData->pData)[0];
pBlock->info.window.ekey = ((int64_t*)pColData->pData)[pBlock->info.rows-1];
}
pRes->numOfRows = 0;
}

@@ -786,22 +811,13 @@ SSDataBlock* doGetDataBlock(void* param, bool* newgroup) {
SSqlRes* pRes = &pSql->res;
SSDataBlock* pBlock = pInput->block;
if (pOperator->pRuntimeEnv != NULL) {
pOperator->pRuntimeEnv->current = pInput->pTableQueryInfo;
}
pBlock->info.rows = pRes->numOfRows;
if (pRes->numOfRows != 0) {
doSetupSDataBlock(pRes, pBlock);
if (pInput->numOfFilterCols > 0) {
doSetFilterColumnInfo(pInput->pFilterInfo, pInput->numOfFilterCols, pBlock);
int8_t* p = calloc(pBlock->info.rows, sizeof(int8_t));
bool all = doFilterDataBlock(pInput->pFilterInfo, pInput->numOfFilterCols, pBlock->info.rows, p);
if (!all) {
doCompactSDataBlock(pBlock, pBlock->info.rows, p);
}
tfree(p);
}
doSetupSDataBlock(pRes, pBlock, pInput->pFilterInfo, pInput->numOfFilterCols);
*newgroup = false;
return pBlock;
}

@@ -816,11 +832,29 @@ SSDataBlock* doGetDataBlock(void* param, bool* newgroup) {
}
pBlock->info.rows = pRes->numOfRows;
doSetupSDataBlock(pRes, pBlock);
doSetupSDataBlock(pRes, pBlock, pInput->pFilterInfo, pInput->numOfFilterCols);
*newgroup = false;
return pBlock;
}
static void fetchNextBlockIfCompleted(SOperatorInfo* pOperator, bool* newgroup) {
SJoinOperatorInfo* pJoinInfo = pOperator->info;
for (int32_t i = 0; i < pOperator->numOfUpstream; ++i) {
SJoinStatus* pStatus = &pJoinInfo->status[i];
if (pStatus->pBlock == NULL || pStatus->index >= pStatus->pBlock->info.rows) {
pStatus->pBlock = pOperator->upstream[i]->exec(pOperator->upstream[i], newgroup);
pStatus->index = 0;
if (pStatus->pBlock == NULL) {
pOperator->status = OP_EXEC_DONE;
pJoinInfo->resultInfo.total += pJoinInfo->pRes->info.rows;
break;
}
}
}
}
SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) {
SOperatorInfo *pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) {

@@ -833,19 +867,9 @@ SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) {
pJoinInfo->pRes->info.rows = 0;
while(1) {
for (int32_t i = 0; i < pOperator->numOfUpstream; ++i) {
SJoinStatus* pStatus = &pJoinInfo->status[i];
if (pStatus->pBlock == NULL || pStatus->index >= pStatus->pBlock->info.rows) {
pStatus->pBlock = pOperator->upstream[i]->exec(pOperator->upstream[i], newgroup);
pStatus->index = 0;
if (pStatus->pBlock == NULL) {
pOperator->status = OP_EXEC_DONE;
pJoinInfo->resultInfo.total += pJoinInfo->pRes->info.rows;
return pJoinInfo->pRes;
}
}
fetchNextBlockIfCompleted(pOperator, newgroup);
if (pOperator->status == OP_EXEC_DONE) {
return pJoinInfo->pRes;
}
SJoinStatus* st0 = &pJoinInfo->status[0];

@@ -864,8 +888,12 @@ SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) {
if (ts[st->index] < ts0[st0->index]) { // less than the first
prefixEqual = false;
if ((++(st->index)) >= st->pBlock->info.rows) {
break;
fetchNextBlockIfCompleted(pOperator, newgroup);
if (pOperator->status == OP_EXEC_DONE) {
return pJoinInfo->pRes;
}
}
} else if (ts[st->index] > ts0[st0->index]) { // greater than the first;
if (prefixEqual == true) {

@@ -873,12 +901,19 @@ SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) {
for (int32_t j = 0; j < i; ++j) {
SJoinStatus* stx = &pJoinInfo->status[j];
if ((++(stx->index)) >= stx->pBlock->info.rows) {
break;
fetchNextBlockIfCompleted(pOperator, newgroup);
if (pOperator->status == OP_EXEC_DONE) {
return pJoinInfo->pRes;
}
}
}
} else {
if ((++(st0->index)) >= st0->pBlock->info.rows) {
break;
fetchNextBlockIfCompleted(pOperator, newgroup);
if (pOperator->status == OP_EXEC_DONE) {
return pJoinInfo->pRes;
}
}
}
}

@@ -973,11 +1008,14 @@ static void destroyDummyInputOperator(void* param, int32_t numOfOutput) {
// todo this operator servers as the adapter for Operator tree and SqlRes result, remove it later
SOperatorInfo* createDummyInputOperator(SSqlObj* pSql, SSchema* pSchema, int32_t numOfCols, SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols) {
assert(numOfCols > 0);
STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};
SDummyInputInfo* pInfo = calloc(1, sizeof(SDummyInputInfo));
pInfo->pSql = pSql;
pInfo->pFilterInfo = pFilterInfo;
pInfo->numOfFilterCols = numOfFilterCols;
pInfo->pTableQueryInfo = createTmpTableQueryInfo(win);
pInfo->block = calloc(numOfCols, sizeof(SSDataBlock));
pInfo->block->info.numOfCols = numOfCols;

@@ -1063,12 +1101,31 @@ void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
pRes->completed = (pRes->numOfRows == 0);
}
static void createInputDataFilterInfo(SQueryInfo* px, int32_t numOfCol1, int32_t* numOfFilterCols, SSingleColumnFilterInfo** pFilterInfo) {
SColumnInfo* tableCols = calloc(numOfCol1, sizeof(SColumnInfo));
for(int32_t i = 0; i < numOfCol1; ++i) {
SColumn* pCol = taosArrayGetP(px->colList, i);
if (pCol->info.flist.numOfFilters > 0) {
(*numOfFilterCols) += 1;
}
tableCols[i] = pCol->info;
}
if ((*numOfFilterCols) > 0) {
doCreateFilterInfo(tableCols, numOfCol1, (*numOfFilterCols), pFilterInfo, 0);
}
tfree(tableCols);
}
void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQueryInfo* px, SSqlRes* pOutput) {
// handle the following query process
if (px->pQInfo == NULL) {
SColumnInfo* pColumnInfo = extractColumnInfoFromResult(px->colList);
SSchema* pSchema = tscGetTableSchema(px->pTableMetaInfo[0]->pTableMeta);
STableMeta* pTableMeta = tscGetMetaInfo(px, 0)->pTableMeta;
SSchema* pSchema = tscGetTableSchema(pTableMeta);
STableGroupInfo tableGroupInfo = {
.numOfTables = 1,

@@ -1085,23 +1142,11 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
taosArrayPush(tableGroupInfo.pGroupList, &group);
// if it is a join query, create join operator here
int32_t numOfCol1 = px->pTableMetaInfo[0]->pTableMeta->tableInfo.numOfColumns;
int32_t numOfCol1 = pTableMeta->tableInfo.numOfColumns;
int32_t numOfFilterCols = 0;
SColumnInfo* tableCols = calloc(numOfCol1, sizeof(SColumnInfo));
for(int32_t i = 0; i < numOfCol1; ++i) {
SColumn* pCol = taosArrayGetP(px->colList, i);
if (pCol->info.flist.numOfFilters > 0) {
numOfFilterCols += 1;
}
tableCols[i] = pCol->info;
}
SSingleColumnFilterInfo* pFilterInfo = NULL;
if (numOfFilterCols > 0) {
doCreateFilterInfo(tableCols, numOfCol1, numOfFilterCols, &pFilterInfo, 0);
}
createInputDataFilterInfo(px, numOfCol1, &numOfFilterCols, &pFilterInfo);
SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilterInfo, numOfFilterCols);

@@ -1117,24 +1162,14 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
int32_t offset = pSourceOperator->numOfOutput;
for(int32_t i = 1; i < px->numOfTables; ++i) {
SSchema* pSchema1 = tscGetTableSchema(px->pTableMetaInfo[i]->pTableMeta);
int32_t n = px->pTableMetaInfo[i]->pTableMeta->tableInfo.numOfColumns;
STableMeta* pTableMeta1 = tscGetMetaInfo(px, i)->pTableMeta;
SSchema* pSchema1 = tscGetTableSchema(pTableMeta1);
int32_t n = pTableMeta1->tableInfo.numOfColumns;
int32_t numOfFilterCols1 = 0;
SColumnInfo* tableCols1 = calloc(numOfCol1, sizeof(SColumnInfo));
for(int32_t j = 0; j < numOfCol1; ++j) {
SColumn* pCol = taosArrayGetP(px->colList, j);
if (pCol->info.flist.numOfFilters > 0) {
numOfFilterCols += 1;
}
tableCols1[j] = pCol->info;
}
SSingleColumnFilterInfo* pFilterInfo1 = NULL;
if (numOfFilterCols1 > 0) {
doCreateFilterInfo(tableCols1, numOfCol1, numOfFilterCols1, &pFilterInfo1, 0);
}
createInputDataFilterInfo(px, numOfCol1, &numOfFilterCols1, &pFilterInfo1);
p[i] = createDummyInputOperator(pSqlObjList[i], pSchema1, n, pFilterInfo1, numOfFilterCols1);
memcpy(&schema[offset], pSchema1, n * sizeof(SSchema));

@@ -1149,11 +1184,25 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
memcpy(schema, pSchema, numOfCol1*sizeof(SSchema));
}
SExprInfo* exprInfo = NULL;
px->pQInfo = createQInfoFromQueryNode(px, exprInfo, &tableGroupInfo, pSourceOperator, NULL, NULL, MASTER_SCAN);
// update the exprinfo
int32_t numOfOutput = (int32_t)tscNumOfExprs(px);
for(int32_t i = 0; i < numOfOutput; ++i) {
SExprInfo* pex = taosArrayGetP(px->exprList, i);
int32_t colId = pex->base.colInfo.colId;
for(int32_t j = 0; j < pSourceOperator->numOfOutput; ++j) {
if (colId == schema[j].colId) {
pex->base.colInfo.colIndex = j;
break;
}
}
}
px->pQInfo = createQInfoFromQueryNode(px, &tableGroupInfo, pSourceOperator, NULL, NULL, MASTER_SCAN);
tfree(pColumnInfo);
tfree(schema);
tfree(exprInfo);
// set the pRuntimeEnv for pSourceOperator
pSourceOperator->pRuntimeEnv = &px->pQInfo->runtimeEnv;
}
uint64_t qId = 0;
@@ -1236,31 +1285,34 @@ void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) {
pCmd->active = NULL;
}
void destroyTableNameList(SSqlCmd* pCmd) {
if (pCmd->insertParam.numOfTables == 0) {
assert(pCmd->insertParam.pTableNameList == NULL);
void destroyTableNameList(SInsertStatementParam* pInsertParam) {
if (pInsertParam->numOfTables == 0) {
assert(pInsertParam->pTableNameList == NULL);
return;
}
for(int32_t i = 0; i < pCmd->insertParam.numOfTables; ++i) {
tfree(pCmd->insertParam.pTableNameList[i]);
for(int32_t i = 0; i < pInsertParam->numOfTables; ++i) {
tfree(pInsertParam->pTableNameList[i]);
}
pCmd->insertParam.numOfTables = 0;
tfree(pCmd->insertParam.pTableNameList);
pInsertParam->numOfTables = 0;
tfree(pInsertParam->pTableNameList);
}
void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) {
pCmd->command = 0;
pCmd->numOfCols = 0;
pCmd->count = 0;
pCmd->curSql = NULL;
pCmd->msgType = 0;
destroyTableNameList(pCmd);
pCmd->insertParam.sql = NULL;
destroyTableNameList(&pCmd->insertParam);
pCmd->insertParam.pTableBlockHashList = tscDestroyBlockHashTable(pCmd->insertParam.pTableBlockHashList, clearCachedMeta);
pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pCmd->insertParam.pDataBlocks);
tfree(pCmd->insertParam.tagData.data);
pCmd->insertParam.tagData.dataLen = 0;
tscFreeQueryInfo(pCmd, clearCachedMeta);
if (pCmd->pTableMetaMap != NULL) {

@@ -1279,8 +1331,8 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) {
void tscFreeSqlResult(SSqlObj* pSql) {
SSqlRes* pRes = &pSql->res;
tscDestroyLocalMerger(pRes->pLocalMerger);
pRes->pLocalMerger = NULL;
tscDestroyGlobalMerger(pRes->pMerger);
pRes->pMerger = NULL;
tscDestroyResPointerInfo(pRes);
memset(&pSql->res, 0, sizeof(SSqlRes));

@@ -1373,9 +1425,6 @@ void tscFreeSqlObj(SSqlObj* pSql) {
tscFreeSqlResult(pSql);
tscResetSqlCmd(pCmd, false);
tfree(pCmd->tagData.data);
pCmd->tagData.dataLen = 0;
memset(pCmd->payload, 0, (size_t)pCmd->allocSize);
tfree(pCmd->payload);
pCmd->allocSize = 0;

@@ -1717,37 +1766,36 @@ static int32_t getRowExpandSize(STableMeta* pTableMeta) {
return result;
}
static void extractTableNameList(SSqlCmd* pCmd, bool freeBlockMap) {
pCmd->insertParam.numOfTables = (int32_t) taosHashGetSize(pCmd->insertParam.pTableBlockHashList);
if (pCmd->insertParam.pTableNameList == NULL) {
pCmd->insertParam.pTableNameList = calloc(pCmd->insertParam.numOfTables, POINTER_BYTES);
static void extractTableNameList(SInsertStatementParam *pInsertParam, bool freeBlockMap) {
pInsertParam->numOfTables = (int32_t) taosHashGetSize(pInsertParam->pTableBlockHashList);
if (pInsertParam->pTableNameList == NULL) {
pInsertParam->pTableNameList = calloc(pInsertParam->numOfTables, POINTER_BYTES);
} else {
memset(pCmd->insertParam.pTableNameList, 0, pCmd->insertParam.numOfTables * POINTER_BYTES);
memset(pInsertParam->pTableNameList, 0, pInsertParam->numOfTables * POINTER_BYTES);
}
STableDataBlocks **p1 = taosHashIterate(pCmd->insertParam.pTableBlockHashList, NULL);
STableDataBlocks **p1 = taosHashIterate(pInsertParam->pTableBlockHashList, NULL);
int32_t i = 0;
while(p1) {
STableDataBlocks* pBlocks = *p1;
tfree(pCmd->insertParam.pTableNameList[i]);
tfree(pInsertParam->pTableNameList[i]);
pCmd->insertParam.pTableNameList[i++] = tNameDup(&pBlocks->tableName);
p1 = taosHashIterate(pCmd->insertParam.pTableBlockHashList, p1);
pInsertParam->pTableNameList[i++] = tNameDup(&pBlocks->tableName);
p1 = taosHashIterate(pInsertParam->pTableBlockHashList, p1);
}
if (freeBlockMap) {
pCmd->insertParam.pTableBlockHashList = tscDestroyBlockHashTable(pCmd->insertParam.pTableBlockHashList, false);
pInsertParam->pTableBlockHashList = tscDestroyBlockHashTable(pInsertParam->pTableBlockHashList, false);
}
}
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBlockMap) {
const int INSERT_HEAD_SIZE = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
SSqlCmd* pCmd = &pSql->cmd;
void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
SArray* pVnodeDataBlockList = taosArrayInit(8, POINTER_BYTES);
STableDataBlocks** p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, NULL);
STableDataBlocks** p = taosHashIterate(pInsertParam->pTableBlockHashList, NULL);
STableDataBlocks* pOneTableBlock = *p;
while(pOneTableBlock) {

@@ -1760,7 +1808,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
if (ret != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pSql->self, ret);
tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pInsertParam->objectId, ret);
taosHashCleanup(pVnodeDataBlockHashList);
tscDestroyBlockArrayList(pVnodeDataBlockList);
return ret;

@@ -1778,7 +1826,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
dataBuf->pData = tmp;
memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size);
} else { // failed to allocate memory, free already allocated memory and return error code
tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pSql->self, dataBuf->nAllocSize);
tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pInsertParam->objectId, dataBuf->nAllocSize);
taosHashCleanup(pVnodeDataBlockHashList);
tscDestroyBlockArrayList(pVnodeDataBlockList);

@@ -1791,7 +1839,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
tscSortRemoveDataBlockDupRows(pOneTableBlock);
char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql->self, tNameGetTableName(&pOneTableBlock->tableName),
tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName),
pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);

@@ -1803,7 +1851,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
pBlocks->schemaLen = 0;
// erase the empty space reserved for binary data
int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pCmd->insertParam.schemaAttached);
int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pInsertParam->schemaAttached);
assert(finalLen <= len);
dataBuf->size += (finalLen + sizeof(SSubmitBlk));

@@ -1815,10 +1863,10 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
pBlocks->numOfRows = 0;
}else {
tscDebug("0x%"PRIx64" table %s data block is empty", pSql->self, pOneTableBlock->tableName.tname);
tscDebug("0x%"PRIx64" table %s data block is empty", pInsertParam->objectId, pOneTableBlock->tableName.tname);
}
p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, p);
p = taosHashIterate(pInsertParam->pTableBlockHashList, p);
if (p == NULL) {
break;
}

@@ -1826,10 +1874,10 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
pOneTableBlock = *p;
}
extractTableNameList(pCmd, freeBlockMap);
extractTableNameList(pInsertParam, freeBlockMap);
// free the table data blocks;
pCmd->insertParam.pDataBlocks = pVnodeDataBlockList;
pInsertParam->pDataBlocks = pVnodeDataBlockList;
taosHashCleanup(pVnodeDataBlockHashList);
return TSDB_CODE_SUCCESS;
@@ -3158,6 +3206,15 @@ void tscResetForNextRetrieve(SSqlRes* pRes) {
pRes->numOfRows = 0;
}
void tscInitResForMerge(SSqlRes* pRes) {
pRes->qId = 1; // hack to pass the safety check in fetch_row function
pRes->rspType = 0; // used as a flag to denote if taos_retrieved() has been called yet
tscResetForNextRetrieve(pRes);
assert(pRes->pMerger != NULL);
pRes->data = pRes->pMerger->buf;
}
void registerSqlObj(SSqlObj* pSql) {
taosAcquireRef(tscRefId, pSql->pTscObj->rid);
pSql->self = taosAddRef(tscObjRef, pSql);

@@ -3179,14 +3236,6 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in
SSqlCmd* pCmd = &pNew->cmd;
pCmd->command = cmd;
int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData);
if (code != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" new subquery failed, unable to malloc tag data, tableIndex:%d", pSql->self, 0);
free(pNew);
return NULL;
}
if (tscAddQueryInfo(pCmd) != TSDB_CODE_SUCCESS) {
#ifdef __APPLE__
// to satisfy later tsem_destroy in taos_free_result

@@ -3284,8 +3333,6 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pnCmd->insertParam.numOfTables = 0;
pnCmd->insertParam.pTableNameList = NULL;
pnCmd->insertParam.pTableBlockHashList = NULL;
pnCmd->tagData.data = NULL;
pnCmd->tagData.dataLen = 0;
if (tscAddQueryInfo(pnCmd) != TSDB_CODE_SUCCESS) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;

@@ -3456,7 +3503,11 @@ void doExecuteQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
tscHandleMasterSTableQuery(pSql);
tscUnlockByThread(&pSql->squeryLock);
} else if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) {
tscHandleMultivnodeInsert(pSql);
if (TSDB_QUERY_HAS_TYPE(pSql->cmd.insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) {
tscImportDataFromFile(pSql);
} else {
tscHandleMultivnodeInsert(pSql);
}
} else if (pSql->cmd.command > TSDB_SQL_LOCAL) {
tscProcessLocalCmd(pSql);
} else { // send request to server directly

@@ -3674,10 +3725,10 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
int32_t tscInvalidSQLErrMsg(char* msg, const char* additionalInfo, const char* sql) {
const char* msgFormat1 = "invalid SQL: %s";
const char* msgFormat2 = "invalid SQL: \'%s\' (%s)";
const char* msgFormat3 = "invalid SQL: \'%s\'";
int32_t tscInvalidOperationMsg(char* msg, const char* additionalInfo, const char* sql) {
const char* msgFormat1 = "invalid operation: %s";
const char* msgFormat2 = "invalid operation: \'%s\' (%s)";
const char* msgFormat3 = "invalid operation: \'%s\'";
const int32_t BACKWARD_CHAR_STEP = 0;

@@ -4281,11 +4332,13 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
pQueryAttr->simpleAgg = isSimpleAggregate(pQueryInfo);
pQueryAttr->needReverseScan = tscNeedReverseScan(pQueryInfo);
pQueryAttr->stableQuery = QUERY_IS_STABLE_QUERY(pQueryInfo->type);
pQueryAttr->groupbyColumn = tscGroupbyColumn(pQueryInfo);
pQueryAttr->groupbyColumn = (!pQueryInfo->stateWindow) && tscGroupbyColumn(pQueryInfo);
pQueryAttr->queryBlockDist = isBlockDistQuery(pQueryInfo);
pQueryAttr->pointInterpQuery = tscIsPointInterpQuery(pQueryInfo);
pQueryAttr->timeWindowInterpo = timeWindowInterpoRequired(pQueryInfo);
pQueryAttr->distinctTag = pQueryInfo->distinctTag;
pQueryAttr->sw = pQueryInfo->sessionWindow;
pQueryAttr->stateWindow = pQueryInfo->stateWindow;
pQueryAttr->numOfCols = numOfCols;
pQueryAttr->numOfOutput = numOfOutput;

@@ -4293,7 +4346,6 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
pQueryAttr->slimit = pQueryInfo->slimit;
pQueryAttr->order = pQueryInfo->order;
pQueryAttr->fillType = pQueryInfo->fillType;
pQueryAttr->groupbyColumn = tscGroupbyColumn(pQueryInfo);
pQueryAttr->havingNum = pQueryInfo->havingFieldNum;
pQueryAttr->pUdfInfo = pQueryInfo->pUdfInfo;

@@ -4484,3 +4536,38 @@ int tscTransferTableNameList(SSqlObj *pSql, const char *pNameList, int32_t lengt
return TSDB_CODE_SUCCESS;
}
bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src) {
assert(pExisted != NULL && src != NULL);
if (pExisted->numOfEps != src->numOfEps) {
return false;
}
for(int32_t i = 0; i < pExisted->numOfEps; ++i) {
if (pExisted->ep[i].port != src->epAddr[i].port) {
return false;
}
if (strncmp(pExisted->ep[i].fqdn, src->epAddr[i].fqdn, tListLen(pExisted->ep[i].fqdn)) != 0) {
return false;
}
}
return true;
}
SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) {
assert(pVgroupMsg != NULL);
SNewVgroupInfo info = {0};
info.numOfEps = pVgroupMsg->numOfEps;
info.vgId = pVgroupMsg->vgId;
info.inUse = 0; // 0 is the default value of inUse in case of multiple replica
assert(info.numOfEps >= 1 && info.vgId >= 1);
for(int32_t i = 0; i < pVgroupMsg->numOfEps; ++i) {
tstrncpy(info.ep[i].fqdn, pVgroupMsg->epAddr[i].fqdn, TSDB_FQDN_LEN);
info.ep[i].port = pVgroupMsg->epAddr[i].port;
}
return info;
}

@@ -16,13 +16,13 @@
*/
package com.taosdata.jdbc;
import com.taosdata.jdbc.utils.TaosInfo;
import java.nio.ByteBuffer;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.List;
import com.taosdata.jdbc.utils.TaosInfo;
/**
* JNI connector
*/

@@ -276,23 +276,14 @@ public class TSDBJNIConnector {
private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes);
public long prepareStmt(String sql) throws SQLException {
Long stmt = 0L;
try {
stmt = prepareStmtImp(sql.getBytes(), this.taos);
} catch (Exception e) {
e.printStackTrace();
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING);
}
if (stmt == TSDBConstants.JNI_CONNECTION_NULL) {
Long stmt = prepareStmtImp(sql.getBytes(), this.taos);
if (stmt == TSDBConstants.JNI_TDENGINE_ERROR) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_SQL);
} else if (stmt == TSDBConstants.JNI_CONNECTION_NULL) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
}
if (stmt == TSDBConstants.JNI_SQL_NULL) {
} else if (stmt == TSDBConstants.JNI_SQL_NULL) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL);
}
if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) {
} else if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY);
}

@@ -10,8 +10,15 @@ INCLUDE_DIRECTORIES(${TD_ENTERPRISE_DIR}/src/inc)
INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(src SRC)
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc")
ELSE ()
SET(LINK_JEMALLOC "")
ENDIF ()
ADD_EXECUTABLE(taosd ${SRC})
TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lua lz4 balance sync)
TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lua lz4 balance sync ${LINK_JEMALLOC})
IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosd taos_static)

@@ -476,6 +476,7 @@ typedef struct {
bool simpleAgg;
bool pointInterpQuery; // point interpolation query
bool needReverseScan; // need reverse scan
bool stateWindow; // state window flag
STimeWindow window;
int32_t numOfTables;

@@ -141,73 +141,74 @@
#define TK_VARIABLE 122
#define TK_INTERVAL 123
#define TK_SESSION 124
#define TK_FILL 125
#define TK_SLIDING 126
#define TK_ORDER 127
#define TK_BY 128
#define TK_ASC 129
#define TK_DESC 130
#define TK_GROUP 131
#define TK_HAVING 132
#define TK_LIMIT 133
#define TK_OFFSET 134
#define TK_SLIMIT 135
#define TK_SOFFSET 136
#define TK_WHERE 137
#define TK_NOW 138
#define TK_RESET 139
#define TK_QUERY 140
#define TK_SYNCDB 141
#define TK_ADD 142
#define TK_COLUMN 143
#define TK_TAG 144
#define TK_CHANGE 145
#define TK_SET 146
#define TK_KILL 147
#define TK_CONNECTION 148
#define TK_STREAM 149
#define TK_COLON 150
#define TK_ABORT 151
#define TK_AFTER 152
#define TK_ATTACH 153
#define TK_BEFORE 154
#define TK_BEGIN 155
#define TK_CASCADE 156
#define TK_CLUSTER 157
#define TK_CONFLICT 158
#define TK_COPY 159
#define TK_DEFERRED 160
#define TK_DELIMITERS 161
#define TK_DETACH 162
#define TK_EACH 163
#define TK_END 164
#define TK_EXPLAIN 165
#define TK_FAIL 166
#define TK_FOR 167
#define TK_IGNORE 168
#define TK_IMMEDIATE 169
#define TK_INITIALLY 170
#define TK_INSTEAD 171
#define TK_MATCH 172
#define TK_KEY 173
#define TK_OF 174
#define TK_RAISE 175
#define TK_REPLACE 176
#define TK_RESTRICT 177
#define TK_ROW 178
#define TK_STATEMENT 179
#define TK_TRIGGER 180
#define TK_VIEW 181
#define TK_SEMI 182
#define TK_NONE 183
#define TK_PREV 184
#define TK_LINEAR 185
#define TK_IMPORT 186
#define TK_TBNAME 187
#define TK_JOIN 188
#define TK_INSERT 189
#define TK_INTO 190
#define TK_VALUES 191
#define TK_STATE_WINDOW 125
#define TK_FILL 126
#define TK_SLIDING 127
#define TK_ORDER 128
#define TK_BY 129
#define TK_ASC 130
#define TK_DESC 131
#define TK_GROUP 132
#define TK_HAVING 133
#define TK_LIMIT 134
#define TK_OFFSET 135
#define TK_SLIMIT 136
#define TK_SOFFSET 137
#define TK_WHERE 138
#define TK_NOW 139
#define TK_RESET 140
#define TK_QUERY 141
#define TK_SYNCDB 142
#define TK_ADD 143
#define TK_COLUMN 144
#define TK_TAG 145
#define TK_CHANGE 146
#define TK_SET 147
#define TK_KILL 148
#define TK_CONNECTION 149
#define TK_STREAM 150
#define TK_COLON 151
#define TK_ABORT 152
#define TK_AFTER 153
#define TK_ATTACH 154
#define TK_BEFORE 155
#define TK_BEGIN 156
#define TK_CASCADE 157
#define TK_CLUSTER 158
#define TK_CONFLICT 159
#define TK_COPY 160
#define TK_DEFERRED 161
#define TK_DELIMITERS 162
#define TK_DETACH 163
#define TK_EACH 164
#define TK_END 165
#define TK_EXPLAIN 166
#define TK_FAIL 167
#define TK_FOR 168
#define TK_IGNORE 169
#define TK_IMMEDIATE 170
#define TK_INITIALLY 171
#define TK_INSTEAD 172
#define TK_MATCH 173
#define TK_KEY 174
#define TK_OF 175
#define TK_RAISE 176
#define TK_REPLACE 177
#define TK_RESTRICT 178
#define TK_ROW 179
#define TK_STATEMENT 180
#define TK_TRIGGER 181
#define TK_VIEW 182
#define TK_SEMI 183
#define TK_NONE 184
#define TK_PREV 185
#define TK_LINEAR 186
#define TK_IMPORT 187
#define TK_TBNAME 188
#define TK_JOIN 189
#define TK_INSERT 190
#define TK_INTO 191
#define TK_VALUES 192

#define TK_SPACE 300
#define TK_COMMENT 301

@ -11,10 +11,17 @@ IF (TD_LINUX)
|
|||
LIST(REMOVE_ITEM SRC ./src/shellDarwin.c)
|
||||
ADD_EXECUTABLE(shell ${SRC})
|
||||
|
||||
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
|
||||
ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
|
||||
SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc")
|
||||
ELSE ()
|
||||
SET(LINK_JEMALLOC "")
|
||||
ENDIF ()
|
||||
|
||||
IF (TD_SOMODE_STATIC)
|
||||
TARGET_LINK_LIBRARIES(shell taos_static lua)
|
||||
TARGET_LINK_LIBRARIES(shell taos_static lua ${LINK_JEMALLOC})
|
||||
ELSE ()
|
||||
TARGET_LINK_LIBRARIES(shell taos lua)
|
||||
TARGET_LINK_LIBRARIES(shell taos lua ${LINK_JEMALLOC})
|
||||
ENDIF ()
|
||||
|
||||
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
|
||||
|
|
|
@ -55,14 +55,21 @@ ENDIF ()
|
|||
MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER})
|
||||
ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}")
|
||||
|
||||
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
|
||||
ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
|
||||
SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc")
|
||||
ELSE ()
|
||||
SET(LINK_JEMALLOC "")
|
||||
ENDIF ()
|
||||
|
||||
IF (TD_LINUX)
|
||||
AUX_SOURCE_DIRECTORY(. SRC)
|
||||
ADD_EXECUTABLE(taosdemo ${SRC})
|
||||
|
||||
IF (TD_SOMODE_STATIC)
|
||||
TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua)
|
||||
TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua ${LINK_JEMALLOC})
|
||||
ELSE ()
|
||||
TARGET_LINK_LIBRARIES(taosdemo taos cJson)
|
||||
TARGET_LINK_LIBRARIES(taosdemo taos cJson ${LINK_JEMALLOC})
|
||||
ENDIF ()
|
||||
ELSEIF (TD_WINDOWS)
|
||||
AUX_SOURCE_DIRECTORY(. SRC)
|
||||
|
@ -71,7 +78,7 @@ ELSEIF (TD_WINDOWS)
|
|||
IF (TD_SOMODE_STATIC)
|
||||
TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua)
|
||||
ELSE ()
|
||||
TARGET_LINK_LIBRARIES(taosdemo taos cJson lua})
|
||||
TARGET_LINK_LIBRARIES(taosdemo taos cJson lua)
|
||||
ENDIF ()
|
||||
ELSEIF (TD_DARWIN)
|
||||
# missing a few dependencies, such as <argp.h>
|
||||
|
|
|
@ -1063,6 +1063,13 @@ static SDbCfg mnodeGetAlterDbOption(SDbObj *pDb, SAlterDbMsg *pAlter) {
|
|||
newCfg.partitions = partitions;
|
||||
}
|
||||
|
||||
// community version can only change daysToKeep
|
||||
// but enterprise version can change all daysToKeep options
|
||||
#ifndef _STORAGE
|
||||
newCfg.daysToKeep1 = newCfg.daysToKeep;
|
||||
newCfg.daysToKeep2 = newCfg.daysToKeep;
|
||||
#endif
|
||||
|
||||
return newCfg;
|
||||
}
|
||||
|
||||
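For orientation, a minimal self-contained sketch of the collapse performed by the #ifndef _STORAGE block above; SKeepCfg and collapseKeepOptions() are illustrative stand-ins for the real SDbCfg handling, not code from this patch:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the keep-related fields of SDbCfg. */
typedef struct {
    int32_t daysToKeep;
    int32_t daysToKeep1;
    int32_t daysToKeep2;
} SKeepCfg;

/* Hypothetical helper mirroring the #ifndef _STORAGE block above. */
static void collapseKeepOptions(SKeepCfg *cfg) {
#ifndef _STORAGE
    /* community build: only the single daysToKeep value is honored */
    cfg->daysToKeep1 = cfg->daysToKeep;
    cfg->daysToKeep2 = cfg->daysToKeep;
#endif
}

int main(void) {
    SKeepCfg cfg = {3650, 10, 100};
    collapseKeepOptions(&cfg);
    printf("%d %d %d\n", cfg.daysToKeep, cfg.daysToKeep1, cfg.daysToKeep2); /* 3650 3650 3650 without _STORAGE */
    return 0;
}
```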
|
|
|
@ -22,6 +22,10 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifdef TD_JEMALLOC_ENABLED
|
||||
#include <jemalloc/jemalloc.h>
|
||||
#endif
|
||||
|
||||
typedef enum {
|
||||
TAOS_ALLOC_MODE_DEFAULT = 0,
|
||||
TAOS_ALLOC_MODE_RANDOM_FAIL = 1,
|
||||
|
|
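A small illustration of what the TD_JEMALLOC_ENABLED definition added by the CMake changes buys at the source level: the gated include pulls in jemalloc's header, and binaries linked with -ljemalloc serve malloc/free through it. The program below is a hedged sketch, not part of the patch; malloc_stats_print() is jemalloc-specific:

```c
#include <stdio.h>
#include <stdlib.h>

#ifdef TD_JEMALLOC_ENABLED
#include <jemalloc/jemalloc.h>
#endif

int main(void) {
    void *buf = malloc(1024);             /* served by jemalloc when the flag is set and -ljemalloc is linked */
    if (buf == NULL) return 1;
#ifdef TD_JEMALLOC_ENABLED
    malloc_stats_print(NULL, NULL, NULL); /* jemalloc-only allocator introspection */
#endif
    free(buf);
    return 0;
}
```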
|
@ -22,6 +22,7 @@
|
|||
#include "qFill.h"
|
||||
#include "qResultbuf.h"
|
||||
#include "qSqlparser.h"
|
||||
#include "qTableMeta.h"
|
||||
#include "qTsbuf.h"
|
||||
#include "query.h"
|
||||
#include "taosdef.h"
|
||||
|
@ -71,14 +72,6 @@ typedef struct SResultRowPool {
|
|||
SArray* pData; // SArray<void*>
|
||||
} SResultRowPool;
|
||||
|
||||
typedef struct SGroupbyExpr {
|
||||
int16_t tableIndex;
|
||||
SArray* columnInfo; // SArray<SColIndex>, group by columns information
|
||||
int16_t numOfGroupCols; // todo remove it
|
||||
int16_t orderIndex; // order by column index
|
||||
int16_t orderType; // order by type: asc/desc
|
||||
} SGroupbyExpr;
|
||||
|
||||
typedef struct SResultRow {
|
||||
int32_t pageId; // pageId & rowId is the position of current result in disk-based output buffer
|
||||
int32_t offset:29; // row index in buffer page
|
||||
|
@ -197,6 +190,7 @@ typedef struct SQueryAttr {
|
|||
bool pointInterpQuery; // point interpolation query
|
||||
bool needReverseScan; // need reverse scan
|
||||
bool distinctTag; // distinct tag query
|
||||
bool stateWindow; // window State on sub/normal table
|
||||
int32_t interBufSize; // intermediate buffer size
|
||||
|
||||
int32_t havingNum; // having expr number
|
||||
|
@ -217,7 +211,7 @@ typedef struct SQueryAttr {
|
|||
int32_t intermediateResultRowSize; // intermediate result row size, in case of top-k query.
|
||||
int32_t maxTableColumnWidth;
|
||||
int32_t tagLen; // tag value length of current query
|
||||
SGroupbyExpr* pGroupbyExpr;
|
||||
SGroupbyExpr *pGroupbyExpr;
|
||||
|
||||
SExprInfo* pExpr1;
|
||||
SExprInfo* pExpr2;
|
||||
|
@ -306,6 +300,7 @@ enum OPERATOR_TYPE_E {
|
|||
OP_Filter = 19,
|
||||
OP_Distinct = 20,
|
||||
OP_Join = 21,
|
||||
OP_StateWindow = 22,
|
||||
};
|
||||
|
||||
typedef struct SOperatorInfo {
|
||||
|
@ -471,6 +466,16 @@ typedef struct SSWindowOperatorInfo {
|
|||
int32_t start; // start row index
|
||||
} SSWindowOperatorInfo;
|
||||
|
||||
typedef struct SStateWindowOperatorInfo {
|
||||
SOptrBasicInfo binfo;
|
||||
STimeWindow curWindow; // current time window
|
||||
int32_t numOfRows; // number of rows
|
||||
int32_t colIndex; // start row index
|
||||
int32_t start;
|
||||
char* prevData; // previous data
|
||||
|
||||
} SStateWindowOperatorInfo;
|
||||
|
||||
typedef struct SDistinctOperatorInfo {
|
||||
SHashObj *pSet;
|
||||
SSDataBlock *pRes;
|
||||
|
@ -479,10 +484,10 @@ typedef struct SDistinctOperatorInfo {
|
|||
int64_t outputCapacity;
|
||||
} SDistinctOperatorInfo;
|
||||
|
||||
struct SLocalMerger;
|
||||
struct SGlobalMerger;
|
||||
|
||||
typedef struct SMultiwayMergeInfo {
|
||||
struct SLocalMerger *pMerge;
|
||||
struct SGlobalMerger *pMerge;
|
||||
SOptrBasicInfo binfo;
|
||||
int32_t bufCapacity;
|
||||
int64_t seed;
|
||||
|
@ -522,6 +527,7 @@ SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRu
|
|||
SOperatorInfo* createMultiwaySortOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput,
|
||||
int32_t numOfRows, void* merger, bool groupMix);
|
||||
SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* param, SArray* pUdfInfo);
|
||||
SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
|
||||
SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* merger);
|
||||
SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr,
|
||||
int32_t numOfOutput, SColumnInfo* pCols, int32_t numOfFilter);
|
||||
|
@ -565,6 +571,8 @@ int32_t createFilterInfo(SQueryAttr* pQueryAttr, uint64_t qId);
|
|||
void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters);
|
||||
|
||||
STableQueryInfo *createTableQueryInfo(SQueryAttr* pQueryAttr, void* pTable, bool groupbyColumn, STimeWindow win, void* buf);
|
||||
STableQueryInfo* createTmpTableQueryInfo(STimeWindow win);
|
||||
|
||||
int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, void *pQueryMsg);
|
||||
|
||||
bool isQueryKilled(SQInfo *pQInfo);
|
||||
|
|
|
@ -16,6 +16,8 @@
|
|||
#ifndef TDENGINE_QPLAN_H
|
||||
#define TDENGINE_QPLAN_H
|
||||
|
||||
#include "qExecutor.h"
|
||||
|
||||
struct SQueryInfo;
|
||||
|
||||
typedef struct SQueryNodeBasicInfo {
|
||||
|
|
|
@ -89,6 +89,10 @@ typedef struct SSessionWindowVal {
|
|||
SStrToken gap;
|
||||
} SSessionWindowVal;
|
||||
|
||||
typedef struct SWindowStateVal {
|
||||
SStrToken col;
|
||||
} SWindowStateVal;
|
||||
|
||||
struct SRelationInfo;
|
||||
|
||||
typedef struct SSqlNode {
|
||||
|
@ -100,6 +104,7 @@ typedef struct SSqlNode {
|
|||
SArray *fillType; // fill type[optional], SArray<tVariantListItem>
|
||||
SIntervalVal interval; // (interval, interval_offset) [optional]
|
||||
SSessionWindowVal sessionVal; // session window [optional]
|
||||
SWindowStateVal windowstateVal; // window_state(col) [optional]
|
||||
SStrToken sliding; // sliding window [optional]
|
||||
SLimitVal limit; // limit offset [optional]
|
||||
SLimitVal slimit; // group limit offset [optional]
|
||||
|
@ -142,7 +147,7 @@ typedef struct SCreateTableSql {
|
|||
} colInfo;
|
||||
|
||||
SArray *childTableInfo; // SArray<SCreatedTableInfo>
|
||||
SSqlNode *pSelect;
|
||||
SSqlNode *pSelect;
|
||||
} SCreateTableSql;
|
||||
|
||||
typedef struct SAlterTableInfo {
|
||||
|
@ -268,7 +273,6 @@ SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int
|
|||
SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t sortOrder);
|
||||
|
||||
SRelationInfo *setTableNameList(SRelationInfo* pFromInfo, SStrToken *pName, SStrToken* pAlias);
|
||||
//SRelationInfo *setSubquery(SRelationInfo* pFromInfo, SRelElementPair* p);
|
||||
void *destroyRelationInfo(SRelationInfo* pFromInfo);
|
||||
SRelationInfo *addSubqueryElem(SRelationInfo* pRelationInfo, SArray* pSub, SStrToken* pAlias);
|
||||
|
||||
|
@ -286,7 +290,7 @@ SArray *tSqlExprListAppend(SArray *pList, tSqlExpr *pNode, SStrToken *pDistinc
|
|||
void tSqlExprListDestroy(SArray *pList);
|
||||
|
||||
SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelationInfo *pFrom, tSqlExpr *pWhere,
|
||||
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *ps,
|
||||
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *ps, SWindowStateVal *pw,
|
||||
SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit, tSqlExpr *pHaving);
|
||||
int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right);
|
||||
|
||||
|
|
|
@ -0,0 +1,206 @@
|
|||
#ifndef TDENGINE_QTABLEUTIL_H
|
||||
#define TDENGINE_QTABLEUTIL_H
|
||||
|
||||
#include "tsdb.h" //todo tsdb should not be here
|
||||
#include "qSqlparser.h"
|
||||
|
||||
typedef struct SFieldInfo {
|
||||
int16_t numOfOutput; // number of columns in the result
|
||||
TAOS_FIELD* final;
|
||||
SArray *internalField; // SArray<SInternalField>
|
||||
} SFieldInfo;
|
||||
|
||||
typedef struct SCond {
|
||||
uint64_t uid;
|
||||
int32_t len; // length of tag query condition data
|
||||
char * cond;
|
||||
} SCond;
|
||||
|
||||
typedef struct SJoinNode {
|
||||
uint64_t uid;
|
||||
int16_t tagColId;
|
||||
SArray* tsJoin;
|
||||
SArray* tagJoin;
|
||||
} SJoinNode;
|
||||
|
||||
typedef struct SJoinInfo {
|
||||
bool hasJoin;
|
||||
SJoinNode *joinTables[TSDB_MAX_JOIN_TABLE_NUM];
|
||||
} SJoinInfo;
|
||||
|
||||
typedef struct STagCond {
|
||||
// relation between tbname list and query condition, including : TK_AND or TK_OR
|
||||
int16_t relType;
|
||||
|
||||
// tbname query condition, only support tbname query condition on one table
|
||||
SCond tbnameCond;
|
||||
|
||||
// join condition, only support two tables join currently
|
||||
SJoinInfo joinInfo;
|
||||
|
||||
// for different tables, the query conditions must be separated
|
||||
SArray *pCond;
|
||||
} STagCond;
|
||||
|
||||
typedef struct SGroupbyExpr {
|
||||
int16_t tableIndex;
|
||||
SArray* columnInfo; // SArray<SColIndex>, group by columns information
|
||||
int16_t numOfGroupCols; // todo remove it
|
||||
int16_t orderIndex; // order by column index
|
||||
int16_t orderType; // order by type: asc/desc
|
||||
} SGroupbyExpr;
|
||||
|
||||
typedef struct STableComInfo {
|
||||
uint8_t numOfTags;
|
||||
uint8_t precision;
|
||||
int16_t numOfColumns;
|
||||
int32_t rowSize;
|
||||
} STableComInfo;
|
||||
|
||||
typedef struct STableMeta {
|
||||
int32_t vgId;
|
||||
STableId id;
|
||||
uint8_t tableType;
|
||||
char sTableName[TSDB_TABLE_FNAME_LEN]; // super table name
|
||||
uint64_t suid; // super table id
|
||||
int16_t sversion;
|
||||
int16_t tversion;
|
||||
STableComInfo tableInfo;
|
||||
SSchema schema[]; // if the table is TSDB_CHILD_TABLE, schema is acquired by super table meta info
|
||||
} STableMeta;
|
||||
|
||||
typedef struct STableMetaInfo {
|
||||
STableMeta *pTableMeta; // table meta, cached in client side and acquired by name
|
||||
uint32_t tableMetaSize;
|
||||
SVgroupsInfo *vgroupList;
|
||||
SArray *pVgroupTables; // SArray<SVgroupTableInfo>
|
||||
|
||||
/*
|
||||
* 1. keep the vgroup index during the multi-vnode super table projection query
|
||||
* 2. keep the vgroup index for multi-vnode insertion
|
||||
*/
|
||||
int32_t vgroupIndex;
|
||||
SName name;
|
||||
char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql
|
||||
SArray *tagColList; // SArray<SColumn*>, involved tag columns
|
||||
} STableMetaInfo;
|
||||
|
||||
struct SQInfo; // global merge operator
|
||||
struct SQueryAttr; // query object
|
||||
|
||||
typedef struct SQueryInfo {
|
||||
int16_t command; // the command may be different for each subclause, so keep it separately.
|
||||
uint32_t type; // query/insert type
|
||||
STimeWindow window; // the whole query time window
|
||||
|
||||
SInterval interval; // tumble time window
|
||||
SSessionWindow sessionWindow; // session time window
|
||||
|
||||
SGroupbyExpr groupbyExpr; // groupby tags info
|
||||
SArray * colList; // SArray<SColumn*>
|
||||
SFieldInfo fieldsInfo;
|
||||
SArray * exprList; // SArray<SExprInfo*>
|
||||
SArray * exprList1; // final exprlist in case of arithmetic expression exists
|
||||
SLimitVal limit;
|
||||
SLimitVal slimit;
|
||||
STagCond tagCond;
|
||||
|
||||
SOrderVal order;
|
||||
int16_t fillType; // final result fill type
|
||||
int16_t numOfTables;
|
||||
STableMetaInfo **pTableMetaInfo;
|
||||
struct STSBuf *tsBuf;
|
||||
int64_t * fillVal; // default value for fill
|
||||
char * msg; // pointer to the pCmd->payload to keep error message temporarily
|
||||
int64_t clauseLimit; // limit for current sub clause
|
||||
|
||||
int64_t prjOffset; // offset value in the original sql expression, only applied at client side
|
||||
int64_t vgroupLimit; // table limit in case of super table projection query + global order + limit
|
||||
|
||||
int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
|
||||
int16_t resColumnId; // result column id
|
||||
bool distinctTag; // distinct tag or not
|
||||
int32_t round; // 0/1/....
|
||||
int32_t bufLen;
|
||||
char* buf;
|
||||
SArray *pUdfInfo;
|
||||
|
||||
struct SQInfo *pQInfo; // global merge operator
|
||||
struct SQueryAttr *pQueryAttr; // query object
|
||||
|
||||
struct SQueryInfo *sibling; // sibling
|
||||
SArray *pUpstream; // SArray<struct SQueryInfo>
|
||||
struct SQueryInfo *pDownstream;
|
||||
int32_t havingFieldNum;
|
||||
bool stableQuery;
|
||||
bool groupbyColumn;
|
||||
bool simpleAgg;
|
||||
bool arithmeticOnAgg;
|
||||
bool projectionQuery;
|
||||
bool hasFilter;
|
||||
bool onlyTagQuery;
|
||||
bool orderProjectQuery;
|
||||
bool stateWindow;
|
||||
bool globalMerge;
|
||||
} SQueryInfo;
|
||||
|
||||
/**
|
||||
* get the number of tags of this table
|
||||
* @param pTableMeta
|
||||
* @return
|
||||
*/
|
||||
int32_t tscGetNumOfTags(const STableMeta* pTableMeta);
|
||||
|
||||
/**
|
||||
* get the number of columns of this table
|
||||
* @param pTableMeta
|
||||
* @return
|
||||
*/
|
||||
int32_t tscGetNumOfColumns(const STableMeta* pTableMeta);
|
||||
|
||||
/**
|
||||
* get the basic info of this table
|
||||
* @param pTableMeta
|
||||
* @return
|
||||
*/
|
||||
STableComInfo tscGetTableInfo(const STableMeta* pTableMeta);
|
||||
|
||||
/**
|
||||
* get the schema
|
||||
* @param pTableMeta
|
||||
* @return
|
||||
*/
|
||||
SSchema* tscGetTableSchema(const STableMeta* pTableMeta);
|
||||
|
||||
/**
|
||||
* get the tag schema
|
||||
* @param pMeta
|
||||
* @return
|
||||
*/
|
||||
SSchema *tscGetTableTagSchema(const STableMeta *pMeta);
|
||||
|
||||
/**
|
||||
* get the column schema according to the column index
|
||||
* @param pMeta
|
||||
* @param colIndex
|
||||
* @return
|
||||
*/
|
||||
SSchema *tscGetTableColumnSchema(const STableMeta *pMeta, int32_t colIndex);
|
||||
|
||||
/**
|
||||
* get the column schema according to the column id
|
||||
* @param pTableMeta
|
||||
* @param colId
|
||||
* @return
|
||||
*/
|
||||
SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId);
|
||||
|
||||
/**
|
||||
* create the table meta from the msg
|
||||
* @param pTableMetaMsg
|
||||
* @param size size of the table meta
|
||||
* @return
|
||||
*/
|
||||
STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg);
|
||||
|
||||
#endif // TDENGINE_QTABLEUTIL_H
|
|
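A short usage sketch of the accessors declared in this new header, assuming the file is qTableMeta.h as referenced by the includes elsewhere in this commit and that a valid STableMeta was obtained beforehand (for example via tscCreateTableMetaFromMsg()):

```c
#include <stdio.h>
#include "qTableMeta.h"   /* the header introduced above */

/* Assumes pTableMeta was obtained elsewhere, e.g. via tscCreateTableMetaFromMsg(). */
static void dumpTableColumns(const STableMeta *pTableMeta) {
    STableComInfo info    = tscGetTableInfo(pTableMeta);
    SSchema      *pSchema = tscGetTableSchema(pTableMeta);

    printf("columns:%d tags:%d precision:%d rowSize:%d\n",
           info.numOfColumns, info.numOfTags, info.precision, info.rowSize);

    for (int32_t i = 0; i < info.numOfColumns; ++i) {
        printf("  col %d: %s type=%d bytes=%d\n", i, pSchema[i].name, pSchema[i].type, pSchema[i].bytes);
    }
}
```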
@ -463,8 +463,8 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). {
|
|||
//////////////////////// The SELECT statement /////////////////////////////////
|
||||
%type select {SSqlNode*}
|
||||
%destructor select {destroySqlNode($$);}
|
||||
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). {
|
||||
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &S, F, &L, &G, N);
|
||||
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) windowstate_option(D) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). {
|
||||
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &D, &S, F, &L, &G, N);
|
||||
}
|
||||
|
||||
select(A) ::= LP select(B) RP. {A = B;}
|
||||
|
@ -482,7 +482,7 @@ cmd ::= union(X). { setSqlInfo(pInfo, X, NULL, TSDB_SQL_SELECT); }
|
|||
// select client_version()
|
||||
// select server_state()
|
||||
select(A) ::= SELECT(T) selcollist(W). {
|
||||
A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
|
||||
A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
|
||||
}
|
||||
|
||||
// selcollist is a list of expressions that are to become the return
|
||||
|
@ -565,6 +565,11 @@ session_option(X) ::= SESSION LP ids(V) cpxName(Z) COMMA tmvar(Y) RP. {
|
|||
X.col = V;
|
||||
X.gap = Y;
|
||||
}
|
||||
%type windowstate_option {SWindowStateVal}
|
||||
windowstate_option(X) ::= . {X.col.n = 0;}
|
||||
windowstate_option(X) ::= STATE_WINDOW LP ids(V) RP. {
|
||||
X.col = V;
|
||||
}
|
||||
|
||||
%type fill_opt {SArray*}
|
||||
%destructor fill_opt {taosArrayDestroy($$);}
|
||||
|
|
|
@ -2518,7 +2518,6 @@ static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) {
|
|||
tmp += POINTER_BYTES * pCtx->param[0].i64;
|
||||
|
||||
size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen;
|
||||
// assert(pCtx->param[0].i64 > 0);
|
||||
|
||||
for (int32_t i = 0; i < pCtx->param[0].i64; ++i) {
|
||||
pTopBotInfo->res[i] = (tValuePair*) tmp;
|
||||
|
@ -2527,7 +2526,6 @@ static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const char *maxval) {
|
||||
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||
if (pResInfo == NULL) {
|
||||
|
@ -2606,13 +2604,14 @@ static void top_function(SQLFunctionCtx *pCtx) {
|
|||
|
||||
for (int32_t i = 0; i < pCtx->size; ++i) {
|
||||
char *data = GET_INPUT_DATA(pCtx, i);
|
||||
TSKEY ts = GET_TS_DATA(pCtx, i);
|
||||
|
||||
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
notNullElems++;
|
||||
|
||||
// NOTE: Set the default timestamp if it is missing [todo refactor]
|
||||
TSKEY ts = (pCtx->ptsList != NULL)? GET_TS_DATA(pCtx, i):0;
|
||||
do_top_function_add(pRes, (int32_t)pCtx->param[0].i64, data, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0);
|
||||
}
|
||||
|
||||
|
@ -2685,13 +2684,13 @@ static void bottom_function(SQLFunctionCtx *pCtx) {
|
|||
|
||||
for (int32_t i = 0; i < pCtx->size; ++i) {
|
||||
char *data = GET_INPUT_DATA(pCtx, i);
|
||||
TSKEY ts = GET_TS_DATA(pCtx, i);
|
||||
|
||||
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
notNullElems++;
|
||||
// NOTE: Set the default timestamp if it is missing [todo refactor]
|
||||
TSKEY ts = (pCtx->ptsList != NULL)? GET_TS_DATA(pCtx, i):0;
|
||||
do_bottom_function_add(pRes, (int32_t)pCtx->param[0].i64, data, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0);
|
||||
}
|
||||
|
||||
|
@ -2769,7 +2768,7 @@ static void top_bottom_func_finalizer(SQLFunctionCtx *pCtx) {
|
|||
if (pCtx->param[1].i64 == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
|
||||
__compar_fn_t comparator = (pCtx->param[2].i64 == TSDB_ORDER_ASC) ? resAscComparFn : resDescComparFn;
|
||||
qsort(tvp, (size_t)pResInfo->numOfRes, POINTER_BYTES, comparator);
|
||||
} else if (pCtx->param[1].i64 > PRIMARYKEY_TIMESTAMP_COL_INDEX) {
|
||||
} else /*if (pCtx->param[1].i64 > PRIMARYKEY_TIMESTAMP_COL_INDEX)*/ {
|
||||
__compar_fn_t comparator = (pCtx->param[2].i64 == TSDB_ORDER_ASC) ? resDataAscComparFn : resDataDescComparFn;
|
||||
qsort(tvp, (size_t)pResInfo->numOfRes, POINTER_BYTES, comparator);
|
||||
}
|
||||
|
@ -3324,8 +3323,12 @@ static void col_project_function(SQLFunctionCtx *pCtx) {
|
|||
if (pCtx->numOfParams == 2) {
|
||||
return;
|
||||
}
|
||||
if (pCtx->param[0].i64 == 1) {
|
||||
SET_VAL(pCtx, pCtx->size, 1);
|
||||
} else {
|
||||
INC_INIT_VAL(pCtx, pCtx->size);
|
||||
}
|
||||
|
||||
INC_INIT_VAL(pCtx, pCtx->size);
|
||||
|
||||
char *pData = GET_INPUT_DATA_LIST(pCtx);
|
||||
if (pCtx->order == TSDB_ORDER_ASC) {
|
||||
|
|
|
@ -190,12 +190,16 @@ static void destroySFillOperatorInfo(void* param, int32_t numOfOutput);
|
|||
static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput);
|
||||
static void destroyArithOperatorInfo(void* param, int32_t numOfOutput);
|
||||
static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput);
|
||||
static void destroySWindowOperatorInfo(void* param, int32_t numOfOutput);
|
||||
static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput);
|
||||
static void destroyAggOperatorInfo(void* param, int32_t numOfOutput);
|
||||
static void destroyOperatorInfo(SOperatorInfo* pOperator);
|
||||
|
||||
|
||||
static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo, int32_t orderType, SSDataBlock* pBlock);
|
||||
|
||||
static int32_t getGroupbyColumnIndex(SGroupbyExpr *pGroupbyExpr, SSDataBlock* pDataBlock);
|
||||
static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SGroupbyOperatorInfo *pInfo, int32_t numOfCols, char *pData, int16_t type, int16_t bytes, int32_t groupIndex);
|
||||
static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *binf, int32_t numOfCols, char *pData, int16_t type, int16_t bytes, int32_t groupIndex);
|
||||
|
||||
static void initCtxOutputBuffer(SQLFunctionCtx* pCtx, int32_t size);
|
||||
static void getAlignQueryTimeWindow(SQueryAttr *pQueryAttr, int64_t key, int64_t keyFirst, int64_t keyLast, STimeWindow *win);
|
||||
|
@ -805,7 +809,6 @@ static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx
|
|||
if (pCtx[k].preAggVals.isSet && forwardStep < numOfTotal) {
|
||||
pCtx[k].preAggVals.isSet = false;
|
||||
}
|
||||
|
||||
if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
|
||||
// aAggs[functionId].xFunction(&pCtx[k]);
|
||||
if (functionId < 0) { // load the script and exec, pRuntimeEnv->pUdfInfo
|
||||
|
@ -1034,7 +1037,13 @@ static void doSetInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx,
|
|||
uint32_t status = aAggs[pCtx[i].functionId].status;
|
||||
if ((status & (TSDB_FUNCSTATE_SELECTIVITY | TSDB_FUNCSTATE_NEED_TS)) != 0) {
|
||||
SColumnInfoData* tsInfo = taosArrayGet(pBlock->pDataBlock, 0);
|
||||
pCtx[i].ptsList = (int64_t*) tsInfo->pData;
|
||||
// In case of the top/bottom query against the nested query result, which has no timestamp column,
|
||||
// don't set the ptsList attribute.
|
||||
if (tsInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
|
||||
pCtx[i].ptsList = (int64_t*) tsInfo->pData;
|
||||
} else {
|
||||
pCtx[i].ptsList = NULL;
|
||||
}
|
||||
}
|
||||
} else if (TSDB_COL_IS_UD_COL(pCol->flag) && (pOperator->pRuntimeEnv->scanFlag == MERGE_STAGE)) {
|
||||
SColIndex* pColIndex = &pOperator->pExpr[i].base.colInfo;
|
||||
|
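The check above leaves ptsList NULL when the first column of a nested result is not a timestamp, so downstream consumers such as top_function/bottom_function fall back to a default key. A self-contained sketch of that defensive pattern, with illustrative types in place of SQLFunctionCtx:

```c
#include <stdint.h>
#include <stddef.h>

typedef int64_t TSKEY;

typedef struct {
    TSKEY *ptsList;   /* NULL when the input block carries no timestamp column */
} Ctx;

static TSKEY rowTimestamp(const Ctx *pCtx, int32_t row) {
    /* same fallback as the top/bottom functions: default key 0 when no list is set */
    return (pCtx->ptsList != NULL) ? pCtx->ptsList[row] : 0;
}
```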
@ -1400,7 +1409,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
|
|||
}
|
||||
|
||||
int32_t ret =
|
||||
setGroupResultOutputBuf(pRuntimeEnv, pInfo, pOperator->numOfOutput, val, type, bytes, item->groupIndex);
|
||||
setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, val, type, bytes, item->groupIndex);
|
||||
if (ret != TSDB_CODE_SUCCESS) { // null data, or too many state codes
|
||||
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
|
@ -1442,12 +1451,16 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf
|
|||
pInfo->start = j;
|
||||
} else if (tsList[j] - pInfo->prevTs <= gap) {
|
||||
pInfo->curWindow.ekey = tsList[j];
|
||||
pInfo->prevTs = tsList[j];
|
||||
//pInfo->prevTs = tsList[j];
|
||||
pInfo->numOfRows += 1;
|
||||
pInfo->start = j;
|
||||
if (j == 0 && pInfo->start != 0) {
|
||||
pInfo->numOfRows = 1;
|
||||
pInfo->start = 0;
|
||||
}
|
||||
} else { // start a new session window
|
||||
SResultRow* pResult = NULL;
|
||||
|
||||
pInfo->curWindow.ekey = pInfo->curWindow.skey;
|
||||
int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan,
|
||||
&pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput,
|
||||
pBInfo->rowCellInfoOffset);
|
||||
|
@ -1468,6 +1481,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf
|
|||
|
||||
SResultRow* pResult = NULL;
|
||||
|
||||
pInfo->curWindow.ekey = pInfo->curWindow.skey;
|
||||
int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan,
|
||||
&pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput,
|
||||
pBInfo->rowCellInfoOffset);
|
||||
|
@ -1495,12 +1509,12 @@ static void setResultRowKey(SResultRow* pResultRow, char* pData, int16_t type) {
|
|||
}
|
||||
}
|
||||
|
||||
static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SGroupbyOperatorInfo *pInfo, int32_t numOfCols, char *pData, int16_t type, int16_t bytes, int32_t groupIndex) {
|
||||
static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *binfo, int32_t numOfCols, char *pData, int16_t type, int16_t bytes, int32_t groupIndex) {
|
||||
SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf;
|
||||
|
||||
int32_t *rowCellInfoOffset = pInfo->binfo.rowCellInfoOffset;
|
||||
SResultRowInfo *pResultRowInfo = &pInfo->binfo.resultRowInfo;
|
||||
SQLFunctionCtx *pCtx = pInfo->binfo.pCtx;
|
||||
int32_t *rowCellInfoOffset = binfo->rowCellInfoOffset;
|
||||
SResultRowInfo *pResultRowInfo = &binfo->resultRowInfo;
|
||||
SQLFunctionCtx *pCtx = binfo->pCtx;
|
||||
|
||||
// not assign result buffer yet, add new result buffer, TODO remove it
|
||||
char* d = pData;
|
||||
|
@ -1822,7 +1836,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
|
|||
case OP_TimeWindow: {
|
||||
pRuntimeEnv->proot =
|
||||
createTimeIntervalOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
|
||||
setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
|
||||
int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType;
|
||||
if (opType != OP_DummyInput && opType != OP_Join) {
|
||||
setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case OP_Groupby: {
|
||||
|
@ -1868,6 +1885,11 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
|
|||
}
|
||||
break;
|
||||
}
|
||||
case OP_StateWindow: {
|
||||
pRuntimeEnv->proot = createStatewindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
|
||||
setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
|
||||
break;
|
||||
}
|
||||
|
||||
case OP_Limit: {
|
||||
pRuntimeEnv->proot = createLimitOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot);
|
||||
|
@ -2212,6 +2234,8 @@ static bool onlyFirstQuery(SQueryAttr *pQueryAttr) { return onlyOneQueryType(pQu
|
|||
|
||||
static bool onlyLastQuery(SQueryAttr *pQueryAttr) { return onlyOneQueryType(pQueryAttr, TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST); }
|
||||
|
||||
static bool notContainSessionOrStateWindow(SQueryAttr *pQueryAttr) { return !(pQueryAttr->sw.gap > 0 || pQueryAttr->stateWindow); }
|
||||
|
||||
static int32_t updateBlockLoadStatus(SQueryAttr *pQuery, int32_t status) {
|
||||
bool hasFirstLastFunc = false;
|
||||
bool hasOtherFunc = false;
|
||||
|
@ -2315,7 +2339,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo
|
|||
}
|
||||
|
||||
pQueryAttr->order.order = TSDB_ORDER_ASC;
|
||||
} else if (onlyLastQuery(pQueryAttr)) {
|
||||
} else if (onlyLastQuery(pQueryAttr) && notContainSessionOrStateWindow(pQueryAttr)) {
|
||||
if (QUERY_IS_ASC_QUERY(pQueryAttr)) {
|
||||
qDebug(msg, pQInfo, "only-last", pQueryAttr->order.order, TSDB_ORDER_DESC, pQueryAttr->window.skey,
|
||||
pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey);
|
||||
|
@ -3331,7 +3355,7 @@ void finalizeQueryResult(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SResult
|
|||
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
|
||||
|
||||
int32_t numOfOutput = pOperator->numOfOutput;
|
||||
if (pQueryAttr->groupbyColumn || QUERY_IS_INTERVAL_QUERY(pQueryAttr) || pQueryAttr->sw.gap > 0) {
|
||||
if (pQueryAttr->groupbyColumn || QUERY_IS_INTERVAL_QUERY(pQueryAttr) || pQueryAttr->sw.gap > 0 || pQueryAttr->stateWindow) {
|
||||
// for each group result, call the finalize function for each column
|
||||
if (pQueryAttr->groupbyColumn) {
|
||||
closeAllResultRows(pResultRowInfo);
|
||||
|
@ -3407,6 +3431,25 @@ STableQueryInfo *createTableQueryInfo(SQueryAttr* pQueryAttr, void* pTable, bool
|
|||
return pTableQueryInfo;
|
||||
}
|
||||
|
||||
STableQueryInfo* createTmpTableQueryInfo(STimeWindow win) {
|
||||
STableQueryInfo* pTableQueryInfo = calloc(1, sizeof(STableQueryInfo));
|
||||
|
||||
pTableQueryInfo->win = win;
|
||||
pTableQueryInfo->lastKey = win.skey;
|
||||
|
||||
pTableQueryInfo->pTable = NULL;
|
||||
pTableQueryInfo->cur.vgroupIndex = -1;
|
||||
|
||||
// set a larger initial size for interval/groupby queries
|
||||
int32_t initialSize = 16;
|
||||
int32_t code = initResultRowInfo(&pTableQueryInfo->resInfo, initialSize, TSDB_DATA_TYPE_INT);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return pTableQueryInfo;
|
||||
}
|
||||
|
||||
void destroyTableQueryInfoImpl(STableQueryInfo *pTableQueryInfo) {
|
||||
if (pTableQueryInfo == NULL) {
|
||||
return;
|
||||
|
@ -4335,6 +4378,10 @@ static void updateTableIdInfo(STableQueryInfo* pTableQueryInfo, SSDataBlock* pBl
|
|||
int32_t step = GET_FORWARD_DIRECTION_FACTOR(order);
|
||||
pTableQueryInfo->lastKey = ((order == TSDB_ORDER_ASC)? pBlock->info.window.ekey:pBlock->info.window.skey) + step;
|
||||
|
||||
if (pTableQueryInfo->pTable == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
STableIdInfo tidInfo = createTableIdInfo(pTableQueryInfo);
|
||||
STableIdInfo *idinfo = taosHashGet(pTableIdInfo, &tidInfo.tid, sizeof(tidInfo.tid));
|
||||
if (idinfo != NULL) {
|
||||
|
@ -4636,6 +4683,12 @@ void setTableScanFilterOperatorInfo(STableScanInfo* pTableScanInfo, SOperatorInf
|
|||
} else if (pDownstream->operatorType == OP_SessionWindow) {
|
||||
SSWindowOperatorInfo* pInfo = pDownstream->info;
|
||||
|
||||
pTableScanInfo->pCtx = pInfo->binfo.pCtx;
|
||||
pTableScanInfo->pResultRowInfo = &pInfo->binfo.resultRowInfo;
|
||||
pTableScanInfo->rowCellInfoOffset = pInfo->binfo.rowCellInfoOffset;
|
||||
} else if (pDownstream->operatorType == OP_StateWindow) {
|
||||
SStateWindowOperatorInfo* pInfo = pDownstream->info;
|
||||
|
||||
pTableScanInfo->pCtx = pInfo->binfo.pCtx;
|
||||
pTableScanInfo->pResultRowInfo = &pInfo->binfo.resultRowInfo;
|
||||
pTableScanInfo->rowCellInfoOffset = pInfo->binfo.rowCellInfoOffset;
|
||||
|
@ -4747,7 +4800,6 @@ static void destroyGlobalAggOperatorInfo(void* param, int32_t numOfOutput) {
|
|||
tfree(pInfo->prevRow);
|
||||
tfree(pInfo->currentGroupColData);
|
||||
}
|
||||
|
||||
static void destroySlimitOperatorInfo(void* param, int32_t numOfOutput) {
|
||||
SSLimitOperatorInfo *pInfo = (SSLimitOperatorInfo*) param;
|
||||
taosArrayDestroy(pInfo->orderColumnList);
|
||||
|
@ -5008,8 +5060,7 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) {
|
|||
updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows);
|
||||
|
||||
arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
|
||||
|
||||
if (pTableQueryInfo != NULL) { // TODO refactor
|
||||
if (pTableQueryInfo != NULL) {
|
||||
updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order);
|
||||
}
|
||||
|
||||
|
@ -5052,8 +5103,7 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) {
|
|||
updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows);
|
||||
|
||||
arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
|
||||
|
||||
if (pTableQueryInfo != NULL) { // TODO refactor
|
||||
if (pTableQueryInfo != NULL) {
|
||||
updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order);
|
||||
}
|
||||
|
||||
|
@ -5254,13 +5304,83 @@ static SSDataBlock* doSTableIntervalAgg(void* param, bool* newgroup) {
|
|||
return pIntervalInfo->pRes;
|
||||
}
|
||||
|
||||
static SSDataBlock* doSessionWindowAgg(void* param, bool* newgroup) {
|
||||
|
||||
static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorInfo *pInfo, SSDataBlock *pSDataBlock) {
|
||||
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
|
||||
STableQueryInfo* item = pRuntimeEnv->current;
|
||||
SColumnInfoData* pColInfoData = taosArrayGet(pSDataBlock->pDataBlock, pInfo->colIndex);
|
||||
|
||||
SOptrBasicInfo* pBInfo = &pInfo->binfo;
|
||||
|
||||
bool masterScan = IS_MASTER_SCAN(pRuntimeEnv);
|
||||
int16_t bytes = pColInfoData->info.bytes;
|
||||
int16_t type = pColInfoData->info.type;
|
||||
|
||||
SColumnInfoData* pTsColInfoData = taosArrayGet(pSDataBlock->pDataBlock, 0);
|
||||
TSKEY* tsList = (TSKEY*)pTsColInfoData->pData;
|
||||
|
||||
pInfo->numOfRows = 0;
|
||||
for (int32_t j = 0; j < pSDataBlock->info.rows; ++j) {
|
||||
char* val = ((char*)pColInfoData->pData) + bytes * j;
|
||||
if (isNull(val, type)) {
|
||||
continue;
|
||||
}
|
||||
if (pInfo->prevData == NULL) {
|
||||
pInfo->prevData = malloc(bytes);
|
||||
memcpy(pInfo->prevData, val, bytes);
|
||||
pInfo->numOfRows = 1;
|
||||
pInfo->curWindow.skey = tsList[j];
|
||||
pInfo->curWindow.ekey = tsList[j];
|
||||
pInfo->start = j;
|
||||
|
||||
} else if (memcmp(pInfo->prevData, val, bytes) == 0) {
|
||||
pInfo->curWindow.ekey = tsList[j];
|
||||
pInfo->numOfRows += 1;
|
||||
//pInfo->start = j;
|
||||
if (j == 0 && pInfo->start != 0) {
|
||||
pInfo->numOfRows = 1;
|
||||
pInfo->start = 0;
|
||||
}
|
||||
} else {
|
||||
SResultRow* pResult = NULL;
|
||||
pInfo->curWindow.ekey = pInfo->curWindow.skey;
|
||||
int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan,
|
||||
&pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput,
|
||||
pBInfo->rowCellInfoOffset);
|
||||
if (ret != TSDB_CODE_SUCCESS) { // null data, or too many state codes
|
||||
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
doApplyFunctions(pRuntimeEnv, pBInfo->pCtx, &pInfo->curWindow, pInfo->start, pInfo->numOfRows, tsList,
|
||||
pSDataBlock->info.rows, pOperator->numOfOutput);
|
||||
|
||||
pInfo->curWindow.skey = tsList[j];
|
||||
pInfo->curWindow.ekey = tsList[j];
|
||||
memcpy(pInfo->prevData, val, bytes);
|
||||
pInfo->numOfRows = 1;
|
||||
pInfo->start = j;
|
||||
|
||||
}
|
||||
}
|
||||
SResultRow* pResult = NULL;
|
||||
|
||||
pInfo->curWindow.ekey = pInfo->curWindow.skey;
|
||||
int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan,
|
||||
&pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput,
|
||||
pBInfo->rowCellInfoOffset);
|
||||
if (ret != TSDB_CODE_SUCCESS) { // null data, or too many state codes
|
||||
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
|
||||
doApplyFunctions(pRuntimeEnv, pBInfo->pCtx, &pInfo->curWindow, pInfo->start, pInfo->numOfRows, tsList,
|
||||
pSDataBlock->info.rows, pOperator->numOfOutput);
|
||||
}
|
||||
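doStateWindowAggImpl above closes the current window whenever the value of the state column changes and opens a new one at the differing row. A compact, self-contained illustration of that segmentation rule on plain arrays; the real operator additionally skips NULL values and routes each finished window through setWindowOutputBufByKey() and doApplyFunctions():

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t TSKEY;

/* Print one window per run of equal state values. */
static void stateWindows(const TSKEY *ts, const int32_t *state, int32_t rows) {
    int32_t start = 0;
    for (int32_t i = 1; i <= rows; ++i) {
        if (i == rows || state[i] != state[start]) {
            printf("window [%" PRId64 ", %" PRId64 "] state=%d rows=%d\n",
                   ts[start], ts[i - 1], (int)state[start], (int)(i - start));
            start = i;   /* the differing row opens the next window */
        }
    }
}

int main(void) {
    TSKEY   ts[]    = {1, 2, 3, 4, 5, 6};
    int32_t state[] = {0, 0, 1, 1, 1, 2};
    stateWindows(ts, state, 6);   /* -> [1,2] state=0, [3,5] state=1, [6,6] state=2 */
    return 0;
}
```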
static SSDataBlock* doStateWindowAgg(void *param, bool* newgroup) {
|
||||
SOperatorInfo* pOperator = (SOperatorInfo*) param;
|
||||
if (pOperator->status == OP_EXEC_DONE) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SSWindowOperatorInfo* pWindowInfo = pOperator->info;
|
||||
SStateWindowOperatorInfo* pWindowInfo = pOperator->info;
|
||||
SOptrBasicInfo* pBInfo = &pWindowInfo->binfo;
|
||||
|
||||
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
|
||||
|
@ -5277,6 +5397,62 @@ static SSDataBlock* doSessionWindowAgg(void* param, bool* newgroup) {
|
|||
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
|
||||
int32_t order = pQueryAttr->order.order;
|
||||
STimeWindow win = pQueryAttr->window;
|
||||
SOperatorInfo* upstream = pOperator->upstream[0];
|
||||
while (1) {
|
||||
SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
|
||||
if (pBlock == NULL) {
|
||||
break;
|
||||
}
|
||||
setInputDataBlock(pOperator, pBInfo->pCtx, pBlock, pQueryAttr->order.order);
|
||||
if (pWindowInfo->colIndex == -1) {
|
||||
pWindowInfo->colIndex = getGroupbyColumnIndex(pRuntimeEnv->pQueryAttr->pGroupbyExpr, pBlock);
|
||||
}
|
||||
doStateWindowAggImpl(pOperator, pWindowInfo, pBlock);
|
||||
}
|
||||
|
||||
// restore the value
|
||||
pQueryAttr->order.order = order;
|
||||
pQueryAttr->window = win;
|
||||
|
||||
pOperator->status = OP_RES_TO_RETURN;
|
||||
closeAllResultRows(&pBInfo->resultRowInfo);
|
||||
setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
|
||||
finalizeQueryResult(pOperator, pBInfo->pCtx, &pBInfo->resultRowInfo, pBInfo->rowCellInfoOffset);
|
||||
|
||||
initGroupResInfo(&pRuntimeEnv->groupResInfo, &pBInfo->resultRowInfo);
|
||||
toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pBInfo->pRes);
|
||||
|
||||
if (pBInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) {
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
}
|
||||
|
||||
return pBInfo->pRes->info.rows == 0? NULL:pBInfo->pRes;
|
||||
}
|
||||
static SSDataBlock* doSessionWindowAgg(void* param, bool* newgroup) {
|
||||
SOperatorInfo* pOperator = (SOperatorInfo*) param;
|
||||
if (pOperator->status == OP_EXEC_DONE) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SSWindowOperatorInfo* pWindowInfo = pOperator->info;
|
||||
SOptrBasicInfo* pBInfo = &pWindowInfo->binfo;
|
||||
|
||||
|
||||
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
|
||||
if (pOperator->status == OP_RES_TO_RETURN) {
|
||||
toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pBInfo->pRes);
|
||||
|
||||
if (pBInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) {
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
}
|
||||
|
||||
return pBInfo->pRes;
|
||||
}
|
||||
|
||||
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
|
||||
//pQueryAttr->order.order = TSDB_ORDER_ASC;
|
||||
int32_t order = pQueryAttr->order.order;
|
||||
STimeWindow win = pQueryAttr->window;
|
||||
|
||||
SOperatorInfo* upstream = pOperator->upstream[0];
|
||||
|
||||
|
@ -5512,7 +5688,7 @@ SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOpera
|
|||
pOperator->pRuntimeEnv = pRuntimeEnv;
|
||||
|
||||
pOperator->exec = doAggregate;
|
||||
pOperator->cleanup = destroyBasicOperatorInfo;
|
||||
pOperator->cleanup = destroyAggOperatorInfo;
|
||||
appendUpstream(pOperator, upstream);
|
||||
|
||||
return pOperator;
|
||||
|
@ -5532,6 +5708,19 @@ static void destroyBasicOperatorInfo(void* param, int32_t numOfOutput) {
|
|||
SOptrBasicInfo* pInfo = (SOptrBasicInfo*) param;
|
||||
doDestroyBasicInfo(pInfo, numOfOutput);
|
||||
}
|
||||
static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput) {
|
||||
SStateWindowOperatorInfo* pInfo = (SStateWindowOperatorInfo*) param;
|
||||
doDestroyBasicInfo(&pInfo->binfo, numOfOutput);
|
||||
tfree(pInfo->prevData);
|
||||
}
|
||||
static void destroyAggOperatorInfo(void* param, int32_t numOfOutput) {
|
||||
SAggOperatorInfo* pInfo = (SAggOperatorInfo*) param;
|
||||
doDestroyBasicInfo(&pInfo->binfo, numOfOutput);
|
||||
}
|
||||
static void destroySWindowOperatorInfo(void* param, int32_t numOfOutput) {
|
||||
SSWindowOperatorInfo* pInfo = (SSWindowOperatorInfo*) param;
|
||||
doDestroyBasicInfo(&pInfo->binfo, numOfOutput);
|
||||
}
|
||||
|
||||
static void destroySFillOperatorInfo(void* param, int32_t numOfOutput) {
|
||||
SFillOperatorInfo* pInfo = (SFillOperatorInfo*) param;
|
||||
|
@ -5586,7 +5775,7 @@ SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SO
|
|||
pOperator->pRuntimeEnv = pRuntimeEnv;
|
||||
|
||||
pOperator->exec = doSTableAggregate;
|
||||
pOperator->cleanup = destroyBasicOperatorInfo;
|
||||
pOperator->cleanup = destroyAggOperatorInfo;
|
||||
appendUpstream(pOperator, upstream);
|
||||
|
||||
return pOperator;
|
||||
|
@ -5712,7 +5901,29 @@ SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOp
|
|||
appendUpstream(pOperator, upstream);
|
||||
return pOperator;
|
||||
}
|
||||
SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
|
||||
SStateWindowOperatorInfo* pInfo = calloc(1, sizeof(SStateWindowOperatorInfo));
|
||||
pInfo->colIndex = -1;
|
||||
pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset);
|
||||
pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
|
||||
initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT);
|
||||
|
||||
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
|
||||
pOperator->name = "StateWindowOperator";
|
||||
pOperator->operatorType = OP_StateWindow;
|
||||
pOperator->blockingOptr = true;
|
||||
pOperator->status = OP_IN_EXECUTING;
|
||||
pOperator->pExpr = pExpr;
|
||||
pOperator->numOfOutput = numOfOutput;
|
||||
pOperator->info = pInfo;
|
||||
pOperator->pRuntimeEnv = pRuntimeEnv;
|
||||
pOperator->exec = doStateWindowAgg;
|
||||
pOperator->cleanup = destroyStateWindowOperatorInfo;
|
||||
|
||||
appendUpstream(pOperator, upstream);
|
||||
return pOperator;
|
||||
|
||||
}
|
||||
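createStatewindowOperatorInfo follows the same wiring as the neighbouring constructors: the operator exposes an exec callback that pulls blocks from its upstream and a cleanup callback used on teardown. A stripped-down sketch of that pull-based pattern, with illustrative types rather than the engine's SOperatorInfo:

```c
#include <stdio.h>

typedef struct Op {
    const char *name;
    struct Op  *upstream;
    int       (*exec)(struct Op *self);     /* pulls the next value, -1 when exhausted */
    void      (*cleanup)(struct Op *self);
} Op;

static int scanExec(Op *self) { (void)self; static int n = 0; return (n < 3) ? n++ : -1; }

static int sumExec(Op *self) {              /* drains its upstream, like a blocking operator */
    int total = 0, v;
    while ((v = self->upstream->exec(self->upstream)) != -1) total += v;
    return total;
}

static void noCleanup(Op *self) { (void)self; }

int main(void) {
    Op scan = {"Scan", NULL,  scanExec, noCleanup};
    Op sum  = {"Sum",  &scan, sumExec,  noCleanup};
    printf("%s(%s) = %d\n", sum.name, scan.name, sum.exec(&sum));   /* 0 + 1 + 2 = 3 */
    sum.cleanup(&sum);
    scan.cleanup(&scan);
    return 0;
}
```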
SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
|
||||
SSWindowOperatorInfo* pInfo = calloc(1, sizeof(SSWindowOperatorInfo));
|
||||
|
||||
|
@ -5732,7 +5943,7 @@ SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato
|
|||
pOperator->info = pInfo;
|
||||
pOperator->pRuntimeEnv = pRuntimeEnv;
|
||||
pOperator->exec = doSessionWindowAgg;
|
||||
pOperator->cleanup = destroyBasicOperatorInfo;
|
||||
pOperator->cleanup = destroySWindowOperatorInfo;
|
||||
|
||||
appendUpstream(pOperator, upstream);
|
||||
return pOperator;
|
||||
|
@ -7183,6 +7394,8 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S
|
|||
pQueryAttr->simpleAgg = pQueryMsg->simpleAgg;
|
||||
pQueryAttr->pointInterpQuery = pQueryMsg->pointInterpQuery;
|
||||
pQueryAttr->needReverseScan = pQueryMsg->needReverseScan;
|
||||
pQueryAttr->stateWindow = pQueryMsg->stateWindow;
|
||||
pQueryAttr->vgId = vgId;
|
||||
|
||||
pQueryAttr->tableCols = calloc(numOfCols, sizeof(SSingleColumnFilterInfo));
|
||||
if (pQueryAttr->tableCols == NULL) {
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
#include "os.h"
|
||||
#include "tschemautil.h"
|
||||
#include "qTableMeta.h"
|
||||
#include "qPlan.h"
|
||||
#include "qExecutor.h"
|
||||
#include "qUtil.h"
|
||||
|
@ -592,6 +592,14 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
|
|||
op = OP_SessionWindow;
|
||||
taosArrayPush(plan, &op);
|
||||
|
||||
if (pQueryAttr->pExpr2 != NULL) {
|
||||
op = OP_Arithmetic;
|
||||
taosArrayPush(plan, &op);
|
||||
}
|
||||
} else if (pQueryAttr->stateWindow) {
|
||||
op = OP_StateWindow;
|
||||
taosArrayPush(plan, &op);
|
||||
|
||||
if (pQueryAttr->pExpr2 != NULL) {
|
||||
op = OP_Arithmetic;
|
||||
taosArrayPush(plan, &op);
|
||||
|
|
|
@ -726,9 +726,9 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
|
|||
* extract the select info out of sql string
|
||||
*/
|
||||
SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelationInfo *pFrom, tSqlExpr *pWhere,
|
||||
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *pSession,
|
||||
SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *psLimit,
|
||||
tSqlExpr *pHaving) {
|
||||
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval,
|
||||
SSessionWindowVal *pSession, SWindowStateVal *pWindowStateVal, SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit,
|
||||
SLimitVal *psLimit, tSqlExpr *pHaving) {
|
||||
assert(pSelNodeList != NULL);
|
||||
|
||||
SSqlNode *pSqlNode = calloc(1, sizeof(SSqlNode));
|
||||
|
@ -779,6 +779,12 @@ SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelat
|
|||
TPARSER_SET_NONE_TOKEN(pSqlNode->sessionVal.col);
|
||||
}
|
||||
|
||||
if (pWindowStateVal != NULL) {
|
||||
pSqlNode->windowstateVal = *pWindowStateVal;
|
||||
} else {
|
||||
TPARSER_SET_NONE_TOKEN(pSqlNode->windowstateVal.col);
|
||||
}
|
||||
|
||||
return pSqlNode;
|
||||
}
|
||||
|
||||
|
|
|
@ -1,25 +1,10 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "os.h"
|
||||
#include "taosmsg.h"
|
||||
#include "tschemautil.h"
|
||||
#include "qTableMeta.h"
|
||||
#include "ttokendef.h"
|
||||
#include "taosdef.h"
|
||||
#include "tutil.h"
|
||||
#include "tsclient.h"
|
||||
|
||||
int32_t tscGetNumOfTags(const STableMeta* pTableMeta) {
|
||||
assert(pTableMeta != NULL);
|
||||
|
@ -97,9 +82,9 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg) {
|
|||
pTableMeta->suid = pTableMetaMsg->suid;
|
||||
|
||||
pTableMeta->tableInfo = (STableComInfo) {
|
||||
.numOfTags = pTableMetaMsg->numOfTags,
|
||||
.precision = pTableMetaMsg->precision,
|
||||
.numOfColumns = pTableMetaMsg->numOfColumns,
|
||||
.numOfTags = pTableMetaMsg->numOfTags,
|
||||
.precision = pTableMetaMsg->precision,
|
||||
.numOfColumns = pTableMetaMsg->numOfColumns,
|
||||
};
|
||||
|
||||
pTableMeta->id.tid = pTableMetaMsg->tid;
|
||||
|
@ -120,58 +105,3 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg) {
|
|||
return pTableMeta;
|
||||
}
|
||||
|
||||
bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src) {
|
||||
assert(pExisted != NULL && src != NULL);
|
||||
if (pExisted->numOfEps != src->numOfEps) {
|
||||
return false;
|
||||
}
|
||||
|
||||
for(int32_t i = 0; i < pExisted->numOfEps; ++i) {
|
||||
if (pExisted->ep[i].port != src->epAddr[i].port) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (strncmp(pExisted->ep[i].fqdn, src->epAddr[i].fqdn, tListLen(pExisted->ep[i].fqdn)) != 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) {
|
||||
assert(pVgroupMsg != NULL);
|
||||
|
||||
SNewVgroupInfo info = {0};
|
||||
info.numOfEps = pVgroupMsg->numOfEps;
|
||||
info.vgId = pVgroupMsg->vgId;
|
||||
info.inUse = 0; // 0 is the default value of inUse in case of multiple replica
|
||||
|
||||
assert(info.numOfEps >= 1 && info.vgId >= 1);
|
||||
for(int32_t i = 0; i < pVgroupMsg->numOfEps; ++i) {
|
||||
tstrncpy(info.ep[i].fqdn, pVgroupMsg->epAddr[i].fqdn, TSDB_FQDN_LEN);
|
||||
info.ep[i].port = pVgroupMsg->epAddr[i].port;
|
||||
}
|
||||
|
||||
return info;
|
||||
}
|
||||
|
||||
// todo refactor
|
||||
UNUSED_FUNC static FORCE_INLINE char* skipSegments(char* input, char delim, int32_t num) {
|
||||
for (int32_t i = 0; i < num; ++i) {
|
||||
while (*input != 0 && *input++ != delim) {
|
||||
};
|
||||
}
|
||||
return input;
|
||||
}
|
||||
|
||||
UNUSED_FUNC static FORCE_INLINE size_t copy(char* dst, const char* src, char delimiter) {
|
||||
size_t len = 0;
|
||||
while (*src != delimiter && *src != 0) {
|
||||
*dst++ = *src++;
|
||||
len++;
|
||||
}
|
||||
|
||||
return len;
|
||||
}
|
||||
|
src/query/src/sql.c: 2338 changed lines (diff suppressed because it is too large)
|
@ -141,6 +141,7 @@ static SKeyword keywordTable[] = {
|
|||
{"VARIABLE", TK_VARIABLE},
|
||||
{"INTERVAL", TK_INTERVAL},
|
||||
{"SESSION", TK_SESSION},
|
||||
{"STATE_WINDOW", TK_STATE_WINDOW},
|
||||
{"FILL", TK_FILL},
|
||||
{"SLIDING", TK_SLIDING},
|
||||
{"ORDER", TK_ORDER},
|
||||
|
|
|
@ -29,8 +29,8 @@ pipeline {
|
|||
agent none
|
||||
environment{
|
||||
|
||||
WK = '/var/lib/jenkins/workspace/TDinternal'
|
||||
WKC= '/var/lib/jenkins/workspace/TDinternal/community'
|
||||
WK = '/data/lib/jenkins/workspace/TDinternal'
|
||||
WKC= '/data/lib/jenkins/workspace/TDinternal/community'
|
||||
}
|
||||
|
||||
stages {
|
||||
|
|
|
@ -6,9 +6,9 @@ events {
|
|||
}
|
||||
|
||||
http {
|
||||
lua_package_path '$prefix/lua/?.lua;$prefix/rest/?.lua;/blah/?.lua;;';
|
||||
lua_package_path '$prefix/lua/?.lua;$prefix/rest/?.lua;$prefix/rest/?/init.lua;;';
|
||||
lua_package_cpath "$prefix/so/?.so;;";
|
||||
lua_code_cache off;
|
||||
lua_code_cache on;
|
||||
server {
|
||||
listen 7000;
|
||||
server_name restapi;
|
||||
|
|
|
@ -0,0 +1,10 @@
|
|||
local config = {
|
||||
host = "127.0.0.1",
|
||||
port = 6030,
|
||||
database = "",
|
||||
user = "root",
|
||||
password = "taosdata",
|
||||
max_packet_size = 1024 * 1024 ,
|
||||
connection_pool_size = 64
|
||||
}
|
||||
return config
|
|
@ -0,0 +1,72 @@
|
|||
local _M = {}
|
||||
local driver = require "luaconnector51"
|
||||
local water_mark = 0
|
||||
local occupied = 0
|
||||
local connection_pool = {}
|
||||
|
||||
function _M.new(o,config)
|
||||
o = o or {}
|
||||
o.connection_pool = connection_pool
|
||||
o.water_mark = water_mark
|
||||
o.occupied = occupied
|
||||
if #connection_pool == 0 then
|
||||
|
||||
for i = 1, config.connection_pool_size do
|
||||
local res = driver.connect(config)
|
||||
if res.code ~= 0 then
|
||||
ngx.log(ngx.ERR, "connect--- failed:"..res.error)
|
||||
return nil
|
||||
else
|
||||
local object = {obj = res.conn, state = 0}
|
||||
table.insert(o.connection_pool,i, object)
|
||||
ngx.log(ngx.INFO, "add connection, now pool size:"..#(o.connection_pool))
|
||||
end
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
return setmetatable(o, { __index = _M })
|
||||
end
|
||||
|
||||
function _M:get_connection()
|
||||
|
||||
local connection_obj
|
||||
|
||||
for i = 1, #connection_pool do
|
||||
connection_obj = connection_pool[i]
|
||||
if connection_obj.state == 0 then
|
||||
connection_obj.state = 1
|
||||
occupied = occupied +1
|
||||
if occupied > water_mark then
|
||||
water_mark = occupied
|
||||
end
|
||||
return connection_obj["obj"]
|
||||
end
|
||||
end
|
||||
|
||||
ngx.log(ngx.ERR,"ALERT! NO FREE CONNECTION.")
|
||||
|
||||
return nil
|
||||
end
|
||||
|
||||
function _M:get_water_mark()
|
||||
|
||||
return water_mark
|
||||
end
|
||||
|
||||
function _M:release_connection(conn)
|
||||
|
||||
local connection_obj
|
||||
|
||||
for i = 1, #connection_pool do
|
||||
connection_obj = connection_pool[i]
|
||||
|
||||
if connection_obj["obj"] == conn then
|
||||
connection_obj["state"] = 0
|
||||
occupied = occupied -1
|
||||
return
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
return _M
|
|
@ -1,26 +1,11 @@
|
|||
local driver = require "luaconnector51"
|
||||
local cjson = require "cjson"
|
||||
local Pool = require "tdpool"
|
||||
local config = require "config"
|
||||
ngx.say("start time:"..os.time())
|
||||
|
||||
|
||||
local config = {
|
||||
host = "127.0.0.1",
|
||||
port = 6030,
|
||||
database = "",
|
||||
user = "root",
|
||||
password = "taosdata",
|
||||
max_packet_size = 1024 * 1024
|
||||
}
|
||||
|
||||
local conn
|
||||
local res = driver.connect(config)
|
||||
if res.code ~=0 then
|
||||
ngx.say("connect--- failed: "..res.error)
|
||||
return
|
||||
else
|
||||
conn = res.conn
|
||||
ngx.say("connect--- pass.")
|
||||
end
|
||||
local pool = Pool.new(Pool,config)
|
||||
local conn = pool:get_connection()
|
||||
|
||||
local res = driver.query(conn,"drop database if exists nginx")
|
||||
if res.code ~=0 then
|
||||
|
@@ -51,7 +36,7 @@ else
    ngx.say("create table--- pass.")
end

res = driver.query(conn,"insert into m1 values ('2019-09-01 00:00:00.001',0,'robotspace'), ('2019-09-01 00:00:00.002',1,'Hilink'),('2019-09-01 00:00:00.003',2,'Harmony')")
res = driver.query(conn,"insert into m1 values ('2019-09-01 00:00:00.001', 0, 'robotspace'), ('2019-09-01 00:00:00.002',1,'Hilink'),('2019-09-01 00:00:00.003',2,'Harmony')")
if res.code ~=0 then
    ngx.say("insert records failed: "..res.error)
    return
@@ -77,7 +62,29 @@ else
    end

end
driver.close(conn)
ngx.say("end time:"..os.time())
--ngx.log(ngx.ERR,"in test file.")

local flag = false
function query_callback(res)
    if res.code ~=0 then
        ngx.say("async_query_callback--- failed:"..res.error)
    else
        if(res.affected == 3) then
            ngx.say("async_query_callback, insert records--- pass")
        else
            ngx.say("async_query_callback, insert records---failed: expect 3 affected records, actually affected "..res.affected)
        end
    end
    flag = true
end

driver.query_a(conn,"insert into m1 values ('2019-09-01 00:00:00.001', 3, 'robotspace'),('2019-09-01 00:00:00.006', 4, 'Hilink'),('2019-09-01 00:00:00.007', 6, 'Harmony')", query_callback)

while not flag do
    -- ngx.say("i am here once...")
    ngx.sleep(0.001) -- time unit is second
end

ngx.say("pool water_mark:"..pool:get_water_mark())

pool:release_connection(conn)
ngx.say("end time:"..os.time())
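Because driver.query_a returns immediately and the result only arrives in the callback, the test above busy-waits on a flag. A reusable wait with a timeout could look like the sketch below; the helper name and timeout handling are assumptions, not part of the commit, and it reuses the driver handle from the test above:

-- hypothetical helper, not part of the commit
local function query_async_wait(conn, sql, timeout_s)
    local done, result = false, nil
    driver.query_a(conn, sql, function(res)
        result = res
        done = true
    end)
    local waited = 0
    while not done and waited < timeout_s do
        ngx.sleep(0.001)          -- yield back to nginx instead of blocking the worker
        waited = waited + 0.001
    end
    return result                 -- nil if the callback never fired in time
end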
Binary file not shown.
@@ -1,2 +1,8 @@
gcc -std=c99 lua_connector.c -fPIC -shared -o luaconnector.so -Wall -ltaos
lua_header_installed=`apt-cache policy liblua5.3-dev|grep Installed|grep none > /dev/null; echo $?`
if [ "$lua_header_installed" = "0" ]; then
    echo "If need, please input root password to install liblua5.3-dev for build the connector.."
    sudo apt install -y liblua5.3-dev
fi

gcc -std=c99 lua_connector.c -fPIC -shared -o luaconnector.so -Wall -ltaos -I/usr/include/lua5.3
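Once build.sh has produced luaconnector.so, a quick way to confirm the binding loads and can reach a local taosd is a short standalone script. This is a sketch only: it assumes the shared object registers itself under the module name "luaconnector" and that taosd is running with the default credentials from the sample config.

-- minimal smoke test, assuming luaconnector.so sits next to this script
package.cpath = package.cpath .. ";./?.so"
local driver = require "luaconnector"

local res = driver.connect({host = "127.0.0.1", port = 6030,
                            user = "root", password = "taosdata",
                            database = "", max_packet_size = 1024 * 1024})
if res.code ~= 0 then
    print("connect failed: " .. res.error)
else
    print("connector loaded and connected")
    driver.close(res.conn)
end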
@@ -13,6 +13,11 @@ struct cb_param{
    void * stream;
};

struct async_query_callback_param{
    lua_State* state;
    int callback;
};

static int l_connect(lua_State *L){
    TAOS * taos=NULL;
    const char* host;
@@ -23,7 +28,7 @@ static int l_connect(lua_State *L){

    luaL_checktype(L, 1, LUA_TTABLE);

    lua_getfield(L,-1,"host");
    lua_getfield(L, 1,"host");
    if (lua_isstring(L,-1)){
        host = lua_tostring(L, -1);
        // printf("host = %s\n", host);
@@ -178,6 +183,58 @@ static int l_query(lua_State *L){
    return 1;
}

void async_query_callback(void *param, TAOS_RES *result, int code){
    struct async_query_callback_param* p = (struct async_query_callback_param*) param;

    //printf("\nin c,numfields:%d\n", numFields);
    //printf("\nin c, code:%d\n", code);

    lua_State *L = p->state;
    lua_rawgeti(L, LUA_REGISTRYINDEX, p->callback);
    lua_newtable(L);
    int table_index = lua_gettop(L);
    if( code < 0){
        printf("failed, reason:%s\n", taos_errstr(result));
        lua_pushinteger(L, -1);
        lua_setfield(L, table_index, "code");
        lua_pushstring(L,"something is wrong");// taos_errstr(taos));
        lua_setfield(L, table_index, "error");
    }else{
        //printf("success to async query.\n");
        const int affectRows = taos_affected_rows(result);
        //printf(" affect rows:%d\r\n", affectRows);
        lua_pushinteger(L, 0);
        lua_setfield(L, table_index, "code");
        lua_pushinteger(L, affectRows);
        lua_setfield(L, table_index, "affected");
    }

    lua_call(L, 1, 0);
}

static int l_async_query(lua_State *L){
    int r = luaL_ref(L, LUA_REGISTRYINDEX);
    TAOS * taos = (TAOS*)lua_topointer(L,1);
    const char * sqlstr = lua_tostring(L,2);
    // int stime = luaL_checknumber(L,3);

    lua_newtable(L);
    int table_index = lua_gettop(L);

    struct async_query_callback_param *p = malloc(sizeof(struct async_query_callback_param));
    p->state = L;
    p->callback=r;
    // printf("r:%d, L:%d\n",r,L);
    taos_query_a(taos,sqlstr,async_query_callback,p);

    lua_pushnumber(L, 0);
    lua_setfield(L, table_index, "code");
    lua_pushstring(L, "ok");
    lua_setfield(L, table_index, "error");

    return 1;
}

void stream_cb(void *param, TAOS_RES *result, TAOS_ROW row){
    struct cb_param* p = (struct cb_param*) param;
    TAOS_FIELD *fields = taos_fetch_fields(result);
@@ -308,6 +365,7 @@ static int l_close(lua_State *L){
static const struct luaL_Reg lib[] = {
    {"connect", l_connect},
    {"query", l_query},
    {"query_a",l_async_query},
    {"close", l_close},
    {"open_stream", l_open_stream},
    {"close_stream", l_close_stream},
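On the Lua side, the table that async_query_callback builds arrives as the single argument of the function registered through luaL_ref, so a caller only inspects code plus either error or affected. A sketch of that contract, reusing the conn handle from the examples above; the SQL literal is illustrative:

-- sketch of the callback contract exposed by query_a (l_async_query / async_query_callback)
driver.query_a(conn, "insert into m1 values (now, 9, 'demo')", function(res)
    if res.code ~= 0 then
        print("async insert failed: " .. res.error)                   -- error branch (code == -1)
    else
        print("async insert affected " .. res.affected .. " row(s)")  -- success branch
    end
end)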
@@ -13,6 +13,11 @@ struct cb_param{
    void * stream;
};

struct async_query_callback_param{
    lua_State* state;
    int callback;
};

static int l_connect(lua_State *L){
    TAOS * taos=NULL;
    const char* host;
@@ -56,6 +61,7 @@ static int l_connect(lua_State *L){
    lua_settop(L,0);

    taos_init();

    lua_newtable(L);
    int table_index = lua_gettop(L);

@@ -177,6 +183,58 @@ static int l_query(lua_State *L){
    return 1;
}

void async_query_callback(void *param, TAOS_RES *result, int code){
    struct async_query_callback_param* p = (struct async_query_callback_param*) param;

    //printf("\nin c,numfields:%d\n", numFields);
    //printf("\nin c, code:%d\n", code);

    lua_State *L = p->state;
    lua_rawgeti(L, LUA_REGISTRYINDEX, p->callback);
    lua_newtable(L);
    int table_index = lua_gettop(L);
    if( code < 0){
        printf("failed, reason:%s\n", taos_errstr(result));
        lua_pushinteger(L, -1);
        lua_setfield(L, table_index, "code");
        lua_pushstring(L,"something is wrong");// taos_errstr(taos));
        lua_setfield(L, table_index, "error");
    }else{
        //printf("success to async query.\n");
        const int affectRows = taos_affected_rows(result);
        //printf(" affect rows:%d\r\n", affectRows);
        lua_pushinteger(L, 0);
        lua_setfield(L, table_index, "code");
        lua_pushinteger(L, affectRows);
        lua_setfield(L, table_index, "affected");
    }

    lua_call(L, 1, 0);
}

static int l_async_query(lua_State *L){
    int r = luaL_ref(L, LUA_REGISTRYINDEX);
    TAOS * taos = (TAOS*)lua_topointer(L,1);
    const char * sqlstr = lua_tostring(L,2);
    // int stime = luaL_checknumber(L,3);

    lua_newtable(L);
    int table_index = lua_gettop(L);

    struct async_query_callback_param *p = malloc(sizeof(struct async_query_callback_param));
    p->state = L;
    p->callback=r;
    // printf("r:%d, L:%d\n",r,L);
    taos_query_a(taos,sqlstr,async_query_callback,p);

    lua_pushnumber(L, 0);
    lua_setfield(L, table_index, "code");
    lua_pushstring(L, "ok");
    lua_setfield(L, table_index, "error");

    return 1;
}

void stream_cb(void *param, TAOS_RES *result, TAOS_ROW row){
    struct cb_param* p = (struct cb_param*) param;
    TAOS_FIELD *fields = taos_fetch_fields(result);
@@ -307,6 +365,7 @@ static int l_close(lua_State *L){
static const struct luaL_Reg lib[] = {
    {"connect", l_connect},
    {"query", l_query},
    {"query_a",l_async_query},
    {"close", l_close},
    {"open_stream", l_open_stream},
    {"close_stream", l_close_stream},
@@ -110,7 +110,25 @@ else
    end
end

function callback(t)
function async_query_callback(res)
    if res.code ~=0 then
        print("async_query_callback--- failed:"..res.error)
        return
    else

        if(res.affected == 3) then
            print("async_query_callback, insert records--- pass")
        else
            print("async_query_callback, insert records---failed: expect 3 affected records, actually affected "..res.affected)
        end

    end
end

driver.query_a(conn,"INSERT INTO therm1 VALUES ('2019-09-01 00:00:00.005', 100),('2019-09-01 00:00:00.006', 101),('2019-09-01 00:00:00.007', 102)", async_query_callback)


function stream_callback(t)
    print("------------------------")
    print("continuous query result:")
    for key, value in pairs(t) do
@@ -119,7 +137,7 @@ function callback(t)
end

local stream
res = driver.open_stream(conn,"SELECT COUNT(*) as count, AVG(degree) as avg, MAX(degree) as max, MIN(degree) as min FROM thermometer interval(2s) sliding(2s);)",0,callback)
res = driver.open_stream(conn,"SELECT COUNT(*) as count, AVG(degree) as avg, MAX(degree) as max, MIN(degree) as min FROM thermometer interval(2s) sliding(2s);)",0, stream_callback)
if res.code ~=0 then
    print("open stream--- failed:"..res.error)
    return
@@ -146,4 +164,5 @@ while loop_index < 30 do
end

driver.close_stream(stream)

driver.close(conn)
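For the continuous-query half of the example, the registered callback simply receives a Lua table with the latest window's values; a minimal sketch of such a callback, where the field names are assumed to follow the column aliases in the SELECT above:

-- hypothetical stream callback; prints whatever key/value pairs the window delivers
local function print_window(t)
    print("window result:")
    for key, value in pairs(t) do
        print(key, value)
    end
end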
@@ -29,8 +29,8 @@ pipeline {
    agent none
    environment{

        WK = '/var/lib/jenkins/workspace/TDinternal'
        WKC= '/var/lib/jenkins/workspace/TDinternal/community'
        WK = '/data/lib/jenkins/workspace/TDinternal'
        WKC= '/data/lib/jenkins/workspace/TDinternal/community'
    }

    stages {
@@ -0,0 +1,73 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
from util.log import *
from util.cases import *
from util.sql import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def isLuaInstalled(self):
        if not which('lua'):
            tdLog.exit("Lua not found!")
            return False
        else:
            return True

    def run(self):
        tdSql.prepare()
        # tdLog.info("Check if Lua installed")
        # if not self.isLuaInstalled():
        #     sys.exit(1)

        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)

        targetPath = buildPath + "/../tests/examples/lua"
        tdLog.info(targetPath)
        currentPath = os.getcwd()
        os.chdir(targetPath)
        os.system('./build.sh')
        os.system('lua test.lua')

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


#tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -334,5 +334,6 @@ python3 ./test.py -f tag_lite/alter_tag.py

python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
python3 test.py -f insert/insert_before_use_db.py

#======================p4-end===============
@@ -82,6 +82,8 @@ class TDTestCase:
        tdSql.execute("import into tbx file \'%s\'"%(self.csvfile))
        tdSql.query('select * from tbx')
        tdSql.checkRows(self.rows)
        #TD-4447 import the same csv twice
        tdSql.execute("import into tbx file \'%s\'"%(self.csvfile))

    def stop(self):
        self.destroyCSVFile()
@@ -0,0 +1,39 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
from util.log import *
from util.cases import *
from util.sql import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        tdSql.error('insert into tb values (now + 10m, 10)')
        tdSql.prepare()
        tdSql.error('insert into tb values (now + 10m, 10)')
        tdSql.execute('drop database db')
        tdSql.error('insert into tb values (now + 10m, 10)')


    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -35,3 +35,5 @@ python3.8 ./test.py $1 -s && sleep 1
python3.8 ./test.py $1 -f client/client.py
python3.8 ./test.py $1 -s && sleep 1

# connector
python3.8 ./test.py $1 -f connector/lua.py
File diff suppressed because it is too large
@@ -24,6 +24,9 @@ sql drop database if exists $db
sql create database $db keep 36500
sql use $db

print =====================================> td-4481
sql create database $db

print =====================================> test case for twa in single block

sql create table t1 (ts timestamp, k float);
@@ -18,6 +18,7 @@ system general/parser/gendata.sh
sql create table tbx (ts TIMESTAMP, collect_area NCHAR(12), device_id BINARY(16), imsi BINARY(16), imei BINARY(16), mdn BINARY(10), net_type BINARY(4), mno NCHAR(4), province NCHAR(10), city NCHAR(16), alarm BINARY(2))
print ====== create tables success, starting import data

sql import into tbx file '~/data.sql'
sql import into tbx file '~/data.sql'

sql select count(*) from tbx
@@ -9,7 +9,7 @@ sql connect

print ======================== dnode1 start

$dbPrefix = nest_query
$dbPrefix = nest_db
$tbPrefix = nest_tb
$mtPrefix = nest_mt
$tbNum = 10
@@ -17,7 +17,6 @@ $rowNum = 10000
$totalNum = $tbNum * $rowNum

print =============== nestquery.sim

$i = 0
$db = $dbPrefix . $i
$mt = $mtPrefix . $i
@@ -64,4 +64,5 @@ run general/parser/slimit_alter_tags.sim
run general/parser/udf.sim
run general/parser/udf_dll.sim
run general/parser/udf_dll_stable.sim
run general/parser/nestquery.sim