Merge branch 'develop' into dev/chr
commit e2d693718b

@@ -24,14 +24,14 @@ echo "compile_dir: ${compile_dir}"
 echo "pkg_dir: ${pkg_dir}"
 
 if [ -d ${pkg_dir} ]; then
   rm -rf ${pkg_dir}
 fi
 mkdir -p ${pkg_dir}
 cd ${pkg_dir}
 
 libfile="libtaos.so.${tdengine_ver}"
 
 # create install dir
 install_home_path="/usr/local/taos"
 mkdir -p ${pkg_dir}${install_home_path}
 mkdir -p ${pkg_dir}${install_home_path}/bin
@@ -42,7 +42,7 @@ mkdir -p ${pkg_dir}${install_home_path}/examples
 mkdir -p ${pkg_dir}${install_home_path}/include
 mkdir -p ${pkg_dir}${install_home_path}/init.d
 mkdir -p ${pkg_dir}${install_home_path}/script
 
 cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
 cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
 cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
@@ -54,7 +54,7 @@ cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_pat
 cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin
 cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
 cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
 cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
 cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
 cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
 cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples
@@ -67,7 +67,41 @@ fi
 cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
 cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
 cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
-cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector ||:
+cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||:
 
+if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
+  install_user_local_path="/usr/local"
+  mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
+  cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/
+  if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then
+    cp ${compile_dir}/build/bin/jemalloc.sh ${pkg_dir}${install_user_local_path}/bin/
+  fi
+  if [ -f ${compile_dir}/build/bin/jeprof ]; then
+    cp ${compile_dir}/build/bin/jeprof ${pkg_dir}${install_user_local_path}/bin/
+  fi
+  if [ -f ${compile_dir}/build/include/jemalloc/jemalloc.h ]; then
+    cp ${compile_dir}/build/include/jemalloc/jemalloc.h ${pkg_dir}${install_user_local_path}/include/jemalloc/
+  fi
+  if [ -f ${compile_dir}/build/lib/libjemalloc.so.2 ]; then
+    cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/
+    ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so
+  fi
+  if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
+    cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
+  fi
+  if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
+    cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
+  fi
+  if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
+    cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/
+  fi
+  if [ -f ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ]; then
+    cp ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ${pkg_dir}${install_user_local_path}/share/doc/jemalloc/
+  fi
+  if [ -f ${compile_dir}/build/share/man/man3/jemalloc.3 ]; then
+    cp ${compile_dir}/build/share/man/man3/jemalloc.3 ${pkg_dir}${install_user_local_path}/share/man/man3/
+  fi
+fi
+
 cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/
 chmod 755 ${pkg_dir}/DEBIAN/*
@@ -75,7 +109,7 @@ chmod 755 ${pkg_dir}/DEBIAN/*
 # modify version of control
 debver="Version: "$tdengine_ver
 sed -i "2c$debver" ${pkg_dir}/DEBIAN/control
 
 #get taos version, then set deb name
 
 
@@ -90,7 +124,7 @@ fi
 
 if [ "$verType" == "beta" ]; then
   debname=${debname}-${verType}".deb"
 elif [ "$verType" == "stable" ]; then
   debname=${debname}".deb"
 else
   echo "unknow verType, nor stabel or beta"
@@ -101,7 +135,7 @@ fi
 dpkg -b ${pkg_dir} $debname
 echo "make deb package success!"
 
 cp ${pkg_dir}/*.deb ${output_dir}
 
 # clean tmep dir
 rm -rf ${pkg_dir}
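
A quick sanity check of the package produced by the deb script above is to inspect its control metadata and payload before shipping. This is only an illustrative sketch; the package path below is hypothetical and depends on ${debname} and ${output_dir} at build time.

    # show the control fields, including the "Version:" line patched by sed above
    dpkg -I output_dir/TDengine-server-x.y.z.deb
    # list the files that will land under /usr/local/taos and (optionally) /usr/local
    dpkg -c output_dir/TDengine-server-x.y.z.deb | grep -E 'taos|jemalloc'
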
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
 # Generate rpm package for centos
 
 set -e
 # set -x
@@ -60,7 +60,7 @@ ${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_di
 
 # copy rpm package to output_dir, and modify package name, then clean temp dir
 #${csudo} cp -rf RPMS/* ${output_dir}
 cp_rpm_package ${pkg_dir}/RPMS
 
 
 if [ "$verMode" == "cluster" ]; then
@@ -74,7 +74,7 @@ fi
 
 if [ "$verType" == "beta" ]; then
   rpmname=${rpmname}-${verType}".rpm"
 elif [ "$verType" == "stable" ]; then
   rpmname=${rpmname}".rpm"
 else
   echo "unknow verType, nor stabel or beta"
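
The rpm flow is analogous: rpmbuild is driven with --define="_version ${tdengine_ver}" and --define="_topdir ${pkg_dir}" (see the hunk header above), and cp_rpm_package then copies the result from ${pkg_dir}/RPMS to ${output_dir}. A hedged sketch for checking the built package; the file name is hypothetical:

    rpm -qpi output_dir/TDengine-x.y.z.rpm          # header info, including the injected version
    rpm -qpl output_dir/TDengine-x.y.z.rpm | head   # packaged file list
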
@@ -1,4 +1,5 @@
 %define homepath /usr/local/taos
+%define userlocalpath /usr/local
 %define cfg_install_dir /etc/taos
 %define __strip /bin/true
 
@@ -12,22 +13,22 @@ URL: www.taosdata.com
 AutoReqProv: no
 
 #BuildRoot: %_topdir/BUILDROOT
 BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
 
 #Prefix: /usr/local/taos
 
 #BuildRequires:
 #Requires:
 
 %description
 Big Data Platform Designed and Optimized for IoT
 
 #"prep" Nothing needs to be done
 #%prep
 #%setup -q
 #%setup -T
 
 #"build" Nothing needs to be done
 #%build
 #%configure
 #make %{?_smp_mflags}
@@ -75,9 +76,53 @@ fi
 cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
 cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
 cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
-cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector ||:
+cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||:
 cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples
 
 
+if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
+  mkdir -p %{buildroot}%{userlocalpath}/bin
+  mkdir -p %{buildroot}%{userlocalpath}/lib
+  mkdir -p %{buildroot}%{userlocalpath}/lib/pkgconfig
+  mkdir -p %{buildroot}%{userlocalpath}/include
+  mkdir -p %{buildroot}%{userlocalpath}/include/jemalloc
+  mkdir -p %{buildroot}%{userlocalpath}/share
+  mkdir -p %{buildroot}%{userlocalpath}/share/doc
+  mkdir -p %{buildroot}%{userlocalpath}/share/doc/jemalloc
+  mkdir -p %{buildroot}%{userlocalpath}/share/man
+  mkdir -p %{buildroot}%{userlocalpath}/share/man/man3
+
+  cp %{_compiledir}/build/bin/jemalloc-config %{buildroot}%{userlocalpath}/bin/
+  if [ -f %{_compiledir}/build/bin/jemalloc.sh ]; then
+    cp %{_compiledir}/build/bin/jemalloc.sh %{buildroot}%{userlocalpath}/bin/
+  fi
+  if [ -f %{_compiledir}/build/bin/jeprof ]; then
+    cp %{_compiledir}/build/bin/jeprof %{buildroot}%{userlocalpath}/bin/
+  fi
+  if [ -f %{_compiledir}/build/include/jemalloc/jemalloc.h ]; then
+    cp %{_compiledir}/build/include/jemalloc/jemalloc.h %{buildroot}%{userlocalpath}/include/jemalloc/
+  fi
+  if [ -f %{_compiledir}/build/lib/libjemalloc.so.2 ]; then
+    cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/
+    ln -sf libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/libjemalloc.so
+  fi
+  if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
+    cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{userlocalpath}/lib/
+  fi
+  if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
+    cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{userlocalpath}/lib/
+  fi
+  if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then
+    cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{userlocalpath}/lib/pkgconfig/
+  fi
+  if [ -f %{_compiledir}/build/share/doc/jemalloc/jemalloc.html ]; then
+    cp %{_compiledir}/build/share/doc/jemalloc/jemalloc.html %{buildroot}%{userlocalpath}/share/doc/jemalloc/
+  fi
+  if [ -f %{_compiledir}/build/share/man/man3/jemalloc.3 ]; then
+    cp %{_compiledir}/build/share/man/man3/jemalloc.3 %{buildroot}%{userlocalpath}/share/man/man3/
+  fi
+fi
 
 #Scripts executed before installation
 %pre
 csudo=""
@@ -103,7 +148,7 @@ fi
 # if taos.cfg already softlink, remove it
 if [ -f %{cfg_install_dir}/taos.cfg ]; then
   ${csudo} rm -f %{homepath}/cfg/taos.cfg || :
 fi
 
 # there can not libtaos.so*, otherwise ln -s error
 ${csudo} rm -f %{homepath}/driver/libtaos* || :
@@ -116,18 +161,18 @@ if command -v sudo > /dev/null; then
 fi
 cd %{homepath}/script
 ${csudo} ./post.sh
 
 # Scripts executed before uninstall
 %preun
 csudo=""
 if command -v sudo > /dev/null; then
   csudo="sudo"
 fi
 # only remove package to call preun.sh, not but update(2)
 if [ $1 -eq 0 ];then
   #cd %{homepath}/script
   #${csudo} ./preun.sh
 
   if [ -f %{homepath}/script/preun.sh ]; then
     cd %{homepath}/script
     ${csudo} ./preun.sh
@@ -135,7 +180,7 @@ if [ $1 -eq 0 ];then
     bin_link_dir="/usr/bin"
     lib_link_dir="/usr/lib"
     inc_link_dir="/usr/include"
 
     data_link_dir="/usr/local/taos/data"
     log_link_dir="/usr/local/taos/log"
     cfg_link_dir="/usr/local/taos/cfg"
@@ -149,20 +194,20 @@ if [ $1 -eq 0 ];then
     ${csudo} rm -f ${inc_link_dir}/taos.h || :
     ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
     ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
 
     ${csudo} rm -f ${log_link_dir} || :
     ${csudo} rm -f ${data_link_dir} || :
 
     pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
     if [ -n "$pid" ]; then
       ${csudo} kill -9 $pid || :
     fi
   fi
 fi
 
 # Scripts executed after uninstall
 %postun
 
 # clean build dir
 %clean
 csudo=""
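
In the spec's %install section above, the optional jemalloc tree is only staged under %{buildroot}%{userlocalpath}; whether it ends up in the final rpm also depends on the %files list, which is outside this excerpt. A minimal check on a built package (path hypothetical):

    rpm -qpl TDengine-x.y.z.rpm | grep jemalloc || echo "built without jemalloc"
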
@@ -59,11 +59,11 @@ initd_mod=0
 service_mod=2
 if pidof systemd &> /dev/null; then
   service_mod=0
 elif $(which service &> /dev/null); then
   service_mod=1
   service_config_dir="/etc/init.d"
   if $(which chkconfig &> /dev/null); then
     initd_mod=1
   elif $(which insserv &> /dev/null); then
     initd_mod=2
   elif $(which update-rc.d &> /dev/null); then
@@ -71,7 +71,7 @@ elif $(which service &> /dev/null); then
   else
     service_mod=2
   fi
 else
   service_mod=2
 fi
 
@@ -103,7 +103,7 @@ elif echo $osinfo | grep -qwi "fedora" ; then
   os_type=2
 else
   echo " osinfo: ${osinfo}"
   echo " This is an officially unverified linux system,"
   echo " if there are any problems with the installation and operation, "
   echo " please feel free to contact taosdata.com for support."
   os_type=1
@@ -138,7 +138,7 @@ do
       echo "Usage: `basename $0` -v [server | client] -e [yes | no]"
       exit 0
       ;;
     ?) #unknow option
       echo "unkonw argument"
       exit 1
       ;;
@@ -157,9 +157,9 @@ function kill_process() {
 function install_main_path() {
   #create install main dir and all sub dir
   ${csudo} rm -rf ${install_main_dir} || :
   ${csudo} mkdir -p ${install_main_dir}
   ${csudo} mkdir -p ${install_main_dir}/cfg
   ${csudo} mkdir -p ${install_main_dir}/bin
   ${csudo} mkdir -p ${install_main_dir}/connector
   ${csudo} mkdir -p ${install_main_dir}/driver
   ${csudo} mkdir -p ${install_main_dir}/examples
@@ -168,10 +168,10 @@ function install_main_path() {
   if [ "$verMode" == "cluster" ]; then
     ${csudo} mkdir -p ${nginx_dir}
   fi
 
   if [[ -e ${script_dir}/email ]]; then
     ${csudo} cp ${script_dir}/email ${install_main_dir}/ ||:
   fi
 }
 
 function install_bin() {
@@ -207,29 +207,75 @@ function install_lib() {
   ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
   ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
   #${csudo} rm -rf ${v15_java_app_dir} || :
   ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
 
   ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
   ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
 
   if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
     ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
     ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
   fi
 
   #if [ "$verMode" == "cluster" ]; then
   #  # Compatible with version 1.5
   #  ${csudo} mkdir -p ${v15_java_app_dir}
   #  ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar
   #  ${csudo} chmod 777 ${v15_java_app_dir} || :
   #fi
 
   ${csudo} ldconfig
 }
 
+function install_jemalloc() {
+  jemalloc_dir=${script_dir}/jemalloc
+
+  if [ -d ${jemalloc_dir} ]; then
+    ${csudo} /usr/bin/install -c -d /usr/local/bin
+
+    if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
+      ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
+    fi
+    if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
+      ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
+    fi
+    if [ -f ${jemalloc_dir}/bin/jeprof ]; then
+      ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
+    fi
+    if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
+      ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc
+      ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
+    fi
+    if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
+      ${csudo} /usr/bin/install -c -d /usr/local/lib
+      ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
+      ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
+      ${csudo} /usr/bin/install -c -d /usr/local/lib
+      if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
+        ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
+      fi
+      if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+        ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
+      fi
+      if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+        ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig
+        ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
+      fi
+    fi
+    if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
+      ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc
+      ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
+    fi
+    if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
+      ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
+      ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
+    fi
+  fi
+}
+
 function install_header() {
   ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
   ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
   ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
   ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
 }
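
install_jemalloc() above copies a jemalloc build bundled under ${script_dir}/jemalloc into /usr/local when that directory exists in the package. A minimal post-install verification sketch (illustrative only; it assumes the package was built with jemalloc and that /usr/local/lib is covered by the loader configuration, e.g. after the ldconfig call in install_lib):

    ls -l /usr/local/lib/libjemalloc.so.2 /usr/local/lib/libjemalloc.so
    ldconfig -p | grep jemalloc || echo "libjemalloc not visible to the dynamic loader"
    # any binary will do for a quick preload test
    LD_PRELOAD=/usr/local/lib/libjemalloc.so.2 /bin/true && echo "preload OK"
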
@@ -246,13 +292,13 @@ function add_newHostname_to_hosts() {
     if [[ "$s" == "$localIp" ]]; then
       return
     fi
   done
   ${csudo} echo "127.0.0.1 $1" >> /etc/hosts ||:
 }
 
 function set_hostname() {
   echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:"
   read newHostname
   while true; do
     if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then
       break
@@ -266,25 +312,25 @@ function set_hostname() {
   if [[ $retval != 0 ]]; then
     echo
     echo "set hostname fail!"
     return
   fi
   #echo -e -n "$(hostnamectl status --static)"
   #echo -e -n "$(hostnamectl status --transient)"
   #echo -e -n "$(hostnamectl status --pretty)"
 
   #ubuntu/centos /etc/hostname
   if [[ -e /etc/hostname ]]; then
     ${csudo} echo $newHostname > /etc/hostname ||:
   fi
 
   #debian: #HOSTNAME=yourname
   if [[ -e /etc/sysconfig/network ]]; then
     ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||:
   fi
 
   ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg
   serverFqdn=$newHostname
 
   if [[ -e /etc/hosts ]]; then
     add_newHostname_to_hosts $newHostname
   fi
@@ -302,7 +348,7 @@ function is_correct_ipaddr() {
       return 0
     fi
   done
 
   return 1
 }
 
@@ -316,13 +362,13 @@ function set_ipAsFqdn() {
     echo
     echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}"
     localFqdn="127.0.0.1"
     # Write the local FQDN to configuration file
     ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
     serverFqdn=$localFqdn
     echo
     return
   fi
 
   echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:"
   echo
   echo -e -n "${GREEN}$iplist${NC}"
@@ -331,15 +377,15 @@ function set_ipAsFqdn() {
   echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:"
   read localFqdn
   while true; do
     if [ ! -z "$localFqdn" ]; then
       # Check if correct ip address
       is_correct_ipaddr $localFqdn
       retval=`echo $?`
       if [[ $retval != 0 ]]; then
         read -p "Please choose an IP from local IP list:" localFqdn
       else
         # Write the local FQDN to configuration file
         ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
         serverFqdn=$localFqdn
         break
       fi
@@ -354,59 +400,59 @@ function local_fqdn_check() {
   echo
   echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}"
   echo
   if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then
     echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}"
     echo
 
     while true
     do
       read -r -p "Set hostname now? [Y/n] " input
       if [ ! -n "$input" ]; then
         set_hostname
         break
       else
         case $input in
           [yY][eE][sS]|[yY])
             set_hostname
             break
             ;;
 
           [nN][oO]|[nN])
             set_ipAsFqdn
             break
             ;;
 
           *)
             echo "Invalid input..."
             ;;
         esac
       fi
     done
   fi
 }
 
 function install_config() {
   #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
 
   if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
     ${csudo} mkdir -p ${cfg_install_dir}
     [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
     ${csudo} chmod 644 ${cfg_install_dir}/*
   fi
 
   ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
   ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
 
   [ ! -z $1 ] && return 0 || : # only install client
 
   if ((${update_flag}==1)); then
     return 0
   fi
 
   if [ "$interactiveFqdn" == "no" ]; then
     return 0
   fi
 
   local_fqdn_check
 
   #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
@@ -424,8 +470,8 @@ function install_config() {
     if [ ! -z "$firstEp" ]; then
       # check the format of the firstEp
       #if [[ $firstEp == $FQDN_PATTERN ]]; then
         # Write the first FQDN to configuration file
         ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg
         break
       #else
       #  read -p "Please enter the correct FQDN:port: " firstEp
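
The fqdn rewrite used by set_hostname() and set_ipAsFqdn() above is a single GNU sed substitution. A worked example of what it does to a representative taos.cfg line (the hostname is made up):

    echo '# fqdn  localhost' | sed -r 's/#*\s*(fqdn\s*).*/\1h1.example.com/'
    # prints: fqdn  h1.example.com   (comment marker dropped, value replaced)
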
@@ -433,9 +479,9 @@ function install_config() {
     else
       break
     fi
   done
 
   # user email
   #EMAIL_PATTERN='^[A-Za-z0-9\u4e00-\u9fa5]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$'
   #EMAIL_PATTERN='^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$'
   #EMAIL_PATTERN="^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$"
@@ -446,31 +492,31 @@ function install_config() {
     if [ ! -z "$emailAddr" ]; then
       # check the format of the emailAddr
       #if [[ "$emailAddr" =~ $EMAIL_PATTERN ]]; then
         # Write the email address to temp file
         email_file="${install_main_dir}/email"
         ${csudo} bash -c "echo $emailAddr > ${email_file}"
         break
       #else
       #  read -p "Please enter the correct email address: " emailAddr
       #fi
     else
       break
     fi
   done
 }
 
 
 function install_log() {
   ${csudo} rm -rf ${log_dir} || :
   ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
 
   ${csudo} ln -s ${log_dir} ${install_main_dir}/log
 }
 
 function install_data() {
   ${csudo} mkdir -p ${data_dir}
 
   ${csudo} ln -s ${data_dir} ${install_main_dir}/data
 }
 
 function install_connector() {
@@ -485,26 +531,26 @@ function install_examples() {
 
 function clean_service_on_sysvinit() {
   #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
   #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
 
   if pidof taosd &> /dev/null; then
     ${csudo} service taosd stop || :
   fi
 
   if pidof tarbitrator &> /dev/null; then
     ${csudo} service tarbitratord stop || :
   fi
 
   if ((${initd_mod}==1)); then
     if [ -e ${service_config_dir}/taosd ]; then
       ${csudo} chkconfig --del taosd || :
     fi
 
     if [ -e ${service_config_dir}/tarbitratord ]; then
       ${csudo} chkconfig --del tarbitratord || :
     fi
   elif ((${initd_mod}==2)); then
     if [ -e ${service_config_dir}/taosd ]; then
       ${csudo} insserv -r taosd || :
     fi
     if [ -e ${service_config_dir}/tarbitratord ]; then
@@ -518,10 +564,10 @@ function clean_service_on_sysvinit() {
       ${csudo} update-rc.d -f tarbitratord remove || :
     fi
   fi
 
   ${csudo} rm -f ${service_config_dir}/taosd || :
   ${csudo} rm -f ${service_config_dir}/tarbitratord || :
 
   if $(which init &> /dev/null); then
     ${csudo} init q || :
   fi
@@ -544,10 +590,10 @@ function install_service_on_sysvinit() {
     ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
     ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
   fi
 
   #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
   #${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
 
   if ((${initd_mod}==1)); then
     ${csudo} chkconfig --add taosd || :
     ${csudo} chkconfig --level 2345 taosd on || :
@@ -572,7 +618,7 @@ function clean_service_on_systemd() {
   fi
   ${csudo} systemctl disable taosd &> /dev/null || echo &> /dev/null
   ${csudo} rm -f ${taosd_service_config}
 
   tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
   if systemctl is-active --quiet tarbitratord; then
     echo "tarbitrator is running, stopping it..."
@@ -580,7 +626,7 @@ function clean_service_on_systemd() {
   fi
   ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
   ${csudo} rm -f ${tarbitratord_service_config}
 
   if [ "$verMode" == "cluster" ]; then
     nginx_service_config="${service_config_dir}/nginxd.service"
     if systemctl is-active --quiet nginxd; then
@@ -588,8 +634,8 @@ function clean_service_on_systemd() {
       ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null
     fi
     ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null
     ${csudo} rm -f ${nginx_service_config}
   fi
 }
 
 # taos:2345:respawn:/etc/init.d/taosd start
@@ -621,7 +667,7 @@ function install_service_on_systemd() {
   ${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}"
   ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}"
   ${csudo} systemctl enable taosd
 
   tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
   ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
   ${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
@@ -643,9 +689,9 @@ function install_service_on_systemd() {
   ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
   ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
   ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
   #${csudo} systemctl enable tarbitratord
 
   if [ "$verMode" == "cluster" ]; then
     nginx_service_config="${service_config_dir}/nginxd.service"
     ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}"
     ${csudo} bash -c "echo 'Description=Nginx For TDengine Service' >> ${nginx_service_config}"
@@ -674,7 +720,7 @@ function install_service_on_systemd() {
       ${csudo} systemctl enable nginxd
     fi
     ${csudo} systemctl start nginxd
   fi
 }
 
 function install_service() {
@@ -757,7 +803,7 @@ function update_TDengine() {
     fi
     sleep 1
   fi
 
   if [ "$verMode" == "cluster" ]; then
     if pidof nginx &> /dev/null; then
       if ((${service_mod}==0)); then
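
Once install_service_on_systemd() or install_service_on_sysvinit() above has registered the service, a quick check that the unit is in place looks like this (sketch; assumes a systemd host, i.e. service_mod=0):

    systemctl is-enabled taosd
    systemctl start taosd && systemctl is-active taosd
    # sysvinit fallback, mirroring the chkconfig branch:
    #   service taosd start && chkconfig --list taosd
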
@@ -770,12 +816,13 @@ function update_TDengine() {
       sleep 1
     fi
   fi
 
   install_main_path
 
   install_log
   install_header
   install_lib
+  install_jemalloc
   if [ "$pagMode" != "lite" ]; then
     install_connector
   fi
@@ -783,10 +830,10 @@ function update_TDengine() {
   if [ -z $1 ]; then
     install_bin
     install_service
     install_config
 
     openresty_work=false
     if [ "$verMode" == "cluster" ]; then
       # Check if openresty is installed
       # Check if nginx is installed successfully
       if type curl &> /dev/null; then
@@ -797,7 +844,7 @@ function update_TDengine() {
           echo -e "\033[44;31;5mNginx for TDengine does not work! Please try again!\033[0m"
         fi
       fi
     fi
 
     #echo
     #echo -e "\033[44;32;1mTDengine is updated successfully!${NC}"
@@ -816,7 +863,7 @@ function update_TDengine() {
     else
       echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos -h $serverFqdn${NC} in shell${NC}"
     fi
 
     echo
     echo -e "\033[44;32;1mTDengine is updated successfully!${NC}"
   else
@@ -839,14 +886,14 @@ function install_TDengine() {
   tar -zxf taos.tar.gz
 
   echo -e "${GREEN}Start to install TDengine...${NC}"
 
   install_main_path
 
   if [ -z $1 ]; then
     install_data
   fi
 
   install_log
   install_header
   install_lib
   if [ "$pagMode" != "lite" ]; then
@@ -871,8 +918,8 @@ function install_TDengine() {
       fi
     fi
   fi
 
   install_config
 
   # Ask if to start the service
   #echo
@@ -885,36 +932,36 @@ function install_TDengine() {
       echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}"
     else
       echo -e "${GREEN_DARK}To start TDengine ${NC}: taosd${NC}"
     fi
 
     #if [ ${openresty_work} = 'true' ]; then
     #  echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
     #else
     #  echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}"
     #fi
 
     if [ ! -z "$firstEp" ]; then
       tmpFqdn=${firstEp%%:*}
       substr=":"
       if [[ $firstEp =~ $substr ]];then
         tmpPort=${firstEp#*:}
       else
         tmpPort=""
       fi
       if [[ "$tmpPort" != "" ]];then
         echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}"
       else
         echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}"
       fi
       echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
       echo
     elif [ ! -z "$serverFqdn" ]; then
       echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $serverFqdn${GREEN_DARK} to login into TDengine server${NC}"
       echo
     fi
 
     echo -e "\033[44;32;1mTDengine is installed successfully!${NC}"
     echo
   else # Only install client
     install_bin
     install_config
@@ -945,6 +992,6 @@ elif [ "$verType" == "client" ]; then
   else
     install_TDengine client
   fi
 else
   echo "please input correct verType"
 fi
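
The firstEp handling in install_TDengine() above splits "host:port" with plain parameter expansion rather than an external tool. A worked example with a made-up endpoint:

    firstEp="h1.example.com:6030"    # hypothetical value
    echo "${firstEp%%:*}"            # h1.example.com - longest ':*' suffix removed (drops the port)
    echo "${firstEp#*:}"             # 6030           - shortest '*:' prefix removed (drops the host)
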
@@ -204,7 +204,7 @@ function install_jemalloc() {
     if [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ]; then
       /usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib
     fi
-    if [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ]; then
+    if [ -f ${binary_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
       /usr/bin/install -c -d /usr/local/lib/pkgconfig
       /usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
     fi
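
The one-line fix above makes installing jemalloc.pc conditional on the .pc file itself instead of on libjemalloc_pic.a. A hedged way to confirm the pkg-config metadata is usable after make_install.sh has run (assumes jemalloc was built and /usr/local/lib/pkgconfig is not already on the default search path):

    PKG_CONFIG_PATH=/usr/local/lib/pkgconfig pkg-config --modversion jemalloc
    PKG_CONFIG_PATH=/usr/local/lib/pkgconfig pkg-config --libs jemalloc   # e.g. -L/usr/local/lib -ljemalloc
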
@ -30,12 +30,12 @@ else
|
||||||
install_dir="${release_dir}/TDengine-server-${version}"
|
install_dir="${release_dir}/TDengine-server-${version}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Directories and files.
|
# Directories and files
|
||||||
if [ "$pagMode" == "lite" ]; then
|
if [ "$pagMode" == "lite" ]; then
|
||||||
strip ${build_dir}/bin/taosd
|
strip ${build_dir}/bin/taosd
|
||||||
strip ${build_dir}/bin/taos
|
strip ${build_dir}/bin/taos
|
||||||
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
|
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
|
||||||
else
|
else
|
||||||
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator\
|
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator\
|
||||||
${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb"
|
${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb"
|
||||||
fi
|
fi
|
||||||
|
@ -73,10 +73,43 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taos
|
||||||
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
|
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
|
||||||
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
|
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
|
||||||
|
|
||||||
|
if [ -f ${build_dir}/bin/jemalloc-config ]; then
|
||||||
|
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
|
||||||
|
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
|
||||||
|
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
|
||||||
|
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
|
||||||
|
fi
|
||||||
|
if [ -f ${build_dir}/bin/jeprof ]; then
|
||||||
|
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
|
||||||
|
fi
|
||||||
|
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
|
||||||
|
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
|
||||||
|
fi
|
||||||
|
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
|
||||||
|
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
|
||||||
|
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
|
||||||
|
fi
|
||||||
|
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
|
||||||
|
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
|
||||||
|
fi
|
||||||
|
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
|
||||||
|
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
|
||||||
|
fi
|
||||||
|
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
|
||||||
|
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
|
||||||
|
fi
|
||||||
|
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
|
||||||
|
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
|
||||||
|
fi
|
||||||
|
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
|
||||||
|
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
if [ "$verMode" == "cluster" ]; then
|
if [ "$verMode" == "cluster" ]; then
|
||||||
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >> remove_temp.sh
|
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >> remove_temp.sh
|
||||||
mv remove_temp.sh ${install_dir}/bin/remove.sh
|
mv remove_temp.sh ${install_dir}/bin/remove.sh
|
||||||
|
|
||||||
mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
|
mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
|
||||||
cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
|
cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
|
||||||
rm -rf ${install_dir}/nginxd/png
|
rm -rf ${install_dir}/nginxd/png
|
||||||
|
@@ -132,7 +165,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
   if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then
     rm -rf ${examples_dir}/JDBC/taosdemo/target
   fi
 
   cp -r ${examples_dir}/JDBC ${install_dir}/examples
   cp -r ${examples_dir}/matlab ${install_dir}/examples
   cp -r ${examples_dir}/python ${install_dir}/examples
@@ -142,7 +175,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
   cp -r ${examples_dir}/C# ${install_dir}/examples
 fi
 # Copy driver
 mkdir -p ${install_dir}/driver
 cp ${lib_files} ${install_dir}/driver
 
 # Copy connector
@@ -168,7 +201,7 @@ fi
 
 # exit 1
 
 cd ${release_dir}
 
 if [ "$verMode" == "cluster" ]; then
   pkg_name=${install_dir}-${osType}-${cpuType}
@@ -185,8 +218,8 @@ fi
 
 if [ "$verType" == "beta" ]; then
   pkg_name=${pkg_name}-${verType}
 elif [ "$verType" == "stable" ]; then
   pkg_name=${pkg_name}
 else
   echo "unknow verType, nor stabel or beta"
   exit 1
@@ -283,6 +283,7 @@ typedef struct SSqlStream {
   int64_t   ctime;  // stream created time
   int64_t   stime;  // stream next executed time
   int64_t   etime;  // stream end query time, when time is larger then etime, the stream will be closed
+  int64_t   ltime;  // stream last row time in stream table
   SInterval interval;
   void *    pTimer;
 
@@ -774,6 +774,10 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
       index = 0;
       sToken = tStrGetToken(sql, &index, false);
 
+      if (sToken.type == TK_ILLEGAL) {
+        return tscSQLSyntaxErrMsg(pCmd->payload, "unrecognized token", sToken.z);
+      }
+
       if (sToken.type == TK_RP) {
         break;
       }
 
@@ -1204,7 +1204,6 @@ static int insertBatchStmtExecute(STscStmt* pStmt) {
   return pStmt->pSql->res.code;
 }
 
-
 int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
   SSqlCmd *pCmd = &pSql->cmd;
   int32_t ret = TSDB_CODE_SUCCESS;
@@ -1234,28 +1233,28 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
   pStmt->mtb.tagSet = true;
 
   sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
-  if (sToken.n > 0 && sToken.type == TK_VALUES) {
+  if (sToken.n > 0 && (sToken.type == TK_VALUES || sToken.type == TK_LP)) {
     return TSDB_CODE_SUCCESS;
   }
 
   if (sToken.n <= 0 || sToken.type != TK_USING) {
-    return TSDB_CODE_TSC_INVALID_OPERATION;
+    return tscSQLSyntaxErrMsg(pCmd->payload, "keywords USING is expected", sToken.z);
   }
 
   sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
   if (sToken.n <= 0 || ((sToken.type != TK_ID) && (sToken.type != TK_STRING))) {
-    return TSDB_CODE_TSC_INVALID_OPERATION;
+    return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z);
   }
   pStmt->mtb.stbname = sToken;
 
   sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
   if (sToken.n <= 0 || sToken.type != TK_TAGS) {
-    return TSDB_CODE_TSC_INVALID_OPERATION;
+    return tscSQLSyntaxErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
   }
 
   sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
   if (sToken.n <= 0 || sToken.type != TK_LP) {
-    return TSDB_CODE_TSC_INVALID_OPERATION;
+    return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
   }
 
   pStmt->mtb.tags = taosArrayInit(4, sizeof(SStrToken));
@@ -1298,9 +1297,6 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
   return TSDB_CODE_SUCCESS;
 }
 
-
-
-
 int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAOS_BIND* tags) {
   size_t tagNum = taosArrayGetSize(pStmt->mtb.tags);
   size_t size = 1048576;
@@ -1386,8 +1382,6 @@ int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAO
   return TSDB_CODE_SUCCESS;
 }
 
-
-
 ////////////////////////////////////////////////////////////////////////////////
 // interface functions
 
@@ -396,11 +396,18 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
       const char* msg2 = "name too long";
 
       SCreateDbInfo* pCreateDB = &(pInfo->pMiscInfo->dbOpt);
-      if (tscValidateName(&pCreateDB->dbname) != TSDB_CODE_SUCCESS) {
+      if (pCreateDB->dbname.n >= TSDB_DB_NAME_LEN) {
+        return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+      }
+
+      char buf[TSDB_DB_NAME_LEN] = {0};
+      SStrToken token = taosTokenDup(&pCreateDB->dbname, buf, tListLen(buf));
+
+      if (tscValidateName(&token) != TSDB_CODE_SUCCESS) {
         return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
       }
 
-      int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), &(pCreateDB->dbname));
+      int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), &token);
       if (ret != TSDB_CODE_SUCCESS) {
         return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
       }
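
The hunk above validates the database-name token by length first and only then copies it into a stack buffer before running the name check, so the original SQL text is never modified in place. A minimal standalone sketch of that order of operations, using simplified stand-ins for SStrToken and taosTokenDup (DB_NAME_LEN, Token and validate_db_name below are illustrative names, not TDengine APIs):

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>

    #define DB_NAME_LEN 33   /* illustrative limit */

    typedef struct { const char *z; size_t n; } Token;

    static int validate_db_name(Token t) {
      char buf[DB_NAME_LEN] = {0};

      if (t.n >= sizeof(buf)) {
        return -1;                        /* "name too long" */
      }
      memcpy(buf, t.z, t.n);              /* work on a copy of the token */

      for (size_t i = 0; i < t.n; ++i) {  /* stand-in for tscValidateName() */
        if (buf[i] == '.') return -2;     /* "invalid name" */
      }
      return 0;
    }

    int main(void) {
      Token ok  = {"power", 5};
      Token bad = {"a.b", 3};
      printf("%d %d\n", validate_db_name(ok), validate_db_name(bad));  /* 0 -2 */
      return 0;
    }
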
@@ -7482,6 +7489,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
   const char* msg1 = "point interpolation query needs timestamp";
   const char* msg2 = "too many tables in from clause";
   const char* msg3 = "start(end) time of query range required or time range too large";
+  const char* msg4 = "interval query not supported, since the result of sub query not include valid timestamp column";
   const char* msg9 = "only tag query not compatible with normal column filter";
 
   int32_t code = TSDB_CODE_SUCCESS;
@@ -7540,11 +7548,25 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
     if (validateIntervalNode(pSql, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
       return TSDB_CODE_TSC_INVALID_OPERATION;
     } else {
-      if (isTimeWindowQuery(pQueryInfo) &&
-          (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
-        return TSDB_CODE_TSC_INVALID_OPERATION;
+      if (isTimeWindowQuery(pQueryInfo)) {
+        // check if the first column of the nest query result is timestamp column
+        SColumn* pCol = taosArrayGetP(pQueryInfo->colList, 0);
+        if (pCol->info.type != TSDB_DATA_TYPE_TIMESTAMP) {
+          return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
+        }
+
+        if (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) {
+          return TSDB_CODE_TSC_INVALID_OPERATION;
+        }
       }
     }
 
+    // set order by info
+    STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
+    if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMeta)) !=
+        TSDB_CODE_SUCCESS) {
+      return TSDB_CODE_TSC_INVALID_OPERATION;
+    }
+
   } else {
     pQueryInfo->command = TSDB_SQL_SELECT;
@@ -7697,8 +7719,8 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
 
   SExprInfo** p = NULL;
   int32_t numOfExpr = 0;
+  pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
   code = createProjectionExpr(pQueryInfo, pTableMetaInfo, &p, &numOfExpr);
 
   if (pQueryInfo->exprList1 == NULL) {
     pQueryInfo->exprList1 = taosArrayInit(4, POINTER_BYTES);
   }
@@ -477,7 +477,6 @@ int doBuildAndSendMsg(SSqlObj *pSql) {
       pCmd->command == TSDB_SQL_INSERT ||
       pCmd->command == TSDB_SQL_CONNECT ||
       pCmd->command == TSDB_SQL_HB ||
-      // pCmd->command == TSDB_SQL_META ||
       pCmd->command == TSDB_SQL_STABLEVGROUP) {
     pRes->code = tscBuildMsg[pCmd->command](pSql, NULL);
   }
@@ -2470,8 +2469,8 @@ int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVg
   pNew->fp = fp;
   pNew->param = (void *)pSql->self;
 
-  tscDebug("0x%"PRIx64" metaRid from %" PRId64 " to %" PRId64 , pSql->self, pSql->metaRid, pNew->self);
+  tscDebug("0x%"PRIx64" metaRid from 0x%" PRIx64 " to 0x%" PRIx64 , pSql->self, pSql->metaRid, pNew->self);
 
   pSql->metaRid = pNew->self;
   int32_t code = tscBuildAndSendRequest(pNew, NULL);
   if (code == TSDB_CODE_SUCCESS) {
@@ -627,7 +627,7 @@ static bool hasAdditionalErrorInfo(int32_t code, SSqlCmd *pCmd) {
 
   char *z = NULL;
   if (len > 0) {
-    z = strstr(pCmd->payload, "invalid SQL");
+    z = strstr(pCmd->payload, "invalid operation");
     if (z == NULL) {
       z = strstr(pCmd->payload, "syntax error");
     }
@@ -24,6 +24,7 @@
 #include "tutil.h"
 
 #include "tscProfile.h"
+#include "tscSubquery.h"
 
 static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOfRows);
 static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOfRows);
@@ -47,8 +48,8 @@ static bool isProjectStream(SQueryInfo* pQueryInfo) {
 
 static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, int16_t prec) {
   float retryRangeFactor = 0.3f;
-  int64_t retryDelta = (int64_t)(tsStreamCompRetryDelay * retryRangeFactor);
-  retryDelta = ((rand() % retryDelta) + tsStreamCompRetryDelay) * 1000L;
+  int64_t retryDelta = (int64_t)(tsRetryStreamCompDelay * retryRangeFactor);
+  retryDelta = ((rand() % retryDelta) + tsRetryStreamCompDelay) * 1000L;
 
   if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
     // change to ms
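
The renamed tsRetryStreamCompDelay feeds the jittered retry computation shown in the hunk above. A self-contained sketch of that calculation, assuming a base delay of 10*1000 ms and a microsecond conversion as in the diff (retry_delay_us is an invented helper name):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <time.h>

    static int64_t retry_delay_us(int32_t baseDelayMs) {
      float   retryRangeFactor = 0.3f;                 /* up to ~30% jitter */
      int64_t jitterRange = (int64_t)(baseDelayMs * retryRangeFactor);
      if (jitterRange <= 0) jitterRange = 1;           /* keep rand() % safe */
      return ((rand() % jitterRange) + baseDelayMs) * 1000L;
    }

    int main(void) {
      srand((unsigned)time(NULL));
      for (int i = 0; i < 3; ++i) {
        printf("retry in %lld us\n", (long long)retry_delay_us(10 * 1000));
      }
      return 0;
    }
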
@@ -575,6 +576,14 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
 
   pStream->stime = tscGetStreamStartTimestamp(pSql, pStream, pStream->stime);
 
+  // set stime with ltime if ltime > stime
+  const char* dstTable = pStream->dstTable? pStream->dstTable: "";
+  tscDebug(" CQ table=%s ltime is %"PRId64, dstTable, pStream->ltime);
+  if(pStream->ltime != INT64_MIN && pStream->ltime > pStream->stime) {
+    tscWarn(" CQ set stream %s stime=%"PRId64" replace with ltime=%"PRId64" if ltime>0 ", dstTable, pStream->stime, pStream->ltime);
+    pStream->stime = pStream->ltime;
+  }
+
   int64_t starttime = tscGetLaunchTimestamp(pStream);
   pCmd->command = TSDB_SQL_SELECT;
 
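
The block added above only moves the start time forward when a last-row time has actually been recorded for the destination table. A tiny standalone model of that decision (pick_start_time and NO_LAST_ROW are illustrative names; INT64_MIN plays the same sentinel role as in the diff):

    #include <stdio.h>
    #include <stdint.h>

    #define NO_LAST_ROW INT64_MIN

    static int64_t pick_start_time(int64_t stime, int64_t ltime) {
      if (ltime != NO_LAST_ROW && ltime > stime) {
        return ltime;   /* resume right after what was already produced */
      }
      return stime;     /* fresh stream, or last row older than stime */
    }

    int main(void) {
      printf("%lld\n", (long long)pick_start_time(1000, NO_LAST_ROW));  /* 1000 */
      printf("%lld\n", (long long)pick_start_time(1000, 5000));         /* 5000 */
      printf("%lld\n", (long long)pick_start_time(1000, 500));          /* 1000 */
      return 0;
    }
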
@@ -590,7 +599,66 @@ void tscSetStreamDestTable(SSqlStream* pStream, const char* dstTable) {
   pStream->dstTable = dstTable;
 }
 
-TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
+// fetchFp call back
+void fetchFpStreamLastRow(void* param ,TAOS_RES* res, int num) {
+  SSqlStream* pStream = (SSqlStream*)param;
+  SSqlObj* pSql = res;
+
+  // get row data set to ltime
+  tscSetSqlOwner(pSql);
+  TAOS_ROW row = doSetResultRowData(pSql);
+  if( row && row[0] ) {
+    pStream->ltime = *((int64_t*)row[0]);
+    const char* dstTable = pStream->dstTable? pStream->dstTable: "";
+    tscDebug(" CQ stream table=%s last row time=%"PRId64" .", dstTable, pStream->ltime);
+  }
+  tscClearSqlOwner(pSql);
+
+  // no condition call
+  tscCreateStream(param, pStream->pSql, TSDB_CODE_SUCCESS);
+  taos_free_result(res);
+}
+
+// fp callback
+void fpStreamLastRow(void* param ,TAOS_RES* res, int code) {
+  // check result successful
+  if (code != TSDB_CODE_SUCCESS) {
+    tscCreateStream(param, res, TSDB_CODE_SUCCESS);
+    taos_free_result(res);
+    return ;
+  }
+
+  // asynchronous fetch last row data
+  taos_fetch_rows_a(res, fetchFpStreamLastRow, param);
+}
+
+void cbParseSql(void* param, TAOS_RES* res, int code) {
+  // check result successful
+  SSqlStream* pStream = (SSqlStream*)param;
+  SSqlObj* pSql = pStream->pSql;
+  SSqlCmd* pCmd = &pSql->cmd;
+  if (code != TSDB_CODE_SUCCESS) {
+    pSql->res.code = code;
+    tscDebug("0x%"PRIx64" open stream parse sql failed, sql:%s, reason:%s, code:%s", pSql->self, pSql->sqlstr, pCmd->payload, tstrerror(code));
+    pStream->fp(pStream->param, NULL, NULL);
+    return;
+  }
+
+  // check dstTable valid
+  if(pStream->dstTable == NULL || strlen(pStream->dstTable) == 0) {
+    tscDebug(" cbParseSql dstTable is empty.");
+    tscCreateStream(param, res, code);
+    return ;
+  }
+
+  // query stream last row time async
+  char sql[128] = "";
+  sprintf(sql, "select last_row(*) from %s;", pStream->dstTable);
+  taos_query_a(pSql->pTscObj, sql, fpStreamLastRow, param);
+  return ;
+}
+
+TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
                               int64_t stime, void *param, void (*callback)(void *)) {
   STscObj *pObj = (STscObj *)taos;
   if (pObj == NULL || pObj->signature != pObj) return NULL;
@@ -613,11 +681,16 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
     return NULL;
   }
 
-  pStream->stime = stime;
-  pStream->fp = fp;
+  pStream->ltime = INT64_MIN;
+  pStream->stime = stime;
+  pStream->fp = fp;
   pStream->callback = callback;
   pStream->param = param;
   pStream->pSql = pSql;
+  pSql->pStream = pStream;
+  pSql->param = pStream;
+  pSql->maxRetry = TSDB_MAX_REPLICA;
+  tscSetStreamDestTable(pStream, dstTable);
 
   pSql->pStream = pStream;
   pSql->param = pStream;
@@ -640,10 +713,17 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
 
   tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
 
+  pSql->fp = cbParseSql;
+  pSql->fetchFp = cbParseSql;
+
+  registerSqlObj(pSql);
+
   int32_t code = tsParseSql(pSql, true);
   if (code == TSDB_CODE_SUCCESS) {
-    tscCreateStream(pStream, pSql, code);
-  } else if (code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+    cbParseSql(pStream, pSql, code);
+  } else if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+    tscDebug(" CQ taso_open_stream IN Process. sql=%s", sqlstr);
+  } else {
     tscError("0x%"PRIx64" open stream failed, sql:%s, code:%s", pSql->self, sqlstr, tstrerror(code));
     taosReleaseRef(tscObjRef, pSql->self);
     free(pStream);
@@ -653,6 +733,11 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
   return pStream;
 }
 
+TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
+                              int64_t stime, void *param, void (*callback)(void *)) {
+  return taos_open_stream_withname(taos, "", sqlstr, fp, stime, param, callback);
+}
+
 void taos_close_stream(TAOS_STREAM *handle) {
   SSqlStream *pStream = (SSqlStream *)handle;
 
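
The new taos_open_stream() above is reduced to a thin wrapper that forwards to taos_open_stream_withname() with an empty destination name, which keeps old call sites source-compatible. A minimal sketch of that wrapper pattern with toy functions (open_stream and open_stream_withname here are stand-ins, not the real client API):

    #include <stdio.h>

    static void open_stream_withname(const char *dstTable, const char *sql) {
      /* the general implementation takes the extra argument */
      printf("open stream '%s' into table '%s'\n", sql,
             dstTable[0] ? dstTable : "<auto>");
    }

    static void open_stream(const char *sql) {
      /* legacy call sites keep working: forward with an empty name */
      open_stream_withname("", sql);
    }

    int main(void) {
      open_stream("select avg(v) from t interval(10s)");
      open_stream_withname("cq_avg_v", "select avg(v) from t interval(10s)");
      return 0;
    }
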
@@ -1469,6 +1469,8 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
 
     SSqlRes* pRes1 = &pParentSql->pSubs[i]->res;
 
+    pParentSql->res.precision = pRes1->precision;
+
     if (pRes1->row > 0 && pRes1->numOfRows > 0) {
       tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self,
                pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal);
@@ -780,7 +780,9 @@ SSDataBlock* doGetDataBlock(void* param, bool* newgroup) {
   SSqlRes* pRes = &pSql->res;
 
   SSDataBlock* pBlock = pInput->block;
-  pOperator->pRuntimeEnv->current = pInput->pTableQueryInfo;
+  if (pOperator->pRuntimeEnv != NULL) {
+    pOperator->pRuntimeEnv->current = pInput->pTableQueryInfo;
+  }
 
   pBlock->info.rows = pRes->numOfRows;
   if (pRes->numOfRows != 0) {
|
||||||
return pBlock;
|
return pBlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void fetchNextBlockIfCompleted(SOperatorInfo* pOperator, bool* newgroup) {
|
||||||
|
SJoinOperatorInfo* pJoinInfo = pOperator->info;
|
||||||
|
|
||||||
|
for (int32_t i = 0; i < pOperator->numOfUpstream; ++i) {
|
||||||
|
SJoinStatus* pStatus = &pJoinInfo->status[i];
|
||||||
|
if (pStatus->pBlock == NULL || pStatus->index >= pStatus->pBlock->info.rows) {
|
||||||
|
pStatus->pBlock = pOperator->upstream[i]->exec(pOperator->upstream[i], newgroup);
|
||||||
|
pStatus->index = 0;
|
||||||
|
|
||||||
|
if (pStatus->pBlock == NULL) {
|
||||||
|
pOperator->status = OP_EXEC_DONE;
|
||||||
|
pJoinInfo->resultInfo.total += pJoinInfo->pRes->info.rows;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) {
|
SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) {
|
||||||
SOperatorInfo *pOperator = (SOperatorInfo*) param;
|
SOperatorInfo *pOperator = (SOperatorInfo*) param;
|
||||||
if (pOperator->status == OP_EXEC_DONE) {
|
if (pOperator->status == OP_EXEC_DONE) {
|
||||||
|
@@ -816,19 +836,9 @@ SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) {
   pJoinInfo->pRes->info.rows = 0;
 
   while(1) {
-    for (int32_t i = 0; i < pOperator->numOfUpstream; ++i) {
-      SJoinStatus* pStatus = &pJoinInfo->status[i];
-      if (pStatus->pBlock == NULL || pStatus->index >= pStatus->pBlock->info.rows) {
-        pStatus->pBlock = pOperator->upstream[i]->exec(pOperator->upstream[i], newgroup);
-        pStatus->index = 0;
-
-        if (pStatus->pBlock == NULL) {
-          pOperator->status = OP_EXEC_DONE;
-
-          pJoinInfo->resultInfo.total += pJoinInfo->pRes->info.rows;
-          return pJoinInfo->pRes;
-        }
-      }
+    fetchNextBlockIfCompleted(pOperator, newgroup);
+    if (pOperator->status == OP_EXEC_DONE) {
+      return pJoinInfo->pRes;
     }
 
     SJoinStatus* st0 = &pJoinInfo->status[0];
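
doDataBlockJoin() now delegates the "refill any exhausted upstream block" step to fetchNextBlockIfCompleted() and simply checks the operator status afterwards. Below is a simplified, runnable model of that pattern over plain arrays (InputStatus, pull and refill_if_consumed are invented stand-ins for the operator types, not the real API):

    #include <stdio.h>
    #include <stdbool.h>

    #define NUM_INPUTS 2
    #define BATCH 3

    typedef struct {
      const int *data;   /* current batch              */
      int        rows;   /* rows in the current batch  */
      int        index;  /* read position in the batch */
    } InputStatus;

    /* toy upstream: hands out one 3-row batch, then reports exhaustion */
    static bool pull(int which, InputStatus *st) {
      static const int batches[NUM_INPUTS][BATCH] = {{1, 2, 3}, {10, 20, 30}};
      static bool served[NUM_INPUTS] = {false, false};
      if (served[which]) return false;
      served[which] = true;
      st->data = batches[which];
      st->rows = BATCH;
      st->index = 0;
      return true;
    }

    /* refill every input whose batch is fully consumed */
    static bool refill_if_consumed(InputStatus *st) {
      for (int i = 0; i < NUM_INPUTS; ++i) {
        if (st[i].data == NULL || st[i].index >= st[i].rows) {
          if (!pull(i, &st[i])) return false;   /* an input is exhausted: done */
        }
      }
      return true;
    }

    int main(void) {
      InputStatus st[NUM_INPUTS] = {{0}};
      while (refill_if_consumed(st)) {
        /* consume one row from each input, as a stand-in for the join step */
        printf("%d %d\n", st[0].data[st[0].index++], st[1].data[st[1].index++]);
      }
      return 0;
    }
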
@@ -847,8 +857,12 @@ SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) {
 
       if (ts[st->index] < ts0[st0->index]) { // less than the first
         prefixEqual = false;
 
         if ((++(st->index)) >= st->pBlock->info.rows) {
-          break;
+          fetchNextBlockIfCompleted(pOperator, newgroup);
+          if (pOperator->status == OP_EXEC_DONE) {
+            return pJoinInfo->pRes;
+          }
         }
       } else if (ts[st->index] > ts0[st0->index]) { // greater than the first;
         if (prefixEqual == true) {
@@ -856,12 +870,19 @@ SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) {
           for (int32_t j = 0; j < i; ++j) {
             SJoinStatus* stx = &pJoinInfo->status[j];
             if ((++(stx->index)) >= stx->pBlock->info.rows) {
-              break;
+
+              fetchNextBlockIfCompleted(pOperator, newgroup);
+              if (pOperator->status == OP_EXEC_DONE) {
+                return pJoinInfo->pRes;
+              }
             }
           }
         } else {
           if ((++(st0->index)) >= st0->pBlock->info.rows) {
-            break;
+            fetchNextBlockIfCompleted(pOperator, newgroup);
+            if (pOperator->status == OP_EXEC_DONE) {
+              return pJoinInfo->pRes;
+            }
           }
         }
       }
|
||||||
memcpy(schema, pSchema, numOfCol1*sizeof(SSchema));
|
memcpy(schema, pSchema, numOfCol1*sizeof(SSchema));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// update the exprinfo
|
||||||
|
int32_t numOfOutput = (int32_t)tscNumOfExprs(px);
|
||||||
|
for(int32_t i = 0; i < numOfOutput; ++i) {
|
||||||
|
SExprInfo* pex = taosArrayGetP(px->exprList, i);
|
||||||
|
int32_t colId = pex->base.colInfo.colId;
|
||||||
|
for(int32_t j = 0; j < pSourceOperator->numOfOutput; ++j) {
|
||||||
|
if (colId == schema[j].colId) {
|
||||||
|
pex->base.colInfo.colIndex = j;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
px->pQInfo = createQInfoFromQueryNode(px, &tableGroupInfo, pSourceOperator, NULL, NULL, MASTER_SCAN);
|
px->pQInfo = createQInfoFromQueryNode(px, &tableGroupInfo, pSourceOperator, NULL, NULL, MASTER_SCAN);
|
||||||
tfree(pColumnInfo);
|
tfree(pColumnInfo);
|
||||||
tfree(schema);
|
tfree(schema);
|
||||||
|
|
|
@@ -319,7 +319,7 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema);
 SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData);
 SDataCols *tdFreeDataCols(SDataCols *pCols);
 void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols);
-int tdMergeDataCols(SDataCols *target, SDataCols *src, int rowsToMerge);
+int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset);
 
 // ----------------- K-V data row structure
 /*
@@ -39,6 +39,7 @@ extern int8_t tsEnableTelemetryReporting;
 extern char tsEmail[];
 extern char tsArbitrator[];
 extern int8_t tsArbOnline;
+extern int64_t tsArbOnlineTimestamp;
 extern int32_t tsDnodeId;
 
 // common
@@ -75,7 +76,7 @@ extern int32_t tsMinSlidingTime;
 extern int32_t tsMinIntervalTime;
 extern int32_t tsMaxStreamComputDelay;
 extern int32_t tsStreamCompStartDelay;
-extern int32_t tsStreamCompRetryDelay;
+extern int32_t tsRetryStreamCompDelay;
 extern float tsStreamComputDelayRatio; // the delayed computing ration of the whole time window
 extern int32_t tsProjectExecInterval;
 extern int64_t tsMaxRetentWindow;
@@ -441,30 +441,35 @@ void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols)
   pCols->numOfRows++;
 }
 
-int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge) {
+int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset) {
   ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows);
   ASSERT(target->numOfCols == source->numOfCols);
+  int offset = 0;
+
+  if (pOffset == NULL) {
+    pOffset = &offset;
+  }
 
   SDataCols *pTarget = NULL;
 
-  if (dataColsKeyLast(target) < dataColsKeyFirst(source)) {  // No overlap
+  if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyFirst(source))) {  // No overlap
     ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints);
     for (int i = 0; i < rowsToMerge; i++) {
       for (int j = 0; j < source->numOfCols; j++) {
         if (source->cols[j].len > 0) {
-          dataColAppendVal(target->cols + j, tdGetColDataOfRow(source->cols + j, i), target->numOfRows,
+          dataColAppendVal(target->cols + j, tdGetColDataOfRow(source->cols + j, i + (*pOffset)), target->numOfRows,
                            target->maxPoints);
         }
       }
       target->numOfRows++;
     }
+    (*pOffset) += rowsToMerge;
   } else {
     pTarget = tdDupDataCols(target, true);
     if (pTarget == NULL) goto _err;
 
     int iter1 = 0;
-    int iter2 = 0;
-    tdMergeTwoDataCols(target, pTarget, &iter1, pTarget->numOfRows, source, &iter2, source->numOfRows,
+    tdMergeTwoDataCols(target, pTarget, &iter1, pTarget->numOfRows, source, pOffset, source->numOfRows,
                        pTarget->numOfRows + rowsToMerge);
   }
 
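
The reworked tdMergeDataCols() above accepts an optional external offset: when the caller passes NULL it falls back to a local counter, so existing call sites keep working while new callers can merge a source block in several passes. A minimal sketch of that pattern on plain integer arrays (merge_rows and the types are simplified stand-ins, not the real SDataCols API):

    #include <stdio.h>
    #include <stddef.h>

    static void merge_rows(int *target, int *tlen, const int *source,
                           int rowsToMerge, int *pOffset) {
      int offset = 0;
      if (pOffset == NULL) pOffset = &offset;      /* optional cursor */

      for (int i = 0; i < rowsToMerge; i++) {
        target[(*tlen)++] = source[i + *pOffset];  /* read source at the cursor */
      }
      *pOffset += rowsToMerge;                     /* advance for the next call */
    }

    int main(void) {
      int src[] = {1, 2, 3, 4, 5, 6};
      int dst[8];
      int len = 0, cursor = 0;

      merge_rows(dst, &len, src, 3, &cursor);   /* consumes rows 0..2 */
      merge_rows(dst, &len, src, 3, &cursor);   /* consumes rows 3..5 */

      for (int i = 0; i < len; i++) printf("%d ", dst[i]);
      printf("\n");                             /* prints: 1 2 3 4 5 6 */
      return 0;
    }
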
@@ -42,11 +42,12 @@ int32_t tsNumOfMnodes = 3;
 int8_t  tsEnableVnodeBak = 1;
 int8_t  tsEnableTelemetryReporting = 1;
 int8_t  tsArbOnline = 0;
+int64_t tsArbOnlineTimestamp = TSDB_ARB_DUMMY_TIME;
 char    tsEmail[TSDB_FQDN_LEN] = {0};
 int32_t tsDnodeId = 0;
 
 // common
-int32_t tsRpcTimer = 1000;
+int32_t tsRpcTimer = 300;
 int32_t tsRpcMaxTime = 600;  // seconds;
 int32_t tsRpcForceTcp = 0;  //disable this, means query, show command use udp protocol as default
 int32_t tsMaxShellConns = 50000;
@@ -93,7 +94,7 @@ int32_t tsMaxStreamComputDelay = 20000;
 int32_t tsStreamCompStartDelay = 10000;
 
 // the stream computing delay time after executing failed, change accordingly
-int32_t tsStreamCompRetryDelay = 10;
+int32_t tsRetryStreamCompDelay = 10*1000;
 
 // The delayed computing ration. 10% of the whole computing time window by default.
 float tsStreamComputDelayRatio = 0.1f;
@@ -710,7 +711,7 @@ static void doInitGlobalConfig(void) {
   taosInitConfigOption(cfg);
 
   cfg.option = "retryStreamCompDelay";
-  cfg.ptr = &tsStreamCompRetryDelay;
+  cfg.ptr = &tsRetryStreamCompDelay;
   cfg.valType = TAOS_CFG_VTYPE_INT32;
   cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
   cfg.minValue = 10;
@@ -1 +1 @@
-Subproject commit 8ce6d86558afc8c0b50c10f990fd2b4270cf06fc
+Subproject commit 7a26c432f8b4203e42344ff3290b9b9b01b983d5
@@ -16,13 +16,13 @@
  */
 package com.taosdata.jdbc;
 
-import com.taosdata.jdbc.utils.TaosInfo;
-
 import java.nio.ByteBuffer;
 import java.sql.SQLException;
 import java.sql.SQLWarning;
 import java.util.List;
 
+import com.taosdata.jdbc.utils.TaosInfo;
+
 /**
  * JNI connector
  */
@@ -276,23 +276,14 @@ public class TSDBJNIConnector {
     private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes);
 
     public long prepareStmt(String sql) throws SQLException {
-        Long stmt = 0L;
-        try {
-            stmt = prepareStmtImp(sql.getBytes(), this.taos);
-        } catch (Exception e) {
-            e.printStackTrace();
-            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING);
-        }
-
-        if (stmt == TSDBConstants.JNI_CONNECTION_NULL) {
+        Long stmt = prepareStmtImp(sql.getBytes(), this.taos);
+        if (stmt == TSDBConstants.JNI_TDENGINE_ERROR) {
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_SQL);
+        } else if (stmt == TSDBConstants.JNI_CONNECTION_NULL) {
             throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
-        }
-
-        if (stmt == TSDBConstants.JNI_SQL_NULL) {
+        } else if (stmt == TSDBConstants.JNI_SQL_NULL) {
             throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL);
-        }
-
-        if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) {
+        } else if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) {
             throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY);
         }
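
The Java change above replaces a broad try/catch around prepareStmtImp() with explicit checks of the sentinel values the native call can return. The same control-flow shape in a small C sketch (the enum values and helper names are invented for illustration and are not the JNI constants themselves):

    #include <stdio.h>

    enum { CONNECTION_NULL = -1, SQL_NULL = -2, OUT_OF_MEMORY = -3, ENGINE_ERROR = -4 };

    static long prepare_stmt_impl(const char *sql) {
      if (sql == NULL) return SQL_NULL;      /* toy native layer */
      return 42;                             /* pretend statement handle */
    }

    static const char *check_stmt(long stmt) {
      if (stmt == ENGINE_ERROR)    return "invalid SQL";
      if (stmt == CONNECTION_NULL) return "connection is null";
      if (stmt == SQL_NULL)        return "sql is null";
      if (stmt == OUT_OF_MEMORY)   return "out of memory";
      return NULL;                           /* success */
    }

    int main(void) {
      long stmt = prepare_stmt_impl(NULL);
      const char *err = check_stmt(stmt);
      printf("%s\n", err ? err : "prepared ok");
      return 0;
    }
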
@@ -2,6 +2,10 @@
 from .connection import TDengineConnection
 from .cursor import TDengineCursor
 
+# For some reason, the following is needed for VS Code (through PyLance) to
+# recognize that "error" is a valid module of the "taos" package.
+from .error import ProgrammingError
+
 # Globals
 threadsafety = 0
 paramstyle = 'pyformat'
@@ -437,6 +437,7 @@ static void cqProcessCreateTimer(void *param, void *tmrId) {
   taosReleaseRef(cqObjRef, (int64_t)param);
 }
 
+// inner implement in tscStream.c
+TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* desName, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
+                              int64_t stime, void *param, void (*callback)(void *));
+
 static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
   pObj->pContext = pContext;
 
@@ -449,11 +453,10 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
   pObj->tmrId = 0;
 
   if (pObj->pStream == NULL) {
-    pObj->pStream = taos_open_stream(pContext->dbConn, pObj->sqlStr, cqProcessStreamRes, INT64_MIN, (void *)pObj->rid, NULL);
+    pObj->pStream = taos_open_stream_withname(pContext->dbConn, pObj->dstTable, pObj->sqlStr, cqProcessStreamRes, INT64_MIN, (void *)pObj->rid, NULL);
 
     // TODO the pObj->pStream may be released if error happens
     if (pObj->pStream) {
-      tscSetStreamDestTable(pObj->pStream, pObj->dstTable);
       pContext->num++;
       cDebug("vgId:%d, id:%d CQ:%s is opened", pContext->vgId, pObj->tid, pObj->sqlStr);
     } else {
@@ -375,6 +375,8 @@ do { \
 
 #define TSDB_MAX_WAL_SIZE (1024*1024*3)
 
+#define TSDB_ARB_DUMMY_TIME 4765104000000 // 2121-01-01 00:00:00.000, :P
+
 typedef enum {
   TAOS_QTYPE_RPC = 0,
   TAOS_QTYPE_FWD = 1,
@@ -215,11 +215,11 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_VND_IS_FLOWCTRL              TAOS_DEF_ERROR_CODE(0, 0x050C) //"Database memory is full for waiting commit")
 #define TSDB_CODE_VND_IS_DROPPING              TAOS_DEF_ERROR_CODE(0, 0x050D) //"Database is dropping")
 #define TSDB_CODE_VND_IS_BALANCING             TAOS_DEF_ERROR_CODE(0, 0x050E) //"Database is balancing")
+#define TSDB_CODE_VND_IS_CLOSING               TAOS_DEF_ERROR_CODE(0, 0x0510) //"Database is closing")
 #define TSDB_CODE_VND_NOT_SYNCED               TAOS_DEF_ERROR_CODE(0, 0x0511) //"Database suspended")
 #define TSDB_CODE_VND_NO_WRITE_AUTH            TAOS_DEF_ERROR_CODE(0, 0x0512) //"Database write operation denied")
 #define TSDB_CODE_VND_IS_SYNCING               TAOS_DEF_ERROR_CODE(0, 0x0513) //"Database is syncing")
 #define TSDB_CODE_VND_INVALID_TSDB_STATE       TAOS_DEF_ERROR_CODE(0, 0x0514) //"Invalid tsdb state")
-#define TSDB_CODE_VND_IS_CLOSING               TAOS_DEF_ERROR_CODE(0, 0x0515) //"Database is closing")
 
 // tsdb
 #define TSDB_CODE_TDB_INVALID_TABLE_ID         TAOS_DEF_ERROR_CODE(0, 0x0600) //"Invalid table ID")
@@ -31,6 +31,8 @@ typedef struct {
 #define TFS_UNDECIDED_ID -1
 #define TFS_PRIMARY_LEVEL 0
 #define TFS_PRIMARY_ID 0
+#define TFS_MIN_LEVEL 0
+#define TFS_MAX_LEVEL (TSDB_MAX_TIERS - 1)
 
 // FS APIs ====================================
 typedef struct {
@@ -409,6 +409,9 @@ void tsdbDecCommitRef(int vgId);
 int tsdbSyncSend(void *pRepo, SOCKET socketFd);
 int tsdbSyncRecv(void *pRepo, SOCKET socketFd);
 
+// For TSDB Compact
+int tsdbCompact(STsdbRepo *pRepo);
+
 #ifdef __cplusplus
 }
 #endif
@@ -941,7 +941,7 @@ static int32_t mnodeRetrieveDnodes(SShowObj *pShow, char *data, int32_t rows, vo
     cols++;
 
     pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-    *(int64_t *)pWrite = 0;
+    *(int64_t *)pWrite = tsArbOnlineTimestamp;
     cols++;
 
     pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
@@ -656,8 +656,6 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void *
     dnodeReportStep("mnode-sdb", stepDesc, 0);
   }
 
-  if (qtype == TAOS_QTYPE_QUERY) return sdbPerformDeleteAction(pHead, pTable);
-
   pthread_mutex_lock(&tsSdbMgmt.mutex);
 
   if (pHead->version == 0) {
@@ -721,13 +719,11 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void *
   if (action == SDB_ACTION_INSERT) {
     return sdbPerformInsertAction(pHead, pTable);
   } else if (action == SDB_ACTION_DELETE) {
-    //if (qtype == TAOS_QTYPE_FWD) {
-      // Drop database/stable may take a long time and cause a timeout, so we confirm first then reput it into queue
-    //  sdbWriteFwdToQueue(1, hparam, TAOS_QTYPE_QUERY, unused);
-    //  return TSDB_CODE_SUCCESS;
-    //} else {
-      return sdbPerformDeleteAction(pHead, pTable);
-    //}
+    if (qtype == TAOS_QTYPE_FWD) {
+      // Drop database/stable may take a long time and cause a timeout, so we confirm first
+      syncConfirmForward(tsSdbMgmt.sync, pHead->version, TSDB_CODE_SUCCESS, false);
+    }
+    return sdbPerformDeleteAction(pHead, pTable);
   } else if (action == SDB_ACTION_UPDATE) {
     return sdbPerformUpdateAction(pHead, pTable);
   } else {
@@ -1140,7 +1136,10 @@ static void *sdbWorkerFp(void *pWorker) {
       sdbConfirmForward(1, pRow, pRow->code);
     } else {
       if (qtype == TAOS_QTYPE_FWD) {
-        syncConfirmForward(tsSdbMgmt.sync, pRow->pHead.version, pRow->code, false);
+        int32_t action = pRow->pHead.msgType % 10;
+        if (action != SDB_ACTION_DELETE) {
+          syncConfirmForward(tsSdbMgmt.sync, pRow->pHead.version, pRow->code, false);
+        }
       }
       sdbFreeFromQueue(pRow);
     }
@@ -2490,7 +2490,6 @@ static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) {
   tmp += POINTER_BYTES * pCtx->param[0].i64;
 
   size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen;
-//  assert(pCtx->param[0].i64 > 0);
 
   for (int32_t i = 0; i < pCtx->param[0].i64; ++i) {
     pTopBotInfo->res[i] = (tValuePair*) tmp;
@@ -2499,7 +2498,6 @@ static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) {
   }
 }
 
-
 bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const char *maxval) {
   SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
   if (pResInfo == NULL) {
@@ -2579,13 +2577,14 @@ static void top_function(SQLFunctionCtx *pCtx) {
 
   for (int32_t i = 0; i < pCtx->size; ++i) {
     char *data = GET_INPUT_DATA(pCtx, i);
-    TSKEY ts = GET_TS_DATA(pCtx, i);
-
     if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
       continue;
     }
 
     notNullElems++;
+
+    // NOTE: Set the default timestamp if it is missing [todo refactor]
+    TSKEY ts = (pCtx->ptsList != NULL)? GET_TS_DATA(pCtx, i):0;
     do_top_function_add(pRes, (int32_t)pCtx->param[0].i64, data, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0);
   }
 
@@ -2658,13 +2657,13 @@ static void bottom_function(SQLFunctionCtx *pCtx) {
 
   for (int32_t i = 0; i < pCtx->size; ++i) {
     char *data = GET_INPUT_DATA(pCtx, i);
-    TSKEY ts = GET_TS_DATA(pCtx, i);
-
     if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
       continue;
     }
 
     notNullElems++;
+    // NOTE: Set the default timestamp if it is missing [todo refactor]
+    TSKEY ts = (pCtx->ptsList != NULL)? GET_TS_DATA(pCtx, i):0;
     do_bottom_function_add(pRes, (int32_t)pCtx->param[0].i64, data, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0);
   }
 
@@ -2742,7 +2741,7 @@ static void top_bottom_func_finalizer(SQLFunctionCtx *pCtx) {
   if (pCtx->param[1].i64 == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
     __compar_fn_t comparator = (pCtx->param[2].i64 == TSDB_ORDER_ASC) ? resAscComparFn : resDescComparFn;
     qsort(tvp, (size_t)pResInfo->numOfRes, POINTER_BYTES, comparator);
-  } else if (pCtx->param[1].i64 > PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+  } else /*if (pCtx->param[1].i64 > PRIMARYKEY_TIMESTAMP_COL_INDEX)*/ {
     __compar_fn_t comparator = (pCtx->param[2].i64 == TSDB_ORDER_ASC) ? resDataAscComparFn : resDataDescComparFn;
     qsort(tvp, (size_t)pResInfo->numOfRes, POINTER_BYTES, comparator);
   }
@@ -950,7 +950,13 @@ static void doSetInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx,
       uint32_t status = aAggs[pCtx[i].functionId].status;
       if ((status & (TSDB_FUNCSTATE_SELECTIVITY | TSDB_FUNCSTATE_NEED_TS)) != 0) {
         SColumnInfoData* tsInfo = taosArrayGet(pBlock->pDataBlock, 0);
-        pCtx[i].ptsList = (int64_t*) tsInfo->pData;
+        // In case of the top/bottom query again the nest query result, which has no timestamp column
+        // don't set the ptsList attribute.
+        if (tsInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
+          pCtx[i].ptsList = (int64_t*) tsInfo->pData;
+        } else {
+          pCtx[i].ptsList = NULL;
+        }
       }
     } else if (TSDB_COL_IS_UD_COL(pCol->flag) && (pOperator->pRuntimeEnv->scanFlag == MERGE_STAGE)) {
       SColIndex* pColIndex = &pOperator->pExpr[i].base.colInfo;
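
This hunk and the earlier top/bottom changes follow the same rule: only hand the aggregate a timestamp pointer when the first column really is a timestamp, and fall back to a per-row default of 0 otherwise. A standalone sketch of that guard (InputBlock and top_like_step are simplified stand-ins, not the executor types):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    typedef int64_t TSKEY;

    typedef struct {
      int          firstColIsTimestamp;
      const TSKEY *tsData;   /* first column, valid only when it is a timestamp */
    } InputBlock;

    static void top_like_step(const InputBlock *blk, const int *values, int rows) {
      const TSKEY *ptsList = blk->firstColIsTimestamp ? blk->tsData : NULL;
      for (int i = 0; i < rows; ++i) {
        TSKEY ts = (ptsList != NULL) ? ptsList[i] : 0;   /* default when missing */
        printf("value=%d ts=%lld\n", values[i], (long long)ts);
      }
    }

    int main(void) {
      TSKEY ts[] = {1000, 2000, 3000};
      int   v[]  = {7, 5, 9};

      InputBlock withTs    = {1, ts};
      InputBlock withoutTs = {0, NULL};   /* e.g. the result of a nested query */

      top_like_step(&withTs, v, 3);
      top_like_step(&withoutTs, v, 3);
      return 0;
    }
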
@@ -4228,6 +4234,10 @@ static void updateTableIdInfo(STableQueryInfo* pTableQueryInfo, SSDataBlock* pBl
   int32_t step = GET_FORWARD_DIRECTION_FACTOR(order);
   pTableQueryInfo->lastKey = ((order == TSDB_ORDER_ASC)? pBlock->info.window.ekey:pBlock->info.window.skey) + step;
 
+  if (pTableQueryInfo->pTable == NULL) {
+    return;
+  }
+
   STableIdInfo tidInfo = createTableIdInfo(pTableQueryInfo);
   STableIdInfo *idinfo = taosHashGet(pTableIdInfo, &tidInfo.tid, sizeof(tidInfo.tid));
   if (idinfo != NULL) {
@@ -4905,8 +4915,7 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) {
     updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows);
 
     arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
-
-    if (pTableQueryInfo != NULL) {
+    if (pTableQueryInfo != NULL) { // TODO refactor
       updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order);
     }
 
@@ -4949,8 +4958,7 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) {
     updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows);
 
     arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
-
-    if (pTableQueryInfo != NULL) {
+    if (pTableQueryInfo != NULL) { // TODO refactor
       updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order);
     }
 
@@ -1150,7 +1150,12 @@ static void syncSetupPeerConnection(SSyncPeer *pPeer) {
     pPeer->peerFd = connFd;
     pPeer->role = TAOS_SYNC_ROLE_UNSYNCED;
     pPeer->pConn = syncAllocateTcpConn(tsTcpPool, pPeer->rid, connFd);
-    if (pPeer->isArb) tsArbOnline = 1;
+    if (pPeer->isArb) {
+      tsArbOnline = 1;
+      if (tsArbOnlineTimestamp == TSDB_ARB_DUMMY_TIME) {
+        tsArbOnlineTimestamp = taosGetTimestampMs();
+      }
+    }
   } else {
     sDebug("%s, failed to setup peer connection to server since %s, try later", pPeer->id, strerror(errno));
     taosCloseSocket(connFd);
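
The arbitrator branch above latches the online timestamp only on the first successful connection, using TSDB_ARB_DUMMY_TIME as the "never seen" sentinel. A compact model of that latch (time() scaled to milliseconds stands in for taosGetTimestampMs(); names are illustrative):

    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>

    #define DUMMY_TIME 4765104000000LL   /* sentinel: not yet online */

    static int64_t arbOnlineTimestamp = DUMMY_TIME;

    static void on_arbitrator_connected(void) {
      if (arbOnlineTimestamp == DUMMY_TIME) {
        arbOnlineTimestamp = (int64_t)time(NULL) * 1000;  /* record once, in ms */
      }
    }

    int main(void) {
      on_arbitrator_connected();
      int64_t first = arbOnlineTimestamp;
      on_arbitrator_connected();          /* later reconnects keep the first value */
      printf("%d\n", first == arbOnlineTimestamp);  /* prints 1 */
      return 0;
    }
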
@@ -6,6 +6,10 @@ AUX_SOURCE_DIRECTORY(src SRC)
 ADD_LIBRARY(tsdb ${SRC})
 TARGET_LINK_LIBRARIES(tsdb tfs common tutil)
 
+IF (TD_TSDB_PLUGINS)
+  TARGET_LINK_LIBRARIES(tsdb tsdbPlugins)
+ENDIF ()
+
 IF (TD_LINUX)
   # Someone has no gtest directory, so comment it
   # ADD_SUBDIRECTORY(tests)
@@ -29,10 +29,17 @@ typedef struct {
   int64_t size;
 } SKVRecord;
 
+#define TSDB_DEFAULT_BLOCK_ROWS(maxRows) ((maxRows)*4 / 5)
+
 void tsdbGetRtnSnap(STsdbRepo *pRepo, SRtn *pRtn);
 int  tsdbEncodeKVRecord(void **buf, SKVRecord *pRecord);
 void *tsdbDecodeKVRecord(void *buf, SKVRecord *pRecord);
 void *tsdbCommitData(STsdbRepo *pRepo);
+int  tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn);
+int  tsdbWriteBlockInfoImpl(SDFile *pHeadf, STable *pTable, SArray *pSupA, SArray *pSubA, void **ppBuf, SBlockIdx *pIdx);
+int  tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf);
+int  tsdbWriteBlockImpl(STsdbRepo *pRepo, STable *pTable, SDFile *pDFile, SDataCols *pDataCols, SBlock *pBlock,
+                        bool isLast, bool isSuper, void **ppBuf, void **ppCBuf);
 int  tsdbApplyRtn(STsdbRepo *pRepo);
 
 static FORCE_INLINE int tsdbGetFidLevel(int fid, SRtn *pRtn) {
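
TSDB_DEFAULT_BLOCK_ROWS() centralizes the 4/5-of-max-rows formula that the commit-side macro later in this diff reuses. A quick check of the formula (the 4096 value is just an illustrative configuration):

    #include <stdio.h>

    #define TSDB_DEFAULT_BLOCK_ROWS(maxRows) ((maxRows) * 4 / 5)

    int main(void) {
      int maxRowsPerFileBlock = 4096;   /* illustrative config value */
      printf("%d\n", TSDB_DEFAULT_BLOCK_ROWS(maxRowsPerFileBlock));  /* prints 3276 */
      return 0;
    }
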
@@ -16,6 +16,8 @@
 #ifndef _TD_TSDB_COMMIT_QUEUE_H_
 #define _TD_TSDB_COMMIT_QUEUE_H_
 
-int tsdbScheduleCommit(STsdbRepo *pRepo);
+typedef enum { COMMIT_REQ, COMPACT_REQ } TSDB_REQ_T;
+
+int tsdbScheduleCommit(STsdbRepo *pRepo, TSDB_REQ_T req);
 
 #endif /* _TD_TSDB_COMMIT_QUEUE_H_ */
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _TD_TSDB_COMPACT_H_
+#define _TD_TSDB_COMPACT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void *tsdbCompactImpl(STsdbRepo *pRepo);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TD_TSDB_COMPACT_H_ */
@@ -64,6 +64,8 @@ extern "C" {
 #include "tsdbReadImpl.h"
 // Commit
 #include "tsdbCommit.h"
+// Compact
+#include "tsdbCompact.h"
 // Commit Queue
 #include "tsdbCommitQueue.h"
 // Main definitions
@@ -51,7 +51,7 @@ typedef struct {
 #define TSDB_COMMIT_LAST_FILE(ch) TSDB_DFILE_IN_SET(TSDB_COMMIT_WRITE_FSET(ch), TSDB_FILE_LAST)
 #define TSDB_COMMIT_BUF(ch) TSDB_READ_BUF(&((ch)->readh))
 #define TSDB_COMMIT_COMP_BUF(ch) TSDB_READ_COMP_BUF(&((ch)->readh))
-#define TSDB_COMMIT_DEFAULT_ROWS(ch) (TSDB_COMMIT_REPO(ch)->config.maxRowsPerFileBlock * 4 / 5)
+#define TSDB_COMMIT_DEFAULT_ROWS(ch) TSDB_DEFAULT_BLOCK_ROWS(TSDB_COMMIT_REPO(ch)->config.maxRowsPerFileBlock)
 #define TSDB_COMMIT_TXN_VERSION(ch) FS_TXN_VERSION(REPO_FS(TSDB_COMMIT_REPO(ch)))
 
 static int  tsdbCommitMeta(STsdbRepo *pRepo);
@@ -72,7 +72,6 @@ static int tsdbCommitToTable(SCommitH *pCommith, int tid);
 static int  tsdbSetCommitTable(SCommitH *pCommith, STable *pTable);
 static int  tsdbComparKeyBlock(const void *arg1, const void *arg2);
 static int  tsdbWriteBlockInfo(SCommitH *pCommih);
-static int  tsdbWriteBlockIdx(SCommitH *pCommih);
 static int  tsdbCommitMemData(SCommitH *pCommith, SCommitIter *pIter, TSKEY keyLimit, bool toData);
 static int  tsdbMergeMemData(SCommitH *pCommith, SCommitIter *pIter, int bidx);
 static int  tsdbMoveBlock(SCommitH *pCommith, int bidx);
@@ -86,7 +85,6 @@ static void tsdbCloseCommitFile(SCommitH *pCommith, bool hasError);
 static bool tsdbCanAddSubBlock(SCommitH *pCommith, SBlock *pBlock, SMergeInfo *pInfo);
 static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget,
                                       TSKEY maxKey, int maxRows, int8_t update);
-static int  tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn);
 
 void *tsdbCommitData(STsdbRepo *pRepo) {
   if (pRepo->imem == NULL) {
@@ -117,6 +115,151 @@ _err:
   return NULL;
 }
 
+int tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn) {
+  SDiskID   did;
+  SDFileSet nSet;
+  STsdbFS * pfs = REPO_FS(pRepo);
+  int       level;
+
+  ASSERT(pSet->fid >= pRtn->minFid);
+
+  level = tsdbGetFidLevel(pSet->fid, pRtn);
+
+  tfsAllocDisk(level, &(did.level), &(did.id));
+  if (did.level == TFS_UNDECIDED_LEVEL) {
+    terrno = TSDB_CODE_TDB_NO_AVAIL_DISK;
+    return -1;
+  }
+
+  if (did.level > TSDB_FSET_LEVEL(pSet)) {
+    // Need to move the FSET to higher level
+    tsdbInitDFileSet(&nSet, did, REPO_ID(pRepo), pSet->fid, FS_TXN_VERSION(pfs));
+
+    if (tsdbCopyDFileSet(pSet, &nSet) < 0) {
+      tsdbError("vgId:%d failed to copy FSET %d from level %d to level %d since %s", REPO_ID(pRepo), pSet->fid,
+                TSDB_FSET_LEVEL(pSet), did.level, tstrerror(terrno));
+      return -1;
+    }
+
+    if (tsdbUpdateDFileSet(pfs, &nSet) < 0) {
+      return -1;
+    }
+
+    tsdbInfo("vgId:%d FSET %d is copied from level %d disk id %d to level %d disk id %d", REPO_ID(pRepo), pSet->fid,
+             TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet), did.level, did.id);
+  } else {
+    // On a correct level
+    if (tsdbUpdateDFileSet(pfs, pSet) < 0) {
+      return -1;
+    }
+  }
+
+  return 0;
+}
+
+int tsdbWriteBlockInfoImpl(SDFile *pHeadf, STable *pTable, SArray *pSupA, SArray *pSubA, void **ppBuf,
+                           SBlockIdx *pIdx) {
+  size_t      nSupBlocks;
+  size_t      nSubBlocks;
+  uint32_t    tlen;
+  SBlockInfo *pBlkInfo;
+  int64_t     offset;
+  SBlock *    pBlock;
+
+  memset(pIdx, 0, sizeof(*pIdx));
+
+  nSupBlocks = taosArrayGetSize(pSupA);
+  nSubBlocks = (pSubA == NULL) ? 0 : taosArrayGetSize(pSubA);
+
+  if (nSupBlocks <= 0) {
+    // No data (data all deleted)
+    return 0;
+  }
+
+  tlen = (uint32_t)(sizeof(SBlockInfo) + sizeof(SBlock) * (nSupBlocks + nSubBlocks) + sizeof(TSCKSUM));
+  if (tsdbMakeRoom(ppBuf, tlen) < 0) return -1;
+  pBlkInfo = *ppBuf;
+
+  pBlkInfo->delimiter = TSDB_FILE_DELIMITER;
+  pBlkInfo->tid = TABLE_TID(pTable);
+  pBlkInfo->uid = TABLE_UID(pTable);
+
+  memcpy((void *)(pBlkInfo->blocks), taosArrayGet(pSupA, 0), nSupBlocks * sizeof(SBlock));
+  if (nSubBlocks > 0) {
+    memcpy((void *)(pBlkInfo->blocks + nSupBlocks), taosArrayGet(pSubA, 0), nSubBlocks * sizeof(SBlock));
+
+    for (int i = 0; i < nSupBlocks; i++) {
+      pBlock = pBlkInfo->blocks + i;
+
+      if (pBlock->numOfSubBlocks > 1) {
+        pBlock->offset += (sizeof(SBlockInfo) + sizeof(SBlock) * nSupBlocks);
+      }
+    }
+  }
+
+  taosCalcChecksumAppend(0, (uint8_t *)pBlkInfo, tlen);
+
+  if (tsdbAppendDFile(pHeadf, (void *)pBlkInfo, tlen, &offset) < 0) {
+    return -1;
+  }
+
+  tsdbUpdateDFileMagic(pHeadf, POINTER_SHIFT(pBlkInfo, tlen - sizeof(TSCKSUM)));
+
+  // Set pIdx
+  pBlock = taosArrayGetLast(pSupA);
+
+  pIdx->tid = TABLE_TID(pTable);
+  pIdx->uid = TABLE_UID(pTable);
+  pIdx->hasLast = pBlock->last ? 1 : 0;
+  pIdx->maxKey = pBlock->keyLast;
+  pIdx->numOfBlocks = (uint32_t)nSupBlocks;
+  pIdx->len = tlen;
+  pIdx->offset = (uint32_t)offset;
+
+  return 0;
+}
+
+int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf) {
+  SBlockIdx *pBlkIdx;
+  size_t     nidx = taosArrayGetSize(pIdxA);
+  int        tlen = 0, size;
+  int64_t    offset;
+
+  if (nidx <= 0) {
+    // All data are deleted
+    pHeadf->info.offset = 0;
+    pHeadf->info.len = 0;
+    return 0;
+  }
+
+  for (size_t i = 0; i < nidx; i++) {
+    pBlkIdx = (SBlockIdx *)taosArrayGet(pIdxA, i);
+
+    size = tsdbEncodeSBlockIdx(NULL, pBlkIdx);
+    if (tsdbMakeRoom(ppBuf, tlen + size) < 0) return -1;
+
+    void *ptr = POINTER_SHIFT(*ppBuf, tlen);
+    tsdbEncodeSBlockIdx(&ptr, pBlkIdx);
+
+    tlen += size;
+  }
+
+  tlen += sizeof(TSCKSUM);
+  if (tsdbMakeRoom(ppBuf, tlen) < 0) return -1;
+  taosCalcChecksumAppend(0, (uint8_t *)(*ppBuf), tlen);
+
+  if (tsdbAppendDFile(pHeadf, *ppBuf, tlen, &offset) < tlen) {
+    return -1;
+  }
+
+  tsdbUpdateDFileMagic(pHeadf, POINTER_SHIFT(*ppBuf, tlen - sizeof(TSCKSUM)));
+  pHeadf->info.offset = (uint32_t)offset;
+  pHeadf->info.len = tlen;
+
+  return 0;
+}
+
+
 // =================== Commit Meta Data
 static int tsdbCommitMeta(STsdbRepo *pRepo) {
   STsdbFS * pfs = REPO_FS(pRepo);
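Note: these Impl helpers are callable outside the commit handle (for example by the compaction plugin). A hedged sketch of how a caller might chain them; the helper name and its arguments are hypothetical, but the call pattern mirrors how tsdbWriteBlockInfo and tsdbCommitToFile use them below:

// Hypothetical helper: append one table's block-info section to the head file
// and remember its SBlockIdx for the final index section.
static int writeTableBlockInfo(SDFile *headf, STable *pTable, SArray *supA, SArray *subA,
                               SArray *idxA, void **buf) {
  SBlockIdx idx;

  if (tsdbWriteBlockInfoImpl(headf, pTable, supA, subA, buf, &idx) < 0) return -1;
  if (idx.numOfBlocks == 0) return 0;  // all data of this table were deleted

  return (taosArrayPush(idxA, &idx) == NULL) ? -1 : 0;
}

// After all tables have been written, the collected indices are flushed once:
//   tsdbWriteBlockIdx(headf, idxA, buf);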
@@ -446,7 +589,8 @@ static int tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) {
     }
   }
 
-  if (tsdbWriteBlockIdx(pCommith) < 0) {
+  if (tsdbWriteBlockIdx(TSDB_COMMIT_HEAD_FILE(pCommith), pCommith->aBlkIdx, (void **)(&(TSDB_COMMIT_BUF(pCommith)))) <
+      0) {
     tsdbError("vgId:%d failed to write SBlockIdx part to FSET %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
     tsdbCloseCommitFile(pCommith, true);
     // revert the file change
@@ -754,23 +898,21 @@ static int tsdbComparKeyBlock(const void *arg1, const void *arg2) {
   }
 }
 
-static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCols, SBlock *pBlock, bool isLast,
-                          bool isSuper) {
-  STsdbRepo * pRepo = TSDB_COMMIT_REPO(pCommith);
+int tsdbWriteBlockImpl(STsdbRepo *pRepo, STable *pTable, SDFile *pDFile, SDataCols *pDataCols, SBlock *pBlock,
+                       bool isLast, bool isSuper, void **ppBuf, void **ppCBuf) {
   STsdbCfg *  pCfg = REPO_CFG(pRepo);
   SBlockData *pBlockData;
   int64_t     offset = 0;
-  STable *    pTable = TSDB_COMMIT_TABLE(pCommith);
   int         rowsToWrite = pDataCols->numOfRows;
 
   ASSERT(rowsToWrite > 0 && rowsToWrite <= pCfg->maxRowsPerFileBlock);
   ASSERT((!isLast) || rowsToWrite < pCfg->minRowsPerFileBlock);
 
   // Make buffer space
-  if (tsdbMakeRoom((void **)(&TSDB_COMMIT_BUF(pCommith)), TSDB_BLOCK_STATIS_SIZE(pDataCols->numOfCols)) < 0) {
+  if (tsdbMakeRoom(ppBuf, TSDB_BLOCK_STATIS_SIZE(pDataCols->numOfCols)) < 0) {
     return -1;
   }
-  pBlockData = (SBlockData *)TSDB_COMMIT_BUF(pCommith);
+  pBlockData = (SBlockData *)(*ppBuf);
 
   // Get # of cols not all NULL(not including key column)
   int nColsNotAllNull = 0;
@@ -816,23 +958,23 @@ static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCo
     void * tptr;
 
     // Make room
-    if (tsdbMakeRoom((void **)(&TSDB_COMMIT_BUF(pCommith)), lsize + tlen + COMP_OVERFLOW_BYTES + sizeof(TSCKSUM)) < 0) {
+    if (tsdbMakeRoom(ppBuf, lsize + tlen + COMP_OVERFLOW_BYTES + sizeof(TSCKSUM)) < 0) {
       return -1;
     }
-    pBlockData = (SBlockData *)TSDB_COMMIT_BUF(pCommith);
+    pBlockData = (SBlockData *)(*ppBuf);
     pBlockCol = pBlockData->cols + tcol;
     tptr = POINTER_SHIFT(pBlockData, lsize);
 
     if (pCfg->compression == TWO_STAGE_COMP &&
-        tsdbMakeRoom((void **)(&TSDB_COMMIT_COMP_BUF(pCommith)), tlen + COMP_OVERFLOW_BYTES) < 0) {
+        tsdbMakeRoom(ppCBuf, tlen + COMP_OVERFLOW_BYTES) < 0) {
      return -1;
    }
 
    // Compress or just copy
    if (pCfg->compression) {
      flen = (*(tDataTypes[pDataCol->type].compFunc))((char *)pDataCol->pData, tlen, rowsToWrite, tptr,
-                                                      tlen + COMP_OVERFLOW_BYTES, pCfg->compression,
-                                                      TSDB_COMMIT_COMP_BUF(pCommith), tlen + COMP_OVERFLOW_BYTES);
+                                                      tlen + COMP_OVERFLOW_BYTES, pCfg->compression, *ppCBuf,
+                                                      tlen + COMP_OVERFLOW_BYTES);
    } else {
      flen = tlen;
      memcpy(tptr, pDataCol->pData, flen);
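Note: passing ppBuf/ppCBuf explicitly lets callers outside the commit path (e.g. the compaction plugin) drive tsdbWriteBlockImpl with scratch buffers they own instead of TSDB_COMMIT_BUF()/TSDB_COMMIT_COMP_BUF(). A hedged sketch; the wrapper name and the isLast/isSuper combination are illustrative only:

// Hypothetical caller that owns its own growable buffers.
static int writeOneBlock(STsdbRepo *pRepo, STable *pTable, SDFile *pDFile, SDataCols *pDataCols,
                         SBlock *pBlock, void **ppBuf, void **ppCBuf) {
  // isLast=false, isSuper=true is just an example combination.
  return tsdbWriteBlockImpl(pRepo, pTable, pDFile, pDataCols, pBlock, false, true, ppBuf, ppCBuf);
}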
@@ -888,68 +1030,27 @@ static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCo
   return 0;
 }
 
+static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCols, SBlock *pBlock, bool isLast,
+                          bool isSuper) {
+  return tsdbWriteBlockImpl(TSDB_COMMIT_REPO(pCommith), TSDB_COMMIT_TABLE(pCommith), pDFile, pDataCols, pBlock, isLast,
+                            isSuper, (void **)(&(TSDB_COMMIT_BUF(pCommith))),
+                            (void **)(&(TSDB_COMMIT_COMP_BUF(pCommith))));
+}
+
+
 static int tsdbWriteBlockInfo(SCommitH *pCommih) {
   SDFile *  pHeadf = TSDB_COMMIT_HEAD_FILE(pCommih);
   SBlockIdx blkIdx;
   STable *  pTable = TSDB_COMMIT_TABLE(pCommih);
-  SBlock *    pBlock;
-  size_t      nSupBlocks;
-  size_t      nSubBlocks;
-  uint32_t    tlen;
-  SBlockInfo *pBlkInfo;
-  int64_t     offset;
 
-  nSupBlocks = taosArrayGetSize(pCommih->aSupBlk);
-  nSubBlocks = taosArrayGetSize(pCommih->aSubBlk);
-
-  if (nSupBlocks <= 0) {
-    // No data (data all deleted)
-    return 0;
-  }
-
-  tlen = (uint32_t)(sizeof(SBlockInfo) + sizeof(SBlock) * (nSupBlocks + nSubBlocks) + sizeof(TSCKSUM));
-
-  // Write SBlockInfo part
-  if (tsdbMakeRoom((void **)(&(TSDB_COMMIT_BUF(pCommih))), tlen) < 0) return -1;
-  pBlkInfo = TSDB_COMMIT_BUF(pCommih);
-
-  pBlkInfo->delimiter = TSDB_FILE_DELIMITER;
-  pBlkInfo->tid = TABLE_TID(pTable);
-  pBlkInfo->uid = TABLE_UID(pTable);
-
-  memcpy((void *)(pBlkInfo->blocks), taosArrayGet(pCommih->aSupBlk, 0), nSupBlocks * sizeof(SBlock));
-  if (nSubBlocks > 0) {
-    memcpy((void *)(pBlkInfo->blocks + nSupBlocks), taosArrayGet(pCommih->aSubBlk, 0), nSubBlocks * sizeof(SBlock));
-
-    for (int i = 0; i < nSupBlocks; i++) {
-      pBlock = pBlkInfo->blocks + i;
-
-      if (pBlock->numOfSubBlocks > 1) {
-        pBlock->offset += (sizeof(SBlockInfo) + sizeof(SBlock) * nSupBlocks);
-      }
-    }
-  }
-
-  taosCalcChecksumAppend(0, (uint8_t *)pBlkInfo, tlen);
-
-  if (tsdbAppendDFile(pHeadf, TSDB_COMMIT_BUF(pCommih), tlen, &offset) < 0) {
+  if (tsdbWriteBlockInfoImpl(pHeadf, pTable, pCommih->aSupBlk, pCommih->aSubBlk, (void **)(&(TSDB_COMMIT_BUF(pCommih))),
+                             &blkIdx) < 0) {
     return -1;
   }
 
-  tsdbUpdateDFileMagic(pHeadf, POINTER_SHIFT(pBlkInfo, tlen - sizeof(TSCKSUM)));
-
-  // Set blkIdx
-  pBlock = taosArrayGet(pCommih->aSupBlk, nSupBlocks - 1);
-
-  blkIdx.tid = TABLE_TID(pTable);
-  blkIdx.uid = TABLE_UID(pTable);
-  blkIdx.hasLast = pBlock->last ? 1 : 0;
-  blkIdx.maxKey = pBlock->keyLast;
-  blkIdx.numOfBlocks = (uint32_t)nSupBlocks;
-  blkIdx.len = tlen;
-  blkIdx.offset = (uint32_t)offset;
-
-  ASSERT(blkIdx.numOfBlocks > 0);
+  if (blkIdx.numOfBlocks == 0) {
+    return 0;
+  }
 
   if (taosArrayPush(pCommih->aBlkIdx, (void *)(&blkIdx)) == NULL) {
     terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
@@ -959,49 +1060,6 @@ static int tsdbWriteBlockInfo(SCommitH *pCommih) {
   return 0;
 }
 
-static int tsdbWriteBlockIdx(SCommitH *pCommih) {
-  SBlockIdx *pBlkIdx = NULL;
-  SDFile *   pHeadf = TSDB_COMMIT_HEAD_FILE(pCommih);
-  size_t     nidx = taosArrayGetSize(pCommih->aBlkIdx);
-  int        tlen = 0, size = 0;
-  int64_t    offset = 0;
-
-  if (nidx <= 0) {
-    // All data are deleted
-    pHeadf->info.offset = 0;
-    pHeadf->info.len = 0;
-    return 0;
-  }
-
-  for (size_t i = 0; i < nidx; i++) {
-    pBlkIdx = (SBlockIdx *)taosArrayGet(pCommih->aBlkIdx, i);
-
-    size = tsdbEncodeSBlockIdx(NULL, pBlkIdx);
-    if (tsdbMakeRoom((void **)(&TSDB_COMMIT_BUF(pCommih)), tlen + size) < 0) return -1;
-
-    void *ptr = POINTER_SHIFT(TSDB_COMMIT_BUF(pCommih), tlen);
-    tsdbEncodeSBlockIdx(&ptr, pBlkIdx);
-
-    tlen += size;
-  }
-
-  tlen += sizeof(TSCKSUM);
-  if (tsdbMakeRoom((void **)(&TSDB_COMMIT_BUF(pCommih)), tlen) < 0) return -1;
-  taosCalcChecksumAppend(0, (uint8_t *)TSDB_COMMIT_BUF(pCommih), tlen);
-
-  if (tsdbAppendDFile(pHeadf, TSDB_COMMIT_BUF(pCommih), tlen, &offset) < tlen) {
-    tsdbError("vgId:%d failed to write block index part to file %s since %s", TSDB_COMMIT_REPO_ID(pCommih),
-              TSDB_FILE_FULL_NAME(pHeadf), tstrerror(terrno));
-    return -1;
-  }
-
-  tsdbUpdateDFileMagic(pHeadf, POINTER_SHIFT(TSDB_COMMIT_BUF(pCommih), tlen - sizeof(TSCKSUM)));
-  pHeadf->info.offset = (uint32_t)offset;
-  pHeadf->info.len = tlen;
-
-  return 0;
-}
-
 static int tsdbCommitMemData(SCommitH *pCommith, SCommitIter *pIter, TSKEY keyLimit, bool toData) {
   STsdbRepo *pRepo = TSDB_COMMIT_REPO(pCommith);
   STsdbCfg * pCfg = REPO_CFG(pRepo);
@@ -1454,45 +1512,3 @@ int tsdbApplyRtn(STsdbRepo *pRepo) {
 
   return 0;
 }
-
-static int tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn) {
-  SDiskID   did;
-  SDFileSet nSet;
-  STsdbFS * pfs = REPO_FS(pRepo);
-  int       level;
-
-  ASSERT(pSet->fid >= pRtn->minFid);
-
-  level = tsdbGetFidLevel(pSet->fid, pRtn);
-
-  tfsAllocDisk(level, &(did.level), &(did.id));
-  if (did.level == TFS_UNDECIDED_LEVEL) {
-    terrno = TSDB_CODE_TDB_NO_AVAIL_DISK;
-    return -1;
-  }
-
-  if (did.level > TSDB_FSET_LEVEL(pSet)) {
-    // Need to move the FSET to higher level
-    tsdbInitDFileSet(&nSet, did, REPO_ID(pRepo), pSet->fid, FS_TXN_VERSION(pfs));
-
-    if (tsdbCopyDFileSet(pSet, &nSet) < 0) {
-      tsdbError("vgId:%d failed to copy FSET %d from level %d to level %d since %s", REPO_ID(pRepo), pSet->fid,
-                TSDB_FSET_LEVEL(pSet), did.level, tstrerror(terrno));
-      return -1;
-    }
-
-    if (tsdbUpdateDFileSet(pfs, &nSet) < 0) {
-      return -1;
-    }
-
-    tsdbInfo("vgId:%d FSET %d is copied from level %d disk id %d to level %d disk id %d", REPO_ID(pRepo), pSet->fid,
-             TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet), did.level, did.id);
-  } else {
-    // On a correct level
-    if (tsdbUpdateDFileSet(pfs, pSet) < 0) {
-      return -1;
-    }
-  }
-
-  return 0;
-}
@@ -26,8 +26,9 @@ typedef struct {
 } SCommitQueue;
 
 typedef struct {
+  TSDB_REQ_T req;
   STsdbRepo *pRepo;
-} SCommitReq;
+} SReq;
 
 static void *tsdbLoopCommit(void *arg);
 
@@ -90,16 +91,17 @@ void tsdbDestroyCommitQueue() {
   pthread_mutex_destroy(&(pQueue->lock));
 }
 
-int tsdbScheduleCommit(STsdbRepo *pRepo) {
+int tsdbScheduleCommit(STsdbRepo *pRepo, TSDB_REQ_T req) {
   SCommitQueue *pQueue = &tsCommitQueue;
 
-  SListNode *pNode = (SListNode *)calloc(1, sizeof(SListNode) + sizeof(SCommitReq));
+  SListNode *pNode = (SListNode *)calloc(1, sizeof(SListNode) + sizeof(SReq));
   if (pNode == NULL) {
     terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
     return -1;
   }
 
-  ((SCommitReq *)pNode->data)->pRepo = pRepo;
+  ((SReq *)pNode->data)->req = req;
+  ((SReq *)pNode->data)->pRepo = pRepo;
 
   pthread_mutex_lock(&(pQueue->lock));
 
@@ -154,6 +156,7 @@ static void *tsdbLoopCommit(void *arg) {
   SCommitQueue *pQueue = &tsCommitQueue;
   SListNode *   pNode = NULL;
   STsdbRepo *   pRepo = NULL;
+  TSDB_REQ_T    req;
 
   while (true) {
     pthread_mutex_lock(&(pQueue->lock));
@@ -174,14 +177,22 @@ static void *tsdbLoopCommit(void *arg) {
 
     pthread_mutex_unlock(&(pQueue->lock));
 
-    pRepo = ((SCommitReq *)pNode->data)->pRepo;
+    req = ((SReq *)pNode->data)->req;
+    pRepo = ((SReq *)pNode->data)->pRepo;
 
     // check if need to apply new config
     if (pRepo->config_changed) {
      tsdbApplyRepoConfig(pRepo);
    }
 
-    tsdbCommitData(pRepo);
+    if (req == COMMIT_REQ) {
+      tsdbCommitData(pRepo);
+    } else if (req == COMPACT_REQ) {
+      tsdbCompactImpl(pRepo);
+    } else {
+      ASSERT(0);
+    }
 
     listNodeFree(pNode);
   }
 
@@ -11,4 +11,12 @@
  *
  * You should have received a copy of the GNU Affero General Public License
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
+#include "tsdb.h"
+
+#ifndef _TSDB_PLUGINS
+
+int   tsdbCompact(STsdbRepo *pRepo) { return 0; }
+void *tsdbCompactImpl(STsdbRepo *pRepo) { return NULL; }
+
+#endif
@@ -288,7 +288,7 @@ int tsdbAsyncCommit(STsdbRepo *pRepo) {
   if (tsdbLockRepo(pRepo) < 0) return -1;
   pRepo->imem = pRepo->mem;
   pRepo->mem = NULL;
-  tsdbScheduleCommit(pRepo);
+  tsdbScheduleCommit(pRepo, COMMIT_REQ);
   if (tsdbUnlockRepo(pRepo) < 0) return -1;
 
   return 0;
@@ -68,7 +68,7 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) {
               TABLE_CHAR_NAME(pMeta->tables[tid]), TABLE_TID(pMeta->tables[tid]), TABLE_UID(pMeta->tables[tid]));
       return 0;
     } else {
-      tsdbError("vgId:%d table %s at tid %d uid %" PRIu64
+      tsdbInfo("vgId:%d table %s at tid %d uid %" PRIu64
                 " exists, replace it with new table, this can be not reasonable",
                 REPO_ID(pRepo), TABLE_CHAR_NAME(pMeta->tables[tid]), TABLE_TID(pMeta->tables[tid]),
                 TABLE_UID(pMeta->tables[tid]));
@@ -1055,10 +1055,7 @@ static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable) {
     STable *pSTable = pTable->pSuper;
     ASSERT(pSTable != NULL);
 
-    STSchema *pSchema = tsdbGetTableTagSchema(pTable);
-    STColumn *pCol = schemaColAt(pSchema, DEFAULT_TAG_INDEX_COLUMN);
-
-    char *  key = tdGetKVRowValOfCol(pTable->tagVal, pCol->colId);
+    char *  key = getTagIndexKey(pTable);
     SArray *res = tSkipListGet(pSTable->pIndex, key);
 
     size_t size = taosArrayGetSize(res);
@@ -368,40 +368,39 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
     goto out_of_memory;
   }
 
-  assert(pCond != NULL && pCond->numOfCols > 0 && pMemRef != NULL);
+  assert(pCond != NULL && pMemRef != NULL);
   if (ASCENDING_TRAVERSE(pCond->order)) {
     assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey);
   } else {
     assert(pQueryHandle->window.skey >= pQueryHandle->window.ekey);
   }
-
-  // allocate buffer in order to load data blocks from file
-  pQueryHandle->statis = calloc(pCond->numOfCols, sizeof(SDataStatis));
-  if (pQueryHandle->statis == NULL) {
-    goto out_of_memory;
-  }
-
-  pQueryHandle->pColumns = taosArrayInit(pCond->numOfCols, sizeof(SColumnInfoData));  // todo: use list instead of array?
-  if (pQueryHandle->pColumns == NULL) {
-    goto out_of_memory;
-  }
-
-  for (int32_t i = 0; i < pCond->numOfCols; ++i) {
-    SColumnInfoData colInfo = {{0}, 0};
-
-    colInfo.info = pCond->colList[i];
-    colInfo.pData = calloc(1, EXTRA_BYTES + pQueryHandle->outputCapacity * pCond->colList[i].bytes);
-    if (colInfo.pData == NULL) {
-      goto out_of_memory;
-    }
-    taosArrayPush(pQueryHandle->pColumns, &colInfo);
-    pQueryHandle->statis[i].colId = colInfo.info.colId;
-  }
-
   if (pCond->numOfCols > 0) {
+    // allocate buffer in order to load data blocks from file
+    pQueryHandle->statis = calloc(pCond->numOfCols, sizeof(SDataStatis));
+    if (pQueryHandle->statis == NULL) {
+      goto out_of_memory;
+    }
+
+    pQueryHandle->pColumns =
+        taosArrayInit(pCond->numOfCols, sizeof(SColumnInfoData));  // todo: use list instead of array?
+    if (pQueryHandle->pColumns == NULL) {
+      goto out_of_memory;
+    }
+
+    for (int32_t i = 0; i < pCond->numOfCols; ++i) {
+      SColumnInfoData colInfo = {{0}, 0};
+
+      colInfo.info = pCond->colList[i];
+      colInfo.pData = calloc(1, EXTRA_BYTES + pQueryHandle->outputCapacity * pCond->colList[i].bytes);
+      if (colInfo.pData == NULL) {
+        goto out_of_memory;
+      }
+      taosArrayPush(pQueryHandle->pColumns, &colInfo);
+      pQueryHandle->statis[i].colId = colInfo.info.colId;
+    }
+
     pQueryHandle->defaultLoadColumn = getDefaultLoadColumns(pQueryHandle, true);
   }
 
   STsdbMeta* pMeta = tsdbGetMeta(tsdb);
   assert(pMeta != NULL);
 
@@ -258,7 +258,7 @@ int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) {
   for (int i = 1; i < pBlock->numOfSubBlocks; i++) {
     iBlock++;
     if (tsdbLoadBlockDataImpl(pReadh, iBlock, pReadh->pDCols[1]) < 0) return -1;
-    if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows) < 0) return -1;
+    if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL) < 0) return -1;
   }
 
   ASSERT(pReadh->pDCols[0]->numOfRows == pBlock->numOfRows);
@@ -284,7 +284,7 @@ int tsdbLoadBlockDataCols(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo,
   for (int i = 1; i < pBlock->numOfSubBlocks; i++) {
     iBlock++;
     if (tsdbLoadBlockDataColsImpl(pReadh, iBlock, pReadh->pDCols[1], colIds, numOfColsIds) < 0) return -1;
-    if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows) < 0) return -1;
+    if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL) < 0) return -1;
   }
 
   ASSERT(pReadh->pDCols[0]->numOfRows == pBlock->numOfRows);
@@ -183,6 +183,7 @@ void taosCleanupKeywordsTable();
 
 SStrToken tscReplaceStrToken(char **str, SStrToken *token, const char* newToken);
 
+SStrToken taosTokenDup(SStrToken* pToken, char* buf, int32_t len);
 
 #ifdef __cplusplus
 }
@@ -224,6 +224,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL, "Database memory is fu
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FLOWCTRL, "Database memory is full for waiting commit")
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_DROPPING, "Database is dropping")
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_BALANCING, "Database is balancing")
+TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_CLOSING, "Database is closing")
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, "Database suspended")
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, "Database write operation denied")
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_SYNCING, "Database is syncing")
@@ -674,3 +674,15 @@ void taosCleanupKeywordsTable() {
     taosHashCleanup(m);
   }
 }
+
+SStrToken taosTokenDup(SStrToken* pToken, char* buf, int32_t len) {
+  assert(pToken != NULL && buf != NULL);
+  SStrToken token = *pToken;
+  token.z = buf;
+
+  assert(len > token.n);
+  strncpy(token.z, pToken->z, pToken->n);
+  token.z[token.n] = 0;
+
+  return token;
+}
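Note: taosTokenDup copies a token into a caller-supplied buffer and NUL-terminates it, which is useful when the source SQL string is about to be modified. A minimal usage sketch; 'token' is assumed to be an existing SStrToken in scope:

// Duplicate a token into a local buffer so it stays valid after the original SQL buffer is rewritten.
char      buf[128];                      // any buffer with len > token.n works
SStrToken copy = taosTokenDup(&token, buf, sizeof(buf));
// copy.z now points at buf and is NUL-terminated, independent of the original string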
@@ -91,18 +91,18 @@ static void vnodeIncRef(void *ptNode) {
 }
 
 void *vnodeAcquire(int32_t vgId) {
-  SVnodeObj **ppVnode = NULL;
+  SVnodeObj *pVnode = NULL;
   if (tsVnodesHash != NULL) {
-    ppVnode = taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, NULL, sizeof(void *));
+    taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, &pVnode, sizeof(void *));
   }
 
-  if (ppVnode == NULL || *ppVnode == NULL) {
+  if (pVnode == NULL) {
     terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
     vDebug("vgId:%d, not exist", vgId);
     return NULL;
   }
 
-  return *ppVnode;
+  return pVnode;
 }
 
 void vnodeRelease(void *vparam) {
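Note: vnodeAcquire now returns the cloned pointer directly (taosHashGetClone fills an output argument) and still bumps the reference count via vnodeIncRef, so every successful acquire must be paired with vnodeRelease. A minimal usage sketch; vgId is assumed to be in scope:

// Typical acquire/use/release pattern around a vgroup id.
SVnodeObj *pVnode = vnodeAcquire(vgId);
if (pVnode == NULL) {
  return terrno;  // TSDB_CODE_VND_INVALID_VGROUP_ID
}

// ... use pVnode ...

vnodeRelease(pVnode);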
@@ -303,6 +303,17 @@ static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) {
 }
 
 int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rparam) {
+  SVnodeObj *pVnode = vparam;
+  if (qtype == TAOS_QTYPE_RPC) {
+    if (!vnodeInReadyStatus(pVnode)) {
+      return TSDB_CODE_APP_NOT_READY;  // it may be in deleting or closing state
+    }
+
+    if (pVnode->role != TAOS_SYNC_ROLE_MASTER) {
+      return TSDB_CODE_APP_NOT_READY;
+    }
+  }
+
   SVWriteMsg *pWrite = vnodeBuildVWriteMsg(vparam, wparam, qtype, rparam);
   if (pWrite == NULL) {
     assert(terrno != 0);
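Note: the new guard rejects RPC writes while the vnode is not ready or is not the master. A hedged sketch of the caller side, assuming the return code is surfaced to the client so it can redirect or retry; the surrounding variables are hypothetical:

// Hypothetical caller: forward the write and treat "not ready" as a retriable condition.
int32_t code = vnodeWriteToWQueue(pVnode, pHead, TAOS_QTYPE_RPC, pRpcMsg);
if (code == TSDB_CODE_APP_NOT_READY) {
  // vnode is closing/deleting or is a slave; the client should redirect/retry
}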
@@ -0,0 +1,109 @@
+###################################################################
+#           Copyright (c) 2016 by TAOS Technologies, Inc.
+#                     All rights reserved.
+#
+#  This file is proprietary and confidential to TAOS Technologies.
+#  No part of this file may be reproduced, stored, transmitted,
+#  disclosed or used in any form or by any means other than as
+#  expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import tdDnodes
+from datetime import datetime
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root) - len("/build/bin")]
+                    break
+        return buildPath
+
+    def run(self):
+        tdSql.prepare()
+        tdSql.query('show databases')
+        tdSql.checkData(0,15,0)
+        buildPath = self.getBuildPath()
+        if (buildPath == ""):
+            tdLog.exit("taosd not found!")
+        else:
+            tdLog.info("taosd found in %s" % buildPath)
+        binPath = buildPath + "/build/bin/"
+
+        #write 5M rows into db, then restart to force the data move into disk.
+        #create 500 tables
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert_5M_rows.json -y " % binPath)
+        tdDnodes.stop(1)
+        tdDnodes.start(1)
+        tdSql.execute('use db')
+
+        #prepare to query 500 tables last_row()
+        tableName = []
+        for i in range(500):
+            tableName.append(f"stb_{i}")
+        tdSql.execute('use db')
+        lastRow_Off_start = datetime.now()
+
+        slow = 0 #count time where lastRow on is slower
+        for i in range(5):
+            #switch lastRow to off and check
+            tdSql.execute('alter database db cachelast 0')
+            tdSql.query('show databases')
+            tdSql.checkData(0,15,0)
+
+            #run last_row(*) query 500 times
+            for i in range(500):
+                tdSql.execute(f'SELECT LAST_ROW(*) FROM {tableName[i]}')
+            lastRow_Off_end = datetime.now()
+
+            tdLog.debug(f'time used:{lastRow_Off_end-lastRow_Off_start}')
+
+            #switch lastRow to on and check
+            tdSql.execute('alter database db cachelast 1')
+            tdSql.query('show databases')
+            tdSql.checkData(0,15,1)
+
+            #run last_row(*) query 500 times
+            tdSql.execute('use db')
+            lastRow_On_start = datetime.now()
+            for i in range(500):
+                tdSql.execute(f'SELECT LAST_ROW(*) FROM {tableName[i]}')
+            lastRow_On_end = datetime.now()
+
+            tdLog.debug(f'time used:{lastRow_On_end-lastRow_On_start}')
+
+            #check which one used more time
+            if (lastRow_Off_end-lastRow_Off_start > lastRow_On_end-lastRow_On_start):
+                pass
+            else:
+                slow += 1
+            tdLog.debug(slow)
+        if slow > 1: #tolerance for the first time
+            tdLog.exit('lastRow hot alter failed')
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
@@ -37,6 +37,7 @@ import requests
 import gc
 import taos
 
+
 from .shared.types import TdColumns, TdTags
 
 # from crash_gen import ServiceManager, TdeInstance, TdeSubProcess
@@ -160,6 +161,7 @@ class WorkerThread:
                 Logging.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...")
                 break
 
+
             # Before we fetch the task and run it, let's ensure we properly "use" the database (not needed any more)
             try:
                 if (Config.getConfig().per_thread_db_connection):  # most likely TRUE
@@ -1362,9 +1364,12 @@ class Task():
                 Progress.emit(Progress.ACCEPTABLE_ERROR)
                 self._err = err
             else: # not an acceptable error
-                errMsg = "[=] Unexpected Taos library exception ({}): errno=0x{:X}, msg: {}, SQL: {}".format(
+                shortTid = threading.get_ident() % 10000
+                errMsg = "[=] Unexpected Taos library exception ({}): errno=0x{:X}, thread={}, msg: {}, SQL: {}".format(
                     self.__class__.__name__,
-                    errno2, err, wt.getDbConn().getLastSql())
+                    errno2,
+                    shortTid,
+                    err, wt.getDbConn().getLastSql())
                 self.logDebug(errMsg)
                 if Config.getConfig().debug:
                     # raise # so that we see full stack
@@ -1411,21 +1416,31 @@ class Task():
 
     def lockTable(self, ftName): # full table name
         # print(" <<" + ftName + '_', end="", flush=True)
-        with Task._lock:
-            if not ftName in Task._tableLocks:
+        with Task._lock: # SHORT lock! so we only protect lock creation
+            if not ftName in Task._tableLocks: # Create new lock and add to list, if needed
                 Task._tableLocks[ftName] = threading.Lock()
 
-        Task._tableLocks[ftName].acquire()
+        # No lock protection, anybody can do this any time
+        lock = Task._tableLocks[ftName]
+        # Logging.info("Acquiring lock: {}, {}".format(ftName, lock))
+        lock.acquire()
+        # Logging.info("Acquiring lock successful: {}".format(lock))
 
     def unlockTable(self, ftName):
         # print('_' + ftName + ">> ", end="", flush=True)
         with Task._lock:
             if not ftName in self._tableLocks:
                 raise RuntimeError("Corrupt state, no such lock")
             lock = Task._tableLocks[ftName]
             if not lock.locked():
                 raise RuntimeError("Corrupte state, already unlocked")
-            lock.release()
+
+            # Important note, we want to protect unlocking under the task level
+            # locking, because we don't want the lock to be deleted (maybe in the futur)
+            # while we unlock it
+            # Logging.info("Releasing lock: {}".format(lock))
+            lock.release()
+            # Logging.info("Releasing lock successful: {}".format(lock))
 
 
 class ExecutionStats:
|
||||||
return dbc.query("SELECT * FROM {}.{}".format(self._dbName, self._stName)) > 0
|
return dbc.query("SELECT * FROM {}.{}".format(self._dbName, self._stName)) > 0
|
||||||
|
|
||||||
def ensureRegTable(self, task: Optional[Task], dbc: DbConn, regTableName: str):
|
def ensureRegTable(self, task: Optional[Task], dbc: DbConn, regTableName: str):
|
||||||
|
'''
|
||||||
|
Make sure a regular table exists for this super table, creating it if necessary.
|
||||||
|
If there is an associated "Task" that wants to do this, "lock" this table so that
|
||||||
|
others don't access it while we create it.
|
||||||
|
'''
|
||||||
dbName = self._dbName
|
dbName = self._dbName
|
||||||
sql = "select tbname from {}.{} where tbname in ('{}')".format(dbName, self._stName, regTableName)
|
sql = "select tbname from {}.{} where tbname in ('{}')".format(dbName, self._stName, regTableName)
|
||||||
if dbc.query(sql) >= 1 : # reg table exists already
|
if dbc.query(sql) >= 1 : # reg table exists already
|
||||||
|
@@ -1703,18 +1723,24 @@ class TdSuperTable:
 
         # acquire a lock first, so as to be able to *verify*. More details in TD-1471
         fullTableName = dbName + '.' + regTableName
-        if task is not None:  # TODO: what happens if we don't lock the table
-            task.lockTable(fullTableName)
+        if task is not None:  # Somethime thie operation is requested on behalf of a "task"
+            # Logging.info("Locking table for creation: {}".format(fullTableName))
+            task.lockTable(fullTableName) # in which case we'll lock this table to ensure serialized access
+            # Logging.info("Table locked for creation".format(fullTableName))
         Progress.emit(Progress.CREATE_TABLE_ATTEMPT) # ATTEMPT to create a new table
         # print("(" + fullTableName[-3:] + ")", end="", flush=True)
         try:
             sql = "CREATE TABLE {} USING {}.{} tags ({})".format(
                 fullTableName, dbName, self._stName, self._getTagStrForSql(dbc)
             )
+            # Logging.info("Creating regular with SQL: {}".format(sql))
             dbc.execute(sql)
+            # Logging.info("Regular table created: {}".format(sql))
         finally:
             if task is not None:
+                # Logging.info("Unlocking table after creation: {}".format(fullTableName))
                 task.unlockTable(fullTableName) # no matter what
+                # Logging.info("Table unlocked after creation: {}".format(fullTableName))
 
     def _getTagStrForSql(self, dbc) :
         tags = self._getTags(dbc)
@@ -2011,9 +2037,30 @@ class TaskAddData(StateTransitionTask):
     def canBeginFrom(cls, state: AnyState):
         return state.canAddData()
 
+    def _lockTableIfNeeded(self, fullTableName, extraMsg = ''):
+        if Config.getConfig().verify_data:
+            # Logging.info("Locking table: {}".format(fullTableName))
+            self.lockTable(fullTableName)
+            # Logging.info("Table locked {}: {}".format(extraMsg, fullTableName))
+            # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written
+        else:
+            # Logging.info("Skipping locking table")
+            pass
+
+    def _unlockTableIfNeeded(self, fullTableName):
+        if Config.getConfig().verify_data:
+            # Logging.info("Unlocking table: {}".format(fullTableName))
+            self.unlockTable(fullTableName)
+            # Logging.info("Table unlocked: {}".format(fullTableName))
+        else:
+            pass
+            # Logging.info("Skipping unlocking table")
+
     def _addDataInBatch(self, db, dbc, regTableName, te: TaskExecutor):
         numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS
 
         fullTableName = db.getName() + '.' + regTableName
+        self._lockTableIfNeeded(fullTableName, 'batch')
 
         sql = "INSERT INTO {} VALUES ".format(fullTableName)
         for j in range(numRecords): # number of records per table
@@ -2021,51 +2068,60 @@ class TaskAddData(StateTransitionTask):
             nextTick = db.getNextTick()
             nextColor = db.getNextColor()
             sql += "('{}', {}, '{}');".format(nextTick, nextInt, nextColor)
-        dbc.execute(sql)
+
+        # Logging.info("Adding data in batch: {}".format(sql))
+        try:
+            dbc.execute(sql)
+        finally:
+            # Logging.info("Data added in batch: {}".format(sql))
+            self._unlockTableIfNeeded(fullTableName)
+
 
     def _addData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches
         numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS
 
         for j in range(numRecords): # number of records per table
-            nextInt = db.getNextInt()
+            intToWrite = db.getNextInt()
             nextTick = db.getNextTick()
             nextColor = db.getNextColor()
             if Config.getConfig().record_ops:
                 self.prepToRecordOps()
                 if self.fAddLogReady is None:
                     raise CrashGenError("Unexpected empty fAddLogReady")
-                self.fAddLogReady.write("Ready to write {} to {}\n".format(nextInt, regTableName))
+                self.fAddLogReady.write("Ready to write {} to {}\n".format(intToWrite, regTableName))
                 self.fAddLogReady.flush()
                 os.fsync(self.fAddLogReady.fileno())
 
             # TODO: too ugly trying to lock the table reliably, refactor...
             fullTableName = db.getName() + '.' + regTableName
-            if Config.getConfig().verify_data:
-                self.lockTable(fullTableName)
-                # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written
+            self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock
 
             try:
                 sql = "INSERT INTO {} VALUES ('{}', {}, '{}');".format( # removed: tags ('{}', {})
                     fullTableName,
                     # ds.getFixedSuperTableName(),
                     # ds.getNextBinary(), ds.getNextFloat(),
-                    nextTick, nextInt, nextColor)
+                    nextTick, intToWrite, nextColor)
+                # Logging.info("Adding data: {}".format(sql))
                 dbc.execute(sql)
+                # Logging.info("Data added: {}".format(sql))
+                intWrote = intToWrite
 
                 # Quick hack, attach an update statement here. TODO: create an "update" task
                 if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shaddow DB
-                    nextInt = db.getNextInt()
+                    intToUpdate = db.getNextInt() # Updated, but should not succeed
                     nextColor = db.getNextColor()
                     sql = "INSERt INTO {} VALUES ('{}', {}, '{}');".format( # "INSERt" means "update" here
                         fullTableName,
-                        nextTick, nextInt, nextColor)
+                        nextTick, intToUpdate, nextColor)
                     # sql = "UPDATE {} set speed={}, color='{}' WHERE ts='{}'".format(
                     #     fullTableName, db.getNextInt(), db.getNextColor(), nextTick)
                     dbc.execute(sql)
+                    intWrote = intToUpdate # We updated, seems TDengine non-cluster accepts this.
 
             except: # Any exception at all
-                if Config.getConfig().verify_data:
-                    self.unlockTable(fullTableName)
+                self._unlockTableIfNeeded(fullTableName)
                 raise
 
             # Now read it back and verify, we might encounter an error if table is dropped
@@ -2073,33 +2129,41 @@ class TaskAddData(StateTransitionTask):
                 try:
                     readBack = dbc.queryScalar("SELECT speed from {}.{} WHERE ts='{}'".
                         format(db.getName(), regTableName, nextTick))
-                    if readBack != nextInt :
+                    if readBack != intWrote :
                         raise taos.error.ProgrammingError(
                             "Failed to read back same data, wrote: {}, read: {}"
-                            .format(nextInt, readBack), 0x999)
+                            .format(intWrote, readBack), 0x999)
                 except taos.error.ProgrammingError as err:
                     errno = Helper.convertErrno(err.errno)
-                    if errno in [CrashGenError.INVALID_EMPTY_RESULT, CrashGenError.INVALID_MULTIPLE_RESULT] : # not a single result
+                    if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result
                         raise taos.error.ProgrammingError(
-                            "Failed to read back same data for tick: {}, wrote: {}, read: {}"
-                            .format(nextTick, nextInt, "Empty Result" if errno == CrashGenError.INVALID_EMPTY_RESULT else "Multiple Result"),
+                            "Failed to read back same data for tick: {}, wrote: {}, read: EMPTY"
+                            .format(nextTick, intWrote),
+                            errno)
+                    elif errno == CrashGenError.INVALID_MULTIPLE_RESULT : # multiple results
+                        raise taos.error.ProgrammingError(
+                            "Failed to read back same data for tick: {}, wrote: {}, read: MULTIPLE RESULTS"
+                            .format(nextTick, intWrote),
                             errno)
                     elif errno in [0x218, 0x362]: # table doesn't exist
                         # do nothing
-                        dummy = 0
+                        pass
                     else:
                         # Re-throw otherwise
                         raise
                 finally:
-                    self.unlockTable(fullTableName) # Unlock the table no matter what
+                    self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock
+                # Done with read-back verification, unlock the table now
+            else:
+                self._unlockTableIfNeeded(fullTableName)
 
             # Successfully wrote the data into the DB, let's record it somehow
-            te.recordDataMark(nextInt)
+            te.recordDataMark(intWrote)
 
             if Config.getConfig().record_ops:
                 if self.fAddLogDone is None:
                     raise CrashGenError("Unexpected empty fAddLogDone")
-                self.fAddLogDone.write("Wrote {} to {}\n".format(nextInt, regTableName))
+                self.fAddLogDone.write("Wrote {} to {}\n".format(intWrote, regTableName))
                 self.fAddLogDone.flush()
                 os.fsync(self.fAddLogDone.fileno())
@@ -2137,15 +2201,16 @@ class TaskAddData(StateTransitionTask):
 class ThreadStacks: # stack info for all threads
     def __init__(self):
         self._allStacks = {}
-        allFrames = sys._current_frames()
-        for th in threading.enumerate():
+        allFrames = sys._current_frames() # All current stack frames
+        for th in threading.enumerate(): # For each thread
             if th.ident is None:
                 continue
-            stack = traceback.extract_stack(allFrames[th.ident])
-            self._allStacks[th.native_id] = stack
+            stack = traceback.extract_stack(allFrames[th.ident]) # Get stack for a thread
+            shortTid = th.ident % 10000
+            self._allStacks[shortTid] = stack # Was using th.native_id
 
     def print(self, filteredEndName = None, filterInternal = False):
-        for thNid, stack in self._allStacks.items(): # for each thread, stack frames top to bottom
+        for tIdent, stack in self._allStacks.items(): # for each thread, stack frames top to bottom
             lastFrame = stack[-1]
             if filteredEndName: # we need to filter out stacks that match this name
                 if lastFrame.name == filteredEndName : # end did not match
@@ -2157,7 +2222,7 @@ class ThreadStacks: # stack info for all threads
                     '__init__']: # the thread that extracted the stack
                     continue # ignore
             # Now print
-            print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(thNid))
+            print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(tIdent))
             stackFrame = 0
             for frame in stack: # was using: reversed(stack)
                 # print(frame)
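The hunk above keys the collected stacks by a shortened thread id (th.ident % 10000) instead of th.native_id. A minimal stand-alone sketch of the same stack-dump technique, using only the Python standard library (the function name dump_thread_stacks is illustrative, not part of the source):

    import sys, threading, traceback

    def dump_thread_stacks():
        all_frames = sys._current_frames()          # snapshot: thread-id -> topmost frame
        for th in threading.enumerate():            # every live thread
            if th.ident is None:                    # thread not started yet
                continue
            short_tid = th.ident % 10000            # short id, matches the log prefix style
            stack = traceback.extract_stack(all_frames[th.ident])
            print("<----- Thread {} ({}) ----->".format(short_tid, th.name))
            print("".join(traceback.format_list(stack)))

    dump_thread_stacks()

The modulo keeps the printed id in step with the 4-digit prefix used by the logging adapter further down, at the (accepted) cost of possible collisions between long-lived threads.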
@@ -2376,7 +2441,7 @@ class MainExec:
             action='store',
             default=0,
             type=int,
-            help='Maximum number of DBs to keep, set to disable dropping DB. (default: 0)')
+            help='Number of DBs to use, set to disable dropping DB. (default: 0)')
         parser.add_argument(
             '-c',
             '--connector-type',
@@ -179,7 +179,7 @@ quorum 2
     def getServiceCmdLine(self): # to start the instance
         if Config.getConfig().track_memory_leaks:
             Logging.info("Invoking VALGRIND on service...")
-            return ['exec /usr/bin/valgrind', '--leak-check=yes', self.getExecFile(), '-c', self.getCfgDir()]
+            return ['exec valgrind', '--leak-check=yes', self.getExecFile(), '-c', self.getCfgDir()]
         else:
             # TODO: move "exec -c" into Popen(), we can both "use shell" and NOT fork so ask to lose kill control
             return ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen()
@@ -310,7 +310,7 @@ class TdeSubProcess:
         # print("Starting TDengine with env: ", myEnv.items())
         print("Starting TDengine: {}".format(cmdLine))
 
-        return Popen(
+        ret = Popen(
             ' '.join(cmdLine), # ' '.join(cmdLine) if useShell else cmdLine,
             shell=True, # Always use shell, since we need to pass ENV vars
             stdout=PIPE,
@@ -318,6 +318,10 @@ class TdeSubProcess:
             close_fds=ON_POSIX,
             env=myEnv
             ) # had text=True, which interferred with reading EOF
+        time.sleep(0.01) # very brief wait, then let's check if sub process started successfully.
+        if ret.poll():
+            raise CrashGenError("Sub process failed to start with command line: {}".format(cmdLine))
+        return ret
 
     STOP_SIGNAL = signal.SIGINT # signal.SIGKILL/SIGINT # What signal to use (in kill) to stop a taosd process?
     SIG_KILL_RETCODE = 137 # ref: https://stackoverflow.com/questions/43268156/process-finished-with-exit-code-137-in-pycharm
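The new lines launch the service and then check right away whether the child already died. A rough stand-alone sketch of the same launch-and-verify pattern with the standard subprocess module (start_service and the RuntimeError are placeholders, not names from the project):

    import subprocess, time

    def start_service(cmd_line):
        proc = subprocess.Popen(" ".join(cmd_line),  # go through a shell so env vars expand
                                shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                close_fds=True)
        time.sleep(0.01)                             # give the child a moment to fail fast
        if proc.poll() is not None:                  # poll() stays None while the child is alive
            raise RuntimeError("sub process exited immediately: {}".format(cmd_line))
        return proc

Comparing poll() against None also catches a child that exits immediately with status 0, which a plain truthiness test would miss.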
@@ -614,7 +618,7 @@ class ServiceManager:
 
         # Find if there's already a taosd service, and then kill it
         for proc in psutil.process_iter():
-            if proc.name() == 'taosd':
+            if proc.name() == 'taosd' or proc.name() == 'memcheck-amd64-': # Regular or under Valgrind
                 Logging.info("Killing an existing TAOSD process in 2 seconds... press CTRL-C to interrupt")
                 time.sleep(2.0)
                 proc.kill()
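A small stand-alone sketch of the same clean-up idea with psutil; the process names below are only examples, and the exception handling is an addition of the sketch, not taken from the source:

    import time
    import psutil

    def kill_existing(names=("taosd", "memcheck-amd64-")):
        # Under Valgrind the service shows up as the memcheck tool process,
        # so both names are matched before killing.
        for proc in psutil.process_iter():
            try:
                if proc.name() in names:
                    print("Killing PID {} ({}) in 2 seconds...".format(proc.pid, proc.name()))
                    time.sleep(2.0)
                    proc.kill()
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                continue  # process vanished or is not ours; skip it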
@@ -35,7 +35,8 @@ class LoggingFilter(logging.Filter):
 
 class MyLoggingAdapter(logging.LoggerAdapter):
     def process(self, msg, kwargs):
-        return "[{:04d}] {}".format(threading.get_ident() % 10000, msg), kwargs
+        shortTid = threading.get_ident() % 10000
+        return "[{:04d}] {}".format(shortTid, msg), kwargs
         # return '[%s] %s' % (self.extra['connid'], msg), kwargs
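For reference, a LoggerAdapter of this kind is used as a drop-in wrapper around an ordinary logger; this is a generic sketch with made-up names, not the project's logger setup:

    import logging, threading

    class ShortTidAdapter(logging.LoggerAdapter):
        def process(self, msg, kwargs):
            short_tid = threading.get_ident() % 10000   # 4-digit thread tag
            return "[{:04d}] {}".format(short_tid, msg), kwargs

    logging.basicConfig(level=logging.INFO)
    log = ShortTidAdapter(logging.getLogger("crashgen-demo"), {})
    log.info("hello from the main thread")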
@@ -31,7 +31,7 @@ python3 ./test.py -f table/column_name.py
 python3 ./test.py -f table/column_num.py
 python3 ./test.py -f table/db_table.py
 python3 ./test.py -f table/create_sensitive.py
-#python3 ./test.py -f table/tablename-boundary.py
+python3 ./test.py -f table/tablename-boundary.py
 python3 ./test.py -f table/max_table_length.py
 python3 ./test.py -f table/alter_column.py
 python3 ./test.py -f table/boundary.py
@@ -334,6 +334,7 @@ python3 ./test.py -f tag_lite/alter_tag.py
 
 python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
 python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
+python3 ./test.py -f tag_lite/drop_auto_create.py
 python3 test.py -f insert/insert_before_use_db.py
+python3 test.py -f alter/alter_cacheLastRow.py
 #======================p4-end===============
@@ -36,6 +36,10 @@ class TDTestCase:
         tdSql.checkData(1, 1, '涛思数据')
 
         tdSql.error("insert into tb values (now, 'taosdata001')")
 
+        tdSql.error("insert into tb(now, 😀)")
+        tdSql.query("select * from tb")
+        tdSql.checkRows(2)
+
     def stop(self):
         tdSql.close()
@@ -14,6 +14,13 @@ class TDTestCase:
         tdLog.debug("start to execute %s" % __file__)
         tdSql.init(conn.cursor(), logSql)
 
+        self.ts = 1622100000000
+
+    def get_random_string(self, length):
+        letters = string.ascii_lowercase
+        result_str = ''.join(random.choice(letters) for i in range(length))
+        return result_str
+
     def run(self):
         tdSql.prepare()
 
@@ -24,19 +31,62 @@ class TDTestCase:
             shell=True)) - 1
         tdLog.info("table name max length is %d" % tableNameMaxLen)
         chars = string.ascii_uppercase + string.ascii_lowercase
-        tb_name = ''.join(random.choices(chars, k=tableNameMaxLen))
+        tb_name = ''.join(random.choices(chars, k=tableNameMaxLen + 1))
         tdLog.info('tb_name length %d' % len(tb_name))
         tdLog.info('create table %s (ts timestamp, value int)' % tb_name)
-        tdSql.error(
-            'create table %s (ts timestamp, speed binary(4089))' %
-            tb_name)
+        tdSql.error('create table %s (ts timestamp, speed binary(4089))' % tb_name)
 
-        tb_name = ''.join(random.choices(chars, k=191))
+        tb_name = ''.join(random.choices(chars, k=tableNameMaxLen))
         tdLog.info('tb_name length %d' % len(tb_name))
         tdLog.info('create table %s (ts timestamp, value int)' % tb_name)
         tdSql.execute(
             'create table %s (ts timestamp, speed binary(4089))' %
             tb_name)
 
+        db_name = self.get_random_string(33)
+        tdSql.error("create database %s" % db_name)
+
+        db_name = self.get_random_string(32)
+        tdSql.execute("create database %s" % db_name)
+        tdSql.execute("use %s" % db_name)
+
+        tb_name = self.get_random_string(193)
+        tdSql.error("create table %s(ts timestamp, val int)" % tb_name)
+
+        tb_name = self.get_random_string(192)
+        tdSql.execute("create table %s.%s(ts timestamp, val int)" % (db_name, tb_name))
+        tdSql.query("show %s.tables" % db_name)
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, tb_name)
+
+        tdSql.execute("insert into %s.%s values(now, 1)" % (db_name, tb_name))
+        tdSql.query("select * from %s.%s" %(db_name, tb_name))
+        tdSql.checkRows(1)
+
+        db_name = self.get_random_string(32)
+        tdSql.execute("create database %s update 1" % db_name)
+
+        stb_name = self.get_random_string(192)
+        tdSql.execute("create table %s.%s(ts timestamp, val int) tags(id int)" % (db_name, stb_name))
+        tb_name1 = self.get_random_string(192)
+        tdSql.execute("insert into %s.%s using %s.%s tags(1) values(%d, 1)(%d, 2)(%d, 3)" % (db_name, tb_name1, db_name, stb_name, self.ts, self.ts + 1, self.ts + 2))
+        tb_name2 = self.get_random_string(192)
+        tdSql.execute("insert into %s.%s using %s.%s tags(2) values(%d, 1)(%d, 2)(%d, 3)" % (db_name, tb_name2, db_name, stb_name, self.ts, self.ts + 1, self.ts + 2))
+
+        tdSql.query("show %s.tables" % db_name)
+        tdSql.checkRows(2)
+
+        tdSql.query("select * from %s.%s" % (db_name, stb_name))
+        tdSql.checkRows(6)
+
+        tdSql.execute("insert into %s.%s using %s.%s tags(1) values(%d, null)" % (db_name, tb_name1, db_name, stb_name, self.ts))
+
+        tdSql.query("select * from %s.%s" % (db_name, stb_name))
+        tdSql.checkRows(6)
+
     def stop(self):
         tdSql.close()
@@ -0,0 +1,47 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def run(self):
+        tdSql.prepare()
+
+        tdSql.execute('create table m1(ts timestamp, k int) tags(a binary(12), b int, c double);')
+        tdSql.execute('insert into tm0 using m1(b,c) tags(1, 99) values(now, 1);')
+        tdSql.execute('insert into tm1 using m1(b,c) tags(2, 100) values(now, 2);')
+        tdLog.info("2 rows inserted")
+        tdSql.query('select * from m1;')
+        tdSql.checkRows(2)
+        tdSql.query('select *,tbname from m1;')
+        tdSql.execute("drop table tm0; ")
+        tdSql.query('select * from m1')
+        tdSql.checkRows(1)
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
@@ -0,0 +1,60 @@
+{
+    "filetype": "insert",
+    "cfgdir": "/etc/taos",
+    "host": "127.0.0.1",
+    "port": 6030,
+    "user": "root",
+    "password": "taosdata",
+    "thread_count": 4,
+    "thread_count_create_tbl": 4,
+    "result_file": "./insert_res.txt",
+    "confirm_parameter_prompt": "no",
+    "insert_interval": 0,
+    "interlace_rows": 100,
+    "num_of_records_per_req": 100,
+    "databases": [{
+        "dbinfo": {
+            "name": "db",
+            "drop": "yes",
+            "replica": 1,
+            "days": 10,
+            "cache": 16,
+            "blocks": 8,
+            "precision": "ms",
+            "keep": 3650,
+            "minRows": 100,
+            "maxRows": 4096,
+            "comp":2,
+            "walLevel":1,
+            "cachelast":0,
+            "quorum":1,
+            "fsync":3000,
+            "update": 0
+        },
+        "super_tables": [{
+            "name": "stb",
+            "child_table_exists":"no",
+            "childtable_count": 500,
+            "childtable_prefix": "stb_",
+            "auto_create_table": "no",
+            "batch_create_tbl_num": 20,
+            "data_source": "rand",
+            "insert_mode": "taosc",
+            "insert_rows": 10000,
+            "childtable_limit": 10,
+            "childtable_offset":100,
+            "interlace_rows": 0,
+            "insert_interval":0,
+            "max_sql_len": 1024000,
+            "disorder_ratio": 0,
+            "disorder_range": 1000,
+            "timestamp_step": 10,
+            "start_timestamp": "2020-10-01 00:00:00.000",
+            "sample_format": "csv",
+            "sample_file": "./sample.csv",
+            "tags_file": "",
+            "columns": [{"type": "INT"}],
+            "tags": [{"type": "TINYINT", "count":2}]
+        }]
+    }]
+}
@@ -432,7 +432,7 @@ class TDDnodes:
         self.simDeployed = False
 
     def init(self, path):
-        psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
+        psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'"
         processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
         while(processID):
             killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
@@ -545,14 +545,14 @@ class TDDnodes:
         for i in range(len(self.dnodes)):
             self.dnodes[i].stop()
 
-        psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'"
+        psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}'"
         processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
         if processID:
             cmd = "sudo systemctl stop taosd"
             os.system(cmd)
         # if os.system(cmd) != 0 :
         #   tdLog.exit(cmd)
-        psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
+        psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
         while(processID):
             killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
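The added "grep -v defunct" keeps zombie taosd entries out of the PID list, so the kill -TERM loop cannot spin on processes that can never be reaped. A rough stand-alone equivalent of the same filter, assuming a POSIX shell is available (live_taosd_pids is an illustrative name, not from the source):

    import subprocess

    def live_taosd_pids():
        # "grep -v defunct" drops zombie entries, which cannot be killed again
        # and would otherwise keep the termination loop running forever.
        ps_cmd = "ps -ef | grep -w taosd | grep -v grep | grep -v defunct | awk '{print $2}'"
        out = subprocess.check_output(ps_cmd, shell=True).decode("utf-8")
        return [int(pid) for pid in out.split()]

    print(live_taosd_pids())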
@@ -3486,6 +3486,204 @@ static int stmt_specifyCol_bind_case_002_autoCreateTbl(TAOS_STMT *stmt, int tabl
     v->ts[i] = tts + i;
   }
 
+  for (int i = 0; i < 1; ++i) {
+    //tags[i+0].buffer_type = TSDB_DATA_TYPE_INT;
+    //tags[i+0].buffer = v->v4;
+    //tags[i+0].is_null = &one_not_null;
+    //tags[i+0].length = NULL;
+
+    tags[i+0].buffer_type = TSDB_DATA_TYPE_BOOL;
+    tags[i+0].buffer = v->b;
+    tags[i+0].is_null = &one_not_null;
+    tags[i+0].length = NULL;
+
+    //tags[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT;
+    //tags[i+2].buffer = v->v1;
+    //tags[i+2].is_null = &one_not_null;
+    //tags[i+2].length = NULL;
+
+    tags[i+1].buffer_type = TSDB_DATA_TYPE_SMALLINT;
+    tags[i+1].buffer = v->v2;
+    tags[i+1].is_null = &one_not_null;
+    tags[i+1].length = NULL;
+
+    tags[i+2].buffer_type = TSDB_DATA_TYPE_BIGINT;
+    tags[i+2].buffer = v->v8;
+    tags[i+2].is_null = &one_not_null;
+    tags[i+2].length = NULL;
+
+    tags[i+3].buffer_type = TSDB_DATA_TYPE_FLOAT;
+    tags[i+3].buffer = v->f4;
+    tags[i+3].is_null = &one_not_null;
+    tags[i+3].length = NULL;
+
+    tags[i+4].buffer_type = TSDB_DATA_TYPE_DOUBLE;
+    tags[i+4].buffer = v->f8;
+    tags[i+4].is_null = &one_not_null;
+    tags[i+4].length = NULL;
+
+    tags[i+5].buffer_type = TSDB_DATA_TYPE_BINARY;
+    tags[i+5].buffer = v->br;
+    tags[i+5].is_null = &one_not_null;
+    tags[i+5].length = (uintptr_t *)lb;
+
+    tags[i+6].buffer_type = TSDB_DATA_TYPE_NCHAR;
+    tags[i+6].buffer = v->nr;
+    tags[i+6].is_null = &one_not_null;
+    tags[i+6].length = (uintptr_t *)lb;
+  }
+
+  unsigned long long starttime = getCurrentTime();
+
+  // create table m%d (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, br binary(%d), nr nchar(%d), ts2 timestamp)
+  //char *sql = "insert into ? (ts,b,v4,f4,br) using stb1 tags (?,?,?,?,?,?,?,?,?) values(?,?,?,?,?)";
+  char *sql = "insert into ? using stb1 tags (33,?,99,?,?,?,?,?,?) (ts,b,v4,f4,br) values(?,?,?,?,?)";
+
+  int code = taos_stmt_prepare(stmt, sql, 0);
+  if (code != 0){
+    printf("failed to execute taos_stmt_prepare. code:0x%x[%s]\n", code, tstrerror(code));
+    return -1;
+  }
+
+  int id = 0;
+  for (int l = 0; l < bingNum; l++) {
+    for (int zz = 0; zz < tableNum; zz++) {
+      char buf[32];
+      sprintf(buf, "m%d", zz);
+      code = taos_stmt_set_tbname_tags(stmt, buf, tags);
+      if (code != 0){
+        printf("failed to execute taos_stmt_set_tbname. code:0x%x[%s]\n", code, tstrerror(code));
+        return -1;
+      }
+
+      for (int col=0; col < columnNum; ++col) {
+        code = taos_stmt_bind_single_param_batch(stmt, params + id, col);
+        if (code != 0){
+          printf("failed to execute taos_stmt_bind_single_param_batch. code:0x%x[%s]\n", code, tstrerror(code));
+          return -1;
+        }
+        id++;
+      }
+
+      code = taos_stmt_add_batch(stmt);
+      if (code != 0) {
+        printf("failed to execute taos_stmt_add_batch. code:0x%x[%s]\n", code, tstrerror(code));
+        return -1;
+      }
+    }
+
+    code = taos_stmt_execute(stmt);
+    if (code != 0) {
+      printf("failed to execute taos_stmt_execute. code:0x%x[%s]\n", code, tstrerror(code));
+      return -1;
+    }
+  }
+
+  unsigned long long endtime = getCurrentTime();
+  unsigned long long totalRows = (uint32_t)(totalRowsPerTbl * tableNum);
+  printf("insert total %d records, used %u seconds, avg:%u useconds per record\n", totalRows, (endtime-starttime)/1000000UL, (endtime-starttime)/totalRows);
+
+  free(v->ts);
+  free(v->br);
+  free(v->nr);
+  free(v);
+  free(lb);
+  free(params);
+  free(tags);
+  free(is_null);
+  free(no_null);
+
+  return 0;
+}
+
+// some tags are null
+static int stmt_specifyCol_bind_case_003_autoCreateTbl(TAOS_STMT *stmt, int tableNum, int rowsOfPerColum, int bingNum, int lenOfBinaryDef, int lenOfBinaryAct, int columnNum) {
+  sampleValue* v = (sampleValue *)calloc(1, sizeof(sampleValue));
+
+  int totalRowsPerTbl = rowsOfPerColum * bingNum;
+
+  v->ts = (int64_t *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * tableNum));
+  v->br = (char *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * lenOfBinaryDef));
+  v->nr = (char *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * lenOfBinaryDef));
+
+  int *lb = (int *)malloc(MAX_ROWS_OF_PER_COLUMN * sizeof(int));
+
+  TAOS_BIND *tags = calloc(1, sizeof(TAOS_BIND) * 9 * 1);
+  TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * (size_t)(bingNum * columnNum * (tableNum+1) * rowsOfPerColum));
+  char* is_null = malloc(sizeof(char) * MAX_ROWS_OF_PER_COLUMN);
+  char* no_null = malloc(sizeof(char) * MAX_ROWS_OF_PER_COLUMN);
+  int one_not_null = 0;
+  int one_is_null = 1;
+
+  int64_t tts = 1591060628000;
+
+  for (int i = 0; i < rowsOfPerColum; ++i) {
+    lb[i] = lenOfBinaryAct;
+    no_null[i] = 0;
+    is_null[i] = (i % 10 == 2) ? 1 : 0;
+    v->b[i] = (int8_t)(i % 2);
+    v->v1[i] = (int8_t)((i+1) % 2);
+    v->v2[i] = (int16_t)i;
+    v->v4[i] = (int32_t)(i+1);
+    v->v8[i] = (int64_t)(i+2);
+    v->f4[i] = (float)(i+3);
+    v->f8[i] = (double)(i+4);
+    char tbuf[MAX_BINARY_DEF_LEN];
+    memset(tbuf, 0, MAX_BINARY_DEF_LEN);
+    sprintf(tbuf, "binary-%d-0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789", i%10);
+    memcpy(v->br + i*lenOfBinaryDef, tbuf, (size_t)lenOfBinaryAct);
+    memset(tbuf, 0, MAX_BINARY_DEF_LEN);
+    sprintf(tbuf, "nchar-%d-0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789", i%10);
+    memcpy(v->nr + i*lenOfBinaryDef, tbuf, (size_t)lenOfBinaryAct);
+    v->ts2[i] = tts + i;
+  }
+
+  int i = 0;
+  for (int j = 0; j < bingNum * tableNum; j++) {
+    params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+    params[i+0].buffer_length = sizeof(int64_t);
+    params[i+0].buffer = &v->ts[j*rowsOfPerColum];
+    params[i+0].length = NULL;
+    params[i+0].is_null = no_null;
+    params[i+0].num = rowsOfPerColum;
+
+    params[i+1].buffer_type = TSDB_DATA_TYPE_BOOL;
+    params[i+1].buffer_length = sizeof(int8_t);
+    params[i+1].buffer = v->b;
+    params[i+1].length = NULL;
+    params[i+1].is_null = is_null;
+    params[i+1].num = rowsOfPerColum;
+
+    params[i+2].buffer_type = TSDB_DATA_TYPE_INT;
+    params[i+2].buffer_length = sizeof(int32_t);
+    params[i+2].buffer = v->v4;
+    params[i+2].length = NULL;
+    params[i+2].is_null = is_null;
+    params[i+2].num = rowsOfPerColum;
+
+    params[i+3].buffer_type = TSDB_DATA_TYPE_FLOAT;
+    params[i+3].buffer_length = sizeof(float);
+    params[i+3].buffer = v->f4;
+    params[i+3].length = NULL;
+    params[i+3].is_null = is_null;
+    params[i+3].num = rowsOfPerColum;
+
+    params[i+4].buffer_type = TSDB_DATA_TYPE_BINARY;
+    params[i+4].buffer_length = (uintptr_t)lenOfBinaryDef;
+    params[i+4].buffer = v->br;
+    params[i+4].length = lb;
+    params[i+4].is_null = is_null;
+    params[i+4].num = rowsOfPerColum;
+
+    i+=columnNum;
+  }
+
+  //int64_t tts = 1591060628000;
+  for (int i = 0; i < totalRowsPerTbl * tableNum; ++i) {
+    v->ts[i] = tts + i;
+  }
+
   for (int i = 0; i < 1; ++i) {
     tags[i+0].buffer_type = TSDB_DATA_TYPE_INT;
     tags[i+0].buffer = v->v4;
@@ -3494,12 +3692,12 @@ static int stmt_specifyCol_bind_case_002_autoCreateTbl(TAOS_STMT *stmt, int tabl
 
     tags[i+1].buffer_type = TSDB_DATA_TYPE_BOOL;
     tags[i+1].buffer = v->b;
-    tags[i+1].is_null = &one_not_null;
+    tags[i+1].is_null = &one_is_null;
     tags[i+1].length = NULL;
 
     tags[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT;
     tags[i+2].buffer = v->v1;
-    tags[i+2].is_null = &one_not_null;
+    tags[i+2].is_null = &one_is_null;
     tags[i+2].length = NULL;
 
     tags[i+3].buffer_type = TSDB_DATA_TYPE_SMALLINT;
@@ -3514,7 +3712,7 @@ static int stmt_specifyCol_bind_case_002_autoCreateTbl(TAOS_STMT *stmt, int tabl
 
     tags[i+5].buffer_type = TSDB_DATA_TYPE_FLOAT;
     tags[i+5].buffer = v->f4;
-    tags[i+5].is_null = &one_not_null;
+    tags[i+5].is_null = &one_is_null;
     tags[i+5].length = NULL;
 
     tags[i+6].buffer_type = TSDB_DATA_TYPE_DOUBLE;
@@ -3597,7 +3795,8 @@ static int stmt_specifyCol_bind_case_002_autoCreateTbl(TAOS_STMT *stmt, int tabl
   return 0;
 }
 
-static int stmt_specifyCol_bind_case_003_autoCreateTbl(TAOS_STMT *stmt, int tableNum, int rowsOfPerColum, int bingNum, int lenOfBinaryDef, int lenOfBinaryAct, int columnNum) {
+// specify tags field, and not support , then is error case
+static int stmt_specifyCol_bind_case_004_autoCreateTbl(TAOS_STMT *stmt, int tableNum, int rowsOfPerColum, int bingNum, int lenOfBinaryDef, int lenOfBinaryAct, int columnNum) {
   sampleValue* v = (sampleValue *)calloc(1, sizeof(sampleValue));
 
   int totalRowsPerTbl = rowsOfPerColum * bingNum;
@@ -3683,50 +3882,50 @@ static int stmt_specifyCol_bind_case_003_autoCreateTbl(TAOS_STMT *stmt, int tabl
   }
 
   for (int i = 0; i < 1; ++i) {
-    tags[i+0].buffer_type = TSDB_DATA_TYPE_INT;
-    tags[i+0].buffer = v->v4;
+    //tags[i+0].buffer_type = TSDB_DATA_TYPE_INT;
+    //tags[i+0].buffer = v->v4;
+    //tags[i+0].is_null = &one_not_null;
+    //tags[i+0].length = NULL;
+
+    tags[i+0].buffer_type = TSDB_DATA_TYPE_BOOL;
+    tags[i+0].buffer = v->b;
     tags[i+0].is_null = &one_not_null;
     tags[i+0].length = NULL;
 
-    tags[i+1].buffer_type = TSDB_DATA_TYPE_BOOL;
-    tags[i+1].buffer = v->b;
+    //tags[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT;
+    //tags[i+2].buffer = v->v1;
+    //tags[i+2].is_null = &one_not_null;
+    //tags[i+2].length = NULL;
+
+    tags[i+1].buffer_type = TSDB_DATA_TYPE_SMALLINT;
+    tags[i+1].buffer = v->v2;
     tags[i+1].is_null = &one_not_null;
     tags[i+1].length = NULL;
 
-    tags[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT;
-    tags[i+2].buffer = v->v1;
+    tags[i+2].buffer_type = TSDB_DATA_TYPE_BIGINT;
+    tags[i+2].buffer = v->v8;
     tags[i+2].is_null = &one_not_null;
     tags[i+2].length = NULL;
 
-    tags[i+3].buffer_type = TSDB_DATA_TYPE_SMALLINT;
-    tags[i+3].buffer = v->v2;
+    tags[i+3].buffer_type = TSDB_DATA_TYPE_FLOAT;
+    tags[i+3].buffer = v->f4;
     tags[i+3].is_null = &one_not_null;
     tags[i+3].length = NULL;
 
-    tags[i+4].buffer_type = TSDB_DATA_TYPE_BIGINT;
-    tags[i+4].buffer = v->v8;
+    tags[i+4].buffer_type = TSDB_DATA_TYPE_DOUBLE;
+    tags[i+4].buffer = v->f8;
     tags[i+4].is_null = &one_not_null;
     tags[i+4].length = NULL;
 
-    tags[i+5].buffer_type = TSDB_DATA_TYPE_FLOAT;
-    tags[i+5].buffer = v->f4;
+    tags[i+5].buffer_type = TSDB_DATA_TYPE_BINARY;
+    tags[i+5].buffer = v->br;
     tags[i+5].is_null = &one_not_null;
-    tags[i+5].length = NULL;
+    tags[i+5].length = (uintptr_t *)lb;
 
-    tags[i+6].buffer_type = TSDB_DATA_TYPE_DOUBLE;
-    tags[i+6].buffer = v->f8;
+    tags[i+6].buffer_type = TSDB_DATA_TYPE_NCHAR;
+    tags[i+6].buffer = v->nr;
     tags[i+6].is_null = &one_not_null;
-    tags[i+6].length = NULL;
-
-    tags[i+7].buffer_type = TSDB_DATA_TYPE_BINARY;
-    tags[i+7].buffer = v->br;
-    tags[i+7].is_null = &one_not_null;
-    tags[i+7].length = (uintptr_t *)lb;
-
-    tags[i+8].buffer_type = TSDB_DATA_TYPE_NCHAR;
-    tags[i+8].buffer = v->nr;
-    tags[i+8].is_null = &one_not_null;
-    tags[i+8].length = (uintptr_t *)lb;
+    tags[i+6].length = (uintptr_t *)lb;
   }
@@ -3734,7 +3933,7 @@ static int stmt_specifyCol_bind_case_003_autoCreateTbl(TAOS_STMT *stmt, int tabl
 
   // create table m%d (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, br binary(%d), nr nchar(%d), ts2 timestamp)
   //char *sql = "insert into ? (ts,b,v4,f4,br) using stb1 tags (?,?,?,?,?,?,?,?,?) values(?,?,?,?,?)";
-  char *sql = "insert into ? using stb1 tags (33,?,99,?,?,?,?,?,?) (ts,b,v4,f4,br) values(?,?,?,?,?)";
+  char *sql = "insert into ? using stb1 (id1, id2, id3, id4, id5, id6, id7, id8, id9) tags (33,?,99,?,?,?,?,?,?) (ts,b,v4,f4,br) values(?,?,?,?,?)";
 
   int code = taos_stmt_prepare(stmt, sql, 0);
   if (code != 0){
@@ -3808,7 +4007,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) {
   //=======================================================================//
   //=============================== single table ==========================//
   //========== case 1: ======================//
-#if 0
+#if 1
   {
     stmt = taos_stmt_init(taos);
@@ -3830,7 +4029,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) {
 #endif
 
   //========== case 2: ======================//
-#if 0
+#if 1
   {
     stmt = taos_stmt_init(taos);
@@ -3861,7 +4060,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) {
 #endif
 
   //========== case 2-1: ======================//
-#if 0
+#if 1
   {
     stmt = taos_stmt_init(taos);
@@ -3891,7 +4090,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) {
   }
 #endif
   //========== case 2-2: ======================//
-#if 0
+#if 1
   {
     printf("====case 2-2 error test start\n");
     stmt = taos_stmt_init(taos);
@@ -3924,7 +4123,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) {
 
 
   //========== case 3: ======================//
-#if 0
+#if 1
   {
     stmt = taos_stmt_init(taos);
@@ -3955,7 +4154,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) {
 #endif
 
   //========== case 4: ======================//
-#if 0
+#if 1
   {
     stmt = taos_stmt_init(taos);
@@ -3988,7 +4187,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) {
   //=======================================================================//
   //=============================== multi-rows to single table ==========================//
   //========== case 5: ======================//
-#if 0
+#if 1
   {
     stmt = taos_stmt_init(taos);
@@ -4023,7 +4222,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) {
     columnNum = 5;
 
     prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db6");
-    stmt_specifyCol_bind_case_002_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
+    stmt_specifyCol_bind_case_001_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
 
     totalRowsPerTbl = rowsOfPerColum * bingNum;
     checkResult(taos, "m0", 0, totalRowsPerTbl);
@@ -4033,28 +4232,175 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) {
   }
 #endif
 
   //========== case 7: ======================//
-#if 0
+#if 1
+  {
+    stmt = taos_stmt_init(taos);
+
+    tableNum = 200;
+    rowsOfPerColum = 60;
+    bingNum = 1;
+    lenOfBinaryDef = 40;
+    lenOfBinaryAct = 8;
+    columnNum = 5;
+
+    prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db7");
+    stmt_specifyCol_bind_case_001_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
+
+    totalRowsPerTbl = rowsOfPerColum * bingNum;
+    checkResult(taos, "m0", 0, totalRowsPerTbl);
+    checkResult(taos, "m1", 0, totalRowsPerTbl);
+    checkResult(taos, "m99", 0, totalRowsPerTbl);
+    checkResult(taos, "m139", 0, totalRowsPerTbl);
+    checkResult(taos, "m199", 0, totalRowsPerTbl);
+    taos_stmt_close(stmt);
+    printf("case 7 check result end\n\n");
+  }
+#endif
+
+  //========== case 8: ======================//
+#if 1
   {
     stmt = taos_stmt_init(taos);
 
     tableNum = 1;
-    rowsOfPerColum = 23740;
+    rowsOfPerColum = 5;
     bingNum = 1;
     lenOfBinaryDef = 40;
     lenOfBinaryAct = 8;
-    columnNum = 1;
+    columnNum = 5;
 
-    prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db6");
-    stmt_specifyCol_bind_case_003_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
+    prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db8");
+    stmt_specifyCol_bind_case_002_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
 
     totalRowsPerTbl = rowsOfPerColum * bingNum;
     checkResult(taos, "m0", 0, totalRowsPerTbl);
     taos_stmt_close(stmt);
-    printf("case 7 check result end\n\n");
+    printf("case 8 check result end\n\n");
   }
 #endif
 
+  //========== case 9: ======================//
+#if 1
+  {
+    stmt = taos_stmt_init(taos);
+
+    tableNum = 10;
+    rowsOfPerColum = 5;
+    bingNum = 1;
+    lenOfBinaryDef = 40;
+    lenOfBinaryAct = 8;
+    columnNum = 5;
+
+    prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db9");
+    stmt_specifyCol_bind_case_002_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
+
+    totalRowsPerTbl = rowsOfPerColum * bingNum;
+    checkResult(taos, "m0", 0, totalRowsPerTbl);
+    checkResult(taos, "m3", 0, totalRowsPerTbl);
+    checkResult(taos, "m6", 0, totalRowsPerTbl);
+    checkResult(taos, "m9", 0, totalRowsPerTbl);
+    taos_stmt_close(stmt);
+    printf("case 9 check result end\n\n");
+  }
+#endif
+
+  //========== case 10: ======================//
+#if 1
+  {
+    stmt = taos_stmt_init(taos);
+
+    tableNum = 1;
+    rowsOfPerColum = 23740;
+    bingNum = 1;
+    lenOfBinaryDef = 40;
+    lenOfBinaryAct = 8;
+    columnNum = 5;
+
+    prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db10");
+    stmt_specifyCol_bind_case_003_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
+
+    totalRowsPerTbl = rowsOfPerColum * bingNum;
+    checkResult(taos, "m0", 0, totalRowsPerTbl);
+    taos_stmt_close(stmt);
+    printf("case 10 check result end\n\n");
+  }
+#endif
+
+  //========== case 11: ======================//
+#if 1
+  {
+    stmt = taos_stmt_init(taos);
+
+    tableNum = 2;
+    rowsOfPerColum = 5;
+    bingNum = 1;
+    lenOfBinaryDef = 40;
+    lenOfBinaryAct = 8;
+    columnNum = 5;
+
+    prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db11");
+    stmt_specifyCol_bind_case_003_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
+
+    totalRowsPerTbl = rowsOfPerColum * bingNum;
+    checkResult(taos, "m0", 0, totalRowsPerTbl);
+    checkResult(taos, "m1", 0, totalRowsPerTbl);
+    taos_stmt_close(stmt);
+    printf("case 11 check result end\n\n");
+  }
+#endif
+
+  //========== case 12: ======================//
+#if 1
+  {
+    stmt = taos_stmt_init(taos);
+
+    tableNum = 200;
+    rowsOfPerColum = 60;
+    bingNum = 1;
+    lenOfBinaryDef = 40;
+    lenOfBinaryAct = 8;
+    columnNum = 5;
+
+    prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db12");
+    stmt_specifyCol_bind_case_003_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
+
+    totalRowsPerTbl = rowsOfPerColum * bingNum;
+    checkResult(taos, "m0", 0, totalRowsPerTbl);
+    checkResult(taos, "m1", 0, totalRowsPerTbl);
+    checkResult(taos, "m99", 0, totalRowsPerTbl);
+    checkResult(taos, "m139", 0, totalRowsPerTbl);
+    checkResult(taos, "m199", 0, totalRowsPerTbl);
+    taos_stmt_close(stmt);
+    printf("case 12 check result end\n\n");
+  }
+#endif
+
+  //========== case 13: ======================//
+#if 1
+  {
+    printf("====case 13 error test start\n");
+    stmt = taos_stmt_init(taos);
+
+    tableNum = 1;
+    rowsOfPerColum = 8;
+    bingNum = 1;
+    lenOfBinaryDef = 40;
+    lenOfBinaryAct = 8;
+    columnNum = 5;
+
+    prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db13");
+    stmt_specifyCol_bind_case_004_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
+
+    totalRowsPerTbl = rowsOfPerColum * bingNum;
+    checkResult(taos, "m0", 0, totalRowsPerTbl);
+    taos_stmt_close(stmt);
+    printf("====case 13 check result end\n\n");
+  }
+#endif
 
   return ;
 }
@@ -24,6 +24,9 @@ sql drop database if exists $db
 sql create database $db keep 36500
 sql use $db
 
+print =====================================> td-4481
+sql create database $db
+
 print =====================================> test case for twa in single block
 
 sql create table t1 (ts timestamp, k float);
@@ -9,7 +9,7 @@ sql connect
 
 print ======================== dnode1 start
 
-$dbPrefix = nest_query
+$dbPrefix = nest_db
 $tbPrefix = nest_tb
 $mtPrefix = nest_mt
 $tbNum = 10
@@ -17,7 +17,6 @@ $rowNum = 10000
 $totalNum = $tbNum * $rowNum
 
 print =============== nestquery.sim
 
 $i = 0
 $db = $dbPrefix . $i
 $mt = $mtPrefix . $i
@@ -60,4 +60,5 @@ run general/parser/slimit_alter_tags.sim
 run general/parser/binary_escapeCharacter.sim
 run general/parser/between_and.sim
 run general/parser/last_cache.sim
+run general/parser/nestquery.sim