diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 28be037e6c..e6ddb6d742 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -24,14 +24,14 @@ echo "compile_dir: ${compile_dir}" echo "pkg_dir: ${pkg_dir}" if [ -d ${pkg_dir} ]; then - rm -rf ${pkg_dir} + rm -rf ${pkg_dir} fi mkdir -p ${pkg_dir} cd ${pkg_dir} libfile="libtaos.so.${tdengine_ver}" -# create install dir +# create install dir install_home_path="/usr/local/taos" mkdir -p ${pkg_dir}${install_home_path} mkdir -p ${pkg_dir}${install_home_path}/bin @@ -42,7 +42,7 @@ mkdir -p ${pkg_dir}${install_home_path}/examples mkdir -p ${pkg_dir}${install_home_path}/include mkdir -p ${pkg_dir}${install_home_path}/init.d mkdir -p ${pkg_dir}${install_home_path}/script - + cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script @@ -54,7 +54,7 @@ cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_pat cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin -cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver +cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples @@ -67,7 +67,41 @@ fi cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector -cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector ||: +cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||: + +if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then + install_user_local_path="/usr/local" + mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} + cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/ + if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then + cp ${compile_dir}/build/bin/jemalloc.sh ${pkg_dir}${install_user_local_path}/bin/ + fi + if [ -f ${compile_dir}/build/bin/jeprof ]; then + cp ${compile_dir}/build/bin/jeprof ${pkg_dir}${install_user_local_path}/bin/ + fi + if [ -f ${compile_dir}/build/include/jemalloc/jemalloc.h ]; then + cp ${compile_dir}/build/include/jemalloc/jemalloc.h ${pkg_dir}${install_user_local_path}/include/jemalloc/ + fi + if [ -f ${compile_dir}/build/lib/libjemalloc.so.2 ]; then + cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/ + ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so + fi + if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then + cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/ + fi + if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then + cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/ + fi + if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then + cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc 
${pkg_dir}${install_user_local_path}/lib/pkgconfig/ + fi + if [ -f ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ]; then + cp ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ${pkg_dir}${install_user_local_path}/share/doc/jemalloc/ + fi + if [ -f ${compile_dir}/build/share/man/man3/jemalloc.3 ]; then + cp ${compile_dir}/build/share/man/man3/jemalloc.3 ${pkg_dir}${install_user_local_path}/share/man/man3/ + fi +fi cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/ chmod 755 ${pkg_dir}/DEBIAN/* @@ -75,7 +109,7 @@ chmod 755 ${pkg_dir}/DEBIAN/* # modify version of control debver="Version: "$tdengine_ver sed -i "2c$debver" ${pkg_dir}/DEBIAN/control - + #get taos version, then set deb name @@ -90,7 +124,7 @@ fi if [ "$verType" == "beta" ]; then debname=${debname}-${verType}".deb" -elif [ "$verType" == "stable" ]; then +elif [ "$verType" == "stable" ]; then debname=${debname}".deb" else echo "unknow verType, nor stabel or beta" @@ -101,7 +135,7 @@ fi dpkg -b ${pkg_dir} $debname echo "make deb package success!" -cp ${pkg_dir}/*.deb ${output_dir} +cp ${pkg_dir}/*.deb ${output_dir} # clean tmep dir rm -rf ${pkg_dir} diff --git a/packaging/rpm/makerpm.sh b/packaging/rpm/makerpm.sh index 678e75c500..7c3272f8d0 100755 --- a/packaging/rpm/makerpm.sh +++ b/packaging/rpm/makerpm.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Generate rpm package for centos +# Generate rpm package for centos set -e # set -x @@ -60,7 +60,7 @@ ${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_di # copy rpm package to output_dir, and modify package name, then clean temp dir #${csudo} cp -rf RPMS/* ${output_dir} -cp_rpm_package ${pkg_dir}/RPMS +cp_rpm_package ${pkg_dir}/RPMS if [ "$verMode" == "cluster" ]; then @@ -74,7 +74,7 @@ fi if [ "$verType" == "beta" ]; then rpmname=${rpmname}-${verType}".rpm" -elif [ "$verType" == "stable" ]; then +elif [ "$verType" == "stable" ]; then rpmname=${rpmname}".rpm" else echo "unknow verType, nor stabel or beta" diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index 9910e20bfe..8a870286ab 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -1,4 +1,5 @@ %define homepath /usr/local/taos +%define userlocalpath /usr/local %define cfg_install_dir /etc/taos %define __strip /bin/true @@ -12,22 +13,22 @@ URL: www.taosdata.com AutoReqProv: no #BuildRoot: %_topdir/BUILDROOT -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root #Prefix: /usr/local/taos -#BuildRequires: -#Requires: +#BuildRequires: +#Requires: %description Big Data Platform Designed and Optimized for IoT -#"prep" Nothing needs to be done +#"prep" Nothing needs to be done #%prep #%setup -q -#%setup -T +#%setup -T -#"build" Nothing needs to be done +#"build" Nothing needs to be done #%build #%configure #make %{?_smp_mflags} @@ -75,9 +76,53 @@ fi cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector -cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector ||: +cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||: cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples + +if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then + mkdir -p %{buildroot}%{userlocalpath}/bin + mkdir -p %{buildroot}%{userlocalpath}/lib + mkdir -p 
%{buildroot}%{userlocalpath}/lib/pkgconfig + mkdir -p %{buildroot}%{userlocalpath}/include + mkdir -p %{buildroot}%{userlocalpath}/include/jemalloc + mkdir -p %{buildroot}%{userlocalpath}/share + mkdir -p %{buildroot}%{userlocalpath}/share/doc + mkdir -p %{buildroot}%{userlocalpath}/share/doc/jemalloc + mkdir -p %{buildroot}%{userlocalpath}/share/man + mkdir -p %{buildroot}%{userlocalpath}/share/man/man3 + + cp %{_compiledir}/build/bin/jemalloc-config %{buildroot}%{userlocalpath}/bin/ + if [ -f %{_compiledir}/build/bin/jemalloc.sh ]; then + cp %{_compiledir}/build/bin/jemalloc.sh %{buildroot}%{userlocalpath}/bin/ + fi + if [ -f %{_compiledir}/build/bin/jeprof ]; then + cp %{_compiledir}/build/bin/jeprof %{buildroot}%{userlocalpath}/bin/ + fi + if [ -f %{_compiledir}/build/include/jemalloc/jemalloc.h ]; then + cp %{_compiledir}/build/include/jemalloc/jemalloc.h %{buildroot}%{userlocalpath}/include/jemalloc/ + fi + if [ -f %{_compiledir}/build/lib/libjemalloc.so.2 ]; then + cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/ + ln -sf libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/libjemalloc.so + fi + if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then + cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{userlocalpath}/lib/ + fi + if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then + cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{userlocalpath}/lib/ + fi + if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then + cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{userlocalpath}/lib/pkgconfig/ + fi + if [ -f %{_compiledir}/build/share/doc/jemalloc/jemalloc.html ]; then + cp %{_compiledir}/build/share/doc/jemalloc/jemalloc.html %{buildroot}%{userlocalpath}/share/doc/jemalloc/ + fi + if [ -f %{_compiledir}/build/share/man/man3/jemalloc.3 ]; then + cp %{_compiledir}/build/share/man/man3/jemalloc.3 %{buildroot}%{userlocalpath}/share/man/man3/ + fi +fi + #Scripts executed before installation %pre csudo="" @@ -103,7 +148,7 @@ fi # if taos.cfg already softlink, remove it if [ -f %{cfg_install_dir}/taos.cfg ]; then ${csudo} rm -f %{homepath}/cfg/taos.cfg || : -fi +fi # there can not libtaos.so*, otherwise ln -s error ${csudo} rm -f %{homepath}/driver/libtaos* || : @@ -116,18 +161,18 @@ if command -v sudo > /dev/null; then fi cd %{homepath}/script ${csudo} ./post.sh - + # Scripts executed before uninstall %preun csudo="" if command -v sudo > /dev/null; then csudo="sudo" fi -# only remove package to call preun.sh, not but update(2) +# only remove package to call preun.sh, not but update(2) if [ $1 -eq 0 ];then #cd %{homepath}/script #${csudo} ./preun.sh - + if [ -f %{homepath}/script/preun.sh ]; then cd %{homepath}/script ${csudo} ./preun.sh @@ -135,7 +180,7 @@ if [ $1 -eq 0 ];then bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" inc_link_dir="/usr/include" - + data_link_dir="/usr/local/taos/data" log_link_dir="/usr/local/taos/log" cfg_link_dir="/usr/local/taos/cfg" @@ -149,20 +194,20 @@ if [ $1 -eq 0 ];then ${csudo} rm -f ${inc_link_dir}/taos.h || : ${csudo} rm -f ${inc_link_dir}/taoserror.h || : ${csudo} rm -f ${lib_link_dir}/libtaos.* || : - + ${csudo} rm -f ${log_link_dir} || : ${csudo} rm -f ${data_link_dir} || : - + pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then ${csudo} kill -9 $pid || : - fi - fi + fi + fi fi - + # Scripts executed after uninstall %postun - + # clean build dir %clean csudo="" diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 
178a248cfe..325ac81053 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -59,11 +59,11 @@ initd_mod=0 service_mod=2 if pidof systemd &> /dev/null; then service_mod=0 -elif $(which service &> /dev/null); then +elif $(which service &> /dev/null); then service_mod=1 - service_config_dir="/etc/init.d" + service_config_dir="/etc/init.d" if $(which chkconfig &> /dev/null); then - initd_mod=1 + initd_mod=1 elif $(which insserv &> /dev/null); then initd_mod=2 elif $(which update-rc.d &> /dev/null); then @@ -71,7 +71,7 @@ elif $(which service &> /dev/null); then else service_mod=2 fi -else +else service_mod=2 fi @@ -103,7 +103,7 @@ elif echo $osinfo | grep -qwi "fedora" ; then os_type=2 else echo " osinfo: ${osinfo}" - echo " This is an officially unverified linux system," + echo " This is an officially unverified linux system," echo " if there are any problems with the installation and operation, " echo " please feel free to contact taosdata.com for support." os_type=1 @@ -138,7 +138,7 @@ do echo "Usage: `basename $0` -v [server | client] -e [yes | no]" exit 0 ;; - ?) #unknow option + ?) #unknow option echo "unkonw argument" exit 1 ;; @@ -157,9 +157,9 @@ function kill_process() { function install_main_path() { #create install main dir and all sub dir ${csudo} rm -rf ${install_main_dir} || : - ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir} ${csudo} mkdir -p ${install_main_dir}/cfg - ${csudo} mkdir -p ${install_main_dir}/bin + ${csudo} mkdir -p ${install_main_dir}/bin ${csudo} mkdir -p ${install_main_dir}/connector ${csudo} mkdir -p ${install_main_dir}/driver ${csudo} mkdir -p ${install_main_dir}/examples @@ -168,10 +168,10 @@ function install_main_path() { if [ "$verMode" == "cluster" ]; then ${csudo} mkdir -p ${nginx_dir} fi - + if [[ -e ${script_dir}/email ]]; then - ${csudo} cp ${script_dir}/email ${install_main_dir}/ ||: - fi + ${csudo} cp ${script_dir}/email ${install_main_dir}/ ||: + fi } function install_bin() { @@ -207,29 +207,75 @@ function install_lib() { ${csudo} rm -f ${lib_link_dir}/libtaos.* || : ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : #${csudo} rm -rf ${v15_java_app_dir} || : - ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* - + ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - + if [[ -d ${lib64_link_dir} && ! 
-e ${lib64_link_dir}/libtaos.so ]]; then ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : fi - - #if [ "$verMode" == "cluster" ]; then + + #if [ "$verMode" == "cluster" ]; then # # Compatible with version 1.5 # ${csudo} mkdir -p ${v15_java_app_dir} # ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar # ${csudo} chmod 777 ${v15_java_app_dir} || : #fi - + ${csudo} ldconfig } +function install_jemalloc() { + jemalloc_dir=${script_dir}/jemalloc + + if [ -d ${jemalloc_dir} ]; then + ${csudo} /usr/bin/install -c -d /usr/local/bin + + if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jeprof ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin + fi + if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then + ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then + ${csudo} /usr/bin/install -c -d /usr/local/lib + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib + ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so + ${csudo} /usr/bin/install -c -d /usr/local/lib + if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig + fi + fi + if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then + ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc + fi + if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then + ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3 + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 + fi + fi +} + function install_header() { ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : - ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } @@ -246,13 +292,13 @@ function add_newHostname_to_hosts() { if [[ "$s" == "$localIp" ]]; then return fi - done + done ${csudo} echo "127.0.0.1 $1" >> /etc/hosts ||: } function set_hostname() { echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:" - read newHostname + read newHostname while true; do if [[ ! 
-z "$newHostname" && "$newHostname" != "localhost" ]]; then break @@ -266,25 +312,25 @@ function set_hostname() { if [[ $retval != 0 ]]; then echo echo "set hostname fail!" - return + return fi #echo -e -n "$(hostnamectl status --static)" #echo -e -n "$(hostnamectl status --transient)" #echo -e -n "$(hostnamectl status --pretty)" - + #ubuntu/centos /etc/hostname if [[ -e /etc/hostname ]]; then ${csudo} echo $newHostname > /etc/hostname ||: fi - + #debian: #HOSTNAME=yourname if [[ -e /etc/sysconfig/network ]]; then ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: fi ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg - serverFqdn=$newHostname - + serverFqdn=$newHostname + if [[ -e /etc/hosts ]]; then add_newHostname_to_hosts $newHostname fi @@ -302,7 +348,7 @@ function is_correct_ipaddr() { return 0 fi done - + return 1 } @@ -316,13 +362,13 @@ function set_ipAsFqdn() { echo echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" localFqdn="127.0.0.1" - # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg serverFqdn=$localFqdn echo return - fi - + fi + echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:" echo echo -e -n "${GREEN}$iplist${NC}" @@ -331,15 +377,15 @@ function set_ipAsFqdn() { echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:" read localFqdn while true; do - if [ ! -z "$localFqdn" ]; then + if [ ! -z "$localFqdn" ]; then # Check if correct ip address is_correct_ipaddr $localFqdn retval=`echo $?` if [[ $retval != 0 ]]; then read -p "Please choose an IP from local IP list:" localFqdn else - # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg serverFqdn=$localFqdn break fi @@ -354,59 +400,59 @@ function local_fqdn_check() { echo echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}" echo - if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then + if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}" echo - + while true do - read -r -p "Set hostname now? [Y/n] " input - if [ ! -n "$input" ]; then - set_hostname - break - else - case $input in - [yY][eE][sS]|[yY]) - set_hostname - break - ;; - - [nN][oO]|[nN]) - set_ipAsFqdn - break - ;; - - *) - echo "Invalid input..." - ;; - esac - fi + read -r -p "Set hostname now? [Y/n] " input + if [ ! -n "$input" ]; then + set_hostname + break + else + case $input in + [yY][eE][sS]|[yY]) + set_hostname + break + ;; + + [nN][oO]|[nN]) + set_ipAsFqdn + break + ;; + + *) + echo "Invalid input..." + ;; + esac + fi done fi } function install_config() { #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - + if [ ! 
-f ${cfg_install_dir}/taos.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* - fi - + fi + ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg [ ! -z $1 ] && return 0 || : # only install client - + if ((${update_flag}==1)); then return 0 fi - + if [ "$interactiveFqdn" == "no" ]; then return 0 - fi - + fi + local_fqdn_check #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" @@ -424,8 +470,8 @@ function install_config() { if [ ! -z "$firstEp" ]; then # check the format of the firstEp #if [[ $firstEp == $FQDN_PATTERN ]]; then - # Write the first FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg + # Write the first FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg break #else # read -p "Please enter the correct FQDN:port: " firstEp @@ -433,9 +479,9 @@ function install_config() { else break fi - done + done - # user email + # user email #EMAIL_PATTERN='^[A-Za-z0-9\u4e00-\u9fa5]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$' #EMAIL_PATTERN='^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$' #EMAIL_PATTERN="^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$" @@ -446,31 +492,31 @@ function install_config() { if [ ! -z "$emailAddr" ]; then # check the format of the emailAddr #if [[ "$emailAddr" =~ $EMAIL_PATTERN ]]; then - # Write the email address to temp file - email_file="${install_main_dir}/email" + # Write the email address to temp file + email_file="${install_main_dir}/email" ${csudo} bash -c "echo $emailAddr > ${email_file}" - break + break #else - # read -p "Please enter the correct email address: " emailAddr + # read -p "Please enter the correct email address: " emailAddr #fi else break fi - done + done } function install_log() { ${csudo} rm -rf ${log_dir} || : ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} - + ${csudo} ln -s ${log_dir} ${install_main_dir}/log } function install_data() { ${csudo} mkdir -p ${data_dir} - - ${csudo} ln -s ${data_dir} ${install_main_dir}/data + + ${csudo} ln -s ${data_dir} ${install_main_dir}/data } function install_connector() { @@ -485,26 +531,26 @@ function install_examples() { function clean_service_on_sysvinit() { #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : - + #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : + if pidof taosd &> /dev/null; then ${csudo} service taosd stop || : fi - + if pidof tarbitrator &> /dev/null; then ${csudo} service tarbitratord stop || : fi if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/taosd ]; then + if [ -e ${service_config_dir}/taosd ]; then ${csudo} chkconfig --del taosd || : fi - if [ -e ${service_config_dir}/tarbitratord ]; then + if [ -e ${service_config_dir}/tarbitratord ]; then ${csudo} chkconfig --del tarbitratord || : fi elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/taosd ]; then + if [ -e ${service_config_dir}/taosd ]; then ${csudo} insserv -r taosd || : fi if [ -e ${service_config_dir}/tarbitratord ]; then @@ -518,10 +564,10 @@ function clean_service_on_sysvinit() { ${csudo} update-rc.d -f tarbitratord remove || : fi fi - + ${csudo} rm -f ${service_config_dir}/taosd || : ${csudo} rm -f ${service_config_dir}/tarbitratord || : - + if $(which init &> 
/dev/null); then ${csudo} init q || : fi @@ -544,10 +590,10 @@ function install_service_on_sysvinit() { ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord fi - + #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" #${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab" - + if ((${initd_mod}==1)); then ${csudo} chkconfig --add taosd || : ${csudo} chkconfig --level 2345 taosd on || : @@ -572,7 +618,7 @@ function clean_service_on_systemd() { fi ${csudo} systemctl disable taosd &> /dev/null || echo &> /dev/null ${csudo} rm -f ${taosd_service_config} - + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" if systemctl is-active --quiet tarbitratord; then echo "tarbitrator is running, stopping it..." @@ -580,7 +626,7 @@ function clean_service_on_systemd() { fi ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null ${csudo} rm -f ${tarbitratord_service_config} - + if [ "$verMode" == "cluster" ]; then nginx_service_config="${service_config_dir}/nginxd.service" if systemctl is-active --quiet nginxd; then @@ -588,8 +634,8 @@ function clean_service_on_systemd() { ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null fi ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null - ${csudo} rm -f ${nginx_service_config} - fi + ${csudo} rm -f ${nginx_service_config} + fi } # taos:2345:respawn:/etc/init.d/taosd start @@ -621,7 +667,7 @@ function install_service_on_systemd() { ${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}" ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" ${csudo} systemctl enable taosd - + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" ${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}" @@ -643,9 +689,9 @@ function install_service_on_systemd() { ${csudo} bash -c "echo >> ${tarbitratord_service_config}" ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}" ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" - #${csudo} systemctl enable tarbitratord - - if [ "$verMode" == "cluster" ]; then + #${csudo} systemctl enable tarbitratord + + if [ "$verMode" == "cluster" ]; then nginx_service_config="${service_config_dir}/nginxd.service" ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}" ${csudo} bash -c "echo 'Description=Nginx For TDengine Service' >> ${nginx_service_config}" @@ -674,7 +720,7 @@ function install_service_on_systemd() { ${csudo} systemctl enable nginxd fi ${csudo} systemctl start nginxd - fi + fi } function install_service() { @@ -757,7 +803,7 @@ function update_TDengine() { fi sleep 1 fi - + if [ "$verMode" == "cluster" ]; then if pidof nginx &> /dev/null; then if ((${service_mod}==0)); then @@ -770,12 +816,13 @@ function update_TDengine() { sleep 1 fi fi - + install_main_path install_log install_header install_lib + install_jemalloc if [ "$pagMode" != "lite" ]; then install_connector fi @@ -783,10 +830,10 @@ function update_TDengine() { if [ -z $1 ]; then install_bin install_service - install_config - + install_config + openresty_work=false - if [ "$verMode" == "cluster" ]; then + if [ "$verMode" == "cluster" 
]; then # Check if openresty is installed # Check if nginx is installed successfully if type curl &> /dev/null; then @@ -797,7 +844,7 @@ function update_TDengine() { echo -e "\033[44;31;5mNginx for TDengine does not work! Please try again!\033[0m" fi fi - fi + fi #echo #echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" @@ -816,7 +863,7 @@ function update_TDengine() { else echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos -h $serverFqdn${NC} in shell${NC}" fi - + echo echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" else @@ -839,14 +886,14 @@ function install_TDengine() { tar -zxf taos.tar.gz echo -e "${GREEN}Start to install TDengine...${NC}" - - install_main_path - + + install_main_path + if [ -z $1 ]; then install_data - fi - - install_log + fi + + install_log install_header install_lib if [ "$pagMode" != "lite" ]; then @@ -871,8 +918,8 @@ function install_TDengine() { fi fi fi - - install_config + + install_config # Ask if to start the service #echo @@ -885,36 +932,36 @@ function install_TDengine() { echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" else echo -e "${GREEN_DARK}To start TDengine ${NC}: taosd${NC}" - fi + fi #if [ ${openresty_work} = 'true' ]; then # echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" #else # echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" #fi - + if [ ! -z "$firstEp" ]; then - tmpFqdn=${firstEp%%:*} - substr=":" - if [[ $firstEp =~ $substr ]];then - tmpPort=${firstEp#*:} - else - tmpPort="" - fi - if [[ "$tmpPort" != "" ]];then - echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" - else - echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" - fi - echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" - echo + tmpFqdn=${firstEp%%:*} + substr=":" + if [[ $firstEp =~ $substr ]];then + tmpPort=${firstEp#*:} + else + tmpPort="" + fi + if [[ "$tmpPort" != "" ]];then + echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" + else + echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" + fi + echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" + echo elif [ ! 
-z "$serverFqdn" ]; then - echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $serverFqdn${GREEN_DARK} to login into TDengine server${NC}" - echo + echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $serverFqdn${GREEN_DARK} to login into TDengine server${NC}" + echo fi - + echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" - echo + echo else # Only install client install_bin install_config @@ -945,6 +992,6 @@ elif [ "$verType" == "client" ]; then else install_TDengine client fi -else - echo "please input correct verType" +else + echo "please input correct verType" fi diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 240804ed95..0c755d9f72 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -204,7 +204,7 @@ function install_jemalloc() { if [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ]; then /usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib fi - if [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ]; then + if [ -f ${binary_dir}/build/lib/pkgconfig/jemalloc.pc ]; then /usr/bin/install -c -d /usr/local/lib/pkgconfig /usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig fi diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index e4d2d71b01..624f72278a 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -30,12 +30,12 @@ else install_dir="${release_dir}/TDengine-server-${version}" fi -# Directories and files. +# Directories and files if [ "$pagMode" == "lite" ]; then - strip ${build_dir}/bin/taosd + strip ${build_dir}/bin/taosd strip ${build_dir}/bin/taos bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh" -else +else bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator\ ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb" fi @@ -73,10 +73,43 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taos mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : +if [ -f ${build_dir}/bin/jemalloc-config ]; then + mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} + cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin + if [ -f ${build_dir}/bin/jemalloc.sh ]; then + cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin + fi + if [ -f ${build_dir}/bin/jeprof ]; then + cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin + fi + if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then + cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc + fi + if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then + cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib + ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so + fi + if [ -f ${build_dir}/lib/libjemalloc.a ]; then + cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib + fi + if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then + cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib + fi + if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then + cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig + fi + if [ -f 
${build_dir}/share/doc/jemalloc/jemalloc.html ]; then + cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc + fi + if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then + cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3 + fi +fi + if [ "$verMode" == "cluster" ]; then sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >> remove_temp.sh mv remove_temp.sh ${install_dir}/bin/remove.sh - + mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png rm -rf ${install_dir}/nginxd/png @@ -132,7 +165,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then rm -rf ${examples_dir}/JDBC/taosdemo/target fi - + cp -r ${examples_dir}/JDBC ${install_dir}/examples cp -r ${examples_dir}/matlab ${install_dir}/examples cp -r ${examples_dir}/python ${install_dir}/examples @@ -142,7 +175,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then cp -r ${examples_dir}/C# ${install_dir}/examples fi # Copy driver -mkdir -p ${install_dir}/driver +mkdir -p ${install_dir}/driver cp ${lib_files} ${install_dir}/driver # Copy connector @@ -168,7 +201,7 @@ fi # exit 1 -cd ${release_dir} +cd ${release_dir} if [ "$verMode" == "cluster" ]; then pkg_name=${install_dir}-${osType}-${cpuType} @@ -185,8 +218,8 @@ fi if [ "$verType" == "beta" ]; then pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} else echo "unknow verType, nor stabel or beta" exit 1 diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 67fd34ffd7..c002f2cf32 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -283,6 +283,7 @@ typedef struct SSqlStream { int64_t ctime; // stream created time int64_t stime; // stream next executed time int64_t etime; // stream end query time, when time is larger then etime, the stream will be closed + int64_t ltime; // stream last row time in stream table SInterval interval; void * pTimer; diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 89b024eb17..cd9853df03 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -774,6 +774,10 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC index = 0; sToken = tStrGetToken(sql, &index, false); + if (sToken.type == TK_ILLEGAL) { + return tscSQLSyntaxErrMsg(pCmd->payload, "unrecognized token", sToken.z); + } + if (sToken.type == TK_RP) { break; } diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 8966f3234a..f199a57987 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1204,7 +1204,6 @@ static int insertBatchStmtExecute(STscStmt* pStmt) { return pStmt->pSql->res.code; } - int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { SSqlCmd *pCmd = &pSql->cmd; int32_t ret = TSDB_CODE_SUCCESS; @@ -1234,28 +1233,28 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { pStmt->mtb.tagSet = true; sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); - if (sToken.n > 0 && sToken.type == TK_VALUES) { + if (sToken.n > 0 && (sToken.type == TK_VALUES || sToken.type == TK_LP)) { return TSDB_CODE_SUCCESS; } if (sToken.n <= 0 || sToken.type != TK_USING) { - return TSDB_CODE_TSC_INVALID_OPERATION; + return 
tscSQLSyntaxErrMsg(pCmd->payload, "keywords USING is expected", sToken.z); } sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); if (sToken.n <= 0 || ((sToken.type != TK_ID) && (sToken.type != TK_STRING))) { - return TSDB_CODE_TSC_INVALID_OPERATION; + return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z); } pStmt->mtb.stbname = sToken; sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); if (sToken.n <= 0 || sToken.type != TK_TAGS) { - return TSDB_CODE_TSC_INVALID_OPERATION; + return tscSQLSyntaxErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z); } sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); if (sToken.n <= 0 || sToken.type != TK_LP) { - return TSDB_CODE_TSC_INVALID_OPERATION; + return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z); } pStmt->mtb.tags = taosArrayInit(4, sizeof(SStrToken)); @@ -1298,9 +1297,6 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { return TSDB_CODE_SUCCESS; } - - - int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAOS_BIND* tags) { size_t tagNum = taosArrayGetSize(pStmt->mtb.tags); size_t size = 1048576; @@ -1386,8 +1382,6 @@ int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAO return TSDB_CODE_SUCCESS; } - - //////////////////////////////////////////////////////////////////////////////// // interface functions diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index a5e3fbc262..f34434e66c 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -396,11 +396,18 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg2 = "name too long"; SCreateDbInfo* pCreateDB = &(pInfo->pMiscInfo->dbOpt); - if (tscValidateName(&pCreateDB->dbname) != TSDB_CODE_SUCCESS) { + if (pCreateDB->dbname.n >= TSDB_DB_NAME_LEN) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + + char buf[TSDB_DB_NAME_LEN] = {0}; + SStrToken token = taosTokenDup(&pCreateDB->dbname, buf, tListLen(buf)); + + if (tscValidateName(&token) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } - int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), &(pCreateDB->dbname)); + int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), &token); if (ret != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } @@ -7482,6 +7489,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf const char* msg1 = "point interpolation query needs timestamp"; const char* msg2 = "too many tables in from clause"; const char* msg3 = "start(end) time of query range required or time range too large"; + const char* msg4 = "interval query not supported, since the result of sub query not include valid timestamp column"; const char* msg9 = "only tag query not compatible with normal column filter"; int32_t code = TSDB_CODE_SUCCESS; @@ -7540,11 +7548,25 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf if (validateIntervalNode(pSql, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } else { - if (isTimeWindowQuery(pQueryInfo) && - (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) { - return TSDB_CODE_TSC_INVALID_OPERATION; + if (isTimeWindowQuery(pQueryInfo)) { + // check if the first column of the nest query result is timestamp column + SColumn* pCol = taosArrayGetP(pQueryInfo->colList, 
0); + if (pCol->info.type != TSDB_DATA_TYPE_TIMESTAMP) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + + if (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } } } + + // set order by info + STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta; + if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMeta)) != + TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } } else { pQueryInfo->command = TSDB_SQL_SELECT; @@ -7697,8 +7719,8 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf SExprInfo** p = NULL; int32_t numOfExpr = 0; + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); code = createProjectionExpr(pQueryInfo, pTableMetaInfo, &p, &numOfExpr); - if (pQueryInfo->exprList1 == NULL) { pQueryInfo->exprList1 = taosArrayInit(4, POINTER_BYTES); } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index a74350496a..f997d487d1 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -477,7 +477,6 @@ int doBuildAndSendMsg(SSqlObj *pSql) { pCmd->command == TSDB_SQL_INSERT || pCmd->command == TSDB_SQL_CONNECT || pCmd->command == TSDB_SQL_HB || -// pCmd->command == TSDB_SQL_META || pCmd->command == TSDB_SQL_STABLEVGROUP) { pRes->code = tscBuildMsg[pCmd->command](pSql, NULL); } @@ -2470,8 +2469,8 @@ int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVg pNew->fp = fp; pNew->param = (void *)pSql->self; - tscDebug("0x%"PRIx64" metaRid from %" PRId64 " to %" PRId64 , pSql->self, pSql->metaRid, pNew->self); - + tscDebug("0x%"PRIx64" metaRid from 0x%" PRIx64 " to 0x%" PRIx64 , pSql->self, pSql->metaRid, pNew->self); + pSql->metaRid = pNew->self; int32_t code = tscBuildAndSendRequest(pNew, NULL); if (code == TSDB_CODE_SUCCESS) { diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 1ffa6416b2..554ce351eb 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -627,7 +627,7 @@ static bool hasAdditionalErrorInfo(int32_t code, SSqlCmd *pCmd) { char *z = NULL; if (len > 0) { - z = strstr(pCmd->payload, "invalid SQL"); + z = strstr(pCmd->payload, "invalid operation"); if (z == NULL) { z = strstr(pCmd->payload, "syntax error"); } diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index 73a3fbafc3..2fc18943da 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -24,6 +24,7 @@ #include "tutil.h" #include "tscProfile.h" +#include "tscSubquery.h" static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOfRows); static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOfRows); @@ -47,8 +48,8 @@ static bool isProjectStream(SQueryInfo* pQueryInfo) { static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, int16_t prec) { float retryRangeFactor = 0.3f; - int64_t retryDelta = (int64_t)(tsStreamCompRetryDelay * retryRangeFactor); - retryDelta = ((rand() % retryDelta) + tsStreamCompRetryDelay) * 1000L; + int64_t retryDelta = (int64_t)(tsRetryStreamCompDelay * retryRangeFactor); + retryDelta = ((rand() % retryDelta) + tsRetryStreamCompDelay) * 1000L; if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') { // change to ms @@ -575,6 +576,14 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) { pStream->stime = tscGetStreamStartTimestamp(pSql, pStream, pStream->stime); + // set stime with ltime if ltime > 
stime + const char* dstTable = pStream->dstTable? pStream->dstTable: ""; + tscDebug(" CQ table=%s ltime is %"PRId64, dstTable, pStream->ltime); + if(pStream->ltime != INT64_MIN && pStream->ltime > pStream->stime) { + tscWarn(" CQ set stream %s stime=%"PRId64" replace with ltime=%"PRId64" if ltime>0 ", dstTable, pStream->stime, pStream->ltime); + pStream->stime = pStream->ltime; + } + int64_t starttime = tscGetLaunchTimestamp(pStream); pCmd->command = TSDB_SQL_SELECT; @@ -590,7 +599,66 @@ void tscSetStreamDestTable(SSqlStream* pStream, const char* dstTable) { pStream->dstTable = dstTable; } -TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), +// fetchFp call back +void fetchFpStreamLastRow(void* param ,TAOS_RES* res, int num) { + SSqlStream* pStream = (SSqlStream*)param; + SSqlObj* pSql = res; + + // get row data set to ltime + tscSetSqlOwner(pSql); + TAOS_ROW row = doSetResultRowData(pSql); + if( row && row[0] ) { + pStream->ltime = *((int64_t*)row[0]); + const char* dstTable = pStream->dstTable? pStream->dstTable: ""; + tscDebug(" CQ stream table=%s last row time=%"PRId64" .", dstTable, pStream->ltime); + } + tscClearSqlOwner(pSql); + + // no condition call + tscCreateStream(param, pStream->pSql, TSDB_CODE_SUCCESS); + taos_free_result(res); +} + +// fp callback +void fpStreamLastRow(void* param ,TAOS_RES* res, int code) { + // check result successful + if (code != TSDB_CODE_SUCCESS) { + tscCreateStream(param, res, TSDB_CODE_SUCCESS); + taos_free_result(res); + return ; + } + + // asynchronous fetch last row data + taos_fetch_rows_a(res, fetchFpStreamLastRow, param); +} + +void cbParseSql(void* param, TAOS_RES* res, int code) { + // check result successful + SSqlStream* pStream = (SSqlStream*)param; + SSqlObj* pSql = pStream->pSql; + SSqlCmd* pCmd = &pSql->cmd; + if (code != TSDB_CODE_SUCCESS) { + pSql->res.code = code; + tscDebug("0x%"PRIx64" open stream parse sql failed, sql:%s, reason:%s, code:%s", pSql->self, pSql->sqlstr, pCmd->payload, tstrerror(code)); + pStream->fp(pStream->param, NULL, NULL); + return; + } + + // check dstTable valid + if(pStream->dstTable == NULL || strlen(pStream->dstTable) == 0) { + tscDebug(" cbParseSql dstTable is empty."); + tscCreateStream(param, res, code); + return ; + } + + // query stream last row time async + char sql[128] = ""; + sprintf(sql, "select last_row(*) from %s;", pStream->dstTable); + taos_query_a(pSql->pTscObj, sql, fpStreamLastRow, param); + return ; +} + +TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime, void *param, void (*callback)(void *)) { STscObj *pObj = (STscObj *)taos; if (pObj == NULL || pObj->signature != pObj) return NULL; @@ -613,11 +681,16 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p return NULL; } - pStream->stime = stime; - pStream->fp = fp; + pStream->ltime = INT64_MIN; + pStream->stime = stime; + pStream->fp = fp; pStream->callback = callback; - pStream->param = param; - pStream->pSql = pSql; + pStream->param = param; + pStream->pSql = pSql; + pSql->pStream = pStream; + pSql->param = pStream; + pSql->maxRetry = TSDB_MAX_REPLICA; + tscSetStreamDestTable(pStream, dstTable); pSql->pStream = pStream; pSql->param = pStream; @@ -640,10 +713,17 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr); + pSql->fp = cbParseSql; + 
pSql->fetchFp = cbParseSql; + + registerSqlObj(pSql); + int32_t code = tsParseSql(pSql, true); if (code == TSDB_CODE_SUCCESS) { - tscCreateStream(pStream, pSql, code); - } else if (code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + cbParseSql(pStream, pSql, code); + } else if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + tscDebug(" CQ taso_open_stream IN Process. sql=%s", sqlstr); + } else { tscError("0x%"PRIx64" open stream failed, sql:%s, code:%s", pSql->self, sqlstr, tstrerror(code)); taosReleaseRef(tscObjRef, pSql->self); free(pStream); @@ -653,6 +733,11 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p return pStream; } +TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), + int64_t stime, void *param, void (*callback)(void *)) { + return taos_open_stream_withname(taos, "", sqlstr, fp, stime, param, callback); +} + void taos_close_stream(TAOS_STREAM *handle) { SSqlStream *pStream = (SSqlStream *)handle; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 4b1b3e9080..2f32e775d6 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -1469,6 +1469,8 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR SSqlRes* pRes1 = &pParentSql->pSubs[i]->res; + pParentSql->res.precision = pRes1->precision; + if (pRes1->row > 0 && pRes1->numOfRows > 0) { tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self, pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 4aa9630489..8042f032c8 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -780,7 +780,9 @@ SSDataBlock* doGetDataBlock(void* param, bool* newgroup) { SSqlRes* pRes = &pSql->res; SSDataBlock* pBlock = pInput->block; - pOperator->pRuntimeEnv->current = pInput->pTableQueryInfo; + if (pOperator->pRuntimeEnv != NULL) { + pOperator->pRuntimeEnv->current = pInput->pTableQueryInfo; + } pBlock->info.rows = pRes->numOfRows; if (pRes->numOfRows != 0) { @@ -804,6 +806,24 @@ SSDataBlock* doGetDataBlock(void* param, bool* newgroup) { return pBlock; } +static void fetchNextBlockIfCompleted(SOperatorInfo* pOperator, bool* newgroup) { + SJoinOperatorInfo* pJoinInfo = pOperator->info; + + for (int32_t i = 0; i < pOperator->numOfUpstream; ++i) { + SJoinStatus* pStatus = &pJoinInfo->status[i]; + if (pStatus->pBlock == NULL || pStatus->index >= pStatus->pBlock->info.rows) { + pStatus->pBlock = pOperator->upstream[i]->exec(pOperator->upstream[i], newgroup); + pStatus->index = 0; + + if (pStatus->pBlock == NULL) { + pOperator->status = OP_EXEC_DONE; + pJoinInfo->resultInfo.total += pJoinInfo->pRes->info.rows; + break; + } + } + } +} + SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) { SOperatorInfo *pOperator = (SOperatorInfo*) param; if (pOperator->status == OP_EXEC_DONE) { @@ -816,19 +836,9 @@ SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) { pJoinInfo->pRes->info.rows = 0; while(1) { - for (int32_t i = 0; i < pOperator->numOfUpstream; ++i) { - SJoinStatus* pStatus = &pJoinInfo->status[i]; - if (pStatus->pBlock == NULL || pStatus->index >= pStatus->pBlock->info.rows) { - pStatus->pBlock = pOperator->upstream[i]->exec(pOperator->upstream[i], newgroup); - pStatus->index = 0; - - if (pStatus->pBlock == NULL) { - pOperator->status = OP_EXEC_DONE; - - pJoinInfo->resultInfo.total += pJoinInfo->pRes->info.rows; - return 
pJoinInfo->pRes; - } - } + fetchNextBlockIfCompleted(pOperator, newgroup); + if (pOperator->status == OP_EXEC_DONE) { + return pJoinInfo->pRes; } SJoinStatus* st0 = &pJoinInfo->status[0]; @@ -847,8 +857,12 @@ SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) { if (ts[st->index] < ts0[st0->index]) { // less than the first prefixEqual = false; + if ((++(st->index)) >= st->pBlock->info.rows) { - break; + fetchNextBlockIfCompleted(pOperator, newgroup); + if (pOperator->status == OP_EXEC_DONE) { + return pJoinInfo->pRes; + } } } else if (ts[st->index] > ts0[st0->index]) { // greater than the first; if (prefixEqual == true) { @@ -856,12 +870,19 @@ SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) { for (int32_t j = 0; j < i; ++j) { SJoinStatus* stx = &pJoinInfo->status[j]; if ((++(stx->index)) >= stx->pBlock->info.rows) { - break; + + fetchNextBlockIfCompleted(pOperator, newgroup); + if (pOperator->status == OP_EXEC_DONE) { + return pJoinInfo->pRes; + } } } } else { if ((++(st0->index)) >= st0->pBlock->info.rows) { - break; + fetchNextBlockIfCompleted(pOperator, newgroup); + if (pOperator->status == OP_EXEC_DONE) { + return pJoinInfo->pRes; + } } } } @@ -1132,6 +1153,19 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue memcpy(schema, pSchema, numOfCol1*sizeof(SSchema)); } + // update the exprinfo + int32_t numOfOutput = (int32_t)tscNumOfExprs(px); + for(int32_t i = 0; i < numOfOutput; ++i) { + SExprInfo* pex = taosArrayGetP(px->exprList, i); + int32_t colId = pex->base.colInfo.colId; + for(int32_t j = 0; j < pSourceOperator->numOfOutput; ++j) { + if (colId == schema[j].colId) { + pex->base.colInfo.colIndex = j; + break; + } + } + } + px->pQInfo = createQInfoFromQueryNode(px, &tableGroupInfo, pSourceOperator, NULL, NULL, MASTER_SCAN); tfree(pColumnInfo); tfree(schema); diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index fcae7a415f..8ee7329156 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -319,7 +319,7 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema); SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData); SDataCols *tdFreeDataCols(SDataCols *pCols); void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols); -int tdMergeDataCols(SDataCols *target, SDataCols *src, int rowsToMerge); +int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset); // ----------------- K-V data row structure /* diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index e07c3611d7..47460a5fab 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -39,6 +39,7 @@ extern int8_t tsEnableTelemetryReporting; extern char tsEmail[]; extern char tsArbitrator[]; extern int8_t tsArbOnline; +extern int64_t tsArbOnlineTimestamp; extern int32_t tsDnodeId; // common @@ -75,7 +76,7 @@ extern int32_t tsMinSlidingTime; extern int32_t tsMinIntervalTime; extern int32_t tsMaxStreamComputDelay; extern int32_t tsStreamCompStartDelay; -extern int32_t tsStreamCompRetryDelay; +extern int32_t tsRetryStreamCompDelay; extern float tsStreamComputDelayRatio; // the delayed computing ration of the whole time window extern int32_t tsProjectExecInterval; extern int64_t tsMaxRetentWindow; diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index f5b84e4c9a..7ae34d532c 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -441,30 +441,35 @@ void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, 
SDataCols *pCols) pCols->numOfRows++; } -int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge) { +int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset) { ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows); ASSERT(target->numOfCols == source->numOfCols); + int offset = 0; + + if (pOffset == NULL) { + pOffset = &offset; + } SDataCols *pTarget = NULL; - if (dataColsKeyLast(target) < dataColsKeyFirst(source)) { // No overlap + if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyFirst(source))) { // No overlap ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints); for (int i = 0; i < rowsToMerge; i++) { for (int j = 0; j < source->numOfCols; j++) { if (source->cols[j].len > 0) { - dataColAppendVal(target->cols + j, tdGetColDataOfRow(source->cols + j, i), target->numOfRows, + dataColAppendVal(target->cols + j, tdGetColDataOfRow(source->cols + j, i + (*pOffset)), target->numOfRows, target->maxPoints); } } target->numOfRows++; } + (*pOffset) += rowsToMerge; } else { pTarget = tdDupDataCols(target, true); if (pTarget == NULL) goto _err; int iter1 = 0; - int iter2 = 0; - tdMergeTwoDataCols(target, pTarget, &iter1, pTarget->numOfRows, source, &iter2, source->numOfRows, + tdMergeTwoDataCols(target, pTarget, &iter1, pTarget->numOfRows, source, pOffset, source->numOfRows, pTarget->numOfRows + rowsToMerge); } diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index d6bbc288ad..eae2fab32b 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -42,11 +42,12 @@ int32_t tsNumOfMnodes = 3; int8_t tsEnableVnodeBak = 1; int8_t tsEnableTelemetryReporting = 1; int8_t tsArbOnline = 0; +int64_t tsArbOnlineTimestamp = TSDB_ARB_DUMMY_TIME; char tsEmail[TSDB_FQDN_LEN] = {0}; int32_t tsDnodeId = 0; // common -int32_t tsRpcTimer = 1000; +int32_t tsRpcTimer = 300; int32_t tsRpcMaxTime = 600; // seconds; int32_t tsRpcForceTcp = 0; //disable this, means query, show command use udp protocol as default int32_t tsMaxShellConns = 50000; @@ -93,7 +94,7 @@ int32_t tsMaxStreamComputDelay = 20000; int32_t tsStreamCompStartDelay = 10000; // the stream computing delay time after executing failed, change accordingly -int32_t tsStreamCompRetryDelay = 10; +int32_t tsRetryStreamCompDelay = 10*1000; // The delayed computing ration. 10% of the whole computing time window by default. 
float tsStreamComputDelayRatio = 0.1f; @@ -710,7 +711,7 @@ static void doInitGlobalConfig(void) { taosInitConfigOption(cfg); cfg.option = "retryStreamCompDelay"; - cfg.ptr = &tsStreamCompRetryDelay; + cfg.ptr = &tsRetryStreamCompDelay; cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; cfg.minValue = 10; diff --git a/src/connector/go b/src/connector/go index 8ce6d86558..7a26c432f8 160000 --- a/src/connector/go +++ b/src/connector/go @@ -1 +1 @@ -Subproject commit 8ce6d86558afc8c0b50c10f990fd2b4270cf06fc +Subproject commit 7a26c432f8b4203e42344ff3290b9b9b01b983d5 diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index 179bad2ced..256e735285 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -16,13 +16,13 @@ */ package com.taosdata.jdbc; -import com.taosdata.jdbc.utils.TaosInfo; - import java.nio.ByteBuffer; import java.sql.SQLException; import java.sql.SQLWarning; import java.util.List; +import com.taosdata.jdbc.utils.TaosInfo; + /** * JNI connector */ @@ -276,23 +276,14 @@ public class TSDBJNIConnector { private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes); public long prepareStmt(String sql) throws SQLException { - Long stmt = 0L; - try { - stmt = prepareStmtImp(sql.getBytes(), this.taos); - } catch (Exception e) { - e.printStackTrace(); - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING); - } - - if (stmt == TSDBConstants.JNI_CONNECTION_NULL) { + Long stmt = prepareStmtImp(sql.getBytes(), this.taos); + if (stmt == TSDBConstants.JNI_TDENGINE_ERROR) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_SQL); + } else if (stmt == TSDBConstants.JNI_CONNECTION_NULL) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL); - } - - if (stmt == TSDBConstants.JNI_SQL_NULL) { + } else if (stmt == TSDBConstants.JNI_SQL_NULL) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL); - } - - if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) { + } else if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY); } diff --git a/src/connector/python/taos/__init__.py b/src/connector/python/taos/__init__.py index 9732635738..52c6db311e 100644 --- a/src/connector/python/taos/__init__.py +++ b/src/connector/python/taos/__init__.py @@ -2,6 +2,10 @@ from .connection import TDengineConnection from .cursor import TDengineCursor +# For some reason, the following is needed for VS Code (through PyLance) to +# recognize that "error" is a valid module of the "taos" package. 
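The re-export added on the next line makes taos.error resolve for static tooling and lets applications catch connector errors without importing the submodule explicitly. A minimal usage sketch under stated assumptions: the host, database, and table names below are illustrative only, and a ProgrammingError is expected when the SQL references a missing table:

    import taos
    from taos.error import ProgrammingError

    conn = taos.connect(host="localhost")        # connection parameters are assumed
    cursor = conn.cursor()
    try:
        cursor.execute("SELECT speed FROM db.no_such_table")
    except ProgrammingError as err:              # ProgrammingError lives in taos.error
        print("query rejected:", err)
    finally:
        cursor.close()
        conn.close()
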
+from .error import ProgrammingError + # Globals threadsafety = 0 paramstyle = 'pyformat' diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c index 5d5d5f339e..ee4be02b90 100644 --- a/src/cq/src/cqMain.c +++ b/src/cq/src/cqMain.c @@ -437,6 +437,10 @@ static void cqProcessCreateTimer(void *param, void *tmrId) { taosReleaseRef(cqObjRef, (int64_t)param); } +// inner implement in tscStream.c +TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* desName, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), + int64_t stime, void *param, void (*callback)(void *)); + static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) { pObj->pContext = pContext; @@ -449,11 +453,10 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) { pObj->tmrId = 0; if (pObj->pStream == NULL) { - pObj->pStream = taos_open_stream(pContext->dbConn, pObj->sqlStr, cqProcessStreamRes, INT64_MIN, (void *)pObj->rid, NULL); + pObj->pStream = taos_open_stream_withname(pContext->dbConn, pObj->dstTable, pObj->sqlStr, cqProcessStreamRes, INT64_MIN, (void *)pObj->rid, NULL); // TODO the pObj->pStream may be released if error happens if (pObj->pStream) { - tscSetStreamDestTable(pObj->pStream, pObj->dstTable); pContext->num++; cDebug("vgId:%d, id:%d CQ:%s is opened", pContext->vgId, pObj->tid, pObj->sqlStr); } else { diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index 5bdd197aa9..e6eec73681 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -375,6 +375,8 @@ do { \ #define TSDB_MAX_WAL_SIZE (1024*1024*3) +#define TSDB_ARB_DUMMY_TIME 4765104000000 // 2121-01-01 00:00:00.000, :P + typedef enum { TAOS_QTYPE_RPC = 0, TAOS_QTYPE_FWD = 1, diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 09f632ea32..ab15e851e7 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -215,11 +215,11 @@ int32_t* taosGetErrno(); #define TSDB_CODE_VND_IS_FLOWCTRL TAOS_DEF_ERROR_CODE(0, 0x050C) //"Database memory is full for waiting commit") #define TSDB_CODE_VND_IS_DROPPING TAOS_DEF_ERROR_CODE(0, 0x050D) //"Database is dropping") #define TSDB_CODE_VND_IS_BALANCING TAOS_DEF_ERROR_CODE(0, 0x050E) //"Database is balancing") +#define TSDB_CODE_VND_IS_CLOSING TAOS_DEF_ERROR_CODE(0, 0x0510) //"Database is closing") #define TSDB_CODE_VND_NOT_SYNCED TAOS_DEF_ERROR_CODE(0, 0x0511) //"Database suspended") #define TSDB_CODE_VND_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0512) //"Database write operation denied") #define TSDB_CODE_VND_IS_SYNCING TAOS_DEF_ERROR_CODE(0, 0x0513) //"Database is syncing") #define TSDB_CODE_VND_INVALID_TSDB_STATE TAOS_DEF_ERROR_CODE(0, 0x0514) //"Invalid tsdb state") -#define TSDB_CODE_VND_IS_CLOSING TAOS_DEF_ERROR_CODE(0, 0x0515) //"Database is closing") // tsdb #define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600) //"Invalid table ID") diff --git a/src/inc/tfs.h b/src/inc/tfs.h index 4ed21bc6e1..e72620eca6 100644 --- a/src/inc/tfs.h +++ b/src/inc/tfs.h @@ -31,6 +31,8 @@ typedef struct { #define TFS_UNDECIDED_ID -1 #define TFS_PRIMARY_LEVEL 0 #define TFS_PRIMARY_ID 0 +#define TFS_MIN_LEVEL 0 +#define TFS_MAX_LEVEL (TSDB_MAX_TIERS - 1) // FS APIs ==================================== typedef struct { diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 376836369d..05d29daad5 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -409,6 +409,9 @@ void tsdbDecCommitRef(int vgId); int tsdbSyncSend(void *pRepo, SOCKET socketFd); int tsdbSyncRecv(void *pRepo, SOCKET socketFd); +// For TSDB Compact +int tsdbCompact(STsdbRepo *pRepo); + #ifdef __cplusplus } #endif diff 
--git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index 51f16e4bc6..7702978af1 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -941,7 +941,7 @@ static int32_t mnodeRetrieveDnodes(SShowObj *pShow, char *data, int32_t rows, vo cols++; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; - *(int64_t *)pWrite = 0; + *(int64_t *)pWrite = tsArbOnlineTimestamp; cols++; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index ac3e5d86ec..e9acd5b9bc 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -656,8 +656,6 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void * dnodeReportStep("mnode-sdb", stepDesc, 0); } - if (qtype == TAOS_QTYPE_QUERY) return sdbPerformDeleteAction(pHead, pTable); - pthread_mutex_lock(&tsSdbMgmt.mutex); if (pHead->version == 0) { @@ -721,13 +719,11 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void * if (action == SDB_ACTION_INSERT) { return sdbPerformInsertAction(pHead, pTable); } else if (action == SDB_ACTION_DELETE) { - //if (qtype == TAOS_QTYPE_FWD) { - // Drop database/stable may take a long time and cause a timeout, so we confirm first then reput it into queue - // sdbWriteFwdToQueue(1, hparam, TAOS_QTYPE_QUERY, unused); - // return TSDB_CODE_SUCCESS; - //} else { - return sdbPerformDeleteAction(pHead, pTable); - //} + if (qtype == TAOS_QTYPE_FWD) { + // Drop database/stable may take a long time and cause a timeout, so we confirm first + syncConfirmForward(tsSdbMgmt.sync, pHead->version, TSDB_CODE_SUCCESS, false); + } + return sdbPerformDeleteAction(pHead, pTable); } else if (action == SDB_ACTION_UPDATE) { return sdbPerformUpdateAction(pHead, pTable); } else { @@ -1140,7 +1136,10 @@ static void *sdbWorkerFp(void *pWorker) { sdbConfirmForward(1, pRow, pRow->code); } else { if (qtype == TAOS_QTYPE_FWD) { - syncConfirmForward(tsSdbMgmt.sync, pRow->pHead.version, pRow->code, false); + int32_t action = pRow->pHead.msgType % 10; + if (action != SDB_ACTION_DELETE) { + syncConfirmForward(tsSdbMgmt.sync, pRow->pHead.version, pRow->code, false); + } } sdbFreeFromQueue(pRow); } diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 4935633b9c..be0716ce99 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -2490,7 +2490,6 @@ static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) { tmp += POINTER_BYTES * pCtx->param[0].i64; size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen; -// assert(pCtx->param[0].i64 > 0); for (int32_t i = 0; i < pCtx->param[0].i64; ++i) { pTopBotInfo->res[i] = (tValuePair*) tmp; @@ -2499,7 +2498,6 @@ static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) { } } - bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const char *maxval) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (pResInfo == NULL) { @@ -2579,13 +2577,14 @@ static void top_function(SQLFunctionCtx *pCtx) { for (int32_t i = 0; i < pCtx->size; ++i) { char *data = GET_INPUT_DATA(pCtx, i); - TSKEY ts = GET_TS_DATA(pCtx, i); - if (pCtx->hasNull && isNull(data, pCtx->inputType)) { continue; } notNullElems++; + + // NOTE: Set the default timestamp if it is missing [todo refactor] + TSKEY ts = (pCtx->ptsList != NULL)? 
GET_TS_DATA(pCtx, i):0; do_top_function_add(pRes, (int32_t)pCtx->param[0].i64, data, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0); } @@ -2658,13 +2657,13 @@ static void bottom_function(SQLFunctionCtx *pCtx) { for (int32_t i = 0; i < pCtx->size; ++i) { char *data = GET_INPUT_DATA(pCtx, i); - TSKEY ts = GET_TS_DATA(pCtx, i); - if (pCtx->hasNull && isNull(data, pCtx->inputType)) { continue; } - + notNullElems++; + // NOTE: Set the default timestamp if it is missing [todo refactor] + TSKEY ts = (pCtx->ptsList != NULL)? GET_TS_DATA(pCtx, i):0; do_bottom_function_add(pRes, (int32_t)pCtx->param[0].i64, data, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0); } @@ -2742,7 +2741,7 @@ static void top_bottom_func_finalizer(SQLFunctionCtx *pCtx) { if (pCtx->param[1].i64 == PRIMARYKEY_TIMESTAMP_COL_INDEX) { __compar_fn_t comparator = (pCtx->param[2].i64 == TSDB_ORDER_ASC) ? resAscComparFn : resDescComparFn; qsort(tvp, (size_t)pResInfo->numOfRes, POINTER_BYTES, comparator); - } else if (pCtx->param[1].i64 > PRIMARYKEY_TIMESTAMP_COL_INDEX) { + } else /*if (pCtx->param[1].i64 > PRIMARYKEY_TIMESTAMP_COL_INDEX)*/ { __compar_fn_t comparator = (pCtx->param[2].i64 == TSDB_ORDER_ASC) ? resDataAscComparFn : resDataDescComparFn; qsort(tvp, (size_t)pResInfo->numOfRes, POINTER_BYTES, comparator); } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 8a8ec0cf55..25e7e446bd 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -950,7 +950,13 @@ static void doSetInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, uint32_t status = aAggs[pCtx[i].functionId].status; if ((status & (TSDB_FUNCSTATE_SELECTIVITY | TSDB_FUNCSTATE_NEED_TS)) != 0) { SColumnInfoData* tsInfo = taosArrayGet(pBlock->pDataBlock, 0); - pCtx[i].ptsList = (int64_t*) tsInfo->pData; + // In case of the top/bottom query again the nest query result, which has no timestamp column + // don't set the ptsList attribute. + if (tsInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP) { + pCtx[i].ptsList = (int64_t*) tsInfo->pData; + } else { + pCtx[i].ptsList = NULL; + } } } else if (TSDB_COL_IS_UD_COL(pCol->flag) && (pOperator->pRuntimeEnv->scanFlag == MERGE_STAGE)) { SColIndex* pColIndex = &pOperator->pExpr[i].base.colInfo; @@ -4228,6 +4234,10 @@ static void updateTableIdInfo(STableQueryInfo* pTableQueryInfo, SSDataBlock* pBl int32_t step = GET_FORWARD_DIRECTION_FACTOR(order); pTableQueryInfo->lastKey = ((order == TSDB_ORDER_ASC)? 
pBlock->info.window.ekey:pBlock->info.window.skey) + step; + if (pTableQueryInfo->pTable == NULL) { + return; + } + STableIdInfo tidInfo = createTableIdInfo(pTableQueryInfo); STableIdInfo *idinfo = taosHashGet(pTableIdInfo, &tidInfo.tid, sizeof(tidInfo.tid)); if (idinfo != NULL) { @@ -4905,8 +4915,7 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) { updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows); arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput); - - if (pTableQueryInfo != NULL) { // TODO refactor + if (pTableQueryInfo != NULL) { updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order); } @@ -4949,8 +4958,7 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) { updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows); arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput); - - if (pTableQueryInfo != NULL) { // TODO refactor + if (pTableQueryInfo != NULL) { updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order); } diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 493d5f117a..d34536543c 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -1150,7 +1150,12 @@ static void syncSetupPeerConnection(SSyncPeer *pPeer) { pPeer->peerFd = connFd; pPeer->role = TAOS_SYNC_ROLE_UNSYNCED; pPeer->pConn = syncAllocateTcpConn(tsTcpPool, pPeer->rid, connFd); - if (pPeer->isArb) tsArbOnline = 1; + if (pPeer->isArb) { + tsArbOnline = 1; + if (tsArbOnlineTimestamp == TSDB_ARB_DUMMY_TIME) { + tsArbOnlineTimestamp = taosGetTimestampMs(); + } + } } else { sDebug("%s, failed to setup peer connection to server since %s, try later", pPeer->id, strerror(errno)); taosCloseSocket(connFd); diff --git a/src/tsdb/CMakeLists.txt b/src/tsdb/CMakeLists.txt index 31d52aae7d..8080a61a6c 100644 --- a/src/tsdb/CMakeLists.txt +++ b/src/tsdb/CMakeLists.txt @@ -6,6 +6,10 @@ AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(tsdb ${SRC}) TARGET_LINK_LIBRARIES(tsdb tfs common tutil) +IF (TD_TSDB_PLUGINS) + TARGET_LINK_LIBRARIES(tsdb tsdbPlugins) +ENDIF () + IF (TD_LINUX) # Someone has no gtest directory, so comment it # ADD_SUBDIRECTORY(tests) diff --git a/src/tsdb/inc/tsdbCommit.h b/src/tsdb/inc/tsdbCommit.h index 6bd5dc4325..cde728b170 100644 --- a/src/tsdb/inc/tsdbCommit.h +++ b/src/tsdb/inc/tsdbCommit.h @@ -29,10 +29,17 @@ typedef struct { int64_t size; } SKVRecord; +#define TSDB_DEFAULT_BLOCK_ROWS(maxRows) ((maxRows)*4 / 5) + void tsdbGetRtnSnap(STsdbRepo *pRepo, SRtn *pRtn); int tsdbEncodeKVRecord(void **buf, SKVRecord *pRecord); void *tsdbDecodeKVRecord(void *buf, SKVRecord *pRecord); void *tsdbCommitData(STsdbRepo *pRepo); +int tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn); +int tsdbWriteBlockInfoImpl(SDFile *pHeadf, STable *pTable, SArray *pSupA, SArray *pSubA, void **ppBuf, SBlockIdx *pIdx); +int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf); +int tsdbWriteBlockImpl(STsdbRepo *pRepo, STable *pTable, SDFile *pDFile, SDataCols *pDataCols, SBlock *pBlock, + bool isLast, bool isSuper, void **ppBuf, void **ppCBuf); int tsdbApplyRtn(STsdbRepo *pRepo); static FORCE_INLINE int tsdbGetFidLevel(int fid, SRtn *pRtn) { diff --git a/src/tsdb/inc/tsdbCommitQueue.h b/src/tsdb/inc/tsdbCommitQueue.h index c2353391f9..6342c036b7 100644 --- a/src/tsdb/inc/tsdbCommitQueue.h +++ b/src/tsdb/inc/tsdbCommitQueue.h @@ -16,6 +16,8 @@ #ifndef _TD_TSDB_COMMIT_QUEUE_H_ #define 
_TD_TSDB_COMMIT_QUEUE_H_ -int tsdbScheduleCommit(STsdbRepo *pRepo); +typedef enum { COMMIT_REQ, COMPACT_REQ } TSDB_REQ_T; + +int tsdbScheduleCommit(STsdbRepo *pRepo, TSDB_REQ_T req); #endif /* _TD_TSDB_COMMIT_QUEUE_H_ */ \ No newline at end of file diff --git a/src/tsdb/inc/tsdbCompact.h b/src/tsdb/inc/tsdbCompact.h new file mode 100644 index 0000000000..5a382de5e0 --- /dev/null +++ b/src/tsdb/inc/tsdbCompact.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ +#ifndef _TD_TSDB_COMPACT_H_ +#define _TD_TSDB_COMPACT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +void *tsdbCompactImpl(STsdbRepo *pRepo); + +#ifdef __cplusplus +} +#endif + +#endif /* _TD_TSDB_COMPACT_H_ */ \ No newline at end of file diff --git a/src/tsdb/inc/tsdbint.h b/src/tsdb/inc/tsdbint.h index 945b74af35..049c1bdb6e 100644 --- a/src/tsdb/inc/tsdbint.h +++ b/src/tsdb/inc/tsdbint.h @@ -64,6 +64,8 @@ extern "C" { #include "tsdbReadImpl.h" // Commit #include "tsdbCommit.h" +// Compact +#include "tsdbCompact.h" // Commit Queue #include "tsdbCommitQueue.h" // Main definitions diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index f2bbe347bd..82cc6f07f7 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -51,7 +51,7 @@ typedef struct { #define TSDB_COMMIT_LAST_FILE(ch) TSDB_DFILE_IN_SET(TSDB_COMMIT_WRITE_FSET(ch), TSDB_FILE_LAST) #define TSDB_COMMIT_BUF(ch) TSDB_READ_BUF(&((ch)->readh)) #define TSDB_COMMIT_COMP_BUF(ch) TSDB_READ_COMP_BUF(&((ch)->readh)) -#define TSDB_COMMIT_DEFAULT_ROWS(ch) (TSDB_COMMIT_REPO(ch)->config.maxRowsPerFileBlock * 4 / 5) +#define TSDB_COMMIT_DEFAULT_ROWS(ch) TSDB_DEFAULT_BLOCK_ROWS(TSDB_COMMIT_REPO(ch)->config.maxRowsPerFileBlock) #define TSDB_COMMIT_TXN_VERSION(ch) FS_TXN_VERSION(REPO_FS(TSDB_COMMIT_REPO(ch))) static int tsdbCommitMeta(STsdbRepo *pRepo); @@ -72,7 +72,6 @@ static int tsdbCommitToTable(SCommitH *pCommith, int tid); static int tsdbSetCommitTable(SCommitH *pCommith, STable *pTable); static int tsdbComparKeyBlock(const void *arg1, const void *arg2); static int tsdbWriteBlockInfo(SCommitH *pCommih); -static int tsdbWriteBlockIdx(SCommitH *pCommih); static int tsdbCommitMemData(SCommitH *pCommith, SCommitIter *pIter, TSKEY keyLimit, bool toData); static int tsdbMergeMemData(SCommitH *pCommith, SCommitIter *pIter, int bidx); static int tsdbMoveBlock(SCommitH *pCommith, int bidx); @@ -86,7 +85,6 @@ static void tsdbCloseCommitFile(SCommitH *pCommith, bool hasError); static bool tsdbCanAddSubBlock(SCommitH *pCommith, SBlock *pBlock, SMergeInfo *pInfo); static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget, TSKEY maxKey, int maxRows, int8_t update); -static int tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn); void *tsdbCommitData(STsdbRepo *pRepo) { if (pRepo->imem == NULL) { @@ -117,6 +115,151 @@ _err: return NULL; } +int tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn) { + SDiskID did; + SDFileSet nSet; + 
STsdbFS * pfs = REPO_FS(pRepo); + int level; + + ASSERT(pSet->fid >= pRtn->minFid); + + level = tsdbGetFidLevel(pSet->fid, pRtn); + + tfsAllocDisk(level, &(did.level), &(did.id)); + if (did.level == TFS_UNDECIDED_LEVEL) { + terrno = TSDB_CODE_TDB_NO_AVAIL_DISK; + return -1; + } + + if (did.level > TSDB_FSET_LEVEL(pSet)) { + // Need to move the FSET to higher level + tsdbInitDFileSet(&nSet, did, REPO_ID(pRepo), pSet->fid, FS_TXN_VERSION(pfs)); + + if (tsdbCopyDFileSet(pSet, &nSet) < 0) { + tsdbError("vgId:%d failed to copy FSET %d from level %d to level %d since %s", REPO_ID(pRepo), pSet->fid, + TSDB_FSET_LEVEL(pSet), did.level, tstrerror(terrno)); + return -1; + } + + if (tsdbUpdateDFileSet(pfs, &nSet) < 0) { + return -1; + } + + tsdbInfo("vgId:%d FSET %d is copied from level %d disk id %d to level %d disk id %d", REPO_ID(pRepo), pSet->fid, + TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet), did.level, did.id); + } else { + // On a correct level + if (tsdbUpdateDFileSet(pfs, pSet) < 0) { + return -1; + } + } + + return 0; +} + +int tsdbWriteBlockInfoImpl(SDFile *pHeadf, STable *pTable, SArray *pSupA, SArray *pSubA, void **ppBuf, + SBlockIdx *pIdx) { + size_t nSupBlocks; + size_t nSubBlocks; + uint32_t tlen; + SBlockInfo *pBlkInfo; + int64_t offset; + SBlock * pBlock; + + memset(pIdx, 0, sizeof(*pIdx)); + + nSupBlocks = taosArrayGetSize(pSupA); + nSubBlocks = (pSubA == NULL) ? 0 : taosArrayGetSize(pSubA); + + if (nSupBlocks <= 0) { + // No data (data all deleted) + return 0; + } + + tlen = (uint32_t)(sizeof(SBlockInfo) + sizeof(SBlock) * (nSupBlocks + nSubBlocks) + sizeof(TSCKSUM)); + if (tsdbMakeRoom(ppBuf, tlen) < 0) return -1; + pBlkInfo = *ppBuf; + + pBlkInfo->delimiter = TSDB_FILE_DELIMITER; + pBlkInfo->tid = TABLE_TID(pTable); + pBlkInfo->uid = TABLE_UID(pTable); + + memcpy((void *)(pBlkInfo->blocks), taosArrayGet(pSupA, 0), nSupBlocks * sizeof(SBlock)); + if (nSubBlocks > 0) { + memcpy((void *)(pBlkInfo->blocks + nSupBlocks), taosArrayGet(pSubA, 0), nSubBlocks * sizeof(SBlock)); + + for (int i = 0; i < nSupBlocks; i++) { + pBlock = pBlkInfo->blocks + i; + + if (pBlock->numOfSubBlocks > 1) { + pBlock->offset += (sizeof(SBlockInfo) + sizeof(SBlock) * nSupBlocks); + } + } + } + + taosCalcChecksumAppend(0, (uint8_t *)pBlkInfo, tlen); + + if (tsdbAppendDFile(pHeadf, (void *)pBlkInfo, tlen, &offset) < 0) { + return -1; + } + + tsdbUpdateDFileMagic(pHeadf, POINTER_SHIFT(pBlkInfo, tlen - sizeof(TSCKSUM))); + + // Set pIdx + pBlock = taosArrayGetLast(pSupA); + + pIdx->tid = TABLE_TID(pTable); + pIdx->uid = TABLE_UID(pTable); + pIdx->hasLast = pBlock->last ? 
1 : 0; + pIdx->maxKey = pBlock->keyLast; + pIdx->numOfBlocks = (uint32_t)nSupBlocks; + pIdx->len = tlen; + pIdx->offset = (uint32_t)offset; + + return 0; +} + +int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf) { + SBlockIdx *pBlkIdx; + size_t nidx = taosArrayGetSize(pIdxA); + int tlen = 0, size; + int64_t offset; + + if (nidx <= 0) { + // All data are deleted + pHeadf->info.offset = 0; + pHeadf->info.len = 0; + return 0; + } + + for (size_t i = 0; i < nidx; i++) { + pBlkIdx = (SBlockIdx *)taosArrayGet(pIdxA, i); + + size = tsdbEncodeSBlockIdx(NULL, pBlkIdx); + if (tsdbMakeRoom(ppBuf, tlen + size) < 0) return -1; + + void *ptr = POINTER_SHIFT(*ppBuf, tlen); + tsdbEncodeSBlockIdx(&ptr, pBlkIdx); + + tlen += size; + } + + tlen += sizeof(TSCKSUM); + if (tsdbMakeRoom(ppBuf, tlen) < 0) return -1; + taosCalcChecksumAppend(0, (uint8_t *)(*ppBuf), tlen); + + if (tsdbAppendDFile(pHeadf, *ppBuf, tlen, &offset) < tlen) { + return -1; + } + + tsdbUpdateDFileMagic(pHeadf, POINTER_SHIFT(*ppBuf, tlen - sizeof(TSCKSUM))); + pHeadf->info.offset = (uint32_t)offset; + pHeadf->info.len = tlen; + + return 0; +} + + // =================== Commit Meta Data static int tsdbCommitMeta(STsdbRepo *pRepo) { STsdbFS * pfs = REPO_FS(pRepo); @@ -446,7 +589,8 @@ static int tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) { } } - if (tsdbWriteBlockIdx(pCommith) < 0) { + if (tsdbWriteBlockIdx(TSDB_COMMIT_HEAD_FILE(pCommith), pCommith->aBlkIdx, (void **)(&(TSDB_COMMIT_BUF(pCommith)))) < + 0) { tsdbError("vgId:%d failed to write SBlockIdx part to FSET %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); tsdbCloseCommitFile(pCommith, true); // revert the file change @@ -754,23 +898,21 @@ static int tsdbComparKeyBlock(const void *arg1, const void *arg2) { } } -static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCols, SBlock *pBlock, bool isLast, - bool isSuper) { - STsdbRepo * pRepo = TSDB_COMMIT_REPO(pCommith); +int tsdbWriteBlockImpl(STsdbRepo *pRepo, STable *pTable, SDFile *pDFile, SDataCols *pDataCols, SBlock *pBlock, + bool isLast, bool isSuper, void **ppBuf, void **ppCBuf) { STsdbCfg * pCfg = REPO_CFG(pRepo); SBlockData *pBlockData; int64_t offset = 0; - STable * pTable = TSDB_COMMIT_TABLE(pCommith); int rowsToWrite = pDataCols->numOfRows; ASSERT(rowsToWrite > 0 && rowsToWrite <= pCfg->maxRowsPerFileBlock); ASSERT((!isLast) || rowsToWrite < pCfg->minRowsPerFileBlock); // Make buffer space - if (tsdbMakeRoom((void **)(&TSDB_COMMIT_BUF(pCommith)), TSDB_BLOCK_STATIS_SIZE(pDataCols->numOfCols)) < 0) { + if (tsdbMakeRoom(ppBuf, TSDB_BLOCK_STATIS_SIZE(pDataCols->numOfCols)) < 0) { return -1; } - pBlockData = (SBlockData *)TSDB_COMMIT_BUF(pCommith); + pBlockData = (SBlockData *)(*ppBuf); // Get # of cols not all NULL(not including key column) int nColsNotAllNull = 0; @@ -816,23 +958,23 @@ static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCo void * tptr; // Make room - if (tsdbMakeRoom((void **)(&TSDB_COMMIT_BUF(pCommith)), lsize + tlen + COMP_OVERFLOW_BYTES + sizeof(TSCKSUM)) < 0) { + if (tsdbMakeRoom(ppBuf, lsize + tlen + COMP_OVERFLOW_BYTES + sizeof(TSCKSUM)) < 0) { return -1; } - pBlockData = (SBlockData *)TSDB_COMMIT_BUF(pCommith); + pBlockData = (SBlockData *)(*ppBuf); pBlockCol = pBlockData->cols + tcol; tptr = POINTER_SHIFT(pBlockData, lsize); if (pCfg->compression == TWO_STAGE_COMP && - tsdbMakeRoom((void **)(&TSDB_COMMIT_COMP_BUF(pCommith)), tlen + COMP_OVERFLOW_BYTES) < 0) { + tsdbMakeRoom(ppCBuf, tlen + COMP_OVERFLOW_BYTES) < 0) { 
return -1; } // Compress or just copy if (pCfg->compression) { flen = (*(tDataTypes[pDataCol->type].compFunc))((char *)pDataCol->pData, tlen, rowsToWrite, tptr, - tlen + COMP_OVERFLOW_BYTES, pCfg->compression, - TSDB_COMMIT_COMP_BUF(pCommith), tlen + COMP_OVERFLOW_BYTES); + tlen + COMP_OVERFLOW_BYTES, pCfg->compression, *ppCBuf, + tlen + COMP_OVERFLOW_BYTES); } else { flen = tlen; memcpy(tptr, pDataCol->pData, flen); @@ -888,68 +1030,27 @@ static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCo return 0; } +static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCols, SBlock *pBlock, bool isLast, + bool isSuper) { + return tsdbWriteBlockImpl(TSDB_COMMIT_REPO(pCommith), TSDB_COMMIT_TABLE(pCommith), pDFile, pDataCols, pBlock, isLast, + isSuper, (void **)(&(TSDB_COMMIT_BUF(pCommith))), + (void **)(&(TSDB_COMMIT_COMP_BUF(pCommith)))); +} + + static int tsdbWriteBlockInfo(SCommitH *pCommih) { - SDFile * pHeadf = TSDB_COMMIT_HEAD_FILE(pCommih); - SBlockIdx blkIdx; - STable * pTable = TSDB_COMMIT_TABLE(pCommih); - SBlock * pBlock; - size_t nSupBlocks; - size_t nSubBlocks; - uint32_t tlen; - SBlockInfo *pBlkInfo; - int64_t offset; + SDFile * pHeadf = TSDB_COMMIT_HEAD_FILE(pCommih); + SBlockIdx blkIdx; + STable * pTable = TSDB_COMMIT_TABLE(pCommih); - nSupBlocks = taosArrayGetSize(pCommih->aSupBlk); - nSubBlocks = taosArrayGetSize(pCommih->aSubBlk); - - if (nSupBlocks <= 0) { - // No data (data all deleted) - return 0; - } - - tlen = (uint32_t)(sizeof(SBlockInfo) + sizeof(SBlock) * (nSupBlocks + nSubBlocks) + sizeof(TSCKSUM)); - - // Write SBlockInfo part - if (tsdbMakeRoom((void **)(&(TSDB_COMMIT_BUF(pCommih))), tlen) < 0) return -1; - pBlkInfo = TSDB_COMMIT_BUF(pCommih); - - pBlkInfo->delimiter = TSDB_FILE_DELIMITER; - pBlkInfo->tid = TABLE_TID(pTable); - pBlkInfo->uid = TABLE_UID(pTable); - - memcpy((void *)(pBlkInfo->blocks), taosArrayGet(pCommih->aSupBlk, 0), nSupBlocks * sizeof(SBlock)); - if (nSubBlocks > 0) { - memcpy((void *)(pBlkInfo->blocks + nSupBlocks), taosArrayGet(pCommih->aSubBlk, 0), nSubBlocks * sizeof(SBlock)); - - for (int i = 0; i < nSupBlocks; i++) { - pBlock = pBlkInfo->blocks + i; - - if (pBlock->numOfSubBlocks > 1) { - pBlock->offset += (sizeof(SBlockInfo) + sizeof(SBlock) * nSupBlocks); - } - } - } - - taosCalcChecksumAppend(0, (uint8_t *)pBlkInfo, tlen); - - if (tsdbAppendDFile(pHeadf, TSDB_COMMIT_BUF(pCommih), tlen, &offset) < 0) { + if (tsdbWriteBlockInfoImpl(pHeadf, pTable, pCommih->aSupBlk, pCommih->aSubBlk, (void **)(&(TSDB_COMMIT_BUF(pCommih))), + &blkIdx) < 0) { return -1; } - tsdbUpdateDFileMagic(pHeadf, POINTER_SHIFT(pBlkInfo, tlen - sizeof(TSCKSUM))); - - // Set blkIdx - pBlock = taosArrayGet(pCommih->aSupBlk, nSupBlocks - 1); - - blkIdx.tid = TABLE_TID(pTable); - blkIdx.uid = TABLE_UID(pTable); - blkIdx.hasLast = pBlock->last ? 
1 : 0; - blkIdx.maxKey = pBlock->keyLast; - blkIdx.numOfBlocks = (uint32_t)nSupBlocks; - blkIdx.len = tlen; - blkIdx.offset = (uint32_t)offset; - - ASSERT(blkIdx.numOfBlocks > 0); + if (blkIdx.numOfBlocks == 0) { + return 0; + } if (taosArrayPush(pCommih->aBlkIdx, (void *)(&blkIdx)) == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; @@ -959,49 +1060,6 @@ static int tsdbWriteBlockInfo(SCommitH *pCommih) { return 0; } -static int tsdbWriteBlockIdx(SCommitH *pCommih) { - SBlockIdx *pBlkIdx = NULL; - SDFile * pHeadf = TSDB_COMMIT_HEAD_FILE(pCommih); - size_t nidx = taosArrayGetSize(pCommih->aBlkIdx); - int tlen = 0, size = 0; - int64_t offset = 0; - - if (nidx <= 0) { - // All data are deleted - pHeadf->info.offset = 0; - pHeadf->info.len = 0; - return 0; - } - - for (size_t i = 0; i < nidx; i++) { - pBlkIdx = (SBlockIdx *)taosArrayGet(pCommih->aBlkIdx, i); - - size = tsdbEncodeSBlockIdx(NULL, pBlkIdx); - if (tsdbMakeRoom((void **)(&TSDB_COMMIT_BUF(pCommih)), tlen + size) < 0) return -1; - - void *ptr = POINTER_SHIFT(TSDB_COMMIT_BUF(pCommih), tlen); - tsdbEncodeSBlockIdx(&ptr, pBlkIdx); - - tlen += size; - } - - tlen += sizeof(TSCKSUM); - if (tsdbMakeRoom((void **)(&TSDB_COMMIT_BUF(pCommih)), tlen) < 0) return -1; - taosCalcChecksumAppend(0, (uint8_t *)TSDB_COMMIT_BUF(pCommih), tlen); - - if (tsdbAppendDFile(pHeadf, TSDB_COMMIT_BUF(pCommih), tlen, &offset) < tlen) { - tsdbError("vgId:%d failed to write block index part to file %s since %s", TSDB_COMMIT_REPO_ID(pCommih), - TSDB_FILE_FULL_NAME(pHeadf), tstrerror(terrno)); - return -1; - } - - tsdbUpdateDFileMagic(pHeadf, POINTER_SHIFT(TSDB_COMMIT_BUF(pCommih), tlen - sizeof(TSCKSUM))); - pHeadf->info.offset = (uint32_t)offset; - pHeadf->info.len = tlen; - - return 0; -} - static int tsdbCommitMemData(SCommitH *pCommith, SCommitIter *pIter, TSKEY keyLimit, bool toData) { STsdbRepo *pRepo = TSDB_COMMIT_REPO(pCommith); STsdbCfg * pCfg = REPO_CFG(pRepo); @@ -1454,45 +1512,3 @@ int tsdbApplyRtn(STsdbRepo *pRepo) { return 0; } - -static int tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn) { - SDiskID did; - SDFileSet nSet; - STsdbFS * pfs = REPO_FS(pRepo); - int level; - - ASSERT(pSet->fid >= pRtn->minFid); - - level = tsdbGetFidLevel(pSet->fid, pRtn); - - tfsAllocDisk(level, &(did.level), &(did.id)); - if (did.level == TFS_UNDECIDED_LEVEL) { - terrno = TSDB_CODE_TDB_NO_AVAIL_DISK; - return -1; - } - - if (did.level > TSDB_FSET_LEVEL(pSet)) { - // Need to move the FSET to higher level - tsdbInitDFileSet(&nSet, did, REPO_ID(pRepo), pSet->fid, FS_TXN_VERSION(pfs)); - - if (tsdbCopyDFileSet(pSet, &nSet) < 0) { - tsdbError("vgId:%d failed to copy FSET %d from level %d to level %d since %s", REPO_ID(pRepo), pSet->fid, - TSDB_FSET_LEVEL(pSet), did.level, tstrerror(terrno)); - return -1; - } - - if (tsdbUpdateDFileSet(pfs, &nSet) < 0) { - return -1; - } - - tsdbInfo("vgId:%d FSET %d is copied from level %d disk id %d to level %d disk id %d", REPO_ID(pRepo), pSet->fid, - TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet), did.level, did.id); - } else { - // On a correct level - if (tsdbUpdateDFileSet(pfs, pSet) < 0) { - return -1; - } - } - - return 0; -} \ No newline at end of file diff --git a/src/tsdb/src/tsdbCommitQueue.c b/src/tsdb/src/tsdbCommitQueue.c index abea79bc4f..bb844e8e83 100644 --- a/src/tsdb/src/tsdbCommitQueue.c +++ b/src/tsdb/src/tsdbCommitQueue.c @@ -26,8 +26,9 @@ typedef struct { } SCommitQueue; typedef struct { + TSDB_REQ_T req; STsdbRepo *pRepo; -} SCommitReq; +} SReq; static void *tsdbLoopCommit(void *arg); @@ -90,16 +91,17 @@ 
void tsdbDestroyCommitQueue() { pthread_mutex_destroy(&(pQueue->lock)); } -int tsdbScheduleCommit(STsdbRepo *pRepo) { +int tsdbScheduleCommit(STsdbRepo *pRepo, TSDB_REQ_T req) { SCommitQueue *pQueue = &tsCommitQueue; - SListNode *pNode = (SListNode *)calloc(1, sizeof(SListNode) + sizeof(SCommitReq)); + SListNode *pNode = (SListNode *)calloc(1, sizeof(SListNode) + sizeof(SReq)); if (pNode == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; return -1; } - ((SCommitReq *)pNode->data)->pRepo = pRepo; + ((SReq *)pNode->data)->req = req; + ((SReq *)pNode->data)->pRepo = pRepo; pthread_mutex_lock(&(pQueue->lock)); @@ -154,6 +156,7 @@ static void *tsdbLoopCommit(void *arg) { SCommitQueue *pQueue = &tsCommitQueue; SListNode * pNode = NULL; STsdbRepo * pRepo = NULL; + TSDB_REQ_T req; while (true) { pthread_mutex_lock(&(pQueue->lock)); @@ -174,14 +177,22 @@ static void *tsdbLoopCommit(void *arg) { pthread_mutex_unlock(&(pQueue->lock)); - pRepo = ((SCommitReq *)pNode->data)->pRepo; + req = ((SReq *)pNode->data)->req; + pRepo = ((SReq *)pNode->data)->pRepo; // check if need to apply new config - if (pRepo->config_changed) { + if (pRepo->config_changed) { tsdbApplyRepoConfig(pRepo); } - tsdbCommitData(pRepo); + if (req == COMMIT_REQ) { + tsdbCommitData(pRepo); + } else if (req == COMPACT_REQ) { + tsdbCompactImpl(pRepo); + } else { + ASSERT(0); + } + listNodeFree(pNode); } diff --git a/src/tsdb/src/tsdbCompact.c b/src/tsdb/src/tsdbCompact.c index 6dea4a4e57..635bba388a 100644 --- a/src/tsdb/src/tsdbCompact.c +++ b/src/tsdb/src/tsdbCompact.c @@ -11,4 +11,12 @@ * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . - */ \ No newline at end of file + */ +#include "tsdb.h" + +#ifndef _TSDB_PLUGINS + +int tsdbCompact(STsdbRepo *pRepo) { return 0; } +void *tsdbCompactImpl(STsdbRepo *pRepo) { return NULL; } + +#endif \ No newline at end of file diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 79dbb8be5d..bee5600af7 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -288,7 +288,7 @@ int tsdbAsyncCommit(STsdbRepo *pRepo) { if (tsdbLockRepo(pRepo) < 0) return -1; pRepo->imem = pRepo->mem; pRepo->mem = NULL; - tsdbScheduleCommit(pRepo); + tsdbScheduleCommit(pRepo, COMMIT_REQ); if (tsdbUnlockRepo(pRepo) < 0) return -1; return 0; diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 324a7c79c5..8174698197 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -68,7 +68,7 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) { TABLE_CHAR_NAME(pMeta->tables[tid]), TABLE_TID(pMeta->tables[tid]), TABLE_UID(pMeta->tables[tid])); return 0; } else { - tsdbError("vgId:%d table %s at tid %d uid %" PRIu64 + tsdbInfo("vgId:%d table %s at tid %d uid %" PRIu64 " exists, replace it with new table, this can be not reasonable", REPO_ID(pRepo), TABLE_CHAR_NAME(pMeta->tables[tid]), TABLE_TID(pMeta->tables[tid]), TABLE_UID(pMeta->tables[tid])); @@ -1055,10 +1055,7 @@ static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable) { STable *pSTable = pTable->pSuper; ASSERT(pSTable != NULL); - STSchema *pSchema = tsdbGetTableTagSchema(pTable); - STColumn *pCol = schemaColAt(pSchema, DEFAULT_TAG_INDEX_COLUMN); - - char * key = tdGetKVRowValOfCol(pTable->tagVal, pCol->colId); + char* key = getTagIndexKey(pTable); SArray *res = tSkipListGet(pSTable->pIndex, key); size_t size = taosArrayGetSize(res); diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index fd2b403184..1545d44395 
100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -368,40 +368,39 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC goto out_of_memory; } - assert(pCond != NULL && pCond->numOfCols > 0 && pMemRef != NULL); + assert(pCond != NULL && pMemRef != NULL); if (ASCENDING_TRAVERSE(pCond->order)) { assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey); } else { assert(pQueryHandle->window.skey >= pQueryHandle->window.ekey); } - - // allocate buffer in order to load data blocks from file - pQueryHandle->statis = calloc(pCond->numOfCols, sizeof(SDataStatis)); - if (pQueryHandle->statis == NULL) { - goto out_of_memory; - } - - pQueryHandle->pColumns = taosArrayInit(pCond->numOfCols, sizeof(SColumnInfoData)); // todo: use list instead of array? - if (pQueryHandle->pColumns == NULL) { - goto out_of_memory; - } - - for (int32_t i = 0; i < pCond->numOfCols; ++i) { - SColumnInfoData colInfo = {{0}, 0}; - - colInfo.info = pCond->colList[i]; - colInfo.pData = calloc(1, EXTRA_BYTES + pQueryHandle->outputCapacity * pCond->colList[i].bytes); - if (colInfo.pData == NULL) { + if (pCond->numOfCols > 0) { + // allocate buffer in order to load data blocks from file + pQueryHandle->statis = calloc(pCond->numOfCols, sizeof(SDataStatis)); + if (pQueryHandle->statis == NULL) { goto out_of_memory; } - taosArrayPush(pQueryHandle->pColumns, &colInfo); - pQueryHandle->statis[i].colId = colInfo.info.colId; - } - if (pCond->numOfCols > 0) { + pQueryHandle->pColumns = + taosArrayInit(pCond->numOfCols, sizeof(SColumnInfoData)); // todo: use list instead of array? + if (pQueryHandle->pColumns == NULL) { + goto out_of_memory; + } + + for (int32_t i = 0; i < pCond->numOfCols; ++i) { + SColumnInfoData colInfo = {{0}, 0}; + + colInfo.info = pCond->colList[i]; + colInfo.pData = calloc(1, EXTRA_BYTES + pQueryHandle->outputCapacity * pCond->colList[i].bytes); + if (colInfo.pData == NULL) { + goto out_of_memory; + } + taosArrayPush(pQueryHandle->pColumns, &colInfo); + pQueryHandle->statis[i].colId = colInfo.info.colId; + } + pQueryHandle->defaultLoadColumn = getDefaultLoadColumns(pQueryHandle, true); } - STsdbMeta* pMeta = tsdbGetMeta(tsdb); assert(pMeta != NULL); diff --git a/src/tsdb/src/tsdbReadImpl.c b/src/tsdb/src/tsdbReadImpl.c index 7212ae1636..dd14dc700f 100644 --- a/src/tsdb/src/tsdbReadImpl.c +++ b/src/tsdb/src/tsdbReadImpl.c @@ -258,7 +258,7 @@ int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) { for (int i = 1; i < pBlock->numOfSubBlocks; i++) { iBlock++; if (tsdbLoadBlockDataImpl(pReadh, iBlock, pReadh->pDCols[1]) < 0) return -1; - if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows) < 0) return -1; + if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL) < 0) return -1; } ASSERT(pReadh->pDCols[0]->numOfRows == pBlock->numOfRows); @@ -284,7 +284,7 @@ int tsdbLoadBlockDataCols(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo, for (int i = 1; i < pBlock->numOfSubBlocks; i++) { iBlock++; if (tsdbLoadBlockDataColsImpl(pReadh, iBlock, pReadh->pDCols[1], colIds, numOfColsIds) < 0) return -1; - if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows) < 0) return -1; + if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL) < 0) return -1; } ASSERT(pReadh->pDCols[0]->numOfRows == pBlock->numOfRows); diff --git a/src/util/inc/ttoken.h b/src/util/inc/ttoken.h index 3bf030a9eb..f62329183f 100644 --- 
a/src/util/inc/ttoken.h +++ b/src/util/inc/ttoken.h @@ -183,6 +183,7 @@ void taosCleanupKeywordsTable(); SStrToken tscReplaceStrToken(char **str, SStrToken *token, const char* newToken); +SStrToken taosTokenDup(SStrToken* pToken, char* buf, int32_t len); #ifdef __cplusplus } diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 887b231ec6..382f872486 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -224,6 +224,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL, "Database memory is fu TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FLOWCTRL, "Database memory is full for waiting commit") TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_DROPPING, "Database is dropping") TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_BALANCING, "Database is balancing") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_CLOSING, "Database is closing") TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, "Database suspended") TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, "Database write operation denied") TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_SYNCING, "Database is syncing") diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c index e9bbda1f9b..ea15ced898 100644 --- a/src/util/src/ttokenizer.c +++ b/src/util/src/ttokenizer.c @@ -674,3 +674,15 @@ void taosCleanupKeywordsTable() { taosHashCleanup(m); } } + +SStrToken taosTokenDup(SStrToken* pToken, char* buf, int32_t len) { + assert(pToken != NULL && buf != NULL); + SStrToken token = *pToken; + token.z = buf; + + assert(len > token.n); + strncpy(token.z, pToken->z, pToken->n); + token.z[token.n] = 0; + + return token; +} diff --git a/src/vnode/src/vnodeMgmt.c b/src/vnode/src/vnodeMgmt.c index 32f9532138..7e6022fc87 100644 --- a/src/vnode/src/vnodeMgmt.c +++ b/src/vnode/src/vnodeMgmt.c @@ -91,18 +91,18 @@ static void vnodeIncRef(void *ptNode) { } void *vnodeAcquire(int32_t vgId) { - SVnodeObj **ppVnode = NULL; + SVnodeObj *pVnode = NULL; if (tsVnodesHash != NULL) { - ppVnode = taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, NULL, sizeof(void *)); + taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, &pVnode, sizeof(void *)); } - if (ppVnode == NULL || *ppVnode == NULL) { + if (pVnode == NULL) { terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; vDebug("vgId:%d, not exist", vgId); return NULL; } - return *ppVnode; + return pVnode; } void vnodeRelease(void *vparam) { diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index 16089c8e91..555eda6d13 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -303,6 +303,17 @@ static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) { } int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rparam) { + SVnodeObj *pVnode = vparam; + if (qtype == TAOS_QTYPE_RPC) { + if (!vnodeInReadyStatus(pVnode)) { + return TSDB_CODE_APP_NOT_READY; // it may be in deleting or closing state + } + + if (pVnode->role != TAOS_SYNC_ROLE_MASTER) { + return TSDB_CODE_APP_NOT_READY; + } + } + SVWriteMsg *pWrite = vnodeBuildVWriteMsg(vparam, wparam, qtype, rparam); if (pWrite == NULL) { assert(terrno != 0); diff --git a/tests/pytest/alter/alter_cacheLastRow.py b/tests/pytest/alter/alter_cacheLastRow.py new file mode 100644 index 0000000000..36a2864d0f --- /dev/null +++ b/tests/pytest/alter/alter_cacheLastRow.py @@ -0,0 +1,109 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import tdDnodes +from datetime import datetime + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + tdSql.query('show databases') + tdSql.checkData(0,15,0) + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + #write 5M rows into db, then restart to force the data move into disk. + #create 500 tables + os.system("%staosdemo -f tools/taosdemoAllTest/insert_5M_rows.json -y " % binPath) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.execute('use db') + + #prepare to query 500 tables last_row() + tableName = [] + for i in range(500): + tableName.append(f"stb_{i}") + tdSql.execute('use db') + lastRow_Off_start = datetime.now() + + slow = 0 #count time where lastRow on is slower + for i in range(5): + #switch lastRow to off and check + tdSql.execute('alter database db cachelast 0') + tdSql.query('show databases') + tdSql.checkData(0,15,0) + + #run last_row(*) query 500 times + for i in range(500): + tdSql.execute(f'SELECT LAST_ROW(*) FROM {tableName[i]}') + lastRow_Off_end = datetime.now() + + tdLog.debug(f'time used:{lastRow_Off_end-lastRow_Off_start}') + + #switch lastRow to on and check + tdSql.execute('alter database db cachelast 1') + tdSql.query('show databases') + tdSql.checkData(0,15,1) + + #run last_row(*) query 500 times + tdSql.execute('use db') + lastRow_On_start = datetime.now() + for i in range(500): + tdSql.execute(f'SELECT LAST_ROW(*) FROM {tableName[i]}') + lastRow_On_end = datetime.now() + + tdLog.debug(f'time used:{lastRow_On_end-lastRow_On_start}') + + #check which one used more time + if (lastRow_Off_end-lastRow_Off_start > lastRow_On_end-lastRow_On_start): + pass + else: + slow += 1 + tdLog.debug(slow) + if slow > 1: #tolerance for the first time + tdLog.exit('lastRow hot alter failed') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 644aa79916..b743eee2ef 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -37,6 +37,7 @@ import requests import gc import taos + from .shared.types import TdColumns, TdTags # from crash_gen import ServiceManager, TdeInstance, TdeSubProcess @@ -160,6 +161,7 @@ class WorkerThread: Logging.debug("[TRD] Thread Coordinator not running any more, worker thread now 
stopping...") break + # Before we fetch the task and run it, let's ensure we properly "use" the database (not needed any more) try: if (Config.getConfig().per_thread_db_connection): # most likely TRUE @@ -1362,9 +1364,12 @@ class Task(): Progress.emit(Progress.ACCEPTABLE_ERROR) self._err = err else: # not an acceptable error - errMsg = "[=] Unexpected Taos library exception ({}): errno=0x{:X}, msg: {}, SQL: {}".format( + shortTid = threading.get_ident() % 10000 + errMsg = "[=] Unexpected Taos library exception ({}): errno=0x{:X}, thread={}, msg: {}, SQL: {}".format( self.__class__.__name__, - errno2, err, wt.getDbConn().getLastSql()) + errno2, + shortTid, + err, wt.getDbConn().getLastSql()) self.logDebug(errMsg) if Config.getConfig().debug: # raise # so that we see full stack @@ -1411,21 +1416,31 @@ class Task(): def lockTable(self, ftName): # full table name # print(" <<" + ftName + '_', end="", flush=True) - with Task._lock: - if not ftName in Task._tableLocks: + with Task._lock: # SHORT lock! so we only protect lock creation + if not ftName in Task._tableLocks: # Create new lock and add to list, if needed Task._tableLocks[ftName] = threading.Lock() - Task._tableLocks[ftName].acquire() + # No lock protection, anybody can do this any time + lock = Task._tableLocks[ftName] + # Logging.info("Acquiring lock: {}, {}".format(ftName, lock)) + lock.acquire() + # Logging.info("Acquiring lock successful: {}".format(lock)) def unlockTable(self, ftName): # print('_' + ftName + ">> ", end="", flush=True) - with Task._lock: + with Task._lock: if not ftName in self._tableLocks: raise RuntimeError("Corrupt state, no such lock") lock = Task._tableLocks[ftName] if not lock.locked(): raise RuntimeError("Corrupte state, already unlocked") - lock.release() + + # Important note, we want to protect unlocking under the task level + # locking, because we don't want the lock to be deleted (maybe in the futur) + # while we unlock it + # Logging.info("Releasing lock: {}".format(lock)) + lock.release() + # Logging.info("Releasing lock successful: {}".format(lock)) class ExecutionStats: @@ -1696,6 +1711,11 @@ class TdSuperTable: return dbc.query("SELECT * FROM {}.{}".format(self._dbName, self._stName)) > 0 def ensureRegTable(self, task: Optional[Task], dbc: DbConn, regTableName: str): + ''' + Make sure a regular table exists for this super table, creating it if necessary. + If there is an associated "Task" that wants to do this, "lock" this table so that + others don't access it while we create it. + ''' dbName = self._dbName sql = "select tbname from {}.{} where tbname in ('{}')".format(dbName, self._stName, regTableName) if dbc.query(sql) >= 1 : # reg table exists already @@ -1703,18 +1723,24 @@ class TdSuperTable: # acquire a lock first, so as to be able to *verify*. More details in TD-1471 fullTableName = dbName + '.' 
+ regTableName - if task is not None: # TODO: what happens if we don't lock the table - task.lockTable(fullTableName) + if task is not None: # Somethime thie operation is requested on behalf of a "task" + # Logging.info("Locking table for creation: {}".format(fullTableName)) + task.lockTable(fullTableName) # in which case we'll lock this table to ensure serialized access + # Logging.info("Table locked for creation".format(fullTableName)) Progress.emit(Progress.CREATE_TABLE_ATTEMPT) # ATTEMPT to create a new table # print("(" + fullTableName[-3:] + ")", end="", flush=True) try: sql = "CREATE TABLE {} USING {}.{} tags ({})".format( fullTableName, dbName, self._stName, self._getTagStrForSql(dbc) ) + # Logging.info("Creating regular with SQL: {}".format(sql)) dbc.execute(sql) + # Logging.info("Regular table created: {}".format(sql)) finally: if task is not None: + # Logging.info("Unlocking table after creation: {}".format(fullTableName)) task.unlockTable(fullTableName) # no matter what + # Logging.info("Table unlocked after creation: {}".format(fullTableName)) def _getTagStrForSql(self, dbc) : tags = self._getTags(dbc) @@ -2011,9 +2037,30 @@ class TaskAddData(StateTransitionTask): def canBeginFrom(cls, state: AnyState): return state.canAddData() + def _lockTableIfNeeded(self, fullTableName, extraMsg = ''): + if Config.getConfig().verify_data: + # Logging.info("Locking table: {}".format(fullTableName)) + self.lockTable(fullTableName) + # Logging.info("Table locked {}: {}".format(extraMsg, fullTableName)) + # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written + else: + # Logging.info("Skipping locking table") + pass + + def _unlockTableIfNeeded(self, fullTableName): + if Config.getConfig().verify_data: + # Logging.info("Unlocking table: {}".format(fullTableName)) + self.unlockTable(fullTableName) + # Logging.info("Table unlocked: {}".format(fullTableName)) + else: + pass + # Logging.info("Skipping unlocking table") + def _addDataInBatch(self, db, dbc, regTableName, te: TaskExecutor): numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS + fullTableName = db.getName() + '.' 
+ regTableName + self._lockTableIfNeeded(fullTableName, 'batch') sql = "INSERT INTO {} VALUES ".format(fullTableName) for j in range(numRecords): # number of records per table @@ -2021,51 +2068,60 @@ class TaskAddData(StateTransitionTask): nextTick = db.getNextTick() nextColor = db.getNextColor() sql += "('{}', {}, '{}');".format(nextTick, nextInt, nextColor) - dbc.execute(sql) + + # Logging.info("Adding data in batch: {}".format(sql)) + try: + dbc.execute(sql) + finally: + # Logging.info("Data added in batch: {}".format(sql)) + self._unlockTableIfNeeded(fullTableName) + + def _addData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS for j in range(numRecords): # number of records per table - nextInt = db.getNextInt() + intToWrite = db.getNextInt() nextTick = db.getNextTick() nextColor = db.getNextColor() if Config.getConfig().record_ops: self.prepToRecordOps() if self.fAddLogReady is None: raise CrashGenError("Unexpected empty fAddLogReady") - self.fAddLogReady.write("Ready to write {} to {}\n".format(nextInt, regTableName)) + self.fAddLogReady.write("Ready to write {} to {}\n".format(intToWrite, regTableName)) self.fAddLogReady.flush() os.fsync(self.fAddLogReady.fileno()) # TODO: too ugly trying to lock the table reliably, refactor... fullTableName = db.getName() + '.' + regTableName - if Config.getConfig().verify_data: - self.lockTable(fullTableName) - # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written - + self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock + try: sql = "INSERT INTO {} VALUES ('{}', {}, '{}');".format( # removed: tags ('{}', {}) fullTableName, # ds.getFixedSuperTableName(), # ds.getNextBinary(), ds.getNextFloat(), - nextTick, nextInt, nextColor) + nextTick, intToWrite, nextColor) + # Logging.info("Adding data: {}".format(sql)) dbc.execute(sql) + # Logging.info("Data added: {}".format(sql)) + intWrote = intToWrite # Quick hack, attach an update statement here. TODO: create an "update" task if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shaddow DB - nextInt = db.getNextInt() + intToUpdate = db.getNextInt() # Updated, but should not succeed nextColor = db.getNextColor() sql = "INSERt INTO {} VALUES ('{}', {}, '{}');".format( # "INSERt" means "update" here fullTableName, - nextTick, nextInt, nextColor) + nextTick, intToUpdate, nextColor) # sql = "UPDATE {} set speed={}, color='{}' WHERE ts='{}'".format( # fullTableName, db.getNextInt(), db.getNextColor(), nextTick) dbc.execute(sql) + intWrote = intToUpdate # We updated, seems TDengine non-cluster accepts this. except: # Any exception at all - if Config.getConfig().verify_data: - self.unlockTable(fullTableName) + self._unlockTableIfNeeded(fullTableName) raise # Now read it back and verify, we might encounter an error if table is dropped @@ -2073,33 +2129,41 @@ class TaskAddData(StateTransitionTask): try: readBack = dbc.queryScalar("SELECT speed from {}.{} WHERE ts='{}'". 
format(db.getName(), regTableName, nextTick)) - if readBack != nextInt : + if readBack != intWrote : raise taos.error.ProgrammingError( "Failed to read back same data, wrote: {}, read: {}" - .format(nextInt, readBack), 0x999) + .format(intWrote, readBack), 0x999) except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) - if errno in [CrashGenError.INVALID_EMPTY_RESULT, CrashGenError.INVALID_MULTIPLE_RESULT] : # not a single result + if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result raise taos.error.ProgrammingError( - "Failed to read back same data for tick: {}, wrote: {}, read: {}" - .format(nextTick, nextInt, "Empty Result" if errno == CrashGenError.INVALID_EMPTY_RESULT else "Multiple Result"), + "Failed to read back same data for tick: {}, wrote: {}, read: EMPTY" + .format(nextTick, intWrote), + errno) + elif errno == CrashGenError.INVALID_MULTIPLE_RESULT : # multiple results + raise taos.error.ProgrammingError( + "Failed to read back same data for tick: {}, wrote: {}, read: MULTIPLE RESULTS" + .format(nextTick, intWrote), errno) elif errno in [0x218, 0x362]: # table doesn't exist # do nothing - dummy = 0 + pass else: # Re-throw otherwise raise finally: - self.unlockTable(fullTableName) # Unlock the table no matter what + self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock + # Done with read-back verification, unlock the table now + else: + self._unlockTableIfNeeded(fullTableName) # Successfully wrote the data into the DB, let's record it somehow - te.recordDataMark(nextInt) + te.recordDataMark(intWrote) if Config.getConfig().record_ops: if self.fAddLogDone is None: raise CrashGenError("Unexpected empty fAddLogDone") - self.fAddLogDone.write("Wrote {} to {}\n".format(nextInt, regTableName)) + self.fAddLogDone.write("Wrote {} to {}\n".format(intWrote, regTableName)) self.fAddLogDone.flush() os.fsync(self.fAddLogDone.fileno()) @@ -2137,15 +2201,16 @@ class TaskAddData(StateTransitionTask): class ThreadStacks: # stack info for all threads def __init__(self): self._allStacks = {} - allFrames = sys._current_frames() - for th in threading.enumerate(): + allFrames = sys._current_frames() # All current stack frames + for th in threading.enumerate(): # For each thread if th.ident is None: continue - stack = traceback.extract_stack(allFrames[th.ident]) - self._allStacks[th.native_id] = stack + stack = traceback.extract_stack(allFrames[th.ident]) # Get stack for a thread + shortTid = th.ident % 10000 + self._allStacks[shortTid] = stack # Was using th.native_id def print(self, filteredEndName = None, filterInternal = False): - for thNid, stack in self._allStacks.items(): # for each thread, stack frames top to bottom + for tIdent, stack in self._allStacks.items(): # for each thread, stack frames top to bottom lastFrame = stack[-1] if filteredEndName: # we need to filter out stacks that match this name if lastFrame.name == filteredEndName : # end did not match @@ -2157,7 +2222,7 @@ class ThreadStacks: # stack info for all threads '__init__']: # the thread that extracted the stack continue # ignore # Now print - print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(thNid)) + print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(tIdent)) stackFrame = 0 for frame in stack: # was using: reversed(stack) # print(frame) @@ -2376,7 +2441,7 @@ class MainExec: action='store', default=0, type=int, - help='Maximum number of DBs to keep, set to disable dropping DB. 
(default: 0)') + help='Number of DBs to use, set to disable dropping DB. (default: 0)') parser.add_argument( '-c', '--connector-type', diff --git a/tests/pytest/crash_gen/service_manager.py b/tests/pytest/crash_gen/service_manager.py index 1cd65c1dde..c6685ec469 100644 --- a/tests/pytest/crash_gen/service_manager.py +++ b/tests/pytest/crash_gen/service_manager.py @@ -179,7 +179,7 @@ quorum 2 def getServiceCmdLine(self): # to start the instance if Config.getConfig().track_memory_leaks: Logging.info("Invoking VALGRIND on service...") - return ['exec /usr/bin/valgrind', '--leak-check=yes', self.getExecFile(), '-c', self.getCfgDir()] + return ['exec valgrind', '--leak-check=yes', self.getExecFile(), '-c', self.getCfgDir()] else: # TODO: move "exec -c" into Popen(), we can both "use shell" and NOT fork so ask to lose kill control return ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen() @@ -310,7 +310,7 @@ class TdeSubProcess: # print("Starting TDengine with env: ", myEnv.items()) print("Starting TDengine: {}".format(cmdLine)) - return Popen( + ret = Popen( ' '.join(cmdLine), # ' '.join(cmdLine) if useShell else cmdLine, shell=True, # Always use shell, since we need to pass ENV vars stdout=PIPE, @@ -318,6 +318,10 @@ class TdeSubProcess: close_fds=ON_POSIX, env=myEnv ) # had text=True, which interferred with reading EOF + time.sleep(0.01) # very brief wait, then let's check if sub process started successfully. + if ret.poll(): + raise CrashGenError("Sub process failed to start with command line: {}".format(cmdLine)) + return ret STOP_SIGNAL = signal.SIGINT # signal.SIGKILL/SIGINT # What signal to use (in kill) to stop a taosd process? SIG_KILL_RETCODE = 137 # ref: https://stackoverflow.com/questions/43268156/process-finished-with-exit-code-137-in-pycharm @@ -614,7 +618,7 @@ class ServiceManager: # Find if there's already a taosd service, and then kill it for proc in psutil.process_iter(): - if proc.name() == 'taosd': + if proc.name() == 'taosd' or proc.name() == 'memcheck-amd64-': # Regular or under Valgrind Logging.info("Killing an existing TAOSD process in 2 seconds... 
press CTRL-C to interrupt") time.sleep(2.0) proc.kill() diff --git a/tests/pytest/crash_gen/shared/misc.py b/tests/pytest/crash_gen/shared/misc.py index 90ad802ff1..78923bcc29 100644 --- a/tests/pytest/crash_gen/shared/misc.py +++ b/tests/pytest/crash_gen/shared/misc.py @@ -35,7 +35,8 @@ class LoggingFilter(logging.Filter): class MyLoggingAdapter(logging.LoggerAdapter): def process(self, msg, kwargs): - return "[{:04d}] {}".format(threading.get_ident() % 10000, msg), kwargs + shortTid = threading.get_ident() % 10000 + return "[{:04d}] {}".format(shortTid, msg), kwargs # return '[%s] %s' % (self.extra['connid'], msg), kwargs diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index c93fbc5eb3..19bc5e9cf9 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -31,7 +31,7 @@ python3 ./test.py -f table/column_name.py python3 ./test.py -f table/column_num.py python3 ./test.py -f table/db_table.py python3 ./test.py -f table/create_sensitive.py -#python3 ./test.py -f table/tablename-boundary.py +python3 ./test.py -f table/tablename-boundary.py python3 ./test.py -f table/max_table_length.py python3 ./test.py -f table/alter_column.py python3 ./test.py -f table/boundary.py @@ -334,6 +334,7 @@ python3 ./test.py -f tag_lite/alter_tag.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py +python3 ./test.py -f tag_lite/drop_auto_create.py python3 test.py -f insert/insert_before_use_db.py - +python3 test.py -f alter/alter_cacheLastRow.py #======================p4-end=============== diff --git a/tests/pytest/insert/nchar.py b/tests/pytest/insert/nchar.py index 3319aa3c56..5ad52b96a1 100644 --- a/tests/pytest/insert/nchar.py +++ b/tests/pytest/insert/nchar.py @@ -36,6 +36,10 @@ class TDTestCase: tdSql.checkData(1, 1, '涛思数据') tdSql.error("insert into tb values (now, 'taosdata001')") + + tdSql.error("insert into tb(now, 😀)") + tdSql.query("select * from tb") + tdSql.checkRows(2) def stop(self): tdSql.close() diff --git a/tests/pytest/table/tablename-boundary.py b/tests/pytest/table/tablename-boundary.py index 0755e75355..dc22c3343b 100644 --- a/tests/pytest/table/tablename-boundary.py +++ b/tests/pytest/table/tablename-boundary.py @@ -14,6 +14,13 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + self.ts = 1622100000000 + + def get_random_string(self, length): + letters = string.ascii_lowercase + result_str = ''.join(random.choice(letters) for i in range(length)) + return result_str + def run(self): tdSql.prepare() @@ -24,19 +31,62 @@ class TDTestCase: shell=True)) - 1 tdLog.info("table name max length is %d" % tableNameMaxLen) chars = string.ascii_uppercase + string.ascii_lowercase - tb_name = ''.join(random.choices(chars, k=tableNameMaxLen)) + tb_name = ''.join(random.choices(chars, k=tableNameMaxLen + 1)) tdLog.info('tb_name length %d' % len(tb_name)) tdLog.info('create table %s (ts timestamp, value int)' % tb_name) - tdSql.error( - 'create table %s (ts timestamp, speed binary(4089))' % - tb_name) + tdSql.error('create table %s (ts timestamp, speed binary(4089))' % tb_name) - tb_name = ''.join(random.choices(chars, k=191)) + tb_name = ''.join(random.choices(chars, k=tableNameMaxLen)) tdLog.info('tb_name length %d' % len(tb_name)) tdLog.info('create table %s (ts timestamp, value int)' % tb_name) tdSql.execute( 'create table %s (ts timestamp, speed binary(4089))' % tb_name) + + db_name = self.get_random_string(33) + tdSql.error("create 
database %s" % db_name) + + db_name = self.get_random_string(32) + tdSql.execute("create database %s" % db_name) + tdSql.execute("use %s" % db_name) + + tb_name = self.get_random_string(193) + tdSql.error("create table %s(ts timestamp, val int)" % tb_name) + + tb_name = self.get_random_string(192) + tdSql.execute("create table %s.%s(ts timestamp, val int)" % (db_name, tb_name)) + tdSql.query("show %s.tables" % db_name) + tdSql.checkRows(1) + tdSql.checkData(0, 0, tb_name) + + tdSql.execute("insert into %s.%s values(now, 1)" % (db_name, tb_name)) + tdSql.query("select * from %s.%s" %(db_name, tb_name)) + tdSql.checkRows(1) + + db_name = self.get_random_string(32) + tdSql.execute("create database %s update 1" % db_name) + + stb_name = self.get_random_string(192) + tdSql.execute("create table %s.%s(ts timestamp, val int) tags(id int)" % (db_name, stb_name)) + tb_name1 = self.get_random_string(192) + tdSql.execute("insert into %s.%s using %s.%s tags(1) values(%d, 1)(%d, 2)(%d, 3)" % (db_name, tb_name1, db_name, stb_name, self.ts, self.ts + 1, self.ts + 2)) + tb_name2 = self.get_random_string(192) + tdSql.execute("insert into %s.%s using %s.%s tags(2) values(%d, 1)(%d, 2)(%d, 3)" % (db_name, tb_name2, db_name, stb_name, self.ts, self.ts + 1, self.ts + 2)) + + tdSql.query("show %s.tables" % db_name) + tdSql.checkRows(2) + + tdSql.query("select * from %s.%s" % (db_name, stb_name)) + tdSql.checkRows(6) + + tdSql.execute("insert into %s.%s using %s.%s tags(1) values(%d, null)" % (db_name, tb_name1, db_name, stb_name, self.ts)) + + tdSql.query("select * from %s.%s" % (db_name, stb_name)) + tdSql.checkRows(6) + + + + def stop(self): tdSql.close() diff --git a/tests/pytest/tag_lite/drop_auto_create.py b/tests/pytest/tag_lite/drop_auto_create.py new file mode 100644 index 0000000000..f89b41008b --- /dev/null +++ b/tests/pytest/tag_lite/drop_auto_create.py @@ -0,0 +1,47 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + tdSql.execute('create table m1(ts timestamp, k int) tags(a binary(12), b int, c double);') + tdSql.execute('insert into tm0 using m1(b,c) tags(1, 99) values(now, 1);') + tdSql.execute('insert into tm1 using m1(b,c) tags(2, 100) values(now, 2);') + tdLog.info("2 rows inserted") + tdSql.query('select * from m1;') + tdSql.checkRows(2) + tdSql.query('select *,tbname from m1;') + tdSql.execute("drop table tm0; ") + tdSql.query('select * from m1') + tdSql.checkRows(1) + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json b/tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json new file mode 100644 index 0000000000..4637009ca3 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json @@ -0,0 +1,60 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 100, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 3650, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 500, + "childtable_prefix": "stb_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_limit": 10, + "childtable_offset":100, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}] + }] + }] +} \ No newline at end of file diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 6eaf4e18af..0f71ffd0a3 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -432,7 +432,7 @@ class TDDnodes: self.simDeployed = False def init(self, path): - psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): killCmd = "kill -TERM %s > /dev/null 2>&1" % processID @@ -545,14 +545,14 @@ class TDDnodes: for i in range(len(self.dnodes)): self.dnodes[i].stop() - psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'" + psCmd = "ps 
-ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") if processID: cmd = "sudo systemctl stop taosd" os.system(cmd) # if os.system(cmd) != 0 : # tdLog.exit(cmd) - psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): killCmd = "kill -TERM %s > /dev/null 2>&1" % processID diff --git a/tests/script/api/stmtBatchTest.c b/tests/script/api/stmtBatchTest.c index 80848febd1..24291adfa0 100644 --- a/tests/script/api/stmtBatchTest.c +++ b/tests/script/api/stmtBatchTest.c @@ -3486,6 +3486,204 @@ static int stmt_specifyCol_bind_case_002_autoCreateTbl(TAOS_STMT *stmt, int tabl v->ts[i] = tts + i; } + for (int i = 0; i < 1; ++i) { + //tags[i+0].buffer_type = TSDB_DATA_TYPE_INT; + //tags[i+0].buffer = v->v4; + //tags[i+0].is_null = &one_not_null; + //tags[i+0].length = NULL; + + tags[i+0].buffer_type = TSDB_DATA_TYPE_BOOL; + tags[i+0].buffer = v->b; + tags[i+0].is_null = &one_not_null; + tags[i+0].length = NULL; + + //tags[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; + //tags[i+2].buffer = v->v1; + //tags[i+2].is_null = &one_not_null; + //tags[i+2].length = NULL; + + tags[i+1].buffer_type = TSDB_DATA_TYPE_SMALLINT; + tags[i+1].buffer = v->v2; + tags[i+1].is_null = &one_not_null; + tags[i+1].length = NULL; + + tags[i+2].buffer_type = TSDB_DATA_TYPE_BIGINT; + tags[i+2].buffer = v->v8; + tags[i+2].is_null = &one_not_null; + tags[i+2].length = NULL; + + tags[i+3].buffer_type = TSDB_DATA_TYPE_FLOAT; + tags[i+3].buffer = v->f4; + tags[i+3].is_null = &one_not_null; + tags[i+3].length = NULL; + + tags[i+4].buffer_type = TSDB_DATA_TYPE_DOUBLE; + tags[i+4].buffer = v->f8; + tags[i+4].is_null = &one_not_null; + tags[i+4].length = NULL; + + tags[i+5].buffer_type = TSDB_DATA_TYPE_BINARY; + tags[i+5].buffer = v->br; + tags[i+5].is_null = &one_not_null; + tags[i+5].length = (uintptr_t *)lb; + + tags[i+6].buffer_type = TSDB_DATA_TYPE_NCHAR; + tags[i+6].buffer = v->nr; + tags[i+6].is_null = &one_not_null; + tags[i+6].length = (uintptr_t *)lb; + } + + + unsigned long long starttime = getCurrentTime(); + +// create table m%d (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, br binary(%d), nr nchar(%d), ts2 timestamp) + //char *sql = "insert into ? (ts,b,v4,f4,br) using stb1 tags (?,?,?,?,?,?,?,?,?) values(?,?,?,?,?)"; + char *sql = "insert into ? using stb1 tags (33,?,99,?,?,?,?,?,?) (ts,b,v4,f4,br) values(?,?,?,?,?)"; + + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("failed to execute taos_stmt_prepare. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + + int id = 0; + for (int l = 0; l < bingNum; l++) { + for (int zz = 0; zz < tableNum; zz++) { + char buf[32]; + sprintf(buf, "m%d", zz); + code = taos_stmt_set_tbname_tags(stmt, buf, tags); + if (code != 0){ + printf("failed to execute taos_stmt_set_tbname. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + + for (int col=0; col < columnNum; ++col) { + code = taos_stmt_bind_single_param_batch(stmt, params + id, col); + if (code != 0){ + printf("failed to execute taos_stmt_bind_single_param_batch. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + id++; + } + + code = taos_stmt_add_batch(stmt); + if (code != 0) { + printf("failed to execute taos_stmt_add_batch. 
code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + } + + code = taos_stmt_execute(stmt); + if (code != 0) { + printf("failed to execute taos_stmt_execute. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + } + + unsigned long long endtime = getCurrentTime(); + unsigned long long totalRows = (uint32_t)(totalRowsPerTbl * tableNum); + printf("insert total %d records, used %u seconds, avg:%u useconds per record\n", totalRows, (endtime-starttime)/1000000UL, (endtime-starttime)/totalRows); + + free(v->ts); + free(v->br); + free(v->nr); + free(v); + free(lb); + free(params); + free(tags); + free(is_null); + free(no_null); + + return 0; +} + +// some tags are null +static int stmt_specifyCol_bind_case_003_autoCreateTbl(TAOS_STMT *stmt, int tableNum, int rowsOfPerColum, int bingNum, int lenOfBinaryDef, int lenOfBinaryAct, int columnNum) { + sampleValue* v = (sampleValue *)calloc(1, sizeof(sampleValue)); + + int totalRowsPerTbl = rowsOfPerColum * bingNum; + + v->ts = (int64_t *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * tableNum)); + v->br = (char *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * lenOfBinaryDef)); + v->nr = (char *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * lenOfBinaryDef)); + + int *lb = (int *)malloc(MAX_ROWS_OF_PER_COLUMN * sizeof(int)); + + TAOS_BIND *tags = calloc(1, sizeof(TAOS_BIND) * 9 * 1); + TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * (size_t)(bingNum * columnNum * (tableNum+1) * rowsOfPerColum)); + char* is_null = malloc(sizeof(char) * MAX_ROWS_OF_PER_COLUMN); + char* no_null = malloc(sizeof(char) * MAX_ROWS_OF_PER_COLUMN); + int one_not_null = 0; + int one_is_null = 1; + + int64_t tts = 1591060628000; + + for (int i = 0; i < rowsOfPerColum; ++i) { + lb[i] = lenOfBinaryAct; + no_null[i] = 0; + is_null[i] = (i % 10 == 2) ? 
1 : 0; + v->b[i] = (int8_t)(i % 2); + v->v1[i] = (int8_t)((i+1) % 2); + v->v2[i] = (int16_t)i; + v->v4[i] = (int32_t)(i+1); + v->v8[i] = (int64_t)(i+2); + v->f4[i] = (float)(i+3); + v->f8[i] = (double)(i+4); + char tbuf[MAX_BINARY_DEF_LEN]; + memset(tbuf, 0, MAX_BINARY_DEF_LEN); + sprintf(tbuf, "binary-%d-0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789", i%10); + memcpy(v->br + i*lenOfBinaryDef, tbuf, (size_t)lenOfBinaryAct); + memset(tbuf, 0, MAX_BINARY_DEF_LEN); + sprintf(tbuf, "nchar-%d-0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789", i%10); + memcpy(v->nr + i*lenOfBinaryDef, tbuf, (size_t)lenOfBinaryAct); + v->ts2[i] = tts + i; + } + + int i = 0; + for (int j = 0; j < bingNum * tableNum; j++) { + params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[i+0].buffer_length = sizeof(int64_t); + params[i+0].buffer = &v->ts[j*rowsOfPerColum]; + params[i+0].length = NULL; + params[i+0].is_null = no_null; + params[i+0].num = rowsOfPerColum; + + params[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; + params[i+1].buffer_length = sizeof(int8_t); + params[i+1].buffer = v->b; + params[i+1].length = NULL; + params[i+1].is_null = is_null; + params[i+1].num = rowsOfPerColum; + + params[i+2].buffer_type = TSDB_DATA_TYPE_INT; + params[i+2].buffer_length = sizeof(int32_t); + params[i+2].buffer = v->v4; + params[i+2].length = NULL; + params[i+2].is_null = is_null; + params[i+2].num = rowsOfPerColum; + + params[i+3].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[i+3].buffer_length = sizeof(float); + params[i+3].buffer = v->f4; + params[i+3].length = NULL; + params[i+3].is_null = is_null; + params[i+3].num = rowsOfPerColum; + + params[i+4].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+4].buffer_length = (uintptr_t)lenOfBinaryDef; + params[i+4].buffer = v->br; + params[i+4].length = lb; + params[i+4].is_null = is_null; + params[i+4].num = rowsOfPerColum; + + i+=columnNum; + } + + //int64_t tts = 1591060628000; + for (int i = 0; i < totalRowsPerTbl * tableNum; ++i) { + v->ts[i] = tts + i; + } + for (int i = 0; i < 1; ++i) { tags[i+0].buffer_type = TSDB_DATA_TYPE_INT; tags[i+0].buffer = v->v4; @@ -3494,12 +3692,12 @@ static int stmt_specifyCol_bind_case_002_autoCreateTbl(TAOS_STMT *stmt, int tabl tags[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; tags[i+1].buffer = v->b; - tags[i+1].is_null = &one_not_null; + tags[i+1].is_null = &one_is_null; tags[i+1].length = NULL; tags[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; tags[i+2].buffer = v->v1; - tags[i+2].is_null = &one_not_null; + tags[i+2].is_null = &one_is_null; tags[i+2].length = NULL; tags[i+3].buffer_type = TSDB_DATA_TYPE_SMALLINT; @@ -3514,7 +3712,7 @@ static int stmt_specifyCol_bind_case_002_autoCreateTbl(TAOS_STMT *stmt, int tabl tags[i+5].buffer_type = TSDB_DATA_TYPE_FLOAT; tags[i+5].buffer = v->f4; - tags[i+5].is_null = &one_not_null; + tags[i+5].is_null = &one_is_null; tags[i+5].length = NULL; tags[i+6].buffer_type = TSDB_DATA_TYPE_DOUBLE; @@ -3597,7 +3795,8 @@ static int stmt_specifyCol_bind_case_002_autoCreateTbl(TAOS_STMT *stmt, int tabl return 0; } -static int stmt_specifyCol_bind_case_003_autoCreateTbl(TAOS_STMT *stmt, int tableNum, int rowsOfPerColum, int bingNum, int lenOfBinaryDef, int lenOfBinaryAct, int columnNum) { +// specify tags field, and not support , then is error case +static int 
stmt_specifyCol_bind_case_004_autoCreateTbl(TAOS_STMT *stmt, int tableNum, int rowsOfPerColum, int bingNum, int lenOfBinaryDef, int lenOfBinaryAct, int columnNum) { sampleValue* v = (sampleValue *)calloc(1, sizeof(sampleValue)); int totalRowsPerTbl = rowsOfPerColum * bingNum; @@ -3683,50 +3882,50 @@ static int stmt_specifyCol_bind_case_003_autoCreateTbl(TAOS_STMT *stmt, int tabl } for (int i = 0; i < 1; ++i) { - tags[i+0].buffer_type = TSDB_DATA_TYPE_INT; - tags[i+0].buffer = v->v4; + //tags[i+0].buffer_type = TSDB_DATA_TYPE_INT; + //tags[i+0].buffer = v->v4; + //tags[i+0].is_null = &one_not_null; + //tags[i+0].length = NULL; + + tags[i+0].buffer_type = TSDB_DATA_TYPE_BOOL; + tags[i+0].buffer = v->b; tags[i+0].is_null = &one_not_null; tags[i+0].length = NULL; - tags[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; - tags[i+1].buffer = v->b; + //tags[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; + //tags[i+2].buffer = v->v1; + //tags[i+2].is_null = &one_not_null; + //tags[i+2].length = NULL; + + tags[i+1].buffer_type = TSDB_DATA_TYPE_SMALLINT; + tags[i+1].buffer = v->v2; tags[i+1].is_null = &one_not_null; tags[i+1].length = NULL; - tags[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; - tags[i+2].buffer = v->v1; + tags[i+2].buffer_type = TSDB_DATA_TYPE_BIGINT; + tags[i+2].buffer = v->v8; tags[i+2].is_null = &one_not_null; tags[i+2].length = NULL; - tags[i+3].buffer_type = TSDB_DATA_TYPE_SMALLINT; - tags[i+3].buffer = v->v2; + tags[i+3].buffer_type = TSDB_DATA_TYPE_FLOAT; + tags[i+3].buffer = v->f4; tags[i+3].is_null = &one_not_null; tags[i+3].length = NULL; - tags[i+4].buffer_type = TSDB_DATA_TYPE_BIGINT; - tags[i+4].buffer = v->v8; + tags[i+4].buffer_type = TSDB_DATA_TYPE_DOUBLE; + tags[i+4].buffer = v->f8; tags[i+4].is_null = &one_not_null; tags[i+4].length = NULL; - tags[i+5].buffer_type = TSDB_DATA_TYPE_FLOAT; - tags[i+5].buffer = v->f4; + tags[i+5].buffer_type = TSDB_DATA_TYPE_BINARY; + tags[i+5].buffer = v->br; tags[i+5].is_null = &one_not_null; - tags[i+5].length = NULL; + tags[i+5].length = (uintptr_t *)lb; - tags[i+6].buffer_type = TSDB_DATA_TYPE_DOUBLE; - tags[i+6].buffer = v->f8; + tags[i+6].buffer_type = TSDB_DATA_TYPE_NCHAR; + tags[i+6].buffer = v->nr; tags[i+6].is_null = &one_not_null; - tags[i+6].length = NULL; - - tags[i+7].buffer_type = TSDB_DATA_TYPE_BINARY; - tags[i+7].buffer = v->br; - tags[i+7].is_null = &one_not_null; - tags[i+7].length = (uintptr_t *)lb; - - tags[i+8].buffer_type = TSDB_DATA_TYPE_NCHAR; - tags[i+8].buffer = v->nr; - tags[i+8].is_null = &one_not_null; - tags[i+8].length = (uintptr_t *)lb; + tags[i+6].length = (uintptr_t *)lb; } @@ -3734,7 +3933,7 @@ static int stmt_specifyCol_bind_case_003_autoCreateTbl(TAOS_STMT *stmt, int tabl // create table m%d (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, br binary(%d), nr nchar(%d), ts2 timestamp) //char *sql = "insert into ? (ts,b,v4,f4,br) using stb1 tags (?,?,?,?,?,?,?,?,?) values(?,?,?,?,?)"; - char *sql = "insert into ? using stb1 tags (33,?,99,?,?,?,?,?,?) (ts,b,v4,f4,br) values(?,?,?,?,?)"; + char *sql = "insert into ? using stb1 (id1, id2, id3, id4, id5, id6, id7, id8, id9) tags (33,?,99,?,?,?,?,?,?) 
(ts,b,v4,f4,br) values(?,?,?,?,?)"; int code = taos_stmt_prepare(stmt, sql, 0); if (code != 0){ @@ -3808,7 +4007,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { //=======================================================================// //=============================== single table ==========================// //========== case 1: ======================// -#if 0 +#if 1 { stmt = taos_stmt_init(taos); @@ -3830,7 +4029,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { #endif //========== case 2: ======================// -#if 0 +#if 1 { stmt = taos_stmt_init(taos); @@ -3861,7 +4060,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { #endif //========== case 2-1: ======================// -#if 0 +#if 1 { stmt = taos_stmt_init(taos); @@ -3891,7 +4090,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { } #endif //========== case 2-2: ======================// -#if 0 +#if 1 { printf("====case 2-2 error test start\n"); stmt = taos_stmt_init(taos); @@ -3924,7 +4123,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { //========== case 3: ======================// -#if 0 +#if 1 { stmt = taos_stmt_init(taos); @@ -3955,7 +4154,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { #endif //========== case 4: ======================// -#if 0 +#if 1 { stmt = taos_stmt_init(taos); @@ -3988,7 +4187,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { //=======================================================================// //=============================== multi-rows to single table ==========================// //========== case 5: ======================// -#if 0 +#if 1 { stmt = taos_stmt_init(taos); @@ -4023,7 +4222,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { columnNum = 5; prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db6"); - stmt_specifyCol_bind_case_002_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + stmt_specifyCol_bind_case_001_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); totalRowsPerTbl = rowsOfPerColum * bingNum; checkResult(taos, "m0", 0, totalRowsPerTbl); @@ -4033,28 +4232,175 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { } #endif - //========== case 7: ======================// -#if 0 + //========== case 7: ======================// +#if 1 + { + stmt = taos_stmt_init(taos); + + tableNum = 200; + rowsOfPerColum = 60; + bingNum = 1; + lenOfBinaryDef = 40; + lenOfBinaryAct = 8; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db7"); + stmt_specifyCol_bind_case_001_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + checkResult(taos, "m1", 0, totalRowsPerTbl); + checkResult(taos, "m99", 0, totalRowsPerTbl); + checkResult(taos, "m139", 0, totalRowsPerTbl); + checkResult(taos, "m199", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("case 7 check result end\n\n"); + } +#endif + + //========== case 8: ======================// +#if 1 { stmt = taos_stmt_init(taos); tableNum = 1; - rowsOfPerColum = 23740; + rowsOfPerColum = 5; bingNum = 1; lenOfBinaryDef = 40; lenOfBinaryAct = 8; - columnNum = 1; + columnNum = 5; - prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db6"); - stmt_specifyCol_bind_case_003_autoCreateTbl(stmt, tableNum, 
rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db8"); + stmt_specifyCol_bind_case_002_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); totalRowsPerTbl = rowsOfPerColum * bingNum; checkResult(taos, "m0", 0, totalRowsPerTbl); taos_stmt_close(stmt); - printf("case 7 check result end\n\n"); + printf("case 8 check result end\n\n"); } #endif + //========== case 9: ======================// + +#if 1 + { + stmt = taos_stmt_init(taos); + + tableNum = 10; + rowsOfPerColum = 5; + bingNum = 1; + lenOfBinaryDef = 40; + lenOfBinaryAct = 8; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db9"); + stmt_specifyCol_bind_case_002_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + checkResult(taos, "m3", 0, totalRowsPerTbl); + checkResult(taos, "m6", 0, totalRowsPerTbl); + checkResult(taos, "m9", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("case 9 check result end\n\n"); + } +#endif + + //========== case 10: ======================// +#if 1 + { + stmt = taos_stmt_init(taos); + + tableNum = 1; + rowsOfPerColum = 23740; + bingNum = 1; + lenOfBinaryDef = 40; + lenOfBinaryAct = 8; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db10"); + stmt_specifyCol_bind_case_003_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("case 10 check result end\n\n"); + } +#endif + + //========== case 11: ======================// +#if 1 + { + stmt = taos_stmt_init(taos); + + tableNum = 2; + rowsOfPerColum = 5; + bingNum = 1; + lenOfBinaryDef = 40; + lenOfBinaryAct = 8; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db11"); + stmt_specifyCol_bind_case_003_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + checkResult(taos, "m1", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("case 11 check result end\n\n"); + } +#endif + + //========== case 12: ======================// +#if 1 + { + stmt = taos_stmt_init(taos); + + tableNum = 200; + rowsOfPerColum = 60; + bingNum = 1; + lenOfBinaryDef = 40; + lenOfBinaryAct = 8; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db12"); + stmt_specifyCol_bind_case_003_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + checkResult(taos, "m1", 0, totalRowsPerTbl); + checkResult(taos, "m99", 0, totalRowsPerTbl); + checkResult(taos, "m139", 0, totalRowsPerTbl); + checkResult(taos, "m199", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("case 12 check result end\n\n"); + } +#endif + + + //========== case 13: ======================// +#if 1 + { + printf("====case 13 error test start\n"); + stmt = taos_stmt_init(taos); + + tableNum = 1; + rowsOfPerColum = 8; + bingNum = 1; + lenOfBinaryDef = 40; + lenOfBinaryAct = 8; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, 
"db13"); + stmt_specifyCol_bind_case_004_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("====case 13 check result end\n\n"); + } +#endif + return ; } diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim index 56ce15c36f..ee5a750c88 100644 --- a/tests/script/general/parser/function.sim +++ b/tests/script/general/parser/function.sim @@ -24,6 +24,9 @@ sql drop database if exists $db sql create database $db keep 36500 sql use $db +print =====================================> td-4481 +sql create database $db + print =====================================> test case for twa in single block sql create table t1 (ts timestamp, k float); diff --git a/tests/script/general/parser/nestquery.sim b/tests/script/general/parser/nestquery.sim index 9e2736833f..3d13ff504d 100644 --- a/tests/script/general/parser/nestquery.sim +++ b/tests/script/general/parser/nestquery.sim @@ -9,7 +9,7 @@ sql connect print ======================== dnode1 start -$dbPrefix = nest_query +$dbPrefix = nest_db $tbPrefix = nest_tb $mtPrefix = nest_mt $tbNum = 10 @@ -17,7 +17,6 @@ $rowNum = 10000 $totalNum = $tbNum * $rowNum print =============== nestquery.sim - $i = 0 $db = $dbPrefix . $i $mt = $mtPrefix . $i diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim index afb76c799e..6265fc3a02 100644 --- a/tests/script/general/parser/testSuite.sim +++ b/tests/script/general/parser/testSuite.sim @@ -60,4 +60,5 @@ run general/parser/slimit_alter_tags.sim run general/parser/binary_escapeCharacter.sim run general/parser/between_and.sim run general/parser/last_cache.sim +run general/parser/nestquery.sim