Merge branch 'develop' into feature/cenc

Haojun Liao 2021-07-08 18:29:13 +08:00
commit 61e6528957
32 changed files with 816 additions and 157 deletions

View File

@@ -204,7 +204,7 @@ else
 exit 1
 fi
-make -j8
+make
 cd ${curr_dir}
@@ -246,15 +246,15 @@ if [ "$osType" != "Darwin" ]; then
 cd ${script_dir}/tools
 if [[ "$dbName" == "taos" ]]; then
-${csudo} ./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
+${csudo} ./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp}
 ${csudo} ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
 ${csudo} ./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
 elif [[ "$dbName" == "tq" ]]; then
-${csudo} ./makepkg_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
+${csudo} ./makepkg_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp}
 ${csudo} ./makeclient_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
 ${csudo} ./makearbi_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
 else
-${csudo} ./makepkg_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
+${csudo} ./makepkg_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp}
 ${csudo} ./makeclient_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
 ${csudo} ./makearbi_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
 fi

View File

@@ -772,9 +772,13 @@ vercomp () {
 function is_version_compatible() {
-curr_version=$(${bin_dir}/taosd -V | head -1 | cut -d ' ' -f 3)
-min_compatible_version=$(${script_dir}/bin/taosd -V | head -1 | cut -d ' ' -f 5)
+curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6`
+if [ -f ${script_dir}/driver/vercomp.txt ]; then
+min_compatible_version=`cat ${script_dir}/driver/vercomp.txt`
+else
+min_compatible_version=$(${script_dir}/bin/tqd -V | head -1 | cut -d ' ' -f 5)
+fi
 vercomp $curr_version $min_compatible_version
 case $? in

View File

@@ -741,9 +741,13 @@ vercomp () {
 function is_version_compatible() {
-curr_version=$(${bin_dir}/powerd -V | head -1 | cut -d ' ' -f 3)
-min_compatible_version=$(${script_dir}/bin/powerd -V | head -1 | cut -d ' ' -f 5)
+curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6`
+if [ -f ${script_dir}/driver/vercomp.txt ]; then
+min_compatible_version=`cat ${script_dir}/driver/vercomp.txt`
+else
+min_compatible_version=$(${script_dir}/bin/tqd -V | head -1 | cut -d ' ' -f 5)
+fi
 vercomp $curr_version $min_compatible_version
 case $? in

View File

@@ -741,9 +741,13 @@ vercomp () {
 function is_version_compatible() {
-curr_version=$(${bin_dir}/tqd -V | head -1 | cut -d ' ' -f 3)
-min_compatible_version=$(${script_dir}/bin/tqd -V | head -1 | cut -d ' ' -f 5)
+curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6`
+if [ -f ${script_dir}/driver/vercomp.txt ]; then
+min_compatible_version=`cat ${script_dir}/driver/vercomp.txt`
+else
+min_compatible_version=$(${script_dir}/bin/tqd -V | head -1 | cut -d ' ' -f 5)
+fi
 vercomp $curr_version $min_compatible_version
 case $? in

View File

@@ -14,6 +14,7 @@ osType=$5
 verMode=$6
 verType=$7
 pagMode=$8
+versionComp=$9
 script_dir="$(dirname $(readlink -f $0))"
 top_dir="$(readlink -f ${script_dir}/../..)"
@@ -175,8 +176,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
 cp -r ${examples_dir}/C# ${install_dir}/examples
 fi
 # Copy driver
-mkdir -p ${install_dir}/driver
-cp ${lib_files} ${install_dir}/driver
+mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt
 # Copy connector
 connector_dir="${code_dir}/connector"

View File

@@ -14,6 +14,7 @@ osType=$5
 verMode=$6
 verType=$7
 pagMode=$8
+versionComp=$9
 script_dir="$(dirname $(readlink -f $0))"
 top_dir="$(readlink -f ${script_dir}/../..)"
@@ -32,10 +33,10 @@ fi
 # Directories and files.
 #if [ "$pagMode" == "lite" ]; then
 # strip ${build_dir}/bin/taosd
 # strip ${build_dir}/bin/taos
 # bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${script_dir}/remove_power.sh"
 #else
 # bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh\
 # ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb"
 #fi
@@ -70,19 +71,19 @@ mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cf
 #mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
 mkdir -p ${install_dir}/bin
 if [ "$pagMode" == "lite" ]; then
 strip ${build_dir}/bin/taosd
 strip ${build_dir}/bin/taos
 # bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${script_dir}/remove_power.sh"
 cp ${build_dir}/bin/taos ${install_dir}/bin/power
 cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
 cp ${script_dir}/remove_power.sh ${install_dir}/bin
 else
 # bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh ${script_dir}/set_core.sh"
 cp ${build_dir}/bin/taos ${install_dir}/bin/power
 cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
 cp ${script_dir}/remove_power.sh ${install_dir}/bin
 cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
 cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
 cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
 cp ${script_dir}/set_core.sh ${install_dir}/bin
 cp ${script_dir}/get_client.sh ${install_dir}/bin
@@ -99,14 +100,14 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}
 if [ "$verMode" == "cluster" ]; then
 sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_power.sh >> remove_power_temp.sh
 mv remove_power_temp.sh ${install_dir}/bin/remove_power.sh
 mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
 cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
 rm -rf ${install_dir}/nginxd/png
 sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/*.html
 sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/js/*.js
 sed -i '/dataDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
 sed -i '/logDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
 sed -i "s/TDengine/PowerDB/g" ${install_dir}/cfg/taos.cfg
@@ -149,17 +150,16 @@ sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/c/*.c
 if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
 cp -r ${examples_dir}/JDBC ${install_dir}/examples
 cp -r ${examples_dir}/matlab ${install_dir}/examples
 sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/matlab/TDengineDemo.m
 cp -r ${examples_dir}/python ${install_dir}/examples
 sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/python/read_example.py
 cp -r ${examples_dir}/R ${install_dir}/examples
 sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/R/command.txt
 cp -r ${examples_dir}/go ${install_dir}/examples
 sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/taosdemo.go
 fi
 # Copy driver
-mkdir -p ${install_dir}/driver
-cp ${lib_files} ${install_dir}/driver
+mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt
 # Copy connector
 connector_dir="${code_dir}/connector"
@@ -178,11 +178,11 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
 echo "WARNING: go connector not found, please check if want to use it!"
 fi
 cp -r ${connector_dir}/python ${install_dir}/connector/
 sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/cinterface.py
 sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/subscription.py
 sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/connection.py
 fi
 # Copy release note
@@ -190,7 +190,7 @@ fi
 # exit 1
 cd ${release_dir}
 if [ "$verMode" == "cluster" ]; then
 pkg_name=${install_dir}-${osType}-${cpuType}
@@ -207,8 +207,8 @@ fi
 if [ "$verType" == "beta" ]; then
 pkg_name=${pkg_name}-${verType}
 elif [ "$verType" == "stable" ]; then
 pkg_name=${pkg_name}
 else
 echo "unknow verType, nor stabel or beta"
 exit 1

View File

@@ -14,6 +14,7 @@ osType=$5
 verMode=$6
 verType=$7
 pagMode=$8
+versionComp=$9
 script_dir="$(dirname $(readlink -f $0))"
 top_dir="$(readlink -f ${script_dir}/../..)"
@@ -32,10 +33,10 @@ fi
 # Directories and files.
 #if [ "$pagMode" == "lite" ]; then
 # strip ${build_dir}/bin/taosd
 # strip ${build_dir}/bin/taos
 # bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${script_dir}/remove_tq.sh"
 #else
 # bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${build_dir}/bin/tqdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_tq.sh\
 # ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb"
 #fi
@@ -70,13 +71,13 @@ mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cf
 #mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
 mkdir -p ${install_dir}/bin
 if [ "$pagMode" == "lite" ]; then
 strip ${build_dir}/bin/taosd
 strip ${build_dir}/bin/taos
 # bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${script_dir}/remove_tq.sh"
 cp ${build_dir}/bin/taos ${install_dir}/bin/tq
 cp ${build_dir}/bin/taosd ${install_dir}/bin/tqd
 cp ${script_dir}/remove_tq.sh ${install_dir}/bin
 else
 # bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${build_dir}/bin/tqdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_tq.sh ${script_dir}/set_core.sh"
 cp ${build_dir}/bin/taos ${install_dir}/bin/tq
 cp ${build_dir}/bin/taosd ${install_dir}/bin/tqd
@@ -99,14 +100,14 @@ chmod a+x ${install_dir}/bin/* || :
 if [ "$verMode" == "cluster" ]; then
 sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_tq.sh >> remove_tq_temp.sh
 mv remove_tq_temp.sh ${install_dir}/bin/remove_tq.sh
 mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
 cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
 rm -rf ${install_dir}/nginxd/png
 sed -i "s/TDengine/TQ/g" ${install_dir}/nginxd/admin/*.html
 sed -i "s/TDengine/TQ/g" ${install_dir}/nginxd/admin/js/*.js
 sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg
 sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg
 sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/taos.cfg
@@ -154,12 +155,11 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
 sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/python/read_example.py
 cp -r ${examples_dir}/R ${install_dir}/examples
 sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/R/command.txt
 cp -r ${examples_dir}/go ${install_dir}/examples
 sed -i '/root/ {s/taosdata/tqueue/g}' ${install_dir}/examples/go/taosdemo.go
 fi
 # Copy driver
-mkdir -p ${install_dir}/driver
-cp ${lib_files} ${install_dir}/driver
+mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt
 # Copy connector
 connector_dir="${code_dir}/connector"
@@ -178,11 +178,11 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
 echo "WARNING: go connector not found, please check if want to use it!"
 fi
 cp -r ${connector_dir}/python ${install_dir}/connector/
 sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/cinterface.py
 sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/subscription.py
 sed -i '/self._password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/connection.py
 fi
 # Copy release note
@@ -190,7 +190,7 @@ fi
 # exit 1
 cd ${release_dir}
 if [ "$verMode" == "cluster" ]; then
 pkg_name=${install_dir}-${osType}-${cpuType}
@@ -207,8 +207,8 @@ fi
 if [ "$verType" == "beta" ]; then
 pkg_name=${pkg_name}-${verType}
 elif [ "$verType" == "stable" ]; then
 pkg_name=${pkg_name}
 else
 echo "unknow verType, nor stabel or beta"
 exit 1

View File

@@ -961,6 +961,10 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
 break;
 }
+if (sToken.n == 0 || sToken.type == TK_SEMI || index == 0) {
+return tscSQLSyntaxErrMsg(pCmd->payload, "unexpected token", sql);
+}
 sql += index;
 ++numOfColsAfterTags;
 }

View File

@@ -2087,7 +2087,6 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t
 SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j];
 vmsg->vgId = htonl(vmsg->vgId);
-vmsg->numOfEps = vmsg->numOfEps;
 for (int32_t k = 0; k < vmsg->numOfEps; ++k) {
 vmsg->epAddr[k].port = htons(vmsg->epAddr[k].port);
 }

View File

@@ -666,7 +666,9 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo
 void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
 assert(pRes->numOfCols > 0);
+if (pRes->numOfRows == 0) {
+return;
+}
 int32_t offset = 0;
 for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
 SInternalField* pInfo = (SInternalField*)TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i);
@@ -778,6 +780,37 @@ typedef struct SJoinOperatorInfo {
 SRspResultInfo resultInfo; // todo refactor, add this info for each operator
 } SJoinOperatorInfo;
+static void converNcharFilterColumn(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols, int32_t rows, bool *gotNchar) {
+for (int32_t i = 0; i < numOfFilterCols; ++i) {
+if (pFilterInfo[i].info.type == TSDB_DATA_TYPE_NCHAR) {
+pFilterInfo[i].pData2 = pFilterInfo[i].pData;
+pFilterInfo[i].pData = malloc(rows * pFilterInfo[i].info.bytes);
+int32_t bufSize = pFilterInfo[i].info.bytes - VARSTR_HEADER_SIZE;
+for (int32_t j = 0; j < rows; ++j) {
+char* dst = (char *)pFilterInfo[i].pData + j * pFilterInfo[i].info.bytes;
+char* src = (char *)pFilterInfo[i].pData2 + j * pFilterInfo[i].info.bytes;
+int32_t len = 0;
+taosMbsToUcs4(varDataVal(src), varDataLen(src), varDataVal(dst), bufSize, &len);
+varDataLen(dst) = len;
+}
+*gotNchar = true;
+}
+}
+}
+static void freeNcharFilterColumn(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols) {
+for (int32_t i = 0; i < numOfFilterCols; ++i) {
+if (pFilterInfo[i].info.type == TSDB_DATA_TYPE_NCHAR) {
+if (pFilterInfo[i].pData2) {
+tfree(pFilterInfo[i].pData);
+pFilterInfo[i].pData = pFilterInfo[i].pData2;
+pFilterInfo[i].pData2 = NULL;
+}
+}
+}
+}
 static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols) {
 int32_t offset = 0;
 char* pData = pRes->data;
@@ -796,8 +829,13 @@ static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, SSingleColumnF
 // filter data if needed
 if (numOfFilterCols > 0) {
 doSetFilterColumnInfo(pFilterInfo, numOfFilterCols, pBlock);
+bool gotNchar = false;
+converNcharFilterColumn(pFilterInfo, numOfFilterCols, pBlock->info.rows, &gotNchar);
 int8_t* p = calloc(pBlock->info.rows, sizeof(int8_t));
 bool all = doFilterDataBlock(pFilterInfo, numOfFilterCols, pBlock->info.rows, p);
+if (gotNchar) {
+freeNcharFilterColumn(pFilterInfo, numOfFilterCols);
+}
 if (!all) {
 doCompactSDataBlock(pBlock, pBlock->info.rows, p);
 }
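The converNcharFilterColumn/freeNcharFilterColumn pair above widens NCHAR filter columns from multibyte text to the stored wide encoding before doFilterDataBlock runs, then swaps the original buffers back. A minimal standalone sketch of the same widen-compare-restore idea, using the C library mbstowcs in place of TDengine's taosMbsToUcs4 and plain string arrays in place of the filter-info structs (the names here are illustrative only):

    #include <locale.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <wchar.h>

    int main(void) {
        setlocale(LC_CTYPE, "");                  /* use the environment's (UTF-8) locale */

        const char *rows[] = {"alpha", "beta"};   /* multibyte NCHAR-like column values */
        const char *filterValue = "beta";

        wchar_t wideFilter[16];
        mbstowcs(wideFilter, filterValue, 16);    /* widen the filter constant once */

        for (int i = 0; i < 2; ++i) {
            wchar_t wideRow[16];
            mbstowcs(wideRow, rows[i], 16);       /* widen each row value before comparing */
            printf("row %d %s\n", i, wcscmp(wideRow, wideFilter) == 0 ? "kept" : "filtered out");
        }
        /* In the patch the original multibyte buffer is parked in pData2 and restored
         * by freeNcharFilterColumn once filtering is done. */
        return 0;
    }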

View File

@@ -2,9 +2,10 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)
-FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib)
+FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64)
+FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64)
-IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
+IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
 MESSAGE(STATUS "gTest library found, build unit test")
 # GoogleTest requires at least C++11
@@ -17,4 +18,4 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
 ADD_EXECUTABLE(cliTest ${SOURCE_LIST})
 TARGET_LINK_LIBRARIES(cliTest taos tutil common gtest pthread)
 ENDIF()

View File

@@ -20,8 +20,8 @@ IF (TD_LINUX_64)
 if (CMAKE_C_COMPILER_ID STREQUAL "GNU" AND CMAKE_C_COMPILER_VERSION VERSION_LESS 5.0.0)
 message(WARNING "gcc 4.8.0 will complain too much about flex-generated code, we just bypass building ODBC driver in such case")
 else ()
-SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Wconversion")
-SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Wconversion")
+SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ")
+SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ")
 ADD_SUBDIRECTORY(src)
 ADD_SUBDIRECTORY(tools)
 ADD_SUBDIRECTORY(examples)

View File

@@ -45,7 +45,7 @@ extern void updateBuffer(Command *cmd);
 extern int isReadyGo(Command *cmd);
 extern void resetCommand(Command *cmd, const char s[]);
-int countPrefixOnes(char c);
+int countPrefixOnes(unsigned char c);
 void clearScreen(int ecmd_pos, int cursor_pos);
 void printChar(char c, int times);
 void positionCursor(int step, int direction);

View File

@@ -26,7 +26,7 @@ typedef struct {
 char widthOnScreen;
 } UTFCodeInfo;
-int countPrefixOnes(char c) {
+int countPrefixOnes(unsigned char c) {
 unsigned char mask = 127;
 mask = ~mask;
 int ret = 0;
@@ -48,7 +48,7 @@ void getPrevCharSize(const char *str, int pos, int *size, int *width) {
 while (--pos >= 0) {
 *size += 1;
-if (str[pos] > 0 || countPrefixOnes(str[pos]) > 1) break;
+if (str[pos] > 0 || countPrefixOnes((unsigned char )str[pos]) > 1) break;
 }
 int rc = mbtowc(&wc, str + pos, MB_CUR_MAX);
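Changing countPrefixOnes to take unsigned char matters because UTF-8 lead bytes have the high bit set; as a plain (possibly signed) char they become negative after integer promotion, so checks like str[pos] > 0 and the prefix-bit counting misbehave. A standalone sketch of the idea, with an assumed loop body (only the signature and the mask setup appear in the hunks above):

    #include <stdio.h>

    /* Count the leading 1 bits of a byte; for a UTF-8 lead byte this equals the
     * length of the encoded sequence. */
    int countPrefixOnes(unsigned char c) {
        unsigned char mask = 127;
        mask = ~mask;                /* 0x80: probe bits from the most significant end */
        int ret = 0;
        while ((c & mask) != 0) {
            ret++;
            c <<= 1;                 /* slide the next bit under the mask */
        }
        return ret;
    }

    int main(void) {
        const char str[] = "\xE4\xB8\xAD";                        /* one 3-byte UTF-8 sequence */
        printf("%d\n", countPrefixOnes((unsigned char)str[0]));   /* prints 3 */
        return 0;
    }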

View File

@@ -3216,13 +3216,6 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) {
 return 0;
 }
-#if 0
-int readSampleFromJsonFileToMem(SSuperTable * superTblInfo) {
-// TODO
-return 0;
-}
-#endif
 /*
 Read 10000 lines at most. If more than 10000 lines, continue to read after using
 */
@@ -5122,13 +5115,13 @@ static int32_t generateStbDataTail(
 } else {
 lenOfRow = getRowDataFromSample(
 data,
-remainderBufLen < MAX_DATA_SIZE ? remainderBufLen : MAX_DATA_SIZE,
+(remainderBufLen < MAX_DATA_SIZE)?remainderBufLen:MAX_DATA_SIZE,
 startTime + superTblInfo->timeStampStep * k,
 superTblInfo,
 pSamplePos);
 }
-if (lenOfRow > remainderBufLen) {
+if ((lenOfRow + 1) > remainderBufLen) {
 break;
 }
@@ -5338,7 +5331,7 @@ static int64_t generateInterlaceDataWithoutStb(
 #if STMT_IFACE_ENABLED == 1
 static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
-char *dataType, int32_t dataLen, char **ptr)
+char *dataType, int32_t dataLen, char **ptr, char *value)
 {
 if (0 == strncasecmp(dataType,
 "BINARY", strlen("BINARY"))) {
@@ -5348,12 +5341,18 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
 return -1;
 }
 char *bind_binary = (char *)*ptr;
-rand_string(bind_binary, dataLen);
 bind->buffer_type = TSDB_DATA_TYPE_BINARY;
-bind->buffer_length = dataLen;
-bind->buffer = bind_binary;
+if (value) {
+strncpy(bind_binary, value, strlen(value));
+bind->buffer_length = strlen(bind_binary);
+} else {
+rand_string(bind_binary, dataLen);
+bind->buffer_length = dataLen;
+}
 bind->length = &bind->buffer_length;
+bind->buffer = bind_binary;
 bind->is_null = NULL;
 *ptr += bind->buffer_length;
@@ -5365,9 +5364,14 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
 return -1;
 }
 char *bind_nchar = (char *)*ptr;
-rand_string(bind_nchar, dataLen);
 bind->buffer_type = TSDB_DATA_TYPE_NCHAR;
+if (value) {
+strncpy(bind_nchar, value, strlen(value));
+} else {
+rand_string(bind_nchar, dataLen);
+}
 bind->buffer_length = strlen(bind_nchar);
 bind->buffer = bind_nchar;
 bind->length = &bind->buffer_length;
@@ -5378,7 +5382,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
 "INT", strlen("INT"))) {
 int32_t *bind_int = (int32_t *)*ptr;
-*bind_int = rand_int();
+if (value) {
+*bind_int = atoi(value);
+} else {
+*bind_int = rand_int();
+}
 bind->buffer_type = TSDB_DATA_TYPE_INT;
 bind->buffer_length = sizeof(int32_t);
 bind->buffer = bind_int;
@@ -5390,7 +5398,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
 "BIGINT", strlen("BIGINT"))) {
 int64_t *bind_bigint = (int64_t *)*ptr;
-*bind_bigint = rand_bigint();
+if (value) {
+*bind_bigint = atoll(value);
+} else {
+*bind_bigint = rand_bigint();
+}
 bind->buffer_type = TSDB_DATA_TYPE_BIGINT;
 bind->buffer_length = sizeof(int64_t);
 bind->buffer = bind_bigint;
@@ -5402,7 +5414,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
 "FLOAT", strlen("FLOAT"))) {
 float *bind_float = (float *) *ptr;
-*bind_float = rand_float();
+if (value) {
+*bind_float = (float)atof(value);
+} else {
+*bind_float = rand_float();
+}
 bind->buffer_type = TSDB_DATA_TYPE_FLOAT;
 bind->buffer_length = sizeof(float);
 bind->buffer = bind_float;
@@ -5414,7 +5430,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
 "DOUBLE", strlen("DOUBLE"))) {
 double *bind_double = (double *)*ptr;
-*bind_double = rand_double();
+if (value) {
+*bind_double = atof(value);
+} else {
+*bind_double = rand_double();
+}
 bind->buffer_type = TSDB_DATA_TYPE_DOUBLE;
 bind->buffer_length = sizeof(double);
 bind->buffer = bind_double;
@@ -5426,7 +5446,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
 "SMALLINT", strlen("SMALLINT"))) {
 int16_t *bind_smallint = (int16_t *)*ptr;
-*bind_smallint = rand_smallint();
+if (value) {
+*bind_smallint = (int16_t)atoi(value);
+} else {
+*bind_smallint = rand_smallint();
+}
 bind->buffer_type = TSDB_DATA_TYPE_SMALLINT;
 bind->buffer_length = sizeof(int16_t);
 bind->buffer = bind_smallint;
@@ -5438,7 +5462,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
 "TINYINT", strlen("TINYINT"))) {
 int8_t *bind_tinyint = (int8_t *)*ptr;
-*bind_tinyint = rand_tinyint();
+if (value) {
+*bind_tinyint = (int8_t)atoi(value);
+} else {
+*bind_tinyint = rand_tinyint();
+}
 bind->buffer_type = TSDB_DATA_TYPE_TINYINT;
 bind->buffer_length = sizeof(int8_t);
 bind->buffer = bind_tinyint;
@@ -5461,7 +5489,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
 "TIMESTAMP", strlen("TIMESTAMP"))) {
 int64_t *bind_ts2 = (int64_t *) *ptr;
-*bind_ts2 = rand_bigint();
+if (value) {
+*bind_ts2 = atoll(value);
+} else {
+*bind_ts2 = rand_bigint();
+}
 bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
 bind->buffer_length = sizeof(int64_t);
 bind->buffer = bind_ts2;
@@ -5527,12 +5559,13 @@ static int32_t prepareStmtWithoutStb(
 ptr += bind->buffer_length;
 for (int i = 0; i < g_args.num_of_CPR; i ++) {
-bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * (i + 1)));
+bind = (TAOS_BIND *)((char *)bindArray
++ (sizeof(TAOS_BIND) * (i + 1)));
 if ( -1 == prepareStmtBindArrayByType(
 bind,
 data_type[i],
 g_args.len_of_binary,
-&ptr)) {
+&ptr, NULL)) {
 return -1;
 }
 }
@@ -5551,12 +5584,14 @@
 return k;
 }
-static int32_t prepareStbStmt(SSuperTable *stbInfo,
+static int32_t prepareStbStmt(
+SSuperTable *stbInfo,
 TAOS_STMT *stmt,
 char *tableName, uint32_t batch,
 uint64_t insertRows,
 uint64_t recordFrom,
-int64_t startTime, char *buffer)
+int64_t startTime,
+int64_t *pSamplePos)
 {
 int ret = taos_stmt_set_tbname(stmt, tableName);
 if (ret != 0) {
@@ -5567,16 +5602,24 @@ static int32_t prepareStbStmt(SSuperTable *stbInfo,
 char *bindArray = malloc(sizeof(TAOS_BIND) * (stbInfo->columnCount + 1));
 if (bindArray == NULL) {
-errorPrint("Failed to allocate %d bind params\n",
-(stbInfo->columnCount + 1));
+errorPrint("%s() LN%d, Failed to allocate %d bind params\n",
+__func__, __LINE__, (stbInfo->columnCount + 1));
 return -1;
 }
-bool tsRand;
+bool sourceRand;
 if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) {
-tsRand = true;
+sourceRand = true;
 } else {
-tsRand = false;
+sourceRand = false; // from sample data file
+}
+
+char *bindBuffer = malloc(g_args.len_of_binary);
+if (bindBuffer == NULL) {
+errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n",
+__func__, __LINE__, g_args.len_of_binary);
+free(bindArray);
+return -1;
 }
 uint32_t k;
@@ -5592,7 +5635,7 @@ static int32_t prepareStbStmt(SSuperTable *stbInfo,
 bind_ts = (int64_t *)ptr;
 bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
-if (tsRand) {
+if (sourceRand) {
 *bind_ts = startTime + getTSRandTail(
 stbInfo->timeStampStep, k,
 stbInfo->disorderRatio,
@@ -5607,14 +5650,46 @@ static int32_t prepareStbStmt(SSuperTable *stbInfo,
 ptr += bind->buffer_length;
+int cursor = 0;
 for (int i = 0; i < stbInfo->columnCount; i ++) {
 bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * (i + 1)));
-if ( -1 == prepareStmtBindArrayByType(
-bind,
-stbInfo->columns[i].dataType,
-stbInfo->columns[i].dataLen,
-&ptr)) {
-return -1;
+if (sourceRand) {
+if ( -1 == prepareStmtBindArrayByType(
+bind,
+stbInfo->columns[i].dataType,
+stbInfo->columns[i].dataLen,
+&ptr,
+NULL)) {
+free(bindArray);
+free(bindBuffer);
+return -1;
+}
+} else {
+char *restStr = stbInfo->sampleDataBuf + cursor;
+int lengthOfRest = strlen(restStr);
+int index = 0;
+for (index = 0; index < lengthOfRest; index ++) {
+if (restStr[index] == ',') {
+break;
+}
+}
+memset(bindBuffer, 0, g_args.len_of_binary);
+strncpy(bindBuffer, restStr, index);
+cursor += index + 1; // skip ',' too
+if ( -1 == prepareStmtBindArrayByType(
+bind,
+stbInfo->columns[i].dataType,
+stbInfo->columns[i].dataLen,
+&ptr,
+bindBuffer)) {
+free(bindArray);
+free(bindBuffer);
+return -1;
+}
 }
 }
 taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
@@ -5623,11 +5698,16 @@ static int32_t prepareStbStmt(SSuperTable *stbInfo,
 k++;
 recordFrom ++;
+if (!sourceRand) {
+(*pSamplePos) ++;
+}
 if (recordFrom >= insertRows) {
 break;
 }
 }
+free(bindBuffer);
 free(bindArray);
 return k;
 }
@@ -5820,13 +5900,14 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
 if (superTblInfo) {
 if (superTblInfo->iface == STMT_IFACE) {
 #if STMT_IFACE_ENABLED == 1
-generated = prepareStbStmt(superTblInfo,
+generated = prepareStbStmt(
+superTblInfo,
 pThreadInfo->stmt,
 tableName,
 batchPerTbl,
 insertRows, i,
 startTime,
-pThreadInfo->buffer);
+&(pThreadInfo->samplePos));
 #else
 generated = -1;
 #endif
@@ -6051,7 +6132,8 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
 pThreadInfo->stmt,
 tableName,
 g_args.num_of_RPR,
-insertRows, i, start_time, pstr);
+insertRows, i, start_time,
+&(pThreadInfo->samplePos));
 #else
 generated = -1;
 #endif
@@ -7332,6 +7414,7 @@ static void *superSubscribe(void *sarg) {
 TAOS_RES* res = NULL;
 uint64_t st = 0, et = 0;
+while ((g_queryInfo.superQueryInfo.endAfterConsume == -1)
 || (g_queryInfo.superQueryInfo.endAfterConsume >
 consumed[pThreadInfo->end_table_to
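prepareStmtBindArrayByType now takes an extra value argument: when the data source is a sample file, the caller walks stbInfo->sampleDataBuf, copies the text up to the next comma into bindBuffer, and the bind routine parses that text into the column's native type instead of generating a random value. A standalone sketch of the comma-split and per-type parse step (the row string and type list are illustrative, not the taosdemo structures):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void) {
        const char *sampleRow = "42,3.14,hello";          /* one sampled row */
        const char *types[] = {"INT", "DOUBLE", "BINARY"};
        char field[64];
        int cursor = 0;

        for (int i = 0; i < 3; ++i) {
            const char *rest = sampleRow + cursor;
            int len = 0;
            while (rest[len] != '\0' && rest[len] != ',') {
                len++;                                    /* find the next comma */
            }
            memset(field, 0, sizeof(field));
            strncpy(field, rest, len);
            cursor += len + 1;                            /* skip the ',' too */

            /* Parse the text into the column's native type, as the value branch does. */
            if (strcmp(types[i], "INT") == 0) {
                printf("INT    -> %d\n", atoi(field));
            } else if (strcmp(types[i], "DOUBLE") == 0) {
                printf("DOUBLE -> %f\n", atof(field));
            } else {
                printf("BINARY -> %s\n", field);
            }
        }
        return 0;
    }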

View File

@@ -27,6 +27,8 @@
 #include "tutil.h"
 #include <taos.h>
+#define TSDB_SUPPORT_NANOSECOND 1
 #define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255
 #define COMMAND_SIZE 65536
 #define MAX_RECORDS_PER_REQ 32766
@@ -228,7 +230,11 @@ static struct argp_option options[] = {
 {"avro", 'V', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2},
 {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 4},
 {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5},
+#if TSDB_SUPPORT_NANOSECOND == 1
 {"precision", 'C', "PRECISION", 0, "Epoch precision. Valid value is one of ms, us, and ns. Default is ms.", 6},
+#else
+{"precision", 'C', "PRECISION", 0, "Epoch precision. Valid value is one of ms and us. Default is ms.", 6},
+#endif
 {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3},
 {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
 {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
@@ -527,7 +533,10 @@ static void parse_precision_first(
 }
 if ((0 != strncasecmp(tmp, "ms", strlen("ms")))
 && (0 != strncasecmp(tmp, "us", strlen("us")))
-&& (0 != strncasecmp(tmp, "ns", strlen("ns")))) {
+#if TSDB_SUPPORT_NANOSECOND == 1
+&& (0 != strncasecmp(tmp, "ns", strlen("ns")))
+#endif
+) {
 //
 errorPrint("input precision: %s is invalid value\n", tmp);
 free(tmp);
@@ -564,9 +573,11 @@ static void parse_timestamp(
 } else if (0 == strncasecmp(arguments->precision,
 "us", strlen("us"))) {
 timePrec = TSDB_TIME_PRECISION_MICRO;
+#if TSDB_SUPPORT_NANOSECOND == 1
 } else if (0 == strncasecmp(arguments->precision,
 "ns", strlen("ns"))) {
 timePrec = TSDB_TIME_PRECISION_NANO;
+#endif
 } else {
 errorPrint("Invalid time precision: %s",
 arguments->precision);

View File

@@ -2,9 +2,10 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)
-FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib)
+FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64)
+FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64)
-IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
+IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
 MESSAGE(STATUS "gTest library found, build unit test")
 # GoogleTest requires at least C++11
@@ -17,4 +18,4 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR)
 ADD_EXECUTABLE(osTest ${SOURCE_LIST})
 TARGET_LINK_LIBRARIES(osTest taos os tutil common gtest pthread)
 ENDIF()

View File

@@ -74,14 +74,14 @@ typedef struct SResultRowPool {
 typedef struct SResultRow {
 int32_t pageId; // pageId & rowId is the position of current result in disk-based output buffer
 int32_t offset:29; // row index in buffer page
 bool startInterp; // the time window start timestamp has done the interpolation already.
 bool endInterp; // the time window end timestamp has done the interpolation already.
 bool closed; // this result status: closed or opened
 uint32_t numOfRows; // number of rows of current time window
 SResultRowCellInfo* pCellInfo; // For each result column, there is a resultInfo
 STimeWindow win;
-char* key; // start key of current result row
+char *key; // start key of current result row
 } SResultRow;
 typedef struct SGroupResInfo {
@@ -106,7 +106,7 @@ typedef struct SResultRowInfo {
 int16_t type:8; // data type for hash key
 int32_t size:24; // number of result set
 int32_t capacity; // max capacity
-int32_t curIndex; // current start active index
+SResultRow* current; // current start active index
 int64_t prevSKey; // previous (not completed) sliding window start key
 } SResultRowInfo;
@@ -119,6 +119,7 @@ typedef struct SColumnFilterElem {
 typedef struct SSingleColumnFilterInfo {
 void* pData;
+void* pData2; //used for nchar column
 int32_t numOfFilters;
 SColumnInfo info;
 SColumnFilterElem* pFilters;

View File

@@ -411,6 +411,21 @@ static void prepareResultListBuffer(SResultRowInfo* pResultRowInfo, SQueryRuntim
 pResultRowInfo->capacity = (int32_t)newCapacity;
 }
+static int32_t ascResultRowCompareFn(const void* p1, const void* p2) {
+SResultRow* pRow1 = *(SResultRow**)p1;
+SResultRow* pRow2 = *(SResultRow**)p2;
+if (pRow1 == pRow2) {
+return 0;
+} else {
+return pRow1->win.skey < pRow2->win.skey? -1:1;
+}
+}
+static int32_t descResultRowCompareFn(const void* p1, const void* p2) {
+return -ascResultRowCompareFn(p1, p2);
+}
 static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, char *pData,
 int16_t bytes, bool masterscan, uint64_t uid) {
 bool existed = false;
@@ -426,11 +441,17 @@ static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SRes
 }
 if (p1 != NULL) {
-for(int32_t i = pResultRowInfo->size - 1; i >= 0; --i) {
-if (pResultRowInfo->pResult[i] == (*p1)) {
-pResultRowInfo->curIndex = i;
+pResultRowInfo->current = (*p1);
+if (pResultRowInfo->size == 0) {
+existed = false;
+} else if (pResultRowInfo->size == 1) {
+existed = (pResultRowInfo->pResult[0] == (*p1));
+} else {
+__compar_fn_t fn = QUERY_IS_ASC_QUERY(pRuntimeEnv->pQueryAttr)? ascResultRowCompareFn:descResultRowCompareFn;
+void* ptr = taosbsearch(p1, pResultRowInfo->pResult, pResultRowInfo->size, POINTER_BYTES, fn, TD_EQ);
+if (ptr != NULL) {
 existed = true;
-break;
 }
 }
 }
@@ -457,8 +478,8 @@
 pResult = *p1;
 }
-pResultRowInfo->pResult[pResultRowInfo->size] = pResult;
-pResultRowInfo->curIndex = pResultRowInfo->size++;
+pResultRowInfo->pResult[pResultRowInfo->size++] = pResult;
+pResultRowInfo->current = pResult;
 }
 // too many time window in query
@@ -466,7 +487,7 @@
 longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW);
 }
-return getResultRow(pResultRowInfo, pResultRowInfo->curIndex);
+return pResultRowInfo->current;
 }
 static void getInitialStartTimeWindow(SQueryAttr* pQueryAttr, TSKEY ts, STimeWindow* w) {
@@ -497,7 +518,7 @@ static void getInitialStartTimeWindow(SQueryAttr* pQueryAttr, TSKEY ts, STimeWin
 static STimeWindow getActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t ts, SQueryAttr *pQueryAttr) {
 STimeWindow w = {0};
-if (pResultRowInfo->curIndex == -1) { // the first window, from the previous stored value
+if (pResultRowInfo->current == NULL) { // the first window, from the previous stored value
 if (pResultRowInfo->prevSKey == TSKEY_INITIAL_VAL) {
 getInitialStartTimeWindow(pQueryAttr, ts, &w);
 pResultRowInfo->prevSKey = w.skey;
@@ -511,8 +532,9 @@ static STimeWindow getActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t
 w.ekey = w.skey + pQueryAttr->interval.interval - 1;
 }
 } else {
-int32_t slot = curTimeWindowIndex(pResultRowInfo);
-SResultRow* pWindowRes = getResultRow(pResultRowInfo, slot);
+// int32_t slot = curTimeWindowIndex(pResultRowInfo);
+// SResultRow* pWindowRes = getResultRow(pResultRowInfo, slot);
+SResultRow* pWindowRes = pResultRowInfo->current;
 w = pWindowRes->win;
 }
@@ -698,7 +720,12 @@ static void doUpdateResultRowIndex(SResultRowInfo*pResultRowInfo, TSKEY lastKey,
 // all result rows are closed, set the last one to be the skey
 if (skey == TSKEY_INITIAL_VAL) {
-pResultRowInfo->curIndex = pResultRowInfo->size - 1;
+if (pResultRowInfo->size == 0) {
+// assert(pResultRowInfo->current == NULL);
+pResultRowInfo->current = NULL;
+} else {
+pResultRowInfo->current = pResultRowInfo->pResult[pResultRowInfo->size - 1];
+}
 } else {
 for (i = pResultRowInfo->size - 1; i >= 0; --i) {
@@ -709,12 +736,12 @@ static void doUpdateResultRowIndex(SResultRowInfo*pResultRowInfo, TSKEY lastKey,
 }
 if (i == pResultRowInfo->size - 1) {
-pResultRowInfo->curIndex = i;
+pResultRowInfo->current = pResultRowInfo->pResult[i];
 } else {
-pResultRowInfo->curIndex = i + 1; // current not closed result object
+pResultRowInfo->current = pResultRowInfo->pResult[i + 1]; // current not closed result object
 }
-pResultRowInfo->prevSKey = pResultRowInfo->pResult[pResultRowInfo->curIndex]->win.skey;
+pResultRowInfo->prevSKey = pResultRowInfo->current->win.skey;
 }
 }
@@ -722,7 +749,7 @@ static void updateResultRowInfoActiveIndex(SResultRowInfo* pResultRowInfo, SQuer
 bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr);
 if ((lastKey > pQueryAttr->window.ekey && ascQuery) || (lastKey < pQueryAttr->window.ekey && (!ascQuery))) {
 closeAllResultRows(pResultRowInfo);
-pResultRowInfo->curIndex = pResultRowInfo->size - 1;
+pResultRowInfo->current = pResultRowInfo->pResult[pResultRowInfo->size - 1];
 } else {
 int32_t step = ascQuery ? 1 : -1;
 doUpdateResultRowIndex(pResultRowInfo, lastKey - step, ascQuery, pQueryAttr->timeWindowInterpo);
@@ -1332,7 +1359,8 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
 int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order);
 bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr);
-int32_t prevIndex = curTimeWindowIndex(pResultRowInfo);
+SResultRow* prevRow = pResultRowInfo->current;
+// int32_t prevIndex = curTimeWindowIndex(pResultRowInfo);
 TSKEY* tsCols = NULL;
 if (pSDataBlock->pDataBlock != NULL) {
@@ -1361,13 +1389,19 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
 getNumOfRowsInTimeWindow(pRuntimeEnv, &pSDataBlock->info, tsCols, startPos, ekey, binarySearchForKey, true);
 // prev time window not interpolation yet.
-int32_t curIndex = curTimeWindowIndex(pResultRowInfo);
-if (prevIndex != -1 && prevIndex < curIndex && pQueryAttr->timeWindowInterpo) {
-for (int32_t j = prevIndex; j < curIndex; ++j) { // previous time window may be all closed already.
+// int32_t curIndex = curTimeWindowIndex(pResultRowInfo);
+// if (prevIndex != -1 && prevIndex < curIndex && pQueryAttr->timeWindowInterpo) {
+// for (int32_t j = prevIndex; j < curIndex; ++j) { // previous time window may be all closed already.
+if (prevRow != NULL && prevRow != pResultRowInfo->current && pQueryAttr->timeWindowInterpo) {
+int32_t j = 0;
+while(pResultRowInfo->pResult[j] != prevRow) {
+j++;
+}
+for(; pResultRowInfo->pResult[j] != pResultRowInfo->current; ++j) {
 SResultRow* pRes = pResultRowInfo->pResult[j];
 if (pRes->closed) {
-assert(resultRowInterpolated(pRes, RESULT_ROW_START_INTERP) &&
-resultRowInterpolated(pRes, RESULT_ROW_END_INTERP));
+assert(resultRowInterpolated(pRes, RESULT_ROW_START_INTERP) && resultRowInterpolated(pRes, RESULT_ROW_END_INTERP));
 continue;
 }
@@ -3255,7 +3289,7 @@ void copyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, int32_t threshold, SSDataBl
 }
-static void updateTableQueryInfoForReverseScan(SQueryAttr *pQueryAttr, STableQueryInfo *pTableQueryInfo) {
+static void updateTableQueryInfoForReverseScan(STableQueryInfo *pTableQueryInfo) {
 if (pTableQueryInfo == NULL) {
 return;
 }
@@ -3267,7 +3301,12 @@
 pTableQueryInfo->cur.vgroupIndex = -1;
 // set the index to be the end slot of result rows array
-pTableQueryInfo->resInfo.curIndex = pTableQueryInfo->resInfo.size - 1;
+SResultRowInfo* pResRowInfo = &pTableQueryInfo->resInfo;
+if (pResRowInfo->size > 0) {
+pResRowInfo->current = pResRowInfo->pResult[pResRowInfo->size - 1];
+} else {
+pResRowInfo->current = NULL;
+}
 }
 static void setupQueryRangeForReverseScan(SQueryRuntimeEnv* pRuntimeEnv) {
@@ -3281,7 +3320,7 @@
 size_t t = taosArrayGetSize(group);
 for (int32_t j = 0; j < t; ++j) {
 STableQueryInfo *pCheckInfo = taosArrayGetP(group, j);
-updateTableQueryInfoForReverseScan(pQueryAttr, pCheckInfo);
+updateTableQueryInfoForReverseScan(pCheckInfo);
 // update the last key in tableKeyInfo list, the tableKeyInfo is used to build the tsdbQueryHandle and decide
 // the start check timestamp of tsdbQueryHandle
@@ -4716,7 +4755,7 @@ static SSDataBlock* doTableScan(void* param, bool *newgroup) {
 }
 if (pResultRowInfo->size > 0) {
-pResultRowInfo->curIndex = 0;
+pResultRowInfo->current = pResultRowInfo->pResult[0];
 pResultRowInfo->prevSKey = pResultRowInfo->pResult[0]->win.skey;
 }
@@ -4742,8 +4781,8 @@
 pTableScanInfo->order = cond.order;
 if (pResultRowInfo->size > 0) {
-pResultRowInfo->curIndex = pResultRowInfo->size-1;
-pResultRowInfo->prevSKey = pResultRowInfo->pResult[pResultRowInfo->size-1]->win.skey;
+pResultRowInfo->current = pResultRowInfo->pResult[pResultRowInfo->size - 1];
+pResultRowInfo->prevSKey = pResultRowInfo->current->win.skey;
 }
 p = doTableScanImpl(pOperator, newgroup);

View File

@ -952,7 +952,7 @@ void SqlInfoDestroy(SSqlInfo *pInfo) {
taosArrayDestroy(pInfo->pMiscInfo->a); taosArrayDestroy(pInfo->pMiscInfo->a);
} }
if (pInfo->pMiscInfo != NULL && pInfo->type == TSDB_SQL_CREATE_DB) { if (pInfo->pMiscInfo != NULL && (pInfo->type == TSDB_SQL_CREATE_DB || pInfo->type == TSDB_SQL_ALTER_DB)) {
taosArrayDestroyEx(pInfo->pMiscInfo->dbOpt.keep, freeVariant); taosArrayDestroyEx(pInfo->pMiscInfo->dbOpt.keep, freeVariant);
} }

View File

@ -45,7 +45,7 @@ int32_t initResultRowInfo(SResultRowInfo *pResultRowInfo, int32_t size, int16_t
pResultRowInfo->type = type; pResultRowInfo->type = type;
pResultRowInfo->size = 0; pResultRowInfo->size = 0;
pResultRowInfo->prevSKey = TSKEY_INITIAL_VAL; pResultRowInfo->prevSKey = TSKEY_INITIAL_VAL;
pResultRowInfo->curIndex = -1; pResultRowInfo->current = NULL;
pResultRowInfo->capacity = size; pResultRowInfo->capacity = size;
pResultRowInfo->pResult = calloc(pResultRowInfo->capacity, POINTER_BYTES); pResultRowInfo->pResult = calloc(pResultRowInfo->capacity, POINTER_BYTES);
@ -90,9 +90,9 @@ void resetResultRowInfo(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRo
SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, &groupIndex, sizeof(groupIndex), uid); SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, &groupIndex, sizeof(groupIndex), uid);
taosHashRemove(pRuntimeEnv->pResultRowHashTable, (const char *)pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(sizeof(groupIndex))); taosHashRemove(pRuntimeEnv->pResultRowHashTable, (const char *)pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(sizeof(groupIndex)));
} }
pResultRowInfo->curIndex = -1; pResultRowInfo->size = 0;
pResultRowInfo->size = 0; pResultRowInfo->current = NULL;
pResultRowInfo->prevSKey = TSKEY_INITIAL_VAL; pResultRowInfo->prevSKey = TSKEY_INITIAL_VAL;
} }

View File

@ -2,9 +2,10 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine) PROJECT(TDengine)
FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)
FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib) FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64)
FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64)
IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR) IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
MESSAGE(STATUS "gTest library found, build unit test") MESSAGE(STATUS "gTest library found, build unit test")
# GoogleTest requires at least C++11 # GoogleTest requires at least C++11

View File

@ -126,15 +126,38 @@ _hash_fn_t taosGetDefaultHashFunction(int32_t type) {
_hash_fn_t fn = NULL; _hash_fn_t fn = NULL;
switch(type) { switch(type) {
case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT: fn = taosIntHash_64;break; case TSDB_DATA_TYPE_UBIGINT:
case TSDB_DATA_TYPE_BINARY: fn = MurmurHash3_32;break; case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_NCHAR: fn = MurmurHash3_32;break; fn = taosIntHash_64;
case TSDB_DATA_TYPE_INT: fn = taosIntHash_32; break; break;
case TSDB_DATA_TYPE_SMALLINT: fn = taosIntHash_16; break; case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_TINYINT: fn = taosIntHash_8; break; fn = MurmurHash3_32;
case TSDB_DATA_TYPE_FLOAT: fn = taosFloatHash; break; break;
case TSDB_DATA_TYPE_DOUBLE: fn = taosDoubleHash; break; case TSDB_DATA_TYPE_NCHAR:
default: fn = taosIntHash_32;break; fn = MurmurHash3_32;
break;
case TSDB_DATA_TYPE_UINT:
case TSDB_DATA_TYPE_INT:
fn = taosIntHash_32;
break;
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_USMALLINT:
fn = taosIntHash_16;
break;
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_UTINYINT:
case TSDB_DATA_TYPE_TINYINT:
fn = taosIntHash_8;
break;
case TSDB_DATA_TYPE_FLOAT:
fn = taosFloatHash;
break;
case TSDB_DATA_TYPE_DOUBLE:
fn = taosDoubleHash;
break;
default:
fn = taosIntHash_32;
break;
} }
return fn; return fn;
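
For context, a minimal sketch of what the extended mapping above implies (the header names below are assumptions; the type constants and hasher names are the ones shown in this hunk): unsigned integer types now resolve to the same fixed-width hashers as their signed counterparts.

    #include <assert.h>
    #include "hash.h"    /* assumed location of _hash_fn_t and taosGetDefaultHashFunction */
    #include "ttype.h"   /* assumed location of the TSDB_DATA_TYPE_* constants */

    static void checkUnsignedHashers(void) {
        /* unsigned types pick the same hashers as their signed counterparts */
        assert(taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT)   == taosIntHash_64);
        assert(taosGetDefaultHashFunction(TSDB_DATA_TYPE_UINT)      == taosIntHash_32);
        assert(taosGetDefaultHashFunction(TSDB_DATA_TYPE_USMALLINT) == taosIntHash_16);
        assert(taosGetDefaultHashFunction(TSDB_DATA_TYPE_UTINYINT)  == taosIntHash_8);
    }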

View File

@ -2,14 +2,15 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine) PROJECT(TDengine)
FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)
FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib) FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64)
FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64)
IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR) IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
MESSAGE(STATUS "gTest library found, build unit test") MESSAGE(STATUS "gTest library found, build unit test")
INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR}) INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR})
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
LIST(REMOVE_ITEM SOURCE_LIST ${CMAKE_CURRENT_SOURCE_DIR}/trefTest.c) LIST(REMOVE_ITEM SOURCE_LIST ${CMAKE_CURRENT_SOURCE_DIR}/trefTest.c)
ADD_EXECUTABLE(utilTest ${SOURCE_LIST}) ADD_EXECUTABLE(utilTest ${SOURCE_LIST})
TARGET_LINK_LIBRARIES(utilTest tutil common os gtest pthread gcov) TARGET_LINK_LIBRARIES(utilTest tutil common os gtest pthread gcov)

View File

@ -95,7 +95,7 @@ void vnodeCtrlFlow(int32_t vgId, int32_t level) {
} }
void vnodeStartSyncFile(int32_t vgId) { void vnodeStartSyncFile(int32_t vgId) {
SVnodeObj *pVnode = vnodeAcquire(vgId); SVnodeObj *pVnode = vnodeAcquireNotClose(vgId);
if (pVnode == NULL) { if (pVnode == NULL) {
vError("vgId:%d, vnode not found while start filesync", vgId); vError("vgId:%d, vnode not found while start filesync", vgId);
return; return;
@ -155,7 +155,7 @@ int32_t vnodeWriteToCache(int32_t vgId, void *wparam, int32_t qtype, void *rpara
} }
int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver) { int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver) {
SVnodeObj *pVnode = vnodeAcquire(vgId); SVnodeObj *pVnode = vnodeAcquireNotClose(vgId);
if (pVnode == NULL) { if (pVnode == NULL) {
vError("vgId:%d, vnode not found while write to cache", vgId); vError("vgId:%d, vnode not found while write to cache", vgId);
return -1; return -1;

View File

@ -345,12 +345,14 @@ python3 ./test.py -f tag_lite/unsignedTinyint.py
python3 ./test.py -f functions/function_percentile2.py python3 ./test.py -f functions/function_percentile2.py
python3 ./test.py -f insert/boundary2.py python3 ./test.py -f insert/boundary2.py
python3 ./test.py -f insert/insert_locking.py
python3 ./test.py -f alter/alter_debugFlag.py python3 ./test.py -f alter/alter_debugFlag.py
python3 ./test.py -f query/queryBetweenAnd.py python3 ./test.py -f query/queryBetweenAnd.py
python3 ./test.py -f tag_lite/alter_tag.py python3 ./test.py -f tag_lite/alter_tag.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
python3 test.py -f tools/taosdemoAllTest/TD-4985/query-limit-offset.py
python3 ./test.py -f tag_lite/drop_auto_create.py python3 ./test.py -f tag_lite/drop_auto_create.py
python3 test.py -f insert/insert_before_use_db.py python3 test.py -f insert/insert_before_use_db.py
python3 test.py -f alter/alter_keep.py python3 test.py -f alter/alter_keep.py

View File

@ -0,0 +1,178 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
import random
class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        tdSql.prepare()
        # test case for https://jira.taosdata.com:18080/browse/TD-5021

        tdLog.info("\n\n----------step1 : drop db and create db----------\n")
        tdSql.execute('''drop database if exists db ;''')
        tdSql.execute('''create database db ;''')
        sql = '''show databases;'''
        tdSql.query(sql)
        tdSql.checkRows(1)

        tdLog.info("\n\n----------step2 : create stable----------\n")
        tdSql.execute('''create stable
                         db.stable_1 (ts timestamp, payload binary(256))
                         tags(t1 binary(16),t2 int);''')
        sql = '''show db.stables;'''
        tdSql.query(sql)
        tdSql.checkRows(1)

        tdLog.info("\n\n----------step3 : create table and insert----------\n")
        sql = '''insert into db.table1 using db.stable_1 (t1 , t2) tags ("table_1" , 111) ( values (now, ;'''
        tdLog.info(sql)
        tdSql.error(sql)
        try:
            tdSql.execute(sql)
            tdLog.exit(" unexpected token")
        except Exception as e:
            tdLog.info(repr(e))
            tdLog.info("DB error: syntax error near ', ;' (unexpected token)")

        sql = '''insert into db.table1(ts , payload) using db.stable_1 (t1 , t2) tags ("table_1" , 111) ( values (now, ;'''
        tdLog.info(sql)
        tdSql.error(sql)
        try:
            tdSql.execute(sql)
            tdLog.exit(" bind columns again")
        except Exception as e:
            tdLog.info(repr(e))
            tdLog.info("DB error: syntax error near ', ;' (bind columns again)")

        sql = '''insert into db.table1 using db.stable_1 (t1 , t2) tags ("table_1",111) (ts , payload) ( values (now, ;'''
        tdLog.info(sql)
        tdSql.error(sql)
        try:
            tdSql.execute(sql)
            tdLog.exit(" keyword VALUES or FILE required ")
        except Exception as e:
            tdLog.info(repr(e))
            tdLog.info("DB error: invalid SQL: (keyword VALUES or FILE required)")

        tdSql.execute('''insert into db.table1 using db.stable_1 (t1 , t2)
                         tags ("table_1" , 111) values ( now , 1) ''')
        sql = '''select * from db.stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(1)
        tdSql.checkData(0,1,1)
        tdSql.checkData(0,2,'table_1')

        tdLog.info("\n\n----------step4 : create table and insert again----------\n")
        sql = '''insert into db.table2 using db.stable_1 (t1) tags ("table_2") ( values (now, ;'''
        tdLog.info(sql)
        tdSql.error(sql)
        try:
            tdSql.execute(sql)
            tdLog.exit(" unexpected token")
        except Exception as e:
            tdLog.info(repr(e))
            tdLog.info("DB error: syntax error near ', ;' (unexpected token)")

        tdSql.execute('''insert into db.table2 using db.stable_1 (t1)
                         tags ("table_2") values ( now , 2) ''')
        sql = '''select * from db.stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(2)
        tdSql.checkData(1,1,2)
        tdSql.checkData(1,2,'table_2')

        tdLog.info("\n\n----------step5 : create table and insert without db----------\n")
        tdSql.execute('''use db''')
        sql = '''insert into table3 using stable_1 (t1) tags ("table_3") ( values (now, ;'''
        tdLog.info(sql)
        tdSql.error(sql)
        try:
            tdSql.execute(sql)
            tdLog.exit(" unexpected token")
        except Exception as e:
            tdLog.info(repr(e))
            tdLog.info("DB error: syntax error near ', ;' (unexpected token)")

        tdSql.execute('''insert into table3 using stable_1 (t1 , t2)
                         tags ("table_3" , 333) values ( now , 3) ''')
        sql = '''select * from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(3)
        tdSql.checkData(2,1,3)
        tdSql.checkData(2,2,'table_3')

        tdLog.info("\n\n----------step6 : create tables in one sql ----------\n")
        sql = '''insert into table4 using stable_1 (t1) tags ("table_4") values (now, 4)
                 table5 using stable_1 (t1) tags ("table_5") ( values (now, ;'''
        tdLog.info(sql)
        tdSql.error(sql)
        try:
            tdSql.execute(sql)
            tdLog.exit(" unexpected token")
        except Exception as e:
            tdLog.info(repr(e))
            tdLog.info("DB error: syntax error near ', ;' (unexpected token)")

        tdSql.execute('''insert into table4 using stable_1 (t1) tags ("table_4") values (now, 4)
                         table5 using stable_1 (t1) tags ("table_5") values (now, 5) ''')
        sql = '''select * from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(5)
        tdSql.checkData(3,1,4)
        tdSql.checkData(3,2,'table_4')
        tdSql.checkData(4,1,5)
        tdSql.checkData(4,2,'table_5')

        sql = '''insert into table6 using stable_1 (t1) tags ("table_6") ( values (now,
                 table7 using stable_1 (t1) tags ("table_7") values (now, 7);'''
        tdLog.info(sql)
        tdSql.error(sql)
        try:
            tdSql.execute(sql)
            tdLog.exit(" invalid SQL")
        except Exception as e:
            tdLog.info(repr(e))
            tdLog.info("invalid SQL")

        tdSql.execute('''insert into table6 using stable_1 (t1 , t2) tags ("table_6" , 666) values (now, 6)
                         table7 using stable_1 (t1) tags ("table_7") values (now, 7) ''')
        sql = '''select * from stable_1;'''
        tdSql.query(sql)
        tdSql.checkRows(7)
        tdSql.checkData(5,1,6)
        tdSql.checkData(5,2,'table_6')
        tdSql.checkData(6,1,7)
        tdSql.checkData(6,2,'table_7')

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,10 @@
0,0,'TAOSdata-0'
1,1,'TAOSdata-1'
2,22,'TAOSdata-2'
3,333,'TAOSdata-3'
4,4444,'TAOSdata-4'
5,55555,'TAOSdata-5'
6,666666,'TAOSdata-6'
7,7777777,'TAOSdata-7'
8,88888888,'TAOSdata-8'
9,999999999,'TAOSdata-9'

View File

@ -0,0 +1,62 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 1,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10000,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 1,
"data_source": "sample",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/TD-4985/query-limit-offset.csv",
"tags_file": "./tools/taosdemoAllTest/TD-4985/query-limit-offset.csv",
"columns": [{"type": "INT","count":2}, {"type": "BINARY", "len": 16, "count":1}],
"tags": [{"type": "INT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
}]
}]
}

View File

@ -0,0 +1,191 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root)-len("/build/bin")]
                    break
        return buildPath

    def run(self):
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        # insert: create one or multiple tables per sql and insert multiple rows per sql
        # test case for https://jira.taosdata.com:18080/browse/TD-4985
        os.system("%staosdemo -f tools/taosdemoAllTest/TD-4985/query-limit-offset.json -y " % binPath)
        tdSql.execute("use db")
        tdSql.query("select count (tbname) from stb0")
        tdSql.checkData(0, 0, 10000)

        for i in range(1000):
            tdSql.execute('''insert into stb00_9999 values(%d, %d, %d,'test99.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_8888 values(%d, %d, %d,'test98.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_7777 values(%d, %d, %d,'test97.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_6666 values(%d, %d, %d,'test96.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_5555 values(%d, %d, %d,'test95.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_4444 values(%d, %d, %d,'test94.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_3333 values(%d, %d, %d,'test93.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_2222 values(%d, %d, %d,'test92.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_1111 values(%d, %d, %d,'test91.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_100 values(%d, %d, %d,'test90.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))

        tdSql.query("select * from stb0 where col2 like 'test99%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_9999' limit 10" )
        tdSql.checkData(0, 1, 0)
        tdSql.checkData(1, 1, 1)
        tdSql.checkData(2, 1, 2)
        tdSql.query("select * from stb0 where tbname like 'stb00_9999' limit 10 offset 5" )
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)
        tdSql.query("select * from stb0 where col2 like 'test98%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_8888' limit 10" )
        tdSql.checkData(0, 1, 0)
        tdSql.checkData(1, 1, 1)
        tdSql.checkData(2, 1, 2)
        tdSql.query("select * from stb0 where tbname like 'stb00_8888' limit 10 offset 5" )
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)
        tdSql.query("select * from stb0 where col2 like 'test97%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_7777' limit 10" )
        tdSql.checkData(0, 1, 0)
        tdSql.checkData(1, 1, 1)
        tdSql.checkData(2, 1, 2)
        tdSql.query("select * from stb0 where tbname like 'stb00_7777' limit 10 offset 5" )
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)
        tdSql.query("select * from stb0 where col2 like 'test96%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_6666' limit 10" )
        tdSql.checkData(0, 1, 0)
        tdSql.checkData(1, 1, 1)
        tdSql.checkData(2, 1, 2)
        tdSql.query("select * from stb0 where tbname like 'stb00_6666' limit 10 offset 5" )
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)
        tdSql.query("select * from stb0 where col2 like 'test95%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_5555' limit 10" )
        tdSql.checkData(0, 1, 0)
        tdSql.checkData(1, 1, 1)
        tdSql.checkData(2, 1, 2)
        tdSql.query("select * from stb0 where tbname like 'stb00_5555' limit 10 offset 5" )
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)
        tdSql.query("select * from stb0 where col2 like 'test94%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_4444' limit 10" )
        tdSql.checkData(0, 1, 0)
        tdSql.checkData(1, 1, 1)
        tdSql.checkData(2, 1, 2)
        tdSql.query("select * from stb0 where tbname like 'stb00_4444' limit 10 offset 5" )
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)
        tdSql.query("select * from stb0 where col2 like 'test93%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_3333' limit 100" )
        tdSql.checkData(0, 1, 0)
        tdSql.checkData(1, 1, 1)
        tdSql.checkData(2, 1, 2)
        tdSql.query("select * from stb0 where tbname like 'stb00_3333' limit 100 offset 5" )
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)
        tdSql.query("select * from stb0 where col2 like 'test92%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_2222' limit 100" )
        tdSql.checkData(0, 1, 0)
        tdSql.checkData(1, 1, 1)
        tdSql.checkData(2, 1, 2)
        tdSql.query("select * from stb0 where tbname like 'stb00_2222' limit 100 offset 5" )
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)
        tdSql.query("select * from stb0 where col2 like 'test91%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_1111' limit 100" )
        tdSql.checkData(0, 1, 0)
        tdSql.checkData(1, 1, 1)
        tdSql.checkData(2, 1, 2)
        tdSql.query("select * from stb0 where tbname like 'stb00_1111' limit 100 offset 5" )
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)
        tdSql.query("select * from stb0 where col2 like 'test90%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_100' limit 100" )
        tdSql.checkData(0, 1, 0)
        tdSql.checkData(1, 1, 1)
        tdSql.checkData(2, 1, 2)
        tdSql.query("select * from stb0 where tbname like 'stb00_100' limit 100 offset 5" )
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)

        os.system("rm -rf tools/taosdemoAllTest/TD-4985/query-limit-offset.py.sql")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -16,6 +16,8 @@ import pandas as pd
import argparse import argparse
import os.path import os.path
import json import json
from util.log import tdLog
from util.sql import tdSql
class taosdemoPerformace: class taosdemoPerformace:

View File

@ -63,7 +63,6 @@ class TDTestCase:
tdSql.execute("create database db days 11 keep 3649 blocks 8 ") tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
tdSql.execute("create database db1 days 12 keep 3640 blocks 7 ") tdSql.execute("create database db1 days 12 keep 3640 blocks 7 ")
tdSql.execute("use db") tdSql.execute("use db")
tdSql.execute( tdSql.execute(
"create table st(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))") "create table st(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))")
tdSql.execute("create table t1 using st tags(1, 'beijing')") tdSql.execute("create table t1 using st tags(1, 'beijing')")
@ -86,9 +85,10 @@ class TDTestCase:
tdLog.info("taosdump found in %s" % buildPath) tdLog.info("taosdump found in %s" % buildPath)
binPath = buildPath + "/build/bin/" binPath = buildPath + "/build/bin/"
os.system("rm ./taosdumptest/tmp1/*.sql")
os.system("%staosdump --databases db -o ./taosdumptest/tmp1" % binPath) os.system("%staosdump --databases db -o ./taosdumptest/tmp1" % binPath)
os.system("%staosdump --databases db1 -o ./taosdumptest/tmp2" % binPath) os.system(
"%staosdump --databases db1 -o ./taosdumptest/tmp2" %
binPath)
tdSql.execute("drop database db") tdSql.execute("drop database db")
tdSql.execute("drop database db1") tdSql.execute("drop database db1")