diff --git a/cmake/cmake.version b/cmake/cmake.version
index 232e86d891..3166a0695c 100644
--- a/cmake/cmake.version
+++ b/cmake/cmake.version
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "3.0.4.0")
+ SET(TD_VER_NUMBER "3.0.4.1")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in
index ba937b40c1..4a8f4864b3 100644
--- a/cmake/taosadapter_CMakeLists.txt.in
+++ b/cmake/taosadapter_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
- GIT_TAG e02ddb2
+ GIT_TAG ae8d51c
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in
index 4838e97dd7..d9d2f12069 100644
--- a/cmake/taostools_CMakeLists.txt.in
+++ b/cmake/taostools_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
- GIT_TAG ffc2e6f
+ GIT_TAG 4378702
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/include/common/systable.h b/include/common/systable.h
index 558a1ca297..ea18338e9e 100644
--- a/include/common/systable.h
+++ b/include/common/systable.h
@@ -12,7 +12,7 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-
+
#ifndef TDENGINE_SYSTABLE_H
#define TDENGINE_SYSTABLE_H
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index 02c097b8d0..dc997221e8 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -689,6 +689,7 @@ typedef struct {
int32_t tSerializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
int32_t tDeserializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
+void tFreeSAlterUserReq(SAlterUserReq* pReq);
typedef struct {
char user[TSDB_USER_LEN];
@@ -3423,7 +3424,7 @@ typedef struct {
int32_t tEncodeSSubmitReq2(SEncoder* pCoder, const SSubmitReq2* pReq);
int32_t tDecodeSSubmitReq2(SDecoder* pCoder, SSubmitReq2* pReq);
void tDestroySSubmitTbData(SSubmitTbData* pTbData, int32_t flag);
-void tDestroySSubmitReq2(SSubmitReq2* pReq, int32_t flag);
+void tDestroySSubmitReq(SSubmitReq2* pReq, int32_t flag);
typedef struct {
int32_t affectedRows;
diff --git a/include/libs/catalog/catalog.h b/include/libs/catalog/catalog.h
index 2c684f8f76..d7084cfac4 100644
--- a/include/libs/catalog/catalog.h
+++ b/include/libs/catalog/catalog.h
@@ -82,6 +82,7 @@ typedef struct SCatalogReq {
SArray* pUser; // element is SUserAuthInfo
SArray* pTableIndex; // element is SNAME
SArray* pTableCfg; // element is SNAME
+ SArray* pTableTag; // element is SNAME
bool qNodeRequired; // valid qnode
bool dNodeRequired; // valid dnode
bool svrVerRequired;
@@ -105,6 +106,7 @@ typedef struct SMetaData {
SArray* pUser; // pRes = SUserAuthRes*
SArray* pQnodeList; // pRes = SArray*
SArray* pTableCfg; // pRes = STableCfg*
+ SArray* pTableTag; // pRes = SArray*
SArray* pDnodeList; // pRes = SArray*
SMetaRes* pSvrVer; // pRes = char*
} SMetaData;
@@ -122,8 +124,8 @@ typedef struct SSTableVersion {
char stbName[TSDB_TABLE_NAME_LEN];
uint64_t dbId;
uint64_t suid;
- int16_t sversion;
- int16_t tversion;
+ int32_t sversion;
+ int32_t tversion;
int32_t smaVer;
} SSTableVersion;
@@ -312,6 +314,8 @@ int32_t catalogGetIndexMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const char*
int32_t catalogGetTableIndex(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, SArray** pRes);
+int32_t catalogGetTableTag(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, SArray** pRes);
+
int32_t catalogRefreshGetTableCfg(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, STableCfg** pCfg);
int32_t catalogUpdateTableIndex(SCatalog* pCtg, STableIndexRsp* pRsp);
diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h
index 34372dc2ff..61eca6cc4f 100644
--- a/include/libs/executor/executor.h
+++ b/include/libs/executor/executor.h
@@ -208,8 +208,6 @@ void* qExtractReaderFromStreamScanner(void* scanner);
int32_t qExtractStreamScanner(qTaskInfo_t tinfo, void** scanner);
-int32_t qStreamInput(qTaskInfo_t tinfo, void* pItem);
-
int32_t qStreamSetParamForRecover(qTaskInfo_t tinfo);
int32_t qStreamSourceRecoverStep1(qTaskInfo_t tinfo, int64_t ver);
int32_t qStreamSourceRecoverStep2(qTaskInfo_t tinfo, int64_t ver);
diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h
index 480912a8cf..9569cfe055 100644
--- a/include/libs/nodes/querynodes.h
+++ b/include/libs/nodes/querynodes.h
@@ -379,6 +379,8 @@ typedef struct SVnodeModifyOpStmt {
SName usingTableName;
const char* pBoundCols;
struct STableMeta* pTableMeta;
+ SNode* pTagCond;
+ SArray* pTableTag;
SHashObj* pVgroupsHashObj;
SHashObj* pTableBlockHashObj; // SHashObj
SHashObj* pSubTableHashObj;
diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h
index cfc6ef2025..3841210076 100644
--- a/include/libs/qcom/query.h
+++ b/include/libs/qcom/query.h
@@ -116,8 +116,8 @@ typedef struct STableMeta {
// if the table is TSDB_CHILD_TABLE, the following information is acquired from the corresponding super table meta
// info
- int16_t sversion;
- int16_t tversion;
+ int32_t sversion;
+ int32_t tversion;
STableComInfo tableInfo;
SSchema schema[];
} STableMeta;
diff --git a/include/util/tutil.h b/include/util/tutil.h
index e96c7a07d9..7a59aa170a 100644
--- a/include/util/tutil.h
+++ b/include/util/tutil.h
@@ -106,6 +106,8 @@ static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen,
goto LABEL; \
}
+#define VND_CHECK_CODE(CODE, LINO, LABEL) TSDB_CHECK_CODE(CODE, LINO, LABEL)
+
#ifdef __cplusplus
}
#endif
diff --git a/packaging/deb/DEBIAN/preinst b/packaging/deb/DEBIAN/preinst
index 8a1a7d4d81..904a946e20 100644
--- a/packaging/deb/DEBIAN/preinst
+++ b/packaging/deb/DEBIAN/preinst
@@ -26,6 +26,38 @@ if pidof taosd &> /dev/null; then
sleep 1
fi
+# Stop adapter service if running
+if pidof taosadapter &> /dev/null; then
+ if pidof systemd &> /dev/null; then
+ ${csudo}systemctl stop taosadapter || :
+ elif $(which service &> /dev/null); then
+ ${csudo}service taosadapter stop || :
+ else
+ pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo}kill -9 $pid || :
+ fi
+ fi
+ echo "Stop taosadapter service success!"
+ sleep 1
+fi
+
+# Stop keeper service if running
+if pidof taoskeeper &> /dev/null; then
+ if pidof systemd &> /dev/null; then
+ ${csudo}systemctl stop taoskeeper || :
+ elif $(which service &> /dev/null); then
+ ${csudo}service taoskeeper stop || :
+ else
+ pid=$(ps -ef | grep "taoskeeper" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo}kill -9 $pid || :
+ fi
+ fi
+ echo "Stop taoskeeper service success!"
+ sleep 1
+fi
+
# if taos.cfg already softlink, remove it
cfg_install_dir="/etc/taos"
install_main_dir="/usr/local/taos"
@@ -41,6 +73,11 @@ if [ -f "${install_main_dir}/taosadapter.service" ]; then
${csudo}rm -f ${install_main_dir}/cfg/taosadapter.service || :
fi
+if [ -f "${install_main_dir}/taoskeeper.toml" ]; then
+ ${csudo}rm -f ${install_main_dir}/cfg/taoskeeper.toml || :
+fi
+
+
# there can not libtaos.so*, otherwise ln -s error
${csudo}rm -f ${install_main_dir}/driver/libtaos.* || :
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}rm -f ${install_main_dir}/driver/libtaosws.so || :
diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm
index 65f261db2c..0d63115a04 100644
--- a/packaging/deb/DEBIAN/prerm
+++ b/packaging/deb/DEBIAN/prerm
@@ -32,6 +32,7 @@ else
${csudo}rm -f ${bin_link_dir}/udfd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
+ ${csudo}rm -f ${bin_link_dir}/taoskeeper || :
${csudo}rm -f ${cfg_link_dir}/* || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh
index 94a24a4148..9f49cf345a 100755
--- a/packaging/deb/makedeb.sh
+++ b/packaging/deb/makedeb.sh
@@ -44,8 +44,31 @@ mkdir -p ${pkg_dir}${install_home_path}/include
#mkdir -p ${pkg_dir}${install_home_path}/init.d
mkdir -p ${pkg_dir}${install_home_path}/script
+# download taoskeeper and build
+if [ "$cpuType" = "x64" ] || [ "$cpuType" = "x86_64" ] || [ "$cpuType" = "amd64" ]; then
+ arch=amd64
+elif [ "$cpuType" = "x32" ] || [ "$cpuType" = "i386" ] || [ "$cpuType" = "i686" ]; then
+ arch=386
+elif [ "$cpuType" = "arm" ] || [ "$cpuType" = "aarch32" ]; then
+ arch=arm
+elif [ "$cpuType" = "arm64" ] || [ "$cpuType" = "aarch64" ]; then
+ arch=arm64
+else
+ arch=$cpuType
+fi
+
+echo "${top_dir}/../enterprise/packaging/build_taoskeeper.sh -r ${arch} -e taoskeeper"
+echo "$top_dir=${top_dir}"
+taoskeeper_binary=`${top_dir}/../enterprise/packaging/build_taoskeeper.sh -r $arch -e taoskeeper`
+echo "taoskeeper_binary: ${taoskeeper_binary}"
+
+# copy config files
+cp $(dirname ${taoskeeper_binary})/config/taoskeeper.toml ${pkg_dir}${install_home_path}/cfg
+cp $(dirname ${taoskeeper_binary})/taoskeeper.service ${pkg_dir}${install_home_path}/cfg
+
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
cp ${compile_dir}/../packaging/cfg/taosd.service ${pkg_dir}${install_home_path}/cfg
+
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
cp ${compile_dir}/test/cfg/taosadapter.toml ${pkg_dir}${install_home_path}/cfg || :
fi
@@ -53,6 +76,7 @@ if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
cp ${compile_dir}/test/cfg/taosadapter.service ${pkg_dir}${install_home_path}/cfg || :
fi
+cp ${taoskeeper_binary} ${pkg_dir}${install_home_path}/bin
#cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
@@ -143,6 +167,7 @@ else
exit 1
fi
+rm -rf ${pkg_dir}/build-taoskeeper
# make deb package
dpkg -b ${pkg_dir} $debname
echo "make deb package success!"
@@ -150,4 +175,5 @@ echo "make deb package success!"
cp ${pkg_dir}/*.deb ${output_dir}
# clean temp dir
+
rm -rf ${pkg_dir}
diff --git a/packaging/rpm/makerpm.sh b/packaging/rpm/makerpm.sh
index 4ac67ec754..9cf00364aa 100755
--- a/packaging/rpm/makerpm.sh
+++ b/packaging/rpm/makerpm.sh
@@ -35,14 +35,16 @@ function cp_rpm_package() {
local cur_dir
cd $1
cur_dir=$(pwd)
-
+ echo "cp_rpm_package cd: ${cur_dir}"
for dirlist in "$(ls ${cur_dir})"; do
if test -d ${dirlist}; then
cd ${dirlist}
+ echo "cp_rpm_package ${cur_dir}/${dirlist}"
cp_rpm_package ${cur_dir}/${dirlist}
cd ..
fi
if test -e ${dirlist}; then
+ echo "${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm"
cp ${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm
fi
done
@@ -54,6 +56,25 @@ fi
${csudo}mkdir -p ${pkg_dir}
cd ${pkg_dir}
+# download taoskeeper and build
+if [ "$cpuType" = "x64" ] || [ "$cpuType" = "x86_64" ] || [ "$cpuType" = "amd64" ]; then
+ arch=amd64
+elif [ "$cpuType" = "x32" ] || [ "$cpuType" = "i386" ] || [ "$cpuType" = "i686" ]; then
+ arch=386
+elif [ "$cpuType" = "arm" ] || [ "$cpuType" = "aarch32" ]; then
+ arch=arm
+elif [ "$cpuType" = "arm64" ] || [ "$cpuType" = "aarch64" ]; then
+ arch=arm64
+else
+ arch=$cpuType
+fi
+
+cd ${top_dir}
+echo "${top_dir}/../enterprise/packaging/build_taoskeeper.sh -r ${arch} -e taoskeeper"
+taoskeeper_binary=`${top_dir}/../enterprise/packaging/build_taoskeeper.sh -r $arch -e taoskeeper`
+echo "taoskeeper_binary: ${taoskeeper_binary}"
+cd ${package_dir}
+
${csudo}mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS
${csudo}rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file}
@@ -85,3 +106,4 @@ mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/${rpmname}
cd ..
${csudo}rm -rf ${pkg_dir}
+rm -rf ${top_dir}/build-taoskeeper
\ No newline at end of file
diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec
index c21063e6a4..52d5335003 100644
--- a/packaging/rpm/tdengine.spec
+++ b/packaging/rpm/tdengine.spec
@@ -3,6 +3,7 @@
%define cfg_install_dir /etc/taos
%define __strip /bin/true
%global __python /usr/bin/python3
+%global _build_id_links none
Name: tdengine
Version: %{_version}
@@ -62,6 +63,15 @@ fi
if [ -f %{_compiledir}/test/cfg/taosadapter.service ]; then
cp %{_compiledir}/test/cfg/taosadapter.service %{buildroot}%{homepath}/cfg
fi
+
+if [ -f %{_compiledir}/../build-taoskeeper/config/taoskeeper.toml ]; then
+ cp %{_compiledir}/../build-taoskeeper/config/taoskeeper.toml %{buildroot}%{homepath}/cfg ||:
+fi
+
+if [ -f %{_compiledir}/../build-taoskeeper/taoskeeper.service ]; then
+ cp %{_compiledir}/../build-taoskeeper/taoskeeper.service %{buildroot}%{homepath}/cfg ||:
+fi
+
#cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d
cp %{_compiledir}/../packaging/tools/post.sh %{buildroot}%{homepath}/script
cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/script
@@ -73,8 +83,12 @@ cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/udfd %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin
+if [ -f %{_compiledir}/../build-taoskeeper/taoskeeper ]; then
+ cp %{_compiledir}/../build-taoskeeper/taoskeeper %{buildroot}%{homepath}/bin
+fi
+
if [ -f %{_compiledir}/build/bin/taosadapter ]; then
- cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||:
+ cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin
fi
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
[ -f %{_compiledir}/build/lib/${wslibfile} ] && cp %{_compiledir}/build/lib/${wslibfile} %{buildroot}%{homepath}/driver ||:
@@ -119,7 +133,9 @@ if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{homepath}/jemalloc/lib/pkgconfig
fi
fi
-
+ls -al %{buildroot}%{homepath}/bin
+tree -L 5
+echo "==============================copying files done"
#Scripts executed before installation
%pre
if [ -f /var/lib/taos/dnode/dnodeCfg.json ]; then
@@ -196,6 +212,7 @@ if [ $1 -eq 0 ];then
${csudo}rm -f ${bin_link_dir}/taosd || :
${csudo}rm -f ${bin_link_dir}/udfd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
+ ${csudo}rm -f ${bin_link_dir}/taoskeeper || :
${csudo}rm -f ${cfg_link_dir}/* || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh
index a590835257..3da005c405 100755
--- a/packaging/tools/makepkg.sh
+++ b/packaging/tools/makepkg.sh
@@ -341,7 +341,7 @@ if [ "$verMode" == "cluster" ] || [ "$verMode" == "cloud" ]; then
tmp_pwd=`pwd`
cd ${install_dir}/connector
if [ ! -d taos-connector-jdbc ];then
- git clone -b main --depth=1 https://github.com/taosdata/taos-connector-jdbc.git ||:
+ git clone -b 3.1.0 --depth=1 https://github.com/taosdata/taos-connector-jdbc.git ||:
fi
cd taos-connector-jdbc
mvn clean package -Dmaven.test.skip=true
diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh
index 3a013ade2c..fc392c9684 100755
--- a/packaging/tools/post.sh
+++ b/packaging/tools/post.sh
@@ -436,7 +436,7 @@ function local_fqdn_check() {
function install_taosadapter_config() {
if [ ! -f "${cfg_install_dir}/taosadapter.toml" ]; then
- [ ! -d %{cfg_install_dir} ] &&
+ [ ! -d ${cfg_install_dir} ] &&
${csudo}${csudo}mkdir -p ${cfg_install_dir}
[ -f ${cfg_dir}/taosadapter.toml ] && ${csudo}cp ${cfg_dir}/taosadapter.toml ${cfg_install_dir}
[ -f ${cfg_install_dir}/taosadapter.toml ] &&
@@ -451,19 +451,26 @@ function install_taosadapter_config() {
}
function install_taoskeeper_config() {
- if [ ! -f "${cfg_install_dir}/keeper.toml" ]; then
- [ ! -d %{cfg_install_dir} ] &&
- ${csudo}${csudo}mkdir -p ${cfg_install_dir}
- [ -f ${cfg_dir}/keeper.toml ] && ${csudo}cp ${cfg_dir}/keeper.toml ${cfg_install_dir}
- [ -f ${cfg_install_dir}/keeper.toml ] &&
- ${csudo}chmod 644 ${cfg_install_dir}/keeper.toml
+ # if new environment without taoskeeper
+ if [[ ! -f "${cfg_install_dir}/keeper.toml" ]] && [[ ! -f "${cfg_install_dir}/taoskeeper.toml" ]]; then
+ [ ! -d ${cfg_install_dir} ] && ${csudo}${csudo}mkdir -p ${cfg_install_dir}
+ [ -f ${cfg_dir}/taoskeeper.toml ] && ${csudo}cp ${cfg_dir}/taoskeeper.toml ${cfg_install_dir}
+ [ -f ${cfg_install_dir}/taoskeeper.toml ] &&
+ ${csudo}chmod 644 ${cfg_install_dir}/taoskeeper.toml
+ fi
+ # if old machine with taoskeeper.toml file
+ if [ -f ${cfg_install_dir}/taoskeeper.toml ]; then
+ ${csudo}mv ${cfg_dir}/taoskeeper.toml ${cfg_dir}/taoskeeper.toml.new
fi
- [ -f ${cfg_dir}/keeper.toml ] &&
- ${csudo}mv ${cfg_dir}/keeper.toml ${cfg_dir}/keeper.toml.new
+ if [ -f ${cfg_install_dir}/keeper.toml ]; then
+ echo "The file keeper.toml will be renamed to taoskeeper.toml"
+ ${csudo}mv ${cfg_install_dir}/keeper.toml ${cfg_install_dir}/taoskeeper.toml
+ ${csudo}mv ${cfg_dir}/taoskeeper.toml ${cfg_dir}/taoskeeper.toml.new
+ fi
- [ -f ${cfg_install_dir}/keeper.toml ] &&
- ${csudo}ln -s ${cfg_install_dir}/keeper.toml ${cfg_dir}
+ [ -f ${cfg_install_dir}/taoskeeper.toml ] &&
+ ${csudo}ln -s ${cfg_install_dir}/taoskeeper.toml ${cfg_dir}
}
function install_config() {
@@ -655,6 +662,15 @@ function install_taosadapter_service() {
fi
}
+function install_taoskeeper_service() {
+ if ((${service_mod}==0)); then
+ [ -f ${script_dir}/../cfg/taoskeeper.service ] &&\
+ ${csudo}cp ${script_dir}/../cfg/taoskeeper.service \
+ ${service_config_dir}/ || :
+ ${csudo}systemctl daemon-reload
+ fi
+}
+
function install_service() {
log_print "start install service"
if [ "$osType" != "Darwin" ]; then
@@ -732,6 +748,7 @@ function install_TDengine() {
install_taosadapter_config
install_taoskeeper_config
install_taosadapter_service
+ install_taoskeeper_service
install_service
install_app
diff --git a/packaging/tools/preun.sh b/packaging/tools/preun.sh
index 0e96c71d5d..68f6b53c45 100755
--- a/packaging/tools/preun.sh
+++ b/packaging/tools/preun.sh
@@ -17,7 +17,7 @@ cfg_link_dir="/usr/local/taos/cfg"
service_config_dir="/etc/systemd/system"
taos_service_name="taosd"
-
+taoskeeper_service_name="taoskeeper"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
@@ -57,6 +57,13 @@ function kill_taosd() {
fi
}
+function kill_taoskeeper() {
+ pid=$(ps -ef | grep "taoskeeper" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo}kill -9 $pid || :
+ fi
+}
+
function clean_service_on_systemd() {
taosadapter_service_config="${service_config_dir}/taosadapter.service"
if systemctl is-active --quiet taosadapter; then
@@ -76,6 +83,12 @@ function clean_service_on_systemd() {
[ -f ${taosadapter_service_config} ] && ${csudo}rm -f ${taosadapter_service_config}
+ taoskeeper_service_config="${service_config_dir}/${taoskeeper_service_name}.service"
+ if systemctl is-active --quiet ${taoskeeper_service_name}; then
+ echo "TDengine taoskeeper is running, stopping it..."
+ ${csudo}systemctl stop ${taoskeeper_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ [ -f ${taoskeeper_service_config} ] && ${csudo}rm -f ${taoskeeper_service_config}
}
function clean_service_on_sysvinit() {
@@ -111,6 +124,7 @@ function clean_service() {
# must manual stop taosd
kill_taosadapter
kill_taosd
+ kill_taoskeeper
fi
}
@@ -124,6 +138,7 @@ ${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taosBenchmark || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${bin_link_dir}/set_core || :
+${csudo}rm -f ${bin_link_dir}/taoskeeper || :
${csudo}rm -f ${cfg_link_dir}/*.new || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index 60c7b44b3d..2ebc8e7379 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -108,7 +108,7 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha
if (pass == NULL) {
pass = TSDB_DEFAULT_PASS;
}
-
+
STscObj *pObj = taos_connect_internal(ip, user, pass, NULL, db, port, CONN_TYPE__QUERY);
if (pObj) {
int64_t *rid = taosMemoryCalloc(1, sizeof(int64_t));
@@ -359,11 +359,11 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
case TSDB_DATA_TYPE_NCHAR: {
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
if (fields[i].type == TSDB_DATA_TYPE_BINARY) {
- if(ASSERT(charLen <= fields[i].bytes && charLen >= 0)){
+ if (ASSERT(charLen <= fields[i].bytes && charLen >= 0)) {
tscError("taos_print_row error binary. charLen:%d, fields[i].bytes:%d", charLen, fields[i].bytes);
}
} else {
- if(ASSERT(charLen <= fields[i].bytes * TSDB_NCHAR_SIZE && charLen >= 0)){
+ if (ASSERT(charLen <= fields[i].bytes * TSDB_NCHAR_SIZE && charLen >= 0)) {
tscError("taos_print_row error. charLen:%d, fields[i].bytes:%d", charLen, fields[i].bytes);
}
}
@@ -705,16 +705,16 @@ int taos_get_current_db(TAOS *taos, char *database, int len, int *required) {
int code = TSDB_CODE_SUCCESS;
taosThreadMutexLock(&pTscObj->mutex);
- if(database == NULL || len <= 0){
- if(required != NULL) *required = strlen(pTscObj->db) + 1;
+ if (database == NULL || len <= 0) {
+ if (required != NULL) *required = strlen(pTscObj->db) + 1;
terrno = TSDB_CODE_INVALID_PARA;
code = -1;
- }else if(len < strlen(pTscObj->db) + 1){
+ } else if (len < strlen(pTscObj->db) + 1) {
tstrncpy(database, pTscObj->db, len);
- if(required) *required = strlen(pTscObj->db) + 1;
+ if (required) *required = strlen(pTscObj->db) + 1;
terrno = TSDB_CODE_INVALID_PARA;
code = -1;
- }else{
+ } else {
strcpy(database, pTscObj->db);
code = 0;
}
@@ -741,6 +741,7 @@ static void destoryCatalogReq(SCatalogReq *pCatalogReq) {
taosArrayDestroy(pCatalogReq->pUser);
taosArrayDestroy(pCatalogReq->pTableIndex);
taosArrayDestroy(pCatalogReq->pTableCfg);
+ taosArrayDestroy(pCatalogReq->pTableTag);
taosMemoryFree(pCatalogReq);
}
@@ -975,8 +976,10 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
if (TSDB_CODE_SUCCESS == code) {
pRequest->stmtType = pRequest->pQuery->pRoot->type;
- phaseAsyncQuery(pWrapper);
- } else {
+ code = phaseAsyncQuery(pWrapper);
+ }
+
+ if (TSDB_CODE_SUCCESS != code) {
tscError("0x%" PRIx64 " error happens, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code, tstrerror(code),
pRequest->requestId);
destorySqlCallbackWrapper(pWrapper);
@@ -1042,11 +1045,11 @@ static void fetchCallback(void *pResult, void *param, int32_t code) {
}
void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
- if(ASSERT(res != NULL && fp != NULL)){
+ if (ASSERT(res != NULL && fp != NULL)) {
tscError("taos_fetch_rows_a invalid paras");
return;
}
- if(ASSERT(TD_RES_QUERY(res))){
+ if (ASSERT(TD_RES_QUERY(res))) {
tscError("taos_fetch_rows_a res is NULL");
return;
}
@@ -1092,11 +1095,11 @@ void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
}
void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
- if(ASSERT(res != NULL && fp != NULL)){
+ if (ASSERT(res != NULL && fp != NULL)) {
tscError("taos_fetch_rows_a invalid paras");
return;
}
- if(ASSERT(TD_RES_QUERY(res))){
+ if (ASSERT(TD_RES_QUERY(res))) {
tscError("taos_fetch_rows_a res is NULL");
return;
}
@@ -1111,11 +1114,11 @@ void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
}
const void *taos_get_raw_block(TAOS_RES *res) {
- if(ASSERT(res != NULL)){
+ if (ASSERT(res != NULL)) {
tscError("taos_fetch_rows_a invalid paras");
return NULL;
}
- if(ASSERT(TD_RES_QUERY(res))){
+ if (ASSERT(TD_RES_QUERY(res))) {
tscError("taos_fetch_rows_a res is NULL");
return NULL;
}
@@ -1273,7 +1276,6 @@ _return:
return code;
}
-
int taos_load_table_info(TAOS *taos, const char *tableNameList) {
if (NULL == taos) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c
index a0146cfa39..032517cafc 100644
--- a/source/client/src/clientMsgHandler.c
+++ b/source/client/src/clientMsgHandler.c
@@ -92,7 +92,6 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
goto End;
}
- /*assert(connectRsp.epSet.numOfEps > 0);*/
if (connectRsp.epSet.numOfEps == 0) {
setErrno(pRequest, TSDB_CODE_APP_ERROR);
tsem_post(&pRequest->body.rspSem);
diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c
index cac559b0c1..57458ff8f7 100644
--- a/source/client/src/clientSml.c
+++ b/source/client/src/clientSml.c
@@ -649,6 +649,17 @@ static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashO
field->bytes = getBytes(kv->type, kv->length);
}
}
+
+ int32_t maxLen = isTag ? TSDB_MAX_TAGS_LEN : TSDB_MAX_BYTES_PER_ROW;
+ int32_t len = 0;
+ for (int j = 0; j < taosArrayGetSize(results); ++j) {
+ SField *field = taosArrayGet(results, j);
+ len += field->bytes;
+ }
+ if(len > maxLen){
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
return TSDB_CODE_SUCCESS;
}
@@ -781,11 +792,15 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
code = smlBuildFieldsList(info, NULL, NULL, sTableData->tags, pTags, 0, true);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " smlBuildFieldsList tag1 failed. %s", info->id, pName.tname);
+ taosArrayDestroy(pColumns);
+ taosArrayDestroy(pTags);
goto end;
}
code = smlBuildFieldsList(info, NULL, NULL, sTableData->cols, pColumns, 0, false);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " smlBuildFieldsList col1 failed. %s", info->id, pName.tname);
+ taosArrayDestroy(pColumns);
+ taosArrayDestroy(pTags);
goto end;
}
code = smlSendMetaMsg(info, &pName, pColumns, pTags, NULL, SCHEMA_ACTION_CREATE_STABLE);
@@ -837,6 +852,23 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
pTableMeta->tableInfo.numOfColumns, true);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " smlBuildFieldsList tag2 failed. %s", info->id, pName.tname);
+ taosArrayDestroy(pColumns);
+ taosArrayDestroy(pTags);
+ goto end;
+ }
+
+ if (taosArrayGetSize(pTags) + pTableMeta->tableInfo.numOfColumns > TSDB_MAX_COLUMNS) {
+ uError("SML:0x%" PRIx64 " too many columns than 4096", info->id);
+ code = TSDB_CODE_PAR_TOO_MANY_COLUMNS;
+ taosArrayDestroy(pColumns);
+ taosArrayDestroy(pTags);
+ goto end;
+ }
+ if (taosArrayGetSize(pTags) > TSDB_MAX_TAGS) {
+ uError("SML:0x%" PRIx64 " too many tags than 128", info->id);
+ code = TSDB_CODE_PAR_INVALID_TAGS_NUM;
+ taosArrayDestroy(pColumns);
+ taosArrayDestroy(pTags);
goto end;
}
@@ -891,6 +923,16 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
pTableMeta->tableInfo.numOfColumns, false);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " smlBuildFieldsList col2 failed. %s", info->id, pName.tname);
+ taosArrayDestroy(pColumns);
+ taosArrayDestroy(pTags);
+ goto end;
+ }
+
+ if (taosArrayGetSize(pColumns) + pTableMeta->tableInfo.numOfTags > TSDB_MAX_COLUMNS) {
+ uError("SML:0x%" PRIx64 " too many columns than 4096", info->id);
+ code = TSDB_CODE_PAR_TOO_MANY_COLUMNS;
+ taosArrayDestroy(pColumns);
+ taosArrayDestroy(pTags);
goto end;
}
@@ -1514,7 +1556,8 @@ static int smlProcess(SSmlHandle *info, char *lines[], char *rawLine, char *rawL
do {
code = smlModifyDBSchemas(info);
- if (code == 0 || code == TSDB_CODE_SML_INVALID_DATA) break;
+ if (code == 0 || code == TSDB_CODE_SML_INVALID_DATA || code == TSDB_CODE_PAR_TOO_MANY_COLUMNS
+ || code == TSDB_CODE_PAR_INVALID_TAGS_NUM) break;
taosMsleep(100);
uInfo("SML:0x%" PRIx64 " smlModifyDBSchemas retry code:%s, times:%d", info->id, tstrerror(code), retryNum);
} while (retryNum++ < taosHashGetSize(info->superTables) * MAX_RETRY_TIMES);
diff --git a/source/client/src/clientSmlJson.c b/source/client/src/clientSmlJson.c
index 9fd98e33b7..b0ae316031 100644
--- a/source/client/src/clientSmlJson.c
+++ b/source/client/src/clientSmlJson.c
@@ -575,7 +575,7 @@ static int32_t smlConvertJSONString(SSmlKv *pVal, char *typeStr, cJSON *value) {
uError("OTD:invalid type(%s) for JSON String", typeStr);
return TSDB_CODE_TSC_INVALID_JSON_TYPE;
}
- pVal->length = (int16_t)strlen(value->valuestring);
+ pVal->length = strlen(value->valuestring);
if (pVal->type == TSDB_DATA_TYPE_BINARY && pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c
index 16a4f55840..9292be83e9 100644
--- a/source/client/src/clientTmq.c
+++ b/source/client/src/clientTmq.c
@@ -1664,11 +1664,7 @@ static int32_t doTmqPollImpl(tmq_t* pTmq, SMqClientTopic* pTopic, SMqClientVg* p
return handleErrorBeforePoll(pVg, pTmq);
}
- sendInfo->msgInfo = (SDataBuf){
- .pData = msg,
- .len = msgSize,
- .handle = NULL,
- };
+ sendInfo->msgInfo = (SDataBuf){ .pData = msg, .len = msgSize, .handle = NULL };
sendInfo->requestId = req.reqId;
sendInfo->requestObjRefId = 0;
diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp
index 055ac450dc..b9062fc8ff 100644
--- a/source/client/test/clientTests.cpp
+++ b/source/client/test/clientTests.cpp
@@ -165,7 +165,7 @@ void* queryThread(void* arg) {
int32_t numOfThreads = 1;
void tmq_commit_cb_print(tmq_t* pTmq, int32_t code, void* param) {
- printf("auto commit success, code:%d\n\n\n\n", code);
+// printf("auto commit success, code:%d\n", code);
}
void* doConsumeData(void* param) {
@@ -1053,19 +1053,24 @@ TEST(clientCase, sub_db_test) {
}
TEST(clientCase, sub_tb_test) {
- taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg");
+ taos_options(TSDB_OPTION_CONFIGDIR, "/home/tests/dir/cfg/");
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
+ TAOS* pConn = taos_connect("vm116", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
tmq_conf_t* conf = tmq_conf_new();
+
+ int32_t ts = taosGetTimestampMs()%INT32_MAX;
+ char consumerGroupid[128] = {0};
+ sprintf(consumerGroupid, "group_id_%d", ts);
+
tmq_conf_set(conf, "enable.auto.commit", "true");
- tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
- tmq_conf_set(conf, "group.id", "cgrpName45");
+ tmq_conf_set(conf, "auto.commit.interval.ms", "2000");
+ tmq_conf_set(conf, "group.id", consumerGroupid);
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "auto.offset.reset", "earliest");
- tmq_conf_set(conf, "experimental.snapshot.enable", "true");
+ tmq_conf_set(conf, "experimental.snapshot.enable", "false");
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
@@ -1074,7 +1079,7 @@ TEST(clientCase, sub_tb_test) {
// create the list of topics to subscribe to
tmq_list_t* topicList = tmq_list_new();
- tmq_list_append(topicList, "topic_t2");
+ tmq_list_append(topicList, "topic_t1");
// start the subscription
tmq_subscribe(tmq, topicList);
@@ -1093,15 +1098,15 @@ TEST(clientCase, sub_tb_test) {
while (1) {
TAOS_RES* pRes = tmq_consumer_poll(tmq, timeout);
if (pRes) {
- char buf[1024];
+ char buf[128];
const char* topicName = tmq_get_topic_name(pRes);
- const char* dbName = tmq_get_db_name(pRes);
- int32_t vgroupId = tmq_get_vgroup_id(pRes);
-
- printf("topic: %s\n", topicName);
- printf("db: %s\n", dbName);
- printf("vgroup id: %d\n", vgroupId);
+// const char* dbName = tmq_get_db_name(pRes);
+// int32_t vgroupId = tmq_get_vgroup_id(pRes);
+//
+// printf("topic: %s\n", topicName);
+// printf("db: %s\n", dbName);
+// printf("vgroup id: %d\n", vgroupId);
while (1) {
TAOS_ROW row = taos_fetch_row(pRes);
@@ -1111,16 +1116,14 @@ TEST(clientCase, sub_tb_test) {
fields = taos_fetch_fields(pRes);
numOfFields = taos_field_count(pRes);
- precision = taos_result_precision(pRes);
- taos_print_row(buf, row, fields, numOfFields);
totalRows += 1;
- printf("precision: %d, row content: %s\n", precision, buf);
+ if (totalRows % 100000 == 0) {
+ taos_print_row(buf, row, fields, numOfFields);
+ printf("row content: %s\n", buf);
+ }
}
taos_free_result(pRes);
- // if ((++count) > 1) {
- // break;
- // }
} else {
break;
}
diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c
index f4d2ed01b0..3558feaa66 100644
--- a/source/common/src/tdatablock.c
+++ b/source/common/src/tdatablock.c
@@ -2388,7 +2388,7 @@ _end:
if (terrno != 0) {
*ppReq = NULL;
if (pReq) {
- tDestroySSubmitReq2(pReq, TSDB_MSG_FLG_ENCODE);
+ tDestroySSubmitReq(pReq, TSDB_MSG_FLG_ENCODE);
taosMemoryFreeClear(pReq);
}
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index d9802244b7..155ef7a62a 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -1409,6 +1409,8 @@ int32_t tDeserializeSAlterUserReq(void *buf, int32_t bufLen, SAlterUserReq *pReq
return 0;
}
+void tFreeSAlterUserReq(SAlterUserReq *pReq) { taosMemoryFreeClear(pReq->tagCond); }
+
int32_t tSerializeSGetUserAuthReq(void *buf, int32_t bufLen, SGetUserAuthReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@@ -1635,6 +1637,7 @@ int32_t tDeserializeSGetUserAuthRspImpl(SDecoder *pDecoder, SGetUserAuthRsp *pRs
int32_t ref = 0;
if (tDecodeI32(pDecoder, &ref) < 0) return -1;
taosHashPut(pRsp->useDbs, key, strlen(key), &ref, sizeof(ref));
+ taosMemoryFree(key);
}
}
@@ -1831,7 +1834,6 @@ int32_t tSerializeSCreateFuncReq(void *buf, int32_t bufLen, SCreateFuncReq *pReq
if (tEncodeCStr(&encoder, pReq->pComment) < 0) return -1;
}
-
if (tEncodeI8(&encoder, pReq->orReplace) < 0) return -1;
tEndEncode(&encoder);
@@ -1876,7 +1878,6 @@ int32_t tDeserializeSCreateFuncReq(void *buf, int32_t bufLen, SCreateFuncReq *pR
if (tDecodeCStrTo(&decoder, pReq->pComment) < 0) return -1;
}
-
if (!tDecodeIsEnd(&decoder)) {
if (tDecodeI8(&decoder, &pReq->orReplace) < 0) return -1;
} else {
@@ -2053,12 +2054,12 @@ int32_t tDeserializeSRetrieveFuncRsp(void *buf, int32_t bufLen, SRetrieveFuncRsp
if (pRsp->pFuncExtraInfos == NULL) return -1;
if (tDecodeIsEnd(&decoder)) {
for (int32_t i = 0; i < pRsp->numOfFuncs; ++i) {
- SFuncExtraInfo extraInfo = { 0 };
+ SFuncExtraInfo extraInfo = {0};
taosArrayPush(pRsp->pFuncExtraInfos, &extraInfo);
}
} else {
for (int32_t i = 0; i < pRsp->numOfFuncs; ++i) {
- SFuncExtraInfo extraInfo = { 0 };
+ SFuncExtraInfo extraInfo = {0};
if (tDecodeI32(&decoder, &extraInfo.funcVersion) < 0) return -1;
if (tDecodeI64(&decoder, &extraInfo.funcCreatedTime) < 0) return -1;
taosArrayPush(pRsp->pFuncExtraInfos, &extraInfo);
@@ -7436,7 +7437,7 @@ void tDestroySSubmitTbData(SSubmitTbData *pTbData, int32_t flag) {
}
}
-void tDestroySSubmitReq2(SSubmitReq2 *pReq, int32_t flag) {
+void tDestroySSubmitReq(SSubmitReq2 *pReq, int32_t flag) {
if (pReq->aSubmitTbData == NULL) return;
int32_t nSubmitTbData = TARRAY_SIZE(pReq->aSubmitTbData);
diff --git a/source/common/src/tname.c b/source/common/src/tname.c
index e5ed7a3728..c6210ca8c9 100644
--- a/source/common/src/tname.c
+++ b/source/common/src/tname.c
@@ -122,10 +122,8 @@ int32_t tNameLen(const SName* name) {
int32_t len2 = (int32_t)strlen(name->tname);
if (name->type == TSDB_DB_NAME_T) {
- ASSERT(len2 == 0);
return len + len1 + TSDB_NAME_DELIMITER_LEN;
} else {
- ASSERT(len2 > 0);
return len + len1 + len2 + TSDB_NAME_DELIMITER_LEN * 2;
}
}
diff --git a/source/common/src/ttszip.c b/source/common/src/ttszip.c
index 0faa6eb4c8..f415bd20cd 100644
--- a/source/common/src/ttszip.c
+++ b/source/common/src/ttszip.c
@@ -982,7 +982,6 @@ void tsBufSetCursor(STSBuf* pTSBuf, STSCursor* pCur) {
return;
}
- // assert(pCur->vgroupIndex != -1 && pCur->tsIndex >= 0 && pCur->blockIndex >= 0);
if (pCur->vgroupIndex != -1) {
tsBufGetBlock(pTSBuf, pCur->vgroupIndex, pCur->blockIndex);
}
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
index da08bd01ac..a318b9886e 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
@@ -113,8 +113,16 @@ static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
if (code != 0) {
- if (terrno != 0) code = terrno;
- dGError("vnodeProcessFetchMsg vgId:%d, msg:%p failed to fetch since %s", pVnode->vgId, pMsg, terrstr());
+ if (terrno != 0) {
+ code = terrno;
+ }
+
+ if (code == TSDB_CODE_WAL_LOG_NOT_EXIST) {
+ dGDebug("vnodeProcessFetchMsg vgId:%d, msg:%p failed to fetch since %s", pVnode->vgId, pMsg, terrstr());
+ } else {
+ dGError("vnodeProcessFetchMsg vgId:%d, msg:%p failed to fetch since %s", pVnode->vgId, pMsg, terrstr());
+ }
+
vmSendRsp(pMsg, code);
}
diff --git a/source/dnode/mgmt/node_mgmt/src/dmNodes.c b/source/dnode/mgmt/node_mgmt/src/dmNodes.c
index 16931ab6df..19d5e06c5b 100644
--- a/source/dnode/mgmt/node_mgmt/src/dmNodes.c
+++ b/source/dnode/mgmt/node_mgmt/src/dmNodes.c
@@ -132,11 +132,15 @@ int32_t dmRunDnode(SDnode *pDnode) {
int32_t count = 0;
if (dmOpenNodes(pDnode) != 0) {
dError("failed to open nodes since %s", terrstr());
+ dmCloseNodes(pDnode);
return -1;
}
if (dmStartNodes(pDnode) != 0) {
dError("failed to start nodes since %s", terrstr());
+ dmSetStatus(pDnode, DND_STAT_STOPPED);
+ dmStopNodes(pDnode);
+ dmCloseNodes(pDnode);
return -1;
}
diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c
index 5c20887cf5..92ff550895 100644
--- a/source/dnode/mnode/impl/src/mndMain.c
+++ b/source/dnode/mnode/impl/src/mndMain.c
@@ -124,11 +124,7 @@ static void mndCalMqRebalance(SMnode *pMnode) {
int32_t contLen = 0;
void *pReq = mndBuildTimerMsg(&contLen);
if (pReq != NULL) {
- SRpcMsg rpcMsg = {
- .msgType = TDMT_MND_TMQ_TIMER,
- .pCont = pReq,
- .contLen = contLen,
- };
+ SRpcMsg rpcMsg = { .msgType = TDMT_MND_TMQ_TIMER, .pCont = pReq, .contLen = contLen };
tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg);
}
}
diff --git a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c
index d08227927a..523753d7c6 100644
--- a/source/dnode/mnode/impl/src/mndUser.c
+++ b/source/dnode/mnode/impl/src/mndUser.c
@@ -390,6 +390,7 @@ static SSdbRow *mndUserActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT32(pRaw, dataPos, &ref, _OVER);
taosHashPut(pUser->useDbs, key, keyLen, &ref, sizeof(ref));
+ taosMemoryFree(key);
}
}
@@ -956,6 +957,7 @@ _OVER:
mError("user:%s, failed to alter since %s", alterReq.user, terrstr());
}
+ tFreeSAlterUserReq(&alterReq);
mndReleaseUser(pMnode, pOperUser);
mndReleaseUser(pMnode, pUser);
mndUserFreeObj(&newUser);
diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h
index fb2c2f4be3..002dcda488 100644
--- a/source/dnode/vnode/inc/vnode.h
+++ b/source/dnode/vnode/inc/vnode.h
@@ -162,7 +162,6 @@ int32_t metaTbCursorPrev(SMTbCursor *pTbCur, ETableType jumpTableType);
#endif
// tsdb
-// typedef struct STsdb STsdb;
typedef struct STsdbReader STsdbReader;
#define TSDB_DEFAULT_STT_FILE 8
@@ -176,11 +175,8 @@ typedef struct STsdbReader STsdbReader;
#define CACHESCAN_RETRIEVE_LAST_ROW 0x4
#define CACHESCAN_RETRIEVE_LAST 0x8
-int32_t tsdbSetTableList(STsdbReader *pReader, const void *pTableList, int32_t num);
-int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, void *pTableList, int32_t numOfTables,
- SSDataBlock *pResBlock, STsdbReader **ppReader, const char *idstr, bool countOnly);
-
-void tsdbReaderSetId(STsdbReader* pReader, const char* idstr);
+int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, void *pTableList, int32_t numOfTables,
+ SSDataBlock *pResBlock, STsdbReader **ppReader, const char *idstr, bool countOnly);
void tsdbReaderClose(STsdbReader *pReader);
int32_t tsdbNextDataBlock(STsdbReader *pReader, bool *hasNext);
int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SSDataBlock *pDataBlock, bool *allHave);
@@ -191,7 +187,10 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader *pReader, STableBlockDistInfo
int64_t tsdbGetNumOfRowsInMemTable(STsdbReader *pHandle);
void *tsdbGetIdx(SMeta *pMeta);
void *tsdbGetIvtIdx(SMeta *pMeta);
-uint64_t getReaderMaxVersion(STsdbReader *pReader);
+uint64_t tsdbGetReaderMaxVersion(STsdbReader *pReader);
+int32_t tsdbSetTableList(STsdbReader *pReader, const void *pTableList, int32_t num);
+void tsdbReaderSetId(STsdbReader *pReader, const char *idstr);
+void tsdbReaderSetCloseFlag(STsdbReader *pReader);
int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, void *pTableIdList, int32_t numOfTables, int32_t numOfCols,
uint64_t suid, void **pReader, const char *idstr);
@@ -232,26 +231,21 @@ typedef struct SSnapContext {
} SSnapContext;
typedef struct STqReader {
- SPackedData msg2;
-
- SSubmitReq2 submit;
- int32_t nextBlk;
-
- int64_t lastBlkUid;
-
- SWalReader *pWalReader;
-
- SMeta *pVnodeMeta;
- SHashObj *tbIdHash;
- SArray *pColIdList; // SArray
-
+ SPackedData msg2;
+ SSubmitReq2 submit;
+ int32_t nextBlk;
+ int64_t lastBlkUid;
+ SWalReader *pWalReader;
+ SMeta *pVnodeMeta;
+ SHashObj *tbIdHash;
+ SArray *pColIdList; // SArray
int32_t cachedSchemaVer;
int64_t cachedSchemaSuid;
SSchemaWrapper *pSchemaWrapper;
STSchema *pSchema;
} STqReader;
-STqReader *tqOpenReader(SVnode *pVnode);
+STqReader *tqReaderOpen(SVnode *pVnode);
void tqCloseReader(STqReader *);
void tqReaderSetColIdList(STqReader *pReader, SArray *pColIdList);
@@ -260,17 +254,14 @@ int32_t tqReaderAddTbUidList(STqReader *pReader, const SArray *pTableUidList);
int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);
int32_t tqSeekVer(STqReader *pReader, int64_t ver, const char *id);
-void tqNextBlock(STqReader *pReader, SFetchRet *ret);
+int32_t tqNextBlock(STqReader *pReader, SSDataBlock* pBlock);
int32_t extractSubmitMsgFromWal(SWalReader *pReader, SPackedData *pPackedData);
int32_t tqReaderSetSubmitMsg(STqReader *pReader, void *msgStr, int32_t msgLen, int64_t ver);
-// int32_t tqReaderSetDataMsg(STqReader *pReader, const SSubmitReq *pMsg, int64_t ver);
-bool tqNextDataBlock(STqReader *pReader);
-bool tqNextDataBlockFilterOut2(STqReader *pReader, SHashObj *filterOutUids);
-int32_t tqRetrieveDataBlock2(SSDataBlock *pBlock, STqReader *pReader, SSubmitTbData **pSubmitTbDataRet);
-int32_t tqRetrieveTaosxBlock2(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet);
-// int32_t tqRetrieveDataBlock(SSDataBlock *pBlock, STqReader *pReader);
-// int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas);
+bool tqNextBlockImpl(STqReader *pReader);
+bool tqNextDataBlockFilterOut(STqReader *pReader, SHashObj *filterOutUids);
+int32_t tqRetrieveDataBlock(SSDataBlock *pBlock, STqReader *pReader, SSubmitTbData **pSubmitTbDataRet);
+int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet);
int32_t vnodeEnqueueStreamMsg(SVnode *pVnode, SRpcMsg *pMsg);
diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h
index 2a85b191a4..6102487400 100644
--- a/source/dnode/vnode/src/inc/tsdb.h
+++ b/source/dnode/vnode/src/inc/tsdb.h
@@ -16,6 +16,7 @@
#ifndef _TD_VNODE_TSDB_H_
#define _TD_VNODE_TSDB_H_
+#include "tsimplehash.h"
#include "vnodeInt.h"
#ifdef __cplusplus
@@ -122,14 +123,14 @@ int32_t tsdbRowCmprFn(const void *p1, const void *p2);
int32_t tsdbRowIterOpen(STSDBRowIter *pIter, TSDBROW *pRow, STSchema *pTSchema);
void tsdbRowClose(STSDBRowIter *pIter);
SColVal *tsdbRowIterNext(STSDBRowIter *pIter);
-// SRowMerger
-int32_t tsdbRowMergerInit(SRowMerger *pMerger, STSchema *pResTSchema, TSDBROW *pRow, STSchema *pTSchema);
-int32_t tsdbRowMergerAdd(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema);
-// int32_t tsdbRowMergerInit(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema);
-void tsdbRowMergerClear(SRowMerger *pMerger);
-// int32_t tsdbRowMerge(SRowMerger *pMerger, TSDBROW *pRow);
+// SRowMerger
+int32_t tsdbRowMergerInit(SRowMerger *pMerger, STSchema *pSchema);
+int32_t tsdbRowMergerAdd(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema);
int32_t tsdbRowMergerGetRow(SRowMerger *pMerger, SRow **ppRow);
+void tsdbRowMergerClear(SRowMerger *pMerger);
+void tsdbRowMergerCleanup(SRowMerger *pMerger);
+
// TABLEID
int32_t tTABLEIDCmprFn(const void *p1, const void *p2);
// TSDBKEY
@@ -224,7 +225,7 @@ int32_t tsdbTbDataIterCreate(STbData *pTbData, TSDBKEY *pFrom, int8_t backward,
void *tsdbTbDataIterDestroy(STbDataIter *pIter);
void tsdbTbDataIterOpen(STbData *pTbData, TSDBKEY *pFrom, int8_t backward, STbDataIter *pIter);
bool tsdbTbDataIterNext(STbDataIter *pIter);
-void tsdbMemTableCountRows(SMemTable *pMemTable, SHashObj *pTableMap, int64_t *rowsNum);
+void tsdbMemTableCountRows(SMemTable *pMemTable, SSHashObj *pTableMap, int64_t *rowsNum);
// STbData
int32_t tsdbGetNRowsInTbData(STbData *pTbData);
@@ -322,8 +323,9 @@ int32_t tGnrtDiskData(SDiskDataBuilder *pBuilder, const SDiskData **ppDiskData,
#define TSDB_STT_FILE_DATA_ITER 2
#define TSDB_TOMB_FILE_DATA_ITER 3
-#define TSDB_FILTER_FLAG_BY_VERSION 0x1
-#define TSDB_FILTER_FLAG_BY_TABLEID 0x2
+#define TSDB_FILTER_FLAG_BY_VERSION 0x1
+#define TSDB_FILTER_FLAG_BY_TABLEID 0x2
+#define TSDB_FILTER_FLAG_IGNORE_DROPPED_TABLE 0x4
#define TSDB_RBTN_TO_DATA_ITER(pNode) ((STsdbDataIter2 *)(((char *)pNode) - offsetof(STsdbDataIter2, rbtn)))
/* open */
@@ -705,7 +707,6 @@ typedef struct SSttBlockLoadInfo {
typedef struct SMergeTree {
int8_t backward;
SRBTree rbt;
- SArray *pIterList;
SLDataIter *pIter;
bool destroyLoadInfo;
SSttBlockLoadInfo *pLoadInfo;
@@ -751,13 +752,29 @@ struct SDiskDataBuilder {
SBlkInfo bi;
};
+typedef struct SLDataIter {
+ SRBTreeNode node;
+ SSttBlk *pSttBlk;
+ SDataFReader *pReader;
+ int32_t iStt;
+ int8_t backward;
+ int32_t iSttBlk;
+ int32_t iRow;
+ SRowInfo rInfo;
+ uint64_t uid;
+ STimeWindow timeWindow;
+ SVersionRange verRange;
+ SSttBlockLoadInfo *pBlockLoadInfo;
+ bool ignoreEarlierTs;
+} SLDataIter;
+
+#define tMergeTreeGetRow(_t) (&((_t)->pIter->rInfo.row))
int32_t tMergeTreeOpen(SMergeTree *pMTree, int8_t backward, SDataFReader *pFReader, uint64_t suid, uint64_t uid,
STimeWindow *pTimeWindow, SVersionRange *pVerRange, SSttBlockLoadInfo *pBlockLoadInfo,
- bool destroyLoadInfo, const char *idStr, bool strictTimeRange);
+ bool destroyLoadInfo, const char *idStr, bool strictTimeRange, SLDataIter* pLDataIter);
void tMergeTreeAddIter(SMergeTree *pMTree, SLDataIter *pIter);
bool tMergeTreeNext(SMergeTree *pMTree);
bool tMergeTreeIgnoreEarlierTs(SMergeTree *pMTree);
-TSDBROW tMergeTreeGetRow(SMergeTree *pMTree);
void tMergeTreeClose(SMergeTree *pMTree);
SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols, int32_t numOfStt);
@@ -782,6 +799,7 @@ typedef struct SCacheRowsReader {
STableKeyInfo *pTableList; // table id list
int32_t numOfTables;
SSttBlockLoadInfo *pLoadInfo;
+ SLDataIter *pDataIter;
STsdbReadSnap *pReadSnap;
SDataFReader *pDataFReader;
SDataFReader *pDataFReaderLast;
diff --git a/source/dnode/vnode/src/meta/metaSnapshot.c b/source/dnode/vnode/src/meta/metaSnapshot.c
index 0126d29cc9..56e802d4fb 100644
--- a/source/dnode/vnode/src/meta/metaSnapshot.c
+++ b/source/dnode/vnode/src/meta/metaSnapshot.c
@@ -187,23 +187,24 @@ _err:
int32_t metaSnapWrite(SMetaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
int32_t code = 0;
+ int32_t line = 0;
SMeta* pMeta = pWriter->pMeta;
SMetaEntry metaEntry = {0};
SDecoder* pDecoder = &(SDecoder){0};
tDecoderInit(pDecoder, pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr));
code = metaDecodeEntry(pDecoder, &metaEntry);
- if (code) goto _err;
+ VND_CHECK_CODE(code, line, _err);
code = metaHandleEntry(pMeta, &metaEntry);
- if (code) goto _err;
+ VND_CHECK_CODE(code, line, _err);
tDecoderClear(pDecoder);
return code;
_err:
tDecoderClear(pDecoder);
- metaError("vgId:%d, vnode snapshot meta write failed since %s", TD_VID(pMeta->pVnode), tstrerror(code));
+ metaError("vgId:%d, vnode snapshot meta write failed since %s at line:%d", TD_VID(pMeta->pVnode), terrstr(), line);
return code;
}
diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c
index 3325f4055c..96eec89127 100644
--- a/source/dnode/vnode/src/meta/metaTable.c
+++ b/source/dnode/vnode/src/meta/metaTable.c
@@ -936,8 +936,7 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
int tLen = 0;
if (tdbTbGet(pMeta->pUidIdx, &e.ctbEntry.suid, sizeof(tb_uid_t), &tData, &tLen) == 0) {
- version = ((SUidIdxVal *)tData)[0].version;
- STbDbKey tbDbKey = {.uid = e.ctbEntry.suid, .version = version};
+ STbDbKey tbDbKey = {.uid = e.ctbEntry.suid, .version = ((SUidIdxVal *)tData)[0].version};
if (tdbTbGet(pMeta->pTbDb, &tbDbKey, sizeof(tbDbKey), &tData, &tLen) == 0) {
SDecoder tdc = {0};
SMetaEntry stbEntry = {0};
@@ -1029,7 +1028,7 @@ int metaUpdateCtimeIdx(SMeta *pMeta, const SMetaEntry *pME) {
metaTrace("vgId:%d, start to save version:%" PRId64 " uid:%" PRId64 " ctime:%" PRId64, TD_VID(pMeta->pVnode),
pME->version, pME->uid, ctimeKey.ctime);
- return tdbTbInsert(pMeta->pCtimeIdx, &ctimeKey, sizeof(ctimeKey), NULL, 0, pMeta->txn);
+ return tdbTbUpsert(pMeta->pCtimeIdx, &ctimeKey, sizeof(ctimeKey), NULL, 0, pMeta->txn);
}
int metaDeleteCtimeIdx(SMeta *pMeta, const SMetaEntry *pME) {
@@ -1044,7 +1043,7 @@ int metaUpdateNcolIdx(SMeta *pMeta, const SMetaEntry *pME) {
if (metaBuildNColIdxKey(&ncolKey, pME) < 0) {
return 0;
}
- return tdbTbInsert(pMeta->pNcolIdx, &ncolKey, sizeof(ncolKey), NULL, 0, pMeta->txn);
+ return tdbTbUpsert(pMeta->pNcolIdx, &ncolKey, sizeof(ncolKey), NULL, 0, pMeta->txn);
}
int metaDeleteNcolIdx(SMeta *pMeta, const SMetaEntry *pME) {
@@ -1878,24 +1877,24 @@ static int metaUpdateUidIdx(SMeta *pMeta, const SMetaEntry *pME) {
}
static int metaUpdateSuidIdx(SMeta *pMeta, const SMetaEntry *pME) {
- return tdbTbInsert(pMeta->pSuidIdx, &pME->uid, sizeof(tb_uid_t), NULL, 0, pMeta->txn);
+ return tdbTbUpsert(pMeta->pSuidIdx, &pME->uid, sizeof(tb_uid_t), NULL, 0, pMeta->txn);
}
static int metaUpdateNameIdx(SMeta *pMeta, const SMetaEntry *pME) {
- return tdbTbInsert(pMeta->pNameIdx, pME->name, strlen(pME->name) + 1, &pME->uid, sizeof(tb_uid_t), pMeta->txn);
+ return tdbTbUpsert(pMeta->pNameIdx, pME->name, strlen(pME->name) + 1, &pME->uid, sizeof(tb_uid_t), pMeta->txn);
}
static int metaUpdateTtlIdx(SMeta *pMeta, const SMetaEntry *pME) {
STtlIdxKey ttlKey = {0};
metaBuildTtlIdxKey(&ttlKey, pME);
if (ttlKey.dtime == 0) return 0;
- return tdbTbInsert(pMeta->pTtlIdx, &ttlKey, sizeof(ttlKey), NULL, 0, pMeta->txn);
+ return tdbTbUpsert(pMeta->pTtlIdx, &ttlKey, sizeof(ttlKey), NULL, 0, pMeta->txn);
}
static int metaUpdateCtbIdx(SMeta *pMeta, const SMetaEntry *pME) {
SCtbIdxKey ctbIdxKey = {.suid = pME->ctbEntry.suid, .uid = pME->uid};
- return tdbTbInsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), pME->ctbEntry.pTags,
+ return tdbTbUpsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), pME->ctbEntry.pTags,
((STag *)(pME->ctbEntry.pTags))->len, pMeta->txn);
}
@@ -2065,49 +2064,66 @@ _exit:
}
int metaHandleEntry(SMeta *pMeta, const SMetaEntry *pME) {
+ int32_t code = 0;
+ int32_t line = 0;
metaWLock(pMeta);
// save to table.db
- if (metaSaveToTbDb(pMeta, pME) < 0) goto _err;
+ code = metaSaveToTbDb(pMeta, pME);
+ VND_CHECK_CODE(code, line, _err);
// update uid.idx
- if (metaUpdateUidIdx(pMeta, pME) < 0) goto _err;
+ code = metaUpdateUidIdx(pMeta, pME);
+ VND_CHECK_CODE(code, line, _err);
// update name.idx
- if (metaUpdateNameIdx(pMeta, pME) < 0) goto _err;
+ code = metaUpdateNameIdx(pMeta, pME);
+ VND_CHECK_CODE(code, line, _err);
if (pME->type == TSDB_CHILD_TABLE) {
// update ctb.idx
- if (metaUpdateCtbIdx(pMeta, pME) < 0) goto _err;
+ code = metaUpdateCtbIdx(pMeta, pME);
+ VND_CHECK_CODE(code, line, _err);
// update tag.idx
- if (metaUpdateTagIdx(pMeta, pME) < 0) goto _err;
+ code = metaUpdateTagIdx(pMeta, pME);
+ VND_CHECK_CODE(code, line, _err);
} else {
// update schema.db
- if (metaSaveToSkmDb(pMeta, pME) < 0) goto _err;
+ code = metaSaveToSkmDb(pMeta, pME);
+ VND_CHECK_CODE(code, line, _err);
if (pME->type == TSDB_SUPER_TABLE) {
- if (metaUpdateSuidIdx(pMeta, pME) < 0) goto _err;
+ code = metaUpdateSuidIdx(pMeta, pME);
+ VND_CHECK_CODE(code, line, _err);
}
}
- if (metaUpdateCtimeIdx(pMeta, pME) < 0) goto _err;
+ code = metaUpdateCtimeIdx(pMeta, pME);
+ VND_CHECK_CODE(code, line, _err);
if (pME->type == TSDB_NORMAL_TABLE) {
- if (metaUpdateNcolIdx(pMeta, pME) < 0) goto _err;
+ code = metaUpdateNcolIdx(pMeta, pME);
+ VND_CHECK_CODE(code, line, _err);
}
if (pME->type != TSDB_SUPER_TABLE) {
- if (metaUpdateTtlIdx(pMeta, pME) < 0) goto _err;
+ code = metaUpdateTtlIdx(pMeta, pME);
+ VND_CHECK_CODE(code, line, _err);
}
metaULock(pMeta);
+ metaDebug("vgId:%d, handle meta entry, ver:%" PRId64 ", uid:%" PRId64 ", name:%s", TD_VID(pMeta->pVnode),
+ pME->version, pME->uid, pME->name);
return 0;
_err:
metaULock(pMeta);
+ metaError("vgId:%d, failed to handle meta entry since %s at line:%d, ver:%" PRId64 ", uid:%" PRId64 ", name:%s",
+ TD_VID(pMeta->pVnode), terrstr(), line, pME->version, pME->uid, pME->name);
return -1;
}
+
// refactor later
void *metaGetIdx(SMeta *pMeta) { return pMeta->pTagIdx; }
void *metaGetIvtIdx(SMeta *pMeta) { return pMeta->pTagIvtIdx; }
diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c
index 7f5f224d15..0beb17a56e 100644
--- a/source/dnode/vnode/src/sma/smaRollup.c
+++ b/source/dnode/vnode/src/sma/smaRollup.c
@@ -696,7 +696,7 @@ static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSma
}
if (pReq && tdProcessSubmitReq(sinkTsdb, output->info.version, pReq) < 0) {
- tDestroySSubmitReq2(pReq, TSDB_MSG_FLG_ENCODE);
+ tDestroySSubmitReq(pReq, TSDB_MSG_FLG_ENCODE);
taosMemoryFree(pReq);
smaError("vgId:%d, process submit req for rsma suid:%" PRIu64 ", uid:%" PRIu64 " level %" PRIi8
" failed since %s",
@@ -708,7 +708,7 @@ static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSma
SMA_VID(pSma), suid, output->info.id.groupId, pItem->level, output->info.version);
if (pReq) {
- tDestroySSubmitReq2(pReq, TSDB_MSG_FLG_ENCODE);
+ tDestroySSubmitReq(pReq, TSDB_MSG_FLG_ENCODE);
taosMemoryFree(pReq);
}
}
diff --git a/source/dnode/vnode/src/sma/smaTimeRange.c b/source/dnode/vnode/src/sma/smaTimeRange.c
index 4fa1edce57..e3ea4d0548 100644
--- a/source/dnode/vnode/src/sma/smaTimeRange.c
+++ b/source/dnode/vnode/src/sma/smaTimeRange.c
@@ -328,7 +328,7 @@ _exit:
taosArrayDestroy(tagArray);
taosArrayDestroy(pVals);
if (pReq) {
- tDestroySSubmitReq2(pReq, TSDB_MSG_FLG_ENCODE);
+ tDestroySSubmitReq(pReq, TSDB_MSG_FLG_ENCODE);
taosMemoryFree(pReq);
}
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index ae52db163f..36c35ab415 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -504,7 +504,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
pHandle->execHandle.pTqReader = qExtractReaderFromStreamScanner(scanner);
} else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
pHandle->pWalReader = walOpenReader(pVnode->pWal, NULL);
- pHandle->execHandle.pTqReader = tqOpenReader(pVnode);
+ pHandle->execHandle.pTqReader = tqReaderOpen(pVnode);
pHandle->execHandle.execDb.pFilterOutTbUid =
taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
@@ -523,7 +523,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
tqDebug("vgId:%d, idx %d, uid:%" PRId64, vgId, i, tbUid);
}
- pHandle->execHandle.pTqReader = tqOpenReader(pVnode);
+ pHandle->execHandle.pTqReader = tqReaderOpen(pVnode);
tqReaderSetTbUidList(pHandle->execHandle.pTqReader, tbUidList);
taosArrayDestroy(tbUidList);
@@ -821,13 +821,18 @@ int32_t tqProcessTaskRecover1Req(STQ* pTq, SRpcMsg* pMsg) {
}
// do recovery step 1
- streamSourceRecoverScanStep1(pTask);
+ tqDebug("s-task:%s start recover step 1 scan", pTask->id.idStr);
+ int64_t st = taosGetTimestampMs();
+ streamSourceRecoverScanStep1(pTask);
if (atomic_load_8(&pTask->status.taskStatus) == TASK_STATUS__DROPPING) {
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return 0;
}
+ double el = (taosGetTimestampMs() - st) / 1000.0;
+ tqDebug("s-task:%s recover step 1 ended, elapsed time:%.2fs", pTask->id.idStr, el);
+
// build msg to launch next step
SStreamRecoverStep2Req req;
code = streamBuildSourceRecover2Req(pTask, &req);
@@ -853,20 +858,17 @@ int32_t tqProcessTaskRecover1Req(STQ* pTq, SRpcMsg* pMsg) {
memcpy(serializedReq, &req, len);
// dispatch msg
+ tqDebug("s-task:%s start recover block stage", pTask->id.idStr);
+
SRpcMsg rpcMsg = {
- .code = 0,
- .contLen = len,
- .msgType = TDMT_VND_STREAM_RECOVER_BLOCKING_STAGE,
- .pCont = serializedReq,
- };
-
+ .code = 0, .contLen = len, .msgType = TDMT_VND_STREAM_RECOVER_BLOCKING_STAGE, .pCont = serializedReq};
tmsgPutToQueue(&pTq->pVnode->msgCb, WRITE_QUEUE, &rpcMsg);
-
return 0;
}
int32_t tqProcessTaskRecover2Req(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {
- int32_t code;
+ int32_t code = 0;
+
SStreamRecoverStep2Req* pReq = (SStreamRecoverStep2Req*)msg;
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->taskId);
if (pTask == NULL) {
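For reference, the timing added around recover step 1 is a plain start/elapsed pattern; a minimal sketch using only the calls that appear in the hunk above (pTask is assumed valid, as in the surrounding function):

    int64_t st = taosGetTimestampMs();                 // wall-clock start, in milliseconds
    streamSourceRecoverScanStep1(pTask);               // the potentially long-running scan
    double el = (taosGetTimestampMs() - st) / 1000.0;  // elapsed time in seconds
    tqDebug("s-task:%s recover step 1 ended, elapsed time:%.2fs", pTask->id.idStr, el);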
diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c
index cd8cefb307..f3ecaa08f6 100644
--- a/source/dnode/vnode/src/tq/tqMeta.c
+++ b/source/dnode/vnode/src/tq/tqMeta.c
@@ -328,7 +328,7 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
}
} else if (handle.execHandle.subType == TOPIC_SUB_TYPE__DB) {
handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
- handle.execHandle.pTqReader = tqOpenReader(pTq->pVnode);
+ handle.execHandle.pTqReader = tqReaderOpen(pTq->pVnode);
buildSnapContext(reader.meta, reader.version, 0, handle.execHandle.subType, handle.fetchMeta,
(SSnapContext**)(&reader.sContext));
@@ -343,7 +343,7 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
tqDebug("vgId:%d, idx %d, uid:%" PRId64, vgId, i, tbUid);
}
- handle.execHandle.pTqReader = tqOpenReader(pTq->pVnode);
+ handle.execHandle.pTqReader = tqReaderOpen(pTq->pVnode);
tqReaderSetTbUidList(handle.execHandle.pTqReader, tbUidList);
taosArrayDestroy(tbUidList);
diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c
index 2cda12c0e1..ead00dcc35 100644
--- a/source/dnode/vnode/src/tq/tqRead.c
+++ b/source/dnode/vnode/src/tq/tqRead.c
@@ -249,7 +249,7 @@ END:
return code;
}
-STqReader* tqOpenReader(SVnode* pVnode) {
+STqReader* tqReaderOpen(SVnode* pVnode) {
STqReader* pReader = taosMemoryCalloc(1, sizeof(STqReader));
if (pReader == NULL) {
return NULL;
@@ -288,7 +288,7 @@ void tqCloseReader(STqReader* pReader) {
}
// free hash
taosHashCleanup(pReader->tbIdHash);
- tDestroySSubmitReq2(&pReader->submit, TSDB_MSG_FLG_DECODE);
+ tDestroySSubmitReq(&pReader->submit, TSDB_MSG_FLG_DECODE);
taosMemoryFree(pReader);
}
@@ -322,12 +322,11 @@ int32_t extractSubmitMsgFromWal(SWalReader* pReader, SPackedData* pPackedData) {
return 0;
}
-void tqNextBlock(STqReader* pReader, SFetchRet* ret) {
+int32_t tqNextBlock(STqReader* pReader, SSDataBlock* pBlock) {
while (1) {
if (pReader->msg2.msgStr == NULL) {
if (walNextValidMsg(pReader->pWalReader) < 0) {
- ret->fetchType = FETCH_TYPE__NONE;
- return;
+ return FETCH_TYPE__NONE;
}
void* pBody = POINTER_SHIFT(pReader->pWalReader->pHead->head.body, sizeof(SSubmitReq2Msg));
@@ -337,15 +336,14 @@ void tqNextBlock(STqReader* pReader, SFetchRet* ret) {
tqReaderSetSubmitMsg(pReader, pBody, bodyLen, ver);
}
- while (tqNextDataBlock(pReader)) {
- memset(&ret->data, 0, sizeof(SSDataBlock));
- int32_t code = tqRetrieveDataBlock2(&ret->data, pReader, NULL);
- if (code != 0 || ret->data.info.rows == 0) {
+ while (tqNextBlockImpl(pReader)) {
+ memset(pBlock, 0, sizeof(SSDataBlock));
+ int32_t code = tqRetrieveDataBlock(pBlock, pReader, NULL);
+ if (code != TSDB_CODE_SUCCESS || pBlock->info.rows == 0) {
continue;
}
- ret->fetchType = FETCH_TYPE__DATA;
- return;
+ return FETCH_TYPE__DATA;
}
}
}
@@ -367,7 +365,7 @@ int32_t tqReaderSetSubmitMsg(STqReader* pReader, void* msgStr, int32_t msgLen, i
return 0;
}
-bool tqNextDataBlock(STqReader* pReader) {
+bool tqNextBlockImpl(STqReader* pReader) {
if (pReader->msg2.msgStr == NULL) {
return false;
}
@@ -387,20 +385,20 @@ bool tqNextDataBlock(STqReader* pReader) {
tqDebug("tq reader block found, ver:%"PRId64", uid:%"PRId64, pReader->msg2.ver, pSubmitTbData->uid);
return true;
} else {
- tqDebug("tq reader discard block, uid:%"PRId64", continue", pSubmitTbData->uid);
+ tqDebug("tq reader discard submit block, uid:%"PRId64", continue", pSubmitTbData->uid);
}
pReader->nextBlk++;
}
- tDestroySSubmitReq2(&pReader->submit, TSDB_MSG_FLG_DECODE);
+ tDestroySSubmitReq(&pReader->submit, TSDB_MSG_FLG_DECODE);
pReader->nextBlk = 0;
pReader->msg2.msgStr = NULL;
return false;
}
-bool tqNextDataBlockFilterOut2(STqReader* pReader, SHashObj* filterOutUids) {
+bool tqNextDataBlockFilterOut(STqReader* pReader, SHashObj* filterOutUids) {
if (pReader->msg2.msgStr == NULL) return false;
int32_t blockSz = taosArrayGetSize(pReader->submit.aSubmitTbData);
@@ -415,7 +413,7 @@ bool tqNextDataBlockFilterOut2(STqReader* pReader, SHashObj* filterOutUids) {
pReader->nextBlk++;
}
- tDestroySSubmitReq2(&pReader->submit, TSDB_MSG_FLG_DECODE);
+ tDestroySSubmitReq(&pReader->submit, TSDB_MSG_FLG_DECODE);
pReader->nextBlk = 0;
pReader->msg2.msgStr = NULL;
@@ -451,7 +449,7 @@ int32_t tqMaskBlock(SSchemaWrapper* pDst, SSDataBlock* pBlock, const SSchemaWrap
return 0;
}
-int32_t tqRetrieveDataBlock2(SSDataBlock* pBlock, STqReader* pReader, SSubmitTbData** pSubmitTbDataRet) {
+int32_t tqRetrieveDataBlock(SSDataBlock* pBlock, STqReader* pReader, SSubmitTbData** pSubmitTbDataRet) {
tqDebug("tq reader retrieve data block %p, index:%d", pReader->msg2.msgStr, pReader->nextBlk);
SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
pReader->nextBlk++;
@@ -560,7 +558,7 @@ int32_t tqRetrieveDataBlock2(SSDataBlock* pBlock, STqReader* pReader, SSubmitTbD
int32_t sourceIdx = 0;
while (targetIdx < colActual) {
if(sourceIdx >= numOfCols){
- tqError("tqRetrieveDataBlock2 sourceIdx:%d >= numOfCols:%d", sourceIdx, numOfCols);
+ tqError("tqRetrieveDataBlock sourceIdx:%d >= numOfCols:%d", sourceIdx, numOfCols);
goto FAIL;
}
SColData* pCol = taosArrayGet(pCols, sourceIdx);
@@ -568,7 +566,7 @@ int32_t tqRetrieveDataBlock2(SSDataBlock* pBlock, STqReader* pReader, SSubmitTbD
SColVal colVal;
if(pCol->nVal != numOfRows){
- tqError("tqRetrieveDataBlock2 pCol->nVal:%d != numOfRows:%d", pCol->nVal, numOfRows);
+ tqError("tqRetrieveDataBlock pCol->nVal:%d != numOfRows:%d", pCol->nVal, numOfRows);
goto FAIL;
}
@@ -655,7 +653,7 @@ FAIL:
return -1;
}
-int32_t tqRetrieveTaosxBlock2(STqReader* pReader, SArray* blocks, SArray* schemas, SSubmitTbData** pSubmitTbDataRet) {
+int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas, SSubmitTbData** pSubmitTbDataRet) {
tqDebug("tq reader retrieve data block %p, %d", pReader->msg2.msgStr, pReader->nextBlk);
SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
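Aside on the reworked reader API above: tqNextBlock now fills a caller-provided SSDataBlock and reports the outcome through its return value instead of an SFetchRet out-parameter. A hedged caller-side sketch (handleDataBlock is a hypothetical consumer; everything else comes from the signatures in this file):

    SSDataBlock block = {0};
    while (tqNextBlock(pReader, &block) == FETCH_TYPE__DATA) {
      // only successfully decoded, non-empty blocks reach this point
      handleDataBlock(&block);   // hypothetical consumer, not defined in this patch
    }
    // a return of FETCH_TYPE__NONE means the WAL has no further valid submit messages for now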
diff --git a/source/dnode/vnode/src/tq/tqRestore.c b/source/dnode/vnode/src/tq/tqRestore.c
index c164d037e0..22903b95d9 100644
--- a/source/dnode/vnode/src/tq/tqRestore.c
+++ b/source/dnode/vnode/src/tq/tqRestore.c
@@ -107,7 +107,7 @@ int32_t createStreamRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle) {
if (streamTaskShouldStop(&pTask->status) || status == TASK_STATUS__RECOVER_PREPARE ||
status == TASK_STATUS__WAIT_DOWNSTREAM) {
- tqDebug("s-task:%s skip push data, not ready for processing, status %d", pTask->id.idStr, status);
+ tqDebug("s-task:%s not ready for new submit block from wal, status:%d", pTask->id.idStr, status);
streamMetaReleaseTask(pStreamMeta, pTask);
continue;
}
diff --git a/source/dnode/vnode/src/tq/tqScan.c b/source/dnode/vnode/src/tq/tqScan.c
index 27db66f048..8e243a8bd1 100644
--- a/source/dnode/vnode/src/tq/tqScan.c
+++ b/source/dnode/vnode/src/tq/tqScan.c
@@ -74,7 +74,6 @@ int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffs
qTaskInfo_t task = pExec->task;
if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
- tqError("prepare scan failed, return");
return -1;
}
@@ -119,7 +118,6 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMeta
qTaskInfo_t task = pExec->task;
if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
- tqDebug("tqScanTaosx prepare scan failed, return");
return -1;
}
@@ -132,6 +130,7 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMeta
tqError("vgId:%d, task exec error since %s", pTq->pVnode->config.vgId, terrstr());
return -1;
}
+
tqDebug("tmqsnap task execute end, get %p", pDataBlock);
if (pDataBlock != NULL && pDataBlock->info.rows > 0) {
@@ -205,11 +204,11 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxR
if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
STqReader* pReader = pExec->pTqReader;
tqReaderSetSubmitMsg(pReader, submit.msgStr, submit.msgLen, submit.ver);
- while (tqNextDataBlock(pReader)) {
+ while (tqNextBlockImpl(pReader)) {
taosArrayClear(pBlocks);
taosArrayClear(pSchemas);
SSubmitTbData* pSubmitTbDataRet = NULL;
- if (tqRetrieveTaosxBlock2(pReader, pBlocks, pSchemas, &pSubmitTbDataRet) < 0) {
+ if (tqRetrieveTaosxBlock(pReader, pBlocks, pSchemas, &pSubmitTbDataRet) < 0) {
if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue;
}
if (pRsp->withTbName) {
@@ -264,11 +263,11 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxR
} else if (pExec->subType == TOPIC_SUB_TYPE__DB) {
STqReader* pReader = pExec->pTqReader;
tqReaderSetSubmitMsg(pReader, submit.msgStr, submit.msgLen, submit.ver);
- while (tqNextDataBlockFilterOut2(pReader, pExec->execDb.pFilterOutTbUid)) {
+ while (tqNextDataBlockFilterOut(pReader, pExec->execDb.pFilterOutTbUid)) {
taosArrayClear(pBlocks);
taosArrayClear(pSchemas);
SSubmitTbData* pSubmitTbDataRet = NULL;
- if (tqRetrieveTaosxBlock2(pReader, pBlocks, pSchemas, &pSubmitTbDataRet) < 0) {
+ if (tqRetrieveTaosxBlock(pReader, pBlocks, pSchemas, &pSubmitTbDataRet) < 0) {
if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue;
}
if (pRsp->withTbName) {
diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c
index 62b81305b7..c2e6946b04 100644
--- a/source/dnode/vnode/src/tq/tqSink.c
+++ b/source/dnode/vnode/src/tq/tqSink.c
@@ -695,7 +695,7 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
len += sizeof(SSubmitReq2Msg);
pBuf = rpcMallocCont(len);
if (NULL == pBuf) {
- tDestroySSubmitReq2(&submitReq, TSDB_MSG_FLG_ENCODE);
+ tDestroySSubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
goto _end;
}
((SSubmitReq2Msg*)pBuf)->header.vgId = TD_VID(pVnode);
@@ -707,11 +707,11 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
tqError("failed to encode submit req since %s", terrstr());
tEncoderClear(&encoder);
rpcFreeCont(pBuf);
- tDestroySSubmitReq2(&submitReq, TSDB_MSG_FLG_ENCODE);
+ tDestroySSubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
continue;
}
tEncoderClear(&encoder);
- tDestroySSubmitReq2(&submitReq, TSDB_MSG_FLG_ENCODE);
+ tDestroySSubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
SRpcMsg msg = {
.msgType = TDMT_VND_SUBMIT,
diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c
index 3c7edd931b..65fc086f8d 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCache.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCache.c
@@ -598,6 +598,7 @@ typedef struct {
SMergeTree mergeTree;
SMergeTree *pMergeTree;
SSttBlockLoadInfo *pLoadInfo;
+ SLDataIter* pDataIter;
int64_t lastTs;
} SFSLastNextRowIter;
@@ -645,7 +646,7 @@ static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow, bool *pIgnoreEa
}
tMergeTreeOpen(&state->mergeTree, 1, *state->pDataFReader, state->suid, state->uid,
&(STimeWindow){.skey = state->lastTs, .ekey = TSKEY_MAX},
- &(SVersionRange){.minVer = 0, .maxVer = UINT64_MAX}, state->pLoadInfo, false, NULL, true);
+ &(SVersionRange){.minVer = 0, .maxVer = UINT64_MAX}, state->pLoadInfo, false, NULL, true, state->pDataIter);
state->pMergeTree = &state->mergeTree;
state->state = SFSLASTNEXTROW_BLOCKROW;
}
@@ -667,7 +668,7 @@ static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow, bool *pIgnoreEa
state->state = SFSLASTNEXTROW_FILESET;
goto _next_fileset;
}
- state->row = tMergeTreeGetRow(&state->mergeTree);
+ state->row = *tMergeTreeGetRow(&state->mergeTree);
*ppRow = &state->row;
if (TSDBROW_TS(&state->row) <= state->lastTs) {
@@ -1211,7 +1212,7 @@ typedef struct {
} CacheNextRowIter;
static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTsdb, STSchema *pTSchema, tb_uid_t suid,
- SSttBlockLoadInfo *pLoadInfo, STsdbReadSnap *pReadSnap, SDataFReader **pDataFReader,
+ SSttBlockLoadInfo *pLoadInfo, SLDataIter* pLDataIter, STsdbReadSnap *pReadSnap, SDataFReader **pDataFReader,
SDataFReader **pDataFReaderLast, int64_t lastTs) {
int code = 0;
@@ -1274,6 +1275,7 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs
pIter->fsLastState.pLoadInfo = pLoadInfo;
pIter->fsLastState.pDataFReader = pDataFReaderLast;
pIter->fsLastState.lastTs = lastTs;
+ pIter->fsLastState.pDataIter = pLDataIter;
pIter->fsState.state = SFSNEXTROW_FS;
pIter->fsState.pTsdb = pTsdb;
@@ -1465,7 +1467,7 @@ static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, SArray **ppCo
TSKEY lastRowTs = TSKEY_MAX;
CacheNextRowIter iter = {0};
- nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->suid, pr->pLoadInfo, pr->pReadSnap, &pr->pDataFReader,
+ nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->suid, pr->pLoadInfo, pr->pDataIter, pr->pReadSnap, &pr->pDataFReader,
&pr->pDataFReaderLast, pr->lastTs);
do {
@@ -1622,7 +1624,7 @@ static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCach
TSKEY lastRowTs = TSKEY_MAX;
CacheNextRowIter iter = {0};
- nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->suid, pr->pLoadInfo, pr->pReadSnap, &pr->pDataFReader,
+ nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->suid, pr->pLoadInfo, pr->pDataIter, pr->pReadSnap, &pr->pDataFReader,
&pr->pDataFReaderLast, pr->lastTs);
do {
diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
index 95981c2f08..64d30c77a3 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
@@ -187,13 +187,21 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList,
}
}
- int32_t numOfStt = ((SVnode*)pVnode)->config.sttTrigger;
+ SVnodeCfg* pCfg = &((SVnode*)pVnode)->config;
+
+ int32_t numOfStt = pCfg->sttTrigger;
p->pLoadInfo = tCreateLastBlockLoadInfo(p->pSchema, NULL, 0, numOfStt);
if (p->pLoadInfo == NULL) {
tsdbCacherowsReaderClose(p);
return TSDB_CODE_OUT_OF_MEMORY;
}
+ p->pDataIter = taosMemoryCalloc(pCfg->sttTrigger, sizeof(SLDataIter));
+ if (p->pDataIter == NULL) {
+ tsdbCacherowsReaderClose(p);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
p->idstr = taosStrdup(idstr);
taosThreadMutexInit(&p->readerMutex, NULL);
@@ -215,6 +223,7 @@ void* tsdbCacherowsReaderClose(void* pReader) {
taosMemoryFree(p->pSchema);
}
+ taosMemoryFreeClear(p->pDataIter);
taosMemoryFree(p->pCurrSchema);
destroyLastBlockLoadInfo(p->pLoadInfo);
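The cache-rows reader now pre-allocates the SLDataIter slots it later hands to the merge tree: one per stt file permitted by sttTrigger, created in open and released in close. Reduced to the ownership pattern, using only calls visible in this hunk (error paths shortened for brevity):

    int32_t numOfStt = pCfg->sttTrigger;                            // stt files per fileset
    p->pLoadInfo = tCreateLastBlockLoadInfo(p->pSchema, NULL, 0, numOfStt);
    p->pDataIter = taosMemoryCalloc(numOfStt, sizeof(SLDataIter));  // caller-owned iterator slots
    if (p->pLoadInfo == NULL || p->pDataIter == NULL) {
      tsdbCacherowsReaderClose(p);          // close() tolerates a partially initialized reader
      return TSDB_CODE_OUT_OF_MEMORY;
    }
    // ... and in tsdbCacherowsReaderClose():
    taosMemoryFreeClear(p->pDataIter);
    destroyLastBlockLoadInfo(p->pLoadInfo); // now NULL-safe, see tsdbMergeTree.c below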
diff --git a/source/dnode/vnode/src/tsdb/tsdbDataIter.c b/source/dnode/vnode/src/tsdb/tsdbDataIter.c
index 3299a2f497..e27aec5b1b 100644
--- a/source/dnode/vnode/src/tsdb/tsdbDataIter.c
+++ b/source/dnode/vnode/src/tsdb/tsdbDataIter.c
@@ -14,6 +14,7 @@
*/
#include "tsdb.h"
+#include "vnodeInt.h"
// STsdbDataIter2
/* open */
@@ -202,13 +203,6 @@ static int32_t tsdbDataFileDataIterNext(STsdbDataIter2* pIter, STsdbFilterInfo*
for (;;) {
while (pIter->dIter.iRow < pIter->dIter.bData.nRow) {
if (pFilterInfo) {
- if (pFilterInfo->flag & TSDB_FILTER_FLAG_BY_TABLEID) {
- if (pFilterInfo->tbid.uid == pIter->dIter.bData.uid) {
- pIter->dIter.iRow = pIter->dIter.bData.nRow;
- continue;
- }
- }
-
if (pFilterInfo->flag & TSDB_FILTER_FLAG_BY_VERSION) {
if (pIter->dIter.bData.aVersion[pIter->dIter.iRow] < pFilterInfo->sver ||
pIter->dIter.bData.aVersion[pIter->dIter.iRow] > pFilterInfo->ever) {
@@ -232,13 +226,6 @@ static int32_t tsdbDataFileDataIterNext(STsdbDataIter2* pIter, STsdbFilterInfo*
// filter
if (pFilterInfo) {
- if (pFilterInfo->flag & TSDB_FILTER_FLAG_BY_TABLEID) {
- if (tTABLEIDCmprFn(&pFilterInfo->tbid, &pIter->rowInfo) == 0) {
- pIter->dIter.iDataBlk = pIter->dIter.mDataBlk.nItem;
- continue;
- }
- }
-
if (pFilterInfo->flag & TSDB_FILTER_FLAG_BY_VERSION) {
if (pFilterInfo->sver > dataBlk.maxVer || pFilterInfo->ever < dataBlk.minVer) {
pIter->dIter.iDataBlk++;
@@ -262,13 +249,23 @@ static int32_t tsdbDataFileDataIterNext(STsdbDataIter2* pIter, STsdbFilterInfo*
if (pIter->dIter.iBlockIdx < taosArrayGetSize(pIter->dIter.aBlockIdx)) {
SBlockIdx* pBlockIdx = taosArrayGet(pIter->dIter.aBlockIdx, pIter->dIter.iBlockIdx);
- if (pFilterInfo && (pFilterInfo->flag & TSDB_FILTER_FLAG_BY_TABLEID)) {
- int32_t c = tTABLEIDCmprFn(pBlockIdx, &pFilterInfo->tbid);
- if (c == 0) {
- pIter->dIter.iBlockIdx++;
- continue;
- } else if (c < 0) {
- ASSERT(0);
+ if (pFilterInfo) {
+ if (pFilterInfo->flag & TSDB_FILTER_FLAG_BY_TABLEID) {
+ int32_t c = tTABLEIDCmprFn(pBlockIdx, &pFilterInfo->tbid);
+ if (c == 0) {
+ pIter->dIter.iBlockIdx++;
+ continue;
+ } else if (c < 0) {
+ ASSERT(0);
+ }
+ }
+
+ if (pFilterInfo->flag & TSDB_FILTER_FLAG_IGNORE_DROPPED_TABLE) {
+ SMetaInfo info;
+ if (metaGetInfo(pIter->dIter.pReader->pTsdb->pVnode->pMeta, pBlockIdx->uid, &info, NULL)) {
+ pIter->dIter.iBlockIdx++;
+ continue;
+ }
}
}
@@ -304,14 +301,24 @@ static int32_t tsdbSttFileDataIterNext(STsdbDataIter2* pIter, STsdbFilterInfo* p
for (;;) {
while (pIter->sIter.iRow < pIter->sIter.bData.nRow) {
if (pFilterInfo) {
+ int64_t uid = pIter->sIter.bData.uid ? pIter->sIter.bData.uid : pIter->sIter.bData.aUid[pIter->sIter.iRow];
if (pFilterInfo->flag & TSDB_FILTER_FLAG_BY_TABLEID) {
- int64_t uid = pIter->sIter.bData.uid ? pIter->sIter.bData.uid : pIter->sIter.bData.aUid[pIter->sIter.iRow];
if (pFilterInfo->tbid.uid == uid) {
pIter->sIter.iRow++;
continue;
}
}
+ if (pFilterInfo->flag & TSDB_FILTER_FLAG_IGNORE_DROPPED_TABLE) {
+ if (pIter->rowInfo.uid != uid) {
+ SMetaInfo info;
+ if (metaGetInfo(pIter->sIter.pReader->pTsdb->pVnode->pMeta, uid, &info, NULL)) {
+ pIter->sIter.iRow++;
+ continue;
+ }
+ }
+ }
+
if (pFilterInfo->flag & TSDB_FILTER_FLAG_BY_VERSION) {
if (pFilterInfo->sver > pIter->sIter.bData.aVersion[pIter->sIter.iRow] ||
pFilterInfo->ever < pIter->sIter.bData.aVersion[pIter->sIter.iRow]) {
@@ -395,6 +402,16 @@ static int32_t tsdbTombFileDataIterNext(STsdbDataIter2* pIter, STsdbFilterInfo*
if (pIter->tIter.iDelIdx < taosArrayGetSize(pIter->tIter.aDelIdx)) {
SDelIdx* pDelIdx = taosArrayGet(pIter->tIter.aDelIdx, pIter->tIter.iDelIdx);
+ if (pFilterInfo) {
+ if (pFilterInfo->flag & TSDB_FILTER_FLAG_IGNORE_DROPPED_TABLE) {
+ SMetaInfo info;
+ if (metaGetInfo(pIter->dIter.pReader->pTsdb->pVnode->pMeta, pDelIdx->uid, &info, NULL)) {
+ pIter->tIter.iDelIdx++;
+ continue;
+ }
+ }
+ }
+
code = tsdbReadDelData(pIter->tIter.pReader, pDelIdx, pIter->tIter.aDelData);
TSDB_CHECK_CODE(code, lino, _exit);
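All three iterators above (data file, stt file, tomb file) apply the same dropped-table check: if metaGetInfo() cannot resolve the uid in the vnode meta, the row, block index, or del index is skipped. A sketch of the shared predicate, written as a hypothetical helper that this patch does not actually introduce:

    // returns true when the uid no longer exists in meta, i.e. the table was dropped
    // and any leftover data for it should be ignored by the iterator
    static bool tsdbIterTableIsDropped(SMeta* pMeta, tb_uid_t uid) {
      SMetaInfo info;
      return metaGetInfo(pMeta, uid, &info, NULL) != 0;  // non-zero: uid not found in meta
    }

    // usage, mirroring the stt-file branch above:
    // if ((pFilterInfo->flag & TSDB_FILTER_FLAG_IGNORE_DROPPED_TABLE) &&
    //     tsdbIterTableIsDropped(pIter->sIter.pReader->pTsdb->pVnode->pMeta, uid)) {
    //   pIter->sIter.iRow++;
    //   continue;
    // }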
diff --git a/source/dnode/vnode/src/tsdb/tsdbMemTable.c b/source/dnode/vnode/src/tsdb/tsdbMemTable.c
index d0ff403bf7..f27a28acb3 100644
--- a/source/dnode/vnode/src/tsdb/tsdbMemTable.c
+++ b/source/dnode/vnode/src/tsdb/tsdbMemTable.c
@@ -13,6 +13,7 @@
* along with this program. If not, see .
*/
+#include
#include "tsdb.h"
#define MEM_MIN_HASH 1024
@@ -298,12 +299,12 @@ int64_t tsdbCountTbDataRows(STbData *pTbData) {
return rowsNum;
}
-void tsdbMemTableCountRows(SMemTable *pMemTable, SHashObj* pTableMap, int64_t *rowsNum) {
+void tsdbMemTableCountRows(SMemTable *pMemTable, SSHashObj* pTableMap, int64_t *rowsNum) {
taosRLockLatch(&pMemTable->latch);
for (int32_t i = 0; i < pMemTable->nBucket; ++i) {
STbData *pTbData = pMemTable->aBucket[i];
while (pTbData) {
- void* p = taosHashGet(pTableMap, &pTbData->uid, sizeof(pTbData->uid));
+ void* p = tSimpleHashGet(pTableMap, &pTbData->uid, sizeof(pTbData->uid));
if (p == NULL) {
pTbData = pTbData->next;
continue;
diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c
index fa8870835c..79f4a17f65 100644
--- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c
+++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c
@@ -16,22 +16,6 @@
#include "tsdb.h"
// SLDataIter =================================================
-struct SLDataIter {
- SRBTreeNode node;
- SSttBlk *pSttBlk;
- SDataFReader *pReader;
- int32_t iStt;
- int8_t backward;
- int32_t iSttBlk;
- int32_t iRow;
- SRowInfo rInfo;
- uint64_t uid;
- STimeWindow timeWindow;
- SVersionRange verRange;
- SSttBlockLoadInfo *pBlockLoadInfo;
- bool ignoreEarlierTs;
-};
-
SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols,
int32_t numOfSttTrigger) {
SSttBlockLoadInfo *pLoadInfo = taosMemoryCalloc(numOfSttTrigger, sizeof(SSttBlockLoadInfo));
@@ -88,6 +72,10 @@ void getLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo, int64_t *blocks, double
}
void *destroyLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo) {
+ if (pLoadInfo == NULL) {
+ return NULL;
+ }
+
for (int32_t i = 0; i < pLoadInfo->numOfStt; ++i) {
pLoadInfo[i].currentLoadBlockIndex = 1;
pLoadInfo[i].blockIndex[0] = -1;
@@ -264,25 +252,21 @@ static int32_t binarySearchForStartRowIndex(uint64_t *uidList, int32_t num, uint
}
}
-int32_t tLDataIterOpen(struct SLDataIter **pIter, SDataFReader *pReader, int32_t iStt, int8_t backward, uint64_t suid,
+int32_t tLDataIterOpen(struct SLDataIter *pIter, SDataFReader *pReader, int32_t iStt, int8_t backward, uint64_t suid,
uint64_t uid, STimeWindow *pTimeWindow, SVersionRange *pRange, SSttBlockLoadInfo *pBlockLoadInfo,
const char *idStr, bool strictTimeRange) {
int32_t code = TSDB_CODE_SUCCESS;
- *pIter = taosMemoryCalloc(1, sizeof(SLDataIter));
- if (*pIter == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
+ pIter->uid = uid;
+ pIter->pReader = pReader;
+ pIter->iStt = iStt;
+ pIter->backward = backward;
+ pIter->verRange.minVer = pRange->minVer;
+ pIter->verRange.maxVer = pRange->maxVer;
+ pIter->timeWindow.skey = pTimeWindow->skey;
+ pIter->timeWindow.ekey = pTimeWindow->ekey;
- (*pIter)->uid = uid;
- (*pIter)->pReader = pReader;
- (*pIter)->iStt = iStt;
- (*pIter)->backward = backward;
- (*pIter)->verRange = *pRange;
- (*pIter)->timeWindow = *pTimeWindow;
-
- (*pIter)->pBlockLoadInfo = pBlockLoadInfo;
+ pIter->pBlockLoadInfo = pBlockLoadInfo;
if (!pBlockLoadInfo->sttBlockLoaded) {
int64_t st = taosGetTimestampUs();
@@ -290,7 +274,7 @@ int32_t tLDataIterOpen(struct SLDataIter **pIter, SDataFReader *pReader, int32_t
code = tsdbReadSttBlk(pReader, iStt, pBlockLoadInfo->aSttBlk);
if (code) {
- goto _exit;
+ return code;
}
// only apply to the child tables, ordinary tables will not incur this filter procedure.
@@ -306,7 +290,7 @@ int32_t tLDataIterOpen(struct SLDataIter **pIter, SDataFReader *pReader, int32_t
// no qualified stt block existed
taosArrayClear(pBlockLoadInfo->aSttBlk);
- (*pIter)->iSttBlk = -1;
+ pIter->iSttBlk = -1;
double el = (taosGetTimestampUs() - st) / 1000.0;
tsdbDebug("load the last file info completed, elapsed time:%.2fms, %s", el, idStr);
return code;
@@ -339,31 +323,27 @@ int32_t tLDataIterOpen(struct SLDataIter **pIter, SDataFReader *pReader, int32_t
size_t size = taosArrayGetSize(pBlockLoadInfo->aSttBlk);
// find the start block
- (*pIter)->iSttBlk = binarySearchForStartBlock(pBlockLoadInfo->aSttBlk->pData, size, uid, backward);
- if ((*pIter)->iSttBlk != -1) {
- (*pIter)->pSttBlk = taosArrayGet(pBlockLoadInfo->aSttBlk, (*pIter)->iSttBlk);
- (*pIter)->iRow = ((*pIter)->backward) ? (*pIter)->pSttBlk->nRow : -1;
+ pIter->iSttBlk = binarySearchForStartBlock(pBlockLoadInfo->aSttBlk->pData, size, uid, backward);
+ if (pIter->iSttBlk != -1) {
+ pIter->pSttBlk = taosArrayGet(pBlockLoadInfo->aSttBlk, pIter->iSttBlk);
+ pIter->iRow = (pIter->backward) ? pIter->pSttBlk->nRow : -1;
- if ((!backward) && ((strictTimeRange && (*pIter)->pSttBlk->minKey >= (*pIter)->timeWindow.ekey) ||
- (!strictTimeRange && (*pIter)->pSttBlk->minKey > (*pIter)->timeWindow.ekey))) {
- (*pIter)->pSttBlk = NULL;
+ if ((!backward) && ((strictTimeRange && pIter->pSttBlk->minKey >= pIter->timeWindow.ekey) ||
+ (!strictTimeRange && pIter->pSttBlk->minKey > pIter->timeWindow.ekey))) {
+ pIter->pSttBlk = NULL;
}
- if (backward && ((strictTimeRange && (*pIter)->pSttBlk->maxKey <= (*pIter)->timeWindow.skey) ||
- (!strictTimeRange && (*pIter)->pSttBlk->maxKey < (*pIter)->timeWindow.skey))) {
- (*pIter)->pSttBlk = NULL;
- (*pIter)->ignoreEarlierTs = true;
+ if (backward && ((strictTimeRange && pIter->pSttBlk->maxKey <= pIter->timeWindow.skey) ||
+ (!strictTimeRange && pIter->pSttBlk->maxKey < pIter->timeWindow.skey))) {
+ pIter->pSttBlk = NULL;
+ pIter->ignoreEarlierTs = true;
}
}
return code;
-
-_exit:
- taosMemoryFree(*pIter);
- return code;
}
-void tLDataIterClose(SLDataIter *pIter) { taosMemoryFree(pIter); }
+void tLDataIterClose(SLDataIter *pIter) { /*taosMemoryFree(pIter); */}
void tLDataIterNextBlock(SLDataIter *pIter, const char *idStr) {
int32_t step = pIter->backward ? -1 : 1;
@@ -590,43 +570,38 @@ static FORCE_INLINE int32_t tLDataIterDescCmprFn(const SRBTreeNode *p1, const SR
int32_t tMergeTreeOpen(SMergeTree *pMTree, int8_t backward, SDataFReader *pFReader, uint64_t suid, uint64_t uid,
STimeWindow *pTimeWindow, SVersionRange *pVerRange, SSttBlockLoadInfo *pBlockLoadInfo,
- bool destroyLoadInfo, const char *idStr, bool strictTimeRange) {
+ bool destroyLoadInfo, const char *idStr, bool strictTimeRange, SLDataIter* pLDataIter) {
+ int32_t code = TSDB_CODE_SUCCESS;
+
pMTree->backward = backward;
pMTree->pIter = NULL;
- pMTree->pIterList = taosArrayInit(4, POINTER_BYTES);
- if (pMTree->pIterList == NULL) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
-
pMTree->idStr = idStr;
+
if (!pMTree->backward) { // asc
tRBTreeCreate(&pMTree->rbt, tLDataIterCmprFn);
} else { // desc
tRBTreeCreate(&pMTree->rbt, tLDataIterDescCmprFn);
}
- int32_t code = TSDB_CODE_SUCCESS;
pMTree->pLoadInfo = pBlockLoadInfo;
pMTree->destroyLoadInfo = destroyLoadInfo;
pMTree->ignoreEarlierTs = false;
for (int32_t i = 0; i < pFReader->pSet->nSttF; ++i) { // open all last file
- struct SLDataIter *pIter = NULL;
- code = tLDataIterOpen(&pIter, pFReader, i, pMTree->backward, suid, uid, pTimeWindow, pVerRange,
+ memset(&pLDataIter[i], 0, sizeof(SLDataIter));
+ code = tLDataIterOpen(&pLDataIter[i], pFReader, i, pMTree->backward, suid, uid, pTimeWindow, pVerRange,
&pMTree->pLoadInfo[i], pMTree->idStr, strictTimeRange);
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
- bool hasVal = tLDataIterNextRow(pIter, pMTree->idStr);
+ bool hasVal = tLDataIterNextRow(&pLDataIter[i], pMTree->idStr);
if (hasVal) {
- taosArrayPush(pMTree->pIterList, &pIter);
- tMergeTreeAddIter(pMTree, pIter);
+ tMergeTreeAddIter(pMTree, &pLDataIter[i]);
} else {
if (!pMTree->ignoreEarlierTs) {
- pMTree->ignoreEarlierTs = pIter->ignoreEarlierTs;
+ pMTree->ignoreEarlierTs = pLDataIter[i].ignoreEarlierTs;
}
- tLDataIterClose(pIter);
}
}
@@ -674,18 +649,8 @@ bool tMergeTreeNext(SMergeTree *pMTree) {
return pMTree->pIter != NULL;
}
-TSDBROW tMergeTreeGetRow(SMergeTree *pMTree) { return pMTree->pIter->rInfo.row; }
-
void tMergeTreeClose(SMergeTree *pMTree) {
- size_t size = taosArrayGetSize(pMTree->pIterList);
- for (int32_t i = 0; i < size; ++i) {
- SLDataIter *pIter = taosArrayGetP(pMTree->pIterList, i);
- tLDataIterClose(pIter);
- }
-
- pMTree->pIterList = taosArrayDestroy(pMTree->pIterList);
pMTree->pIter = NULL;
-
if (pMTree->destroyLoadInfo) {
pMTree->pLoadInfo = destroyLastBlockLoadInfo(pMTree->pLoadInfo);
pMTree->destroyLoadInfo = false;
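With SLDataIter no longer private to this file, tMergeTreeOpen stops heap-allocating one iterator per stt file (and drops the pIterList array that tracked them); the caller passes a pre-allocated SLDataIter[] and tLDataIterClose becomes a no-op. A hedged caller-side sketch, assuming pFReader, suid, uid, the time window, version range, load info, idStr, and a caller-owned iters[] array (sized to the fileset's stt file count) are already set up:

    SMergeTree mtree;
    int32_t code = tMergeTreeOpen(&mtree, backward, pFReader, suid, uid, &timeWindow, &verRange,
                                  pLoadInfo, false /*destroyLoadInfo*/, idStr,
                                  false /*strictTimeRange*/, iters /* caller-owned SLDataIter[] */);
    if (code == TSDB_CODE_SUCCESS) {
      while (tMergeTreeNext(&mtree)) {
        TSDBROW* pRow = tMergeTreeGetRow(&mtree);  // now returns a pointer rather than a copy
        // consume *pRow
      }
      tMergeTreeClose(&mtree);  // no per-iterator frees remain
    }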
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index 5bd41dd86f..eb15400d05 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -18,6 +18,13 @@
#include "tsimplehash.h"
#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC)
+#define getCurrentKeyInLastBlock(_r) ((_r)->currentKey)
+
+typedef enum {
+ READER_STATUS_SUSPEND = 0x1,
+ READER_STATUS_SHOULD_STOP = 0x2,
+ READER_STATUS_NORMAL = 0x3,
+} EReaderExecStatus;
typedef enum {
EXTERNAL_ROWS_PREV = 0x1,
@@ -108,6 +115,7 @@ typedef struct SLastBlockReader {
uint64_t uid;
SMergeTree mergeTree;
SSttBlockLoadInfo* pInfo;
+ int64_t currentKey;
} SLastBlockReader;
typedef struct SFilesetIter {
@@ -125,12 +133,12 @@ typedef struct SFileDataBlockInfo {
} SFileDataBlockInfo;
typedef struct SDataBlockIter {
- int32_t numOfBlocks;
- int32_t index;
- SArray* blockList; // SArray
- int32_t order;
- SDataBlk block; // current SDataBlk data
- SHashObj* pTableMap;
+ int32_t numOfBlocks;
+ int32_t index;
+ SArray* blockList; // SArray
+ int32_t order;
+ SDataBlk block; // current SDataBlk data
+ SSHashObj* pTableMap;
} SDataBlockIter;
typedef struct SFileBlockDumpInfo {
@@ -148,7 +156,8 @@ typedef struct STableUidList {
typedef struct SReaderStatus {
bool loadFromFile; // check file stage
bool composedDataBlock; // the returned data block is a composed block or not
- SHashObj* pTableMap; // SHash
+ bool mapDataCleaned; // mapData has been cleaned up already or not
+ SSHashObj* pTableMap; // SHash
STableBlockScanInfo** pTableIter; // table iterator used in building in-memory buffer data blocks.
STableUidList uidList; // check tables in uid order, to avoid the repeatly load of blocks in STT.
SFileBlockDumpInfo fBlockDumpInfo;
@@ -156,6 +165,9 @@ typedef struct SReaderStatus {
SBlockData fileBlockData;
SFilesetIter fileIter;
SDataBlockIter blockIter;
+ SLDataIter* pLDataIter;
+ SRowMerger merger;
+ SColumnInfoData* pPrimaryTsCol; // primary timestamp output col info data
} SReaderStatus;
typedef struct SBlockInfoBuf {
@@ -165,56 +177,66 @@ typedef struct SBlockInfoBuf {
int32_t numOfTables;
} SBlockInfoBuf;
+typedef struct STsdbReaderAttr {
+ STSchema* pSchema;
+ EReadMode readMode;
+ uint64_t rowsNum;
+ STimeWindow window;
+ bool freeBlock;
+ SVersionRange verRange;
+} STsdbReaderAttr;
+
+typedef struct SResultBlockInfo {
+ SSDataBlock* pResBlock;
+ bool freeBlock;
+ int64_t capacity;
+} SResultBlockInfo;
+
struct STsdbReader {
STsdb* pTsdb;
SVersionRange verRange;
TdThreadMutex readerMutex;
- bool suspended;
+ EReaderExecStatus flag;
uint64_t suid;
int16_t order;
- bool freeBlock;
EReadMode readMode;
uint64_t rowsNum;
STimeWindow window; // the primary query time window that applies to all queries
- SSDataBlock* pResBlock;
- int32_t capacity;
+ SResultBlockInfo resBlockInfo;
SReaderStatus status;
char* idStr; // query info handle, for debug purpose
int32_t type; // query type: 1. retrieve all data blocks, 2. retrieve direct prev|next rows
SBlockLoadSuppInfo suppInfo;
STsdbReadSnap* pReadSnap;
SIOCostSummary cost;
- STSchema* pSchema; // the newest version schema
- // STSchema* pMemSchema; // the previous schema for in-memory data, to avoid load schema too many times
- SSHashObj* pSchemaMap; // keep the retrieved schema info, to avoid the overhead by repeatly load schema
- SDataFReader* pFileReader; // the file reader
- SDelFReader* pDelFReader; // the del file reader
- SArray* pDelIdx; // del file block index;
- SBlockInfoBuf blockInfoBuf;
- int32_t step;
- STsdbReader* innerReader[2];
+ STSchema* pSchema; // the newest version schema
+ SSHashObj* pSchemaMap; // keep the retrieved schema info, to avoid the overhead by repeatly load schema
+ SDataFReader* pFileReader; // the file reader
+ SDelFReader* pDelFReader; // the del file reader
+ SArray* pDelIdx; // del file block index;
+ SBlockInfoBuf blockInfoBuf;
+ EContentData step;
+ STsdbReader* innerReader[2];
};
static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter);
static int buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t endKey, int32_t capacity,
STsdbReader* pReader);
static TSDBROW* getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader);
-static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pScanInfo, STsdbReader* pReader,
- SRowMerger* pMerger);
+static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pScanInfo, STsdbReader* pReader);
static int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts,
SRowMerger* pMerger, SVersionRange* pVerRange, const char* id);
-static int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
- STsdbReader* pReader);
+static int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, STsdbReader* pReader);
static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, SRow* pTSRow,
- STableBlockScanInfo* pInfo);
+ STableBlockScanInfo* pScanInfo);
static int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData,
int32_t rowIndex);
static void setComposedBlockFlag(STsdbReader* pReader, bool composed);
-static bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order,
+static bool hasBeenDropped(const SArray* pDelList, int32_t* index, int64_t key, int64_t ver, int32_t order,
SVersionRange* pVerRange);
static int32_t doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList,
- TSDBROW* pTSRow, STsdbReader* pReader, bool* freeTSRow);
+ TSDBROW* pResRow, STsdbReader* pReader, bool* freeTSRow);
static int32_t doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo,
STsdbReader* pReader, SRow** pTSRow);
static int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key,
@@ -225,7 +247,6 @@ static int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdb
static STsdb* getTsdbByRetentions(SVnode* pVnode, TSKEY winSKey, SRetention* retentions, const char* idstr,
int8_t* pLevel);
static SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_t level);
-static int64_t getCurrentKeyInLastBlock(SLastBlockReader* pLastBlockReader);
static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader);
static int32_t doBuildDataBlock(STsdbReader* pReader);
static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader);
@@ -233,9 +254,7 @@ static bool hasDataInFileBlock(const SBlockData* pBlockData, const SFil
static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter);
static int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order);
-static STableBlockScanInfo* getTableBlockScanInfo(SHashObj* pTableMap, uint64_t uid, const char* id);
-
-static FORCE_INLINE STSchema* getLatestTableSchema(STsdbReader* pReader, uint64_t uid);
+static STableBlockScanInfo* getTableBlockScanInfo(SSHashObj* pTableMap, uint64_t uid, const char* id);
static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); }
@@ -384,12 +403,11 @@ static int32_t uidComparFunc(const void* p1, const void* p2) {
}
// NOTE: speedup the whole processing by preparing the buffer for STableBlockScanInfo in batch model
-static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf* pBuf, const STableKeyInfo* idList,
+static SSHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf* pBuf, const STableKeyInfo* idList,
STableUidList* pUidList, int32_t numOfTables) {
// allocate buffer in order to load data blocks from file
// todo use simple hash instead, optimize the memory consumption
- SHashObj* pTableMap =
- taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+ SSHashObj* pTableMap = tSimpleHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
if (pTableMap == NULL) {
return NULL;
}
@@ -399,7 +417,7 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf
pUidList->tableUidList = taosMemoryMalloc(numOfTables * sizeof(uint64_t));
if (pUidList->tableUidList == NULL) {
- taosHashCleanup(pTableMap);
+ tSimpleHashCleanup(pTableMap);
return NULL;
}
@@ -421,7 +439,7 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf
pScanInfo->lastKeyInStt = ekey;
}
- taosHashPut(pTableMap, &pScanInfo->uid, sizeof(uint64_t), &pScanInfo, POINTER_BYTES);
+ tSimpleHashPut(pTableMap, &pScanInfo->uid, sizeof(uint64_t), &pScanInfo, POINTER_BYTES);
tsdbTrace("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pTsdbReader, pScanInfo->uid,
pScanInfo->lastKey, pTsdbReader->idStr);
}
@@ -436,9 +454,11 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf
return pTableMap;
}
-static void resetAllDataBlockScanInfo(SHashObj* pTableMap, int64_t ts, int32_t step) {
- STableBlockScanInfo** p = NULL;
- while ((p = taosHashIterate(pTableMap, p)) != NULL) {
+static void resetAllDataBlockScanInfo(SSHashObj* pTableMap, int64_t ts, int32_t step) {
+ void *p = NULL;
+ int32_t iter = 0;
+
+ while ((p = tSimpleHashIterate(pTableMap, p, &iter)) != NULL) {
STableBlockScanInfo* pInfo = *(STableBlockScanInfo**)p;
pInfo->iterInit = false;
@@ -478,13 +498,15 @@ static void clearBlockScanInfo(STableBlockScanInfo* p) {
tMapDataClear(&p->mapData);
}
-static void destroyAllBlockScanInfo(SHashObj* pTableMap) {
+static void destroyAllBlockScanInfo(SSHashObj* pTableMap) {
void* p = NULL;
- while ((p = taosHashIterate(pTableMap, p)) != NULL) {
+ int32_t iter = 0;
+
+ while ((p = tSimpleHashIterate(pTableMap, p, &iter)) != NULL) {
clearBlockScanInfo(*(STableBlockScanInfo**)p);
}
- taosHashCleanup(pTableMap);
+ tSimpleHashCleanup(pTableMap);
}
static bool isEmptyQueryTimeWindow(STimeWindow* pWindow) { return pWindow->skey > pWindow->ekey; }
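The reader's table map moves from SHashObj to the lighter SSHashObj, which changes the iteration idiom: tSimpleHashIterate carries an explicit int32_t cursor alongside the entry pointer. The pattern used by the two loops above, reduced to its core:

    void*   p    = NULL;
    int32_t iter = 0;   // explicit cursor required by tSimpleHashIterate
    while ((p = tSimpleHashIterate(pTableMap, p, &iter)) != NULL) {
      STableBlockScanInfo* pInfo = *(STableBlockScanInfo**)p;  // the map stores pointers
      // per-table reset or cleanup work goes here
    }
    tSimpleHashCleanup(pTableMap);  // releases the map itself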
@@ -715,6 +737,21 @@ void tsdbReleaseDataBlock(STsdbReader* pReader) {
}
}
+static int32_t initResBlockInfo(SResultBlockInfo* pResBlockInfo, int64_t capacity, SSDataBlock* pResBlock, SQueryTableDataCond* pCond) {
+ pResBlockInfo->capacity = capacity;
+ pResBlockInfo->pResBlock = pResBlock;
+ terrno = 0;
+
+ if (pResBlockInfo->pResBlock == NULL) {
+ pResBlockInfo->freeBlock = true;
+ pResBlockInfo->pResBlock = createResBlock(pCond, pResBlockInfo->capacity);
+ } else {
+ pResBlockInfo->freeBlock = false;
+ }
+
+ return terrno;
+}
+
static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsdbReader** ppReader, int32_t capacity,
SSDataBlock* pResBlock, const char* idstr) {
int32_t code = 0;
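initResBlockInfo centralizes result-block ownership: when the caller supplies no SSDataBlock, the reader creates one and sets freeBlock so it knows to destroy it later; the function clears terrno first and returns it, relying on createResBlock to set it on failure (as the removed branch below did). Caller-side sketch, matching the call a few hunks further down; the commented cleanup is an assumption, since the close path is not shown in this excerpt:

    code = initResBlockInfo(&pReader->resBlockInfo, capacity, pResBlock, pCond);
    if (code != TSDB_CODE_SUCCESS) {   // createResBlock() failed and left the error in terrno
      goto _end;
    }
    // on close (assumed, not shown here):
    // if (pReader->resBlockInfo.freeBlock) { blockDataDestroy(pReader->resBlockInfo.pResBlock); }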
@@ -734,21 +771,16 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
pReader->pTsdb = getTsdbByRetentions(pVnode, pCond->twindows.skey, pVnode->config.tsdbCfg.retentions, idstr, &level);
pReader->suid = pCond->suid;
pReader->order = pCond->order;
- pReader->capacity = capacity;
- pReader->pResBlock = pResBlock;
+
pReader->idStr = (idstr != NULL) ? taosStrdup(idstr) : NULL;
pReader->verRange = getQueryVerRange(pVnode, pCond, level);
pReader->type = pCond->type;
pReader->window = updateQueryTimeWindow(pReader->pTsdb, &pCond->twindows);
pReader->blockInfoBuf.numPerBucket = 1000; // 1000 tables per bucket
- if (pReader->pResBlock == NULL) {
- pReader->freeBlock = true;
- pReader->pResBlock = createResBlock(pCond, pReader->capacity);
- if (pReader->pResBlock == NULL) {
- code = terrno;
- goto _end;
- }
+ code = initResBlockInfo(&pReader->resBlockInfo, capacity, pResBlock, pCond);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _end;
}
if (pCond->numOfCols <= 0) {
@@ -766,6 +798,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
}
pSup->tsColAgg.colId = PRIMARYKEY_TIMESTAMP_COL_ID;
+ setColumnIdSlotList(pSup, pCond->colList, pCond->pSlotList, pCond->numOfCols);
code = tBlockDataCreate(&pReader->status.fileBlockData);
if (code != TSDB_CODE_SUCCESS) {
@@ -773,7 +806,20 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
goto _end;
}
- setColumnIdSlotList(pSup, pCond->colList, pCond->pSlotList, pCond->numOfCols);
+ if (pReader->suppInfo.colId[0] != PRIMARYKEY_TIMESTAMP_COL_ID) {
+ tsdbError("the first column isn't primary timestamp, %d, %s", pReader->suppInfo.colId[0], pReader->idStr);
+ terrno = TSDB_CODE_INVALID_PARA;
+ goto _end;
+ }
+
+ pReader->status.pPrimaryTsCol = taosArrayGet(pReader->resBlockInfo.pResBlock->pDataBlock, pSup->slotId[0]);
+ int32_t type = pReader->status.pPrimaryTsCol->info.type;
+ if (type != TSDB_DATA_TYPE_TIMESTAMP) {
+ tsdbError("the first column isn't primary timestamp in result block, actual: %s, %s", tDataTypes[type].name,
+ pReader->idStr);
+ terrno = TSDB_CODE_INVALID_PARA;
+ goto _end;
+ }
tsdbInitReaderLock(pReader);
@@ -794,7 +840,7 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader,
goto _end;
}
- int32_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
+ int32_t numOfTables = tSimpleHashGetSize(pReader->status.pTableMap);
SArray* aBlockIdx = (SArray*)taosLRUCacheValue(pFileReader->pTsdb->biCache, handle);
size_t num = taosArrayGetSize(aBlockIdx);
@@ -858,28 +904,42 @@ _end:
return code;
}
-static void cleanupTableScanInfo(SHashObj* pTableMap) {
+static void doCleanupTableScanInfo(STableBlockScanInfo* pScanInfo) {
+ // reset the index in last block when handling a new file
+ tMapDataClear(&pScanInfo->mapData);
+ taosArrayClear(pScanInfo->pBlockList);
+}
+
+static void cleanupTableScanInfo(SReaderStatus* pStatus) {
+ if (pStatus->mapDataCleaned) {
+ return;
+ }
+
+ SSHashObj* pTableMap = pStatus->pTableMap;
STableBlockScanInfo** px = NULL;
+ int32_t iter = 0;
+
while (1) {
- px = taosHashIterate(pTableMap, px);
+ px = tSimpleHashIterate(pTableMap, px, &iter);
if (px == NULL) {
break;
}
- // reset the index in last block when handing a new file
- tMapDataClear(&(*px)->mapData);
- taosArrayClear((*px)->pBlockList);
+ doCleanupTableScanInfo(*px);
}
+
+ pStatus->mapDataCleaned = true;
}
-static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SBlockNumber* pBlockNum) {
- int32_t numOfQTable = 0;
+static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SBlockNumber* pBlockNum, SArray* pTableScanInfoList) {
size_t sizeInDisk = 0;
size_t numOfTables = taosArrayGetSize(pIndexList);
int64_t st = taosGetTimestampUs();
- cleanupTableScanInfo(pReader->status.pTableMap);
+ cleanupTableScanInfo(&pReader->status);
+ // set the flag for the new file
+ pReader->status.mapDataCleaned = false;
for (int32_t i = 0; i < numOfTables; ++i) {
SBlockIdx* pBlockIdx = taosArrayGet(pIndexList, i);
STableBlockScanInfo* pScanInfo = getTableBlockScanInfo(pReader->status.pTableMap, pBlockIdx->uid, pReader->idStr);
@@ -933,7 +993,7 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SBlockN
}
if (taosArrayGetSize(pScanInfo->pBlockList) > 0) {
- numOfQTable += 1;
+ taosArrayPush(pTableScanInfoList, &pScanInfo);
}
}
@@ -944,8 +1004,8 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SBlockN
tsdbDebug(
"load block of %ld tables completed, blocks:%d in %d tables, last-files:%d, block-info-size:%.2f Kb, elapsed "
"time:%.2f ms %s",
- numOfTables, pBlockNum->numOfBlocks, numOfQTable, pBlockNum->numOfLastFiles, sizeInDisk / 1000.0, el,
- pReader->idStr);
+ numOfTables, pBlockNum->numOfBlocks, (int32_t)taosArrayGetSize(pTableScanInfoList), pBlockNum->numOfLastFiles,
+ sizeInDisk / 1000.0, el, pReader->idStr);
pReader->cost.numOfBlocks += total;
pReader->cost.headFileLoadTime += el;
@@ -1181,7 +1241,7 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader) {
SBlockData* pBlockData = &pStatus->fileBlockData;
SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter);
SDataBlk* pBlock = getCurrentBlock(pBlockIter);
- SSDataBlock* pResBlock = pReader->pResBlock;
+ SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
int32_t numOfOutputCols = pSupInfo->numOfCols;
int32_t code = TSDB_CODE_SUCCESS;
@@ -1229,8 +1289,8 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader) {
endIndex += step;
int32_t dumpedRows = asc ? (endIndex - pDumpInfo->rowIndex) : (pDumpInfo->rowIndex - endIndex);
- if (dumpedRows > pReader->capacity) { // output buffer check
- dumpedRows = pReader->capacity;
+ if (dumpedRows > pReader->resBlockInfo.capacity) { // output buffer check
+ dumpedRows = pReader->resBlockInfo.capacity;
}
int32_t i = 0;
@@ -1312,16 +1372,40 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader) {
return TSDB_CODE_SUCCESS;
}
+static FORCE_INLINE STSchema* getTableSchemaImpl(STsdbReader* pReader, uint64_t uid) {
+ ASSERT(pReader->pSchema == NULL);
+
+ int32_t code = metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, -1, &pReader->pSchema);
+ if (code != TSDB_CODE_SUCCESS || pReader->pSchema == NULL) {
+ terrno = code;
+ tsdbError("failed to get table schema, uid:%" PRIu64 ", it may have been dropped, ver:-1, %s", uid, pReader->idStr);
+ return NULL;
+ }
+
+ code = tsdbRowMergerInit(&pReader->status.merger, pReader->pSchema);
+ if (code != TSDB_CODE_SUCCESS) {
+ terrno = code;
+ tsdbError("failed to init merger, code:%s, %s", tstrerror(code), pReader->idStr);
+ return NULL;
+ }
+
+ return pReader->pSchema;
+}
+
static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockIter, SBlockData* pBlockData,
uint64_t uid) {
- int32_t code = 0;
- int64_t st = taosGetTimestampUs();
+ int32_t code = 0;
+ STSchema* pSchema = pReader->pSchema;
+ int64_t st = taosGetTimestampUs();
tBlockDataReset(pBlockData);
- STSchema* pSchema = getLatestTableSchema(pReader, uid);
- if (pSchema == NULL) {
- tsdbDebug("%p table uid:%" PRIu64 " has been dropped, no data existed, %s", pReader, uid, pReader->idStr);
- return code;
+
+ if (pReader->pSchema == NULL) {
+ pSchema = getTableSchemaImpl(pReader, uid);
+ if (pSchema == NULL) {
+ tsdbDebug("%p table uid:%" PRIu64 " has been dropped, no data existed, %s", pReader, uid, pReader->idStr);
+ return code;
+ }
}
SBlockLoadSuppInfo* pSup = &pReader->suppInfo;
@@ -1424,7 +1508,7 @@ static int32_t doSetCurrentBlock(SDataBlockIter* pBlockIter, const char* idStr)
return TSDB_CODE_SUCCESS;
}
-static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIter, int32_t numOfBlocks) {
+static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIter, int32_t numOfBlocks, SArray* pTableList) {
bool asc = ASCENDING_TRAVERSE(pReader->order);
SBlockOrderSupporter sup = {0};
@@ -1433,7 +1517,7 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte
pBlockIter->pTableMap = pReader->status.pTableMap;
// access data blocks according to the offset of each block in asc/desc order.
- int32_t numOfTables = (int32_t)taosHashGetSize(pReader->status.pTableMap);
+ int32_t numOfTables = taosArrayGetSize(pTableList);
int64_t st = taosGetTimestampUs();
int32_t code = initBlockOrderSupporter(&sup, numOfTables);
@@ -1442,17 +1526,10 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte
}
int32_t cnt = 0;
- void* ptr = NULL;
- while (1) {
- ptr = taosHashIterate(pReader->status.pTableMap, ptr);
- if (ptr == NULL) {
- break;
- }
- STableBlockScanInfo* pTableScanInfo = *(STableBlockScanInfo**)ptr;
- if (pTableScanInfo->pBlockList == NULL || taosArrayGetSize(pTableScanInfo->pBlockList) == 0) {
- continue;
- }
+ for (int32_t i = 0; i < numOfTables; ++i) {
+ STableBlockScanInfo* pTableScanInfo = taosArrayGetP(pTableList, i);
+ ASSERT(pTableScanInfo->pBlockList != NULL && taosArrayGetSize(pTableScanInfo->pBlockList) > 0);
size_t num = taosArrayGetSize(pTableScanInfo->pBlockList);
sup.numOfBlocksPerTable[sup.numOfTables] = num;
@@ -1728,7 +1805,7 @@ static void getBlockToLoadInfo(SDataBlockToLoadInfo* pInfo, SFileDataBlockInfo*
pInfo->overlapWithLastBlock = !(pBlock->maxKey.ts < tsLast || pBlock->minKey.ts > tsLast);
}
- pInfo->moreThanCapcity = pBlock->nRow > pReader->capacity;
+ pInfo->moreThanCapcity = pBlock->nRow > pReader->resBlockInfo.capacity;
pInfo->partiallyRequired = dataBlockPartiallyRequired(&pReader->window, &pReader->verRange, pBlock);
pInfo->overlapWithKeyInBuf = keyOverlapFileBlock(keyInBuf, pBlock, &pReader->verRange);
}
@@ -1775,10 +1852,10 @@ static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo*
return TSDB_CODE_SUCCESS;
}
- SSDataBlock* pBlock = pReader->pResBlock;
+ SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock;
int64_t st = taosGetTimestampUs();
- int32_t code = buildDataBlockFromBufImpl(pBlockScanInfo, endKey, pReader->capacity, pReader);
+ int32_t code = buildDataBlockFromBufImpl(pBlockScanInfo, endKey, pReader->resBlockInfo.capacity, pReader);
blockDataUpdateTsWindow(pBlock, pReader->suppInfo.slotId[0]);
pBlock->info.id.uid = pBlockScanInfo->uid;
@@ -1809,7 +1886,7 @@ static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pB
int64_t nextKey = pBlockData->aTSKEY[pDumpInfo->rowIndex + step];
if (nextKey != key) { // merge is not needed
- code = doAppendRowFromFileBlock(pReader->pResBlock, pReader, pBlockData, pDumpInfo->rowIndex);
+ code = doAppendRowFromFileBlock(pReader->resBlockInfo.pResBlock, pReader, pBlockData, pDumpInfo->rowIndex);
if (code) {
return code;
}
@@ -1832,13 +1909,14 @@ static bool nextRowFromLastBlocks(SLastBlockReader* pLastBlockReader, STableBloc
return false;
}
- TSDBROW row = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
- TSDBKEY k = TSDBROW_KEY(&row);
- pScanInfo->lastKeyInStt = k.ts;
+ TSDBROW* pRow = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
+ int64_t key = pRow->pBlockData->aTSKEY[pRow->iRow];
+ int64_t ver = pRow->pBlockData->aVersion[pRow->iRow];
- if (!hasBeenDropped(pScanInfo->delSkyline, &pScanInfo->lastBlockDelIndex, &k, pLastBlockReader->order, pVerRange)) {
- // the qualifed ts may equal to k.ts, only a greater version one.
- // here we need to fallback one step.
+ pLastBlockReader->currentKey = key;
+ pScanInfo->lastKeyInStt = key;
+
+ if (!hasBeenDropped(pScanInfo->delSkyline, &pScanInfo->lastBlockDelIndex, key, ver, pLastBlockReader->order, pVerRange)) {
return true;
}
}
@@ -1855,7 +1933,7 @@ static bool tryCopyDistinctRowFromSttBlock(TSDBROW* fRow, SLastBlockReader* pLas
if (hasVal) {
int64_t next1 = getCurrentKeyInLastBlock(pLastBlockReader);
if (next1 != ts) {
- code = doAppendRowFromFileBlock(pReader->pResBlock, pReader, fRow->pBlockData, fRow->iRow);
+ code = doAppendRowFromFileBlock(pReader->resBlockInfo.pResBlock, pReader, fRow->pBlockData, fRow->iRow);
if (code) {
return code;
}
@@ -1864,7 +1942,7 @@ static bool tryCopyDistinctRowFromSttBlock(TSDBROW* fRow, SLastBlockReader* pLas
return code;
}
} else {
- code = doAppendRowFromFileBlock(pReader->pResBlock, pReader, fRow->pBlockData, fRow->iRow);
+ code = doAppendRowFromFileBlock(pReader->resBlockInfo.pResBlock, pReader, fRow->pBlockData, fRow->iRow);
if (code) {
return code;
}
@@ -1876,27 +1954,11 @@ static bool tryCopyDistinctRowFromSttBlock(TSDBROW* fRow, SLastBlockReader* pLas
return code;
}
-static FORCE_INLINE STSchema* getLatestTableSchema(STsdbReader* pReader, uint64_t uid) {
- if (pReader->pSchema != NULL) {
- return pReader->pSchema;
- }
-
- int32_t code = metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, -1, &pReader->pSchema);
- if (code != TSDB_CODE_SUCCESS || pReader->pSchema == NULL) {
- tsdbError("failed to get table schema, uid:%" PRIu64 ", it may have been dropped, ver:-1, %s", uid, pReader->idStr);
- }
-
- return pReader->pSchema;
-}
-
static FORCE_INLINE STSchema* doGetSchemaForTSRow(int32_t sversion, STsdbReader* pReader, uint64_t uid) {
- int32_t code = 0;
-
// always set the newest schema version in pReader->pSchema
if (pReader->pSchema == NULL) {
- code = metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, -1, &pReader->pSchema);
- if (code != TSDB_CODE_SUCCESS) {
- terrno = code;
+ STSchema* ps = getTableSchemaImpl(pReader, uid);
+ if (ps == NULL) {
return NULL;
}
}
@@ -1911,7 +1973,7 @@ static FORCE_INLINE STSchema* doGetSchemaForTSRow(int32_t sversion, STsdbReader*
}
STSchema* ptr = NULL;
- code = metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &ptr);
+ int32_t code = metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &ptr);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
return NULL;
@@ -1927,7 +1989,7 @@ static FORCE_INLINE STSchema* doGetSchemaForTSRow(int32_t sversion, STsdbReader*
static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow,
SIterInfo* pIter, int64_t key, SLastBlockReader* pLastBlockReader) {
- SRowMerger merge = {0};
+ SRowMerger* pMerger = &pReader->status.merger;
SRow* pTSRow = NULL;
SBlockData* pBlockData = &pReader->status.fileBlockData;
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
@@ -1940,6 +2002,15 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
TSDBKEY k = TSDBROW_KEY(pRow);
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
+ // the merger is not initialized yet because pReader->pSchema has not been loaded
+ if (pMerger->pArray == NULL) {
+ ASSERT(pReader->pSchema == NULL);
+ STSchema* ps = getTableSchemaImpl(pReader, pBlockScanInfo->uid);
+ if (ps == NULL) {
+ return terrno;
+ }
+ }
+
int64_t minKey = 0;
if (pReader->order == TSDB_ORDER_ASC) {
minKey = INT64_MAX; // chosen the minimum value
@@ -1969,32 +2040,33 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
}
}
+ // todo remove init
bool init = false;
// ASC: file block ---> last block -----> imem -----> mem
// DESC: mem -----> imem -----> last block -----> file block
if (pReader->order == TSDB_ORDER_ASC) {
if (minKey == key) {
- init = true; // todo check if pReader->pSchema is null or not
- int32_t code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
+ init = true;
+ int32_t code = tsdbRowMergerAdd(pMerger, &fRow, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader);
}
if (minKey == tsLast) {
- TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
+ TSDBROW* fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
if (init) {
- tsdbRowMergerAdd(&merge, &fRow1, NULL);
+ tsdbRowMergerAdd(pMerger, fRow1, NULL);
} else {
init = true;
- int32_t code = tsdbRowMergerInit(&merge, NULL, &fRow1, pReader->pSchema);
+ int32_t code = tsdbRowMergerAdd(pMerger, fRow1, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
- doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge, &pReader->verRange, pReader->idStr);
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->verRange, pReader->idStr);
}
if (minKey == k.ts) {
@@ -2003,15 +2075,15 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
return terrno;
}
if (init) {
- tsdbRowMergerAdd(&merge, pRow, pSchema);
+ tsdbRowMergerAdd(pMerger, pRow, pSchema);
} else {
init = true;
- int32_t code = tsdbRowMergerInit(&merge, NULL, pRow, pSchema);
+ int32_t code = tsdbRowMergerAdd(pMerger, pRow, pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
- int32_t code = doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
+ int32_t code = doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, pReader);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -2024,54 +2096,54 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
return terrno;
}
- int32_t code = tsdbRowMergerInit(&merge, NULL, pRow, pSchema);
+ int32_t code = tsdbRowMergerAdd(pMerger, pRow, pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- code = doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
- if (code != TSDB_CODE_SUCCESS || merge.pTSchema == NULL) {
+ code = doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, pReader);
+ if (code != TSDB_CODE_SUCCESS || pMerger->pTSchema == NULL) {
return code;
}
}
if (minKey == tsLast) {
- TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
+ TSDBROW* fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
if (init) {
- tsdbRowMergerAdd(&merge, &fRow1, NULL);
+ tsdbRowMergerAdd(pMerger, fRow1, NULL);
} else {
init = true;
- int32_t code = tsdbRowMergerInit(&merge, NULL, &fRow1, pReader->pSchema);
+ int32_t code = tsdbRowMergerAdd(pMerger, fRow1, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
- doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge, &pReader->verRange, pReader->idStr);
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->verRange, pReader->idStr);
}
if (minKey == key) {
if (init) {
- tsdbRowMergerAdd(&merge, &fRow, NULL);
+ tsdbRowMergerAdd(pMerger, &fRow, NULL);
} else {
init = true;
- int32_t code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
+ int32_t code = tsdbRowMergerAdd(pMerger, &fRow, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader);
}
}
- int32_t code = tsdbRowMergerGetRow(&merge, &pTSRow);
+ int32_t code = tsdbRowMergerGetRow(pMerger, &pTSRow);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- code = doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo);
+ code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo);
taosMemoryFree(pTSRow);
- tsdbRowMergerClear(&merge);
+ tsdbRowMergerClear(pMerger);
return code;
}
@@ -2079,14 +2151,19 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader, STsdbReader* pReader,
STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData,
bool mergeBlockData) {
+ SRowMerger* pMerger = &pReader->status.merger;
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
- int64_t tsLastBlock = getCurrentKeyInLastBlock(pLastBlockReader);
- bool copied = false;
- int32_t code = TSDB_CODE_SUCCESS;
- SRow* pTSRow = NULL;
- SRowMerger merge = {0};
- TSDBROW fRow = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
- tsdbTrace("fRow ptr:%p, %d, uid:%" PRIu64 ", %s", fRow.pBlockData, fRow.iRow, pLastBlockReader->uid, pReader->idStr);
+
+ int64_t tsLastBlock = getCurrentKeyInLastBlock(pLastBlockReader);
+ bool copied = false;
+ int32_t code = TSDB_CODE_SUCCESS;
+ SRow* pTSRow = NULL;
+ TSDBROW* pRow = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
+
+ // create local variable to hold the row value
+ TSDBROW fRow = {.iRow = pRow->iRow, .type = TSDBROW_COL_FMT, .pBlockData = pRow->pBlockData};
+
+ tsdbTrace("fRow ptr:%p, %d, uid:%" PRIu64 ", %s", pRow->pBlockData, pRow->iRow, pLastBlockReader->uid, pReader->idStr);
// only last block exists
if ((!mergeBlockData) || (tsLastBlock != pBlockData->aTSKEY[pDumpInfo->rowIndex])) {
@@ -2099,51 +2176,51 @@ static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader,
pBlockScanInfo->lastKey = tsLastBlock;
return TSDB_CODE_SUCCESS;
} else {
- code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
+ code = tsdbRowMergerAdd(pMerger, &fRow, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
- tsdbRowMergerAdd(&merge, &fRow1, NULL);
- doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge, &pReader->verRange, pReader->idStr);
+ TSDBROW* pRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
+ tsdbRowMergerAdd(pMerger, pRow1, NULL);
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, pMerger, &pReader->verRange, pReader->idStr);
- code = tsdbRowMergerGetRow(&merge, &pTSRow);
+ code = tsdbRowMergerGetRow(pMerger, &pTSRow);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- code = doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo);
+ code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo);
taosMemoryFree(pTSRow);
- tsdbRowMergerClear(&merge);
+ tsdbRowMergerClear(pMerger);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
} else { // not merge block data
- code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
+ code = tsdbRowMergerAdd(pMerger, &fRow, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge, &pReader->verRange, pReader->idStr);
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, pMerger, &pReader->verRange, pReader->idStr);
// merge with block data if ts == key
if (tsLastBlock == pBlockData->aTSKEY[pDumpInfo->rowIndex]) {
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader);
}
- code = tsdbRowMergerGetRow(&merge, &pTSRow);
+ code = tsdbRowMergerGetRow(pMerger, &pTSRow);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- code = doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo);
+ code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo);
taosMemoryFree(pTSRow);
- tsdbRowMergerClear(&merge);
+ tsdbRowMergerClear(pMerger);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -2156,6 +2233,16 @@ static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader,
static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader* pLastBlockReader, int64_t key,
STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData) {
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
+ SRowMerger* pMerger = &pReader->status.merger;
+
+ // the merger is not initialized yet, since pReader->pSchema is not initialized
+ if (pMerger->pArray == NULL) {
+ ASSERT(pReader->pSchema == NULL);
+ STSchema* ps = getTableSchemaImpl(pReader, pBlockScanInfo->uid);
+ if (ps == NULL) {
+ return terrno;
+ }
+ }
if (hasDataInFileBlock(pBlockData, pDumpInfo)) {
// no last block available, only data block exists
@@ -2172,30 +2259,28 @@ static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader
if (key < ts) { // imem, mem are all empty, file blocks (data blocks and last block) exist
return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader);
} else if (key == ts) {
- SRow* pTSRow = NULL;
- SRowMerger merge = {0};
-
- int32_t code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
+ SRow* pTSRow = NULL;
+ int32_t code = tsdbRowMergerAdd(pMerger, &fRow, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader);
- TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
- tsdbRowMergerAdd(&merge, &fRow1, NULL);
+ TSDBROW* pRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
+ tsdbRowMergerAdd(pMerger, pRow1, NULL);
- doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, &merge, &pReader->verRange, pReader->idStr);
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, pMerger, &pReader->verRange, pReader->idStr);
- code = tsdbRowMergerGetRow(&merge, &pTSRow);
+ code = tsdbRowMergerGetRow(pMerger, &pTSRow);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- code = doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo);
+ code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo);
taosMemoryFree(pTSRow);
- tsdbRowMergerClear(&merge);
+ tsdbRowMergerClear(pMerger);
return code;
} else {
return TSDB_CODE_SUCCESS;
@@ -2210,7 +2295,7 @@ static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader
static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData,
SLastBlockReader* pLastBlockReader) {
- SRowMerger merge = {0};
+ SRowMerger* pMerger = &pReader->status.merger;
SRow* pTSRow = NULL;
int32_t code = TSDB_CODE_SUCCESS;
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
@@ -2238,6 +2323,15 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
return code;
}
+ // the merger is not initialized yet, since pReader->pSchema is not initialized
+ if (pMerger->pArray == NULL) {
+ ASSERT(pReader->pSchema == NULL);
+ STSchema* ps = getTableSchemaImpl(pReader, pBlockScanInfo->uid);
+ if (ps == NULL) {
+ return terrno;
+ }
+ }
+
int64_t minKey = 0;
if (ASCENDING_TRAVERSE(pReader->order)) {
minKey = INT64_MAX; // let's find the minimum
@@ -2283,42 +2377,41 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
if (minKey == key) {
init = true;
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
- code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
+ code = tsdbRowMergerAdd(pMerger, &fRow, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader);
}
if (minKey == tsLast) {
- TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
+ TSDBROW* pRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
if (init) {
- tsdbRowMergerAdd(&merge, &fRow1, NULL);
+ tsdbRowMergerAdd(pMerger, pRow1, NULL);
} else {
init = true;
- code = tsdbRowMergerInit(&merge, NULL, &fRow1, pReader->pSchema);
+ code = tsdbRowMergerAdd(pMerger, pRow1, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
- doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge, &pReader->verRange, pReader->idStr);
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->verRange, pReader->idStr);
}
if (minKey == ik.ts) {
if (init) {
- tsdbRowMergerAdd(&merge, piRow, piSchema);
+ tsdbRowMergerAdd(pMerger, piRow, piSchema);
} else {
init = true;
- code = tsdbRowMergerInit(&merge, pSchema, piRow, piSchema);
+ code = tsdbRowMergerAdd(pMerger, piRow, piSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
- code = doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, &merge,
- pReader);
+ code = doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, pReader);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -2326,20 +2419,15 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
if (minKey == k.ts) {
if (init) {
- if (merge.pTSchema == NULL) {
- return code;
- }
-
- tsdbRowMergerAdd(&merge, pRow, pSchema);
+ tsdbRowMergerAdd(pMerger, pRow, pSchema);
} else {
// STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
- code = tsdbRowMergerInit(&merge, NULL, pRow, pSchema);
+ code = tsdbRowMergerAdd(pMerger, pRow, pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
- code = doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge,
- pReader);
+ code = doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, pReader);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -2347,13 +2435,12 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
} else {
if (minKey == k.ts) {
init = true;
- code = tsdbRowMergerInit(&merge, NULL, pRow, pSchema);
+ code = tsdbRowMergerAdd(pMerger, pRow, pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- code = doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge,
- pReader);
+ code = doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, pReader);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -2361,66 +2448,57 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
if (minKey == ik.ts) {
if (init) {
- tsdbRowMergerAdd(&merge, piRow, piSchema);
+ tsdbRowMergerAdd(pMerger, piRow, piSchema);
} else {
init = true;
- // STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(piRow), pReader, pBlockScanInfo->uid);
- code = tsdbRowMergerInit(&merge, pSchema, piRow, piSchema);
+ code = tsdbRowMergerAdd(pMerger, piRow, piSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
- code = doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, &merge,
- pReader);
+ code = doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, pReader);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
if (minKey == tsLast) {
- TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
+ TSDBROW* pRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
if (init) {
- tsdbRowMergerAdd(&merge, &fRow1, NULL);
+ tsdbRowMergerAdd(pMerger, pRow1, NULL);
} else {
init = true;
- code = tsdbRowMergerInit(&merge, NULL, &fRow1, pReader->pSchema);
+ code = tsdbRowMergerAdd(pMerger, pRow1, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
- doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge, &pReader->verRange, pReader->idStr);
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->verRange, pReader->idStr);
}
if (minKey == key) {
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
if (!init) {
- code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
+ code = tsdbRowMergerAdd(pMerger, &fRow, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
} else {
- if (merge.pTSchema == NULL) {
- return code;
- }
- tsdbRowMergerAdd(&merge, &fRow, NULL);
+ tsdbRowMergerAdd(pMerger, &fRow, NULL);
}
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader);
}
}
- if (merge.pTSchema == NULL) {
- return code;
- }
-
- code = tsdbRowMergerGetRow(&merge, &pTSRow);
+ code = tsdbRowMergerGetRow(pMerger, &pTSRow);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- code = doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo);
+ code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo);
taosMemoryFree(pTSRow);
- tsdbRowMergerClear(&merge);
+ tsdbRowMergerClear(pMerger);
return code;
}
@@ -2514,8 +2592,7 @@ static bool isValidFileBlockRow(SBlockData* pBlockData, SFileBlockDumpInfo* pDum
return false;
}
- TSDBKEY k = {.ts = ts, .version = ver};
- if (hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->fileDelIndex, &k, pReader->order,
+ if (hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->fileDelIndex, ts, ver, pReader->order,
&pReader->verRange)) {
return false;
}
@@ -2547,7 +2624,7 @@ static bool initLastBlockReader(SLastBlockReader* pLBlockReader, STableBlockScan
pScanInfo->uid, pReader->idStr);
int32_t code = tMergeTreeOpen(&pLBlockReader->mergeTree, (pLBlockReader->order == TSDB_ORDER_DESC),
pReader->pFileReader, pReader->suid, pScanInfo->uid, &w, &pLBlockReader->verRange,
- pLBlockReader->pInfo, false, pReader->idStr, false);
+ pLBlockReader->pInfo, false, pReader->idStr, false, pReader->status.pLDataIter);
if (code != TSDB_CODE_SUCCESS) {
return false;
}
@@ -2555,11 +2632,6 @@ static bool initLastBlockReader(SLastBlockReader* pLBlockReader, STableBlockScan
return nextRowFromLastBlocks(pLBlockReader, pScanInfo, &pReader->verRange);
}
-static int64_t getCurrentKeyInLastBlock(SLastBlockReader* pLastBlockReader) {
- TSDBROW row = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
- return TSDBROW_TS(&row);
-}
-
static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader) { return pLastBlockReader->mergeTree.pIter != NULL; }
bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo) {
@@ -2571,6 +2643,7 @@ bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo*
int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key,
STsdbReader* pReader) {
+ SRowMerger* pMerger = &pReader->status.merger;
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
bool copied = false;
int32_t code = tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo, &copied);
@@ -2578,6 +2651,15 @@ int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBloc
return code;
}
+ // the merger is not initialized yet, since pReader->pSchema is not initialized
+ if (pMerger->pArray == NULL) {
+ ASSERT(pReader->pSchema == NULL);
+ STSchema* ps = getTableSchemaImpl(pReader, pBlockScanInfo->uid);
+ if (ps == NULL) {
+ return terrno;
+ }
+ }
+
if (copied) {
pBlockScanInfo->lastKey = key;
return TSDB_CODE_SUCCESS;
@@ -2585,23 +2667,21 @@ int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBloc
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
SRow* pTSRow = NULL;
- SRowMerger merge = {0};
-
- code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
+ code = tsdbRowMergerAdd(pMerger, &fRow, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- code = tsdbRowMergerGetRow(&merge, &pTSRow);
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader);
+ code = tsdbRowMergerGetRow(pMerger, &pTSRow);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- code = doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo);
+ code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo);
taosMemoryFree(pTSRow);
- tsdbRowMergerClear(&merge);
+ tsdbRowMergerClear(pMerger);
return code;
}
}
@@ -2680,7 +2760,7 @@ static int32_t loadNeighborIfOverlap(SFileDataBlockInfo* pBlockInfo, STableBlock
}
static void updateComposedBlockInfo(STsdbReader* pReader, double el, STableBlockScanInfo* pBlockScanInfo) {
- SSDataBlock* pResBlock = pReader->pResBlock;
+ SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
pResBlock->info.id.uid = (pBlockScanInfo != NULL) ? pBlockScanInfo->uid : 0;
pResBlock->info.dataLoad = 1;
@@ -2695,7 +2775,7 @@ static void updateComposedBlockInfo(STsdbReader* pReader, double el, STableBlock
static int32_t buildComposedDataBlock(STsdbReader* pReader) {
int32_t code = TSDB_CODE_SUCCESS;
- SSDataBlock* pResBlock = pReader->pResBlock;
+ SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter);
SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader;
@@ -2717,7 +2797,7 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
// it is a clean block, load it directly
if (isCleanFileDataBlock(pReader, pBlockInfo, pBlock, pBlockScanInfo, keyInBuf, pLastBlockReader) &&
- pBlock->nRow <= pReader->capacity) {
+ pBlock->nRow <= pReader->resBlockInfo.capacity) {
if (asc || ((!asc) && (!hasDataInLastBlock(pLastBlockReader)))) {
code = copyBlockDataToSDataBlock(pReader);
if (code) {
@@ -2781,7 +2861,7 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
break;
}
- if (pResBlock->info.rows >= pReader->capacity) {
+ if (pResBlock->info.rows >= pReader->resBlockInfo.capacity) {
break;
}
}
@@ -2905,18 +2985,24 @@ TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader)
}
}
-static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) {
+static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum, SArray* pTableList) {
SReaderStatus* pStatus = &pReader->status;
pBlockNum->numOfBlocks = 0;
pBlockNum->numOfLastFiles = 0;
- size_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
+ size_t numOfTables = tSimpleHashGetSize(pReader->status.pTableMap);
SArray* pIndexList = taosArrayInit(numOfTables, sizeof(SBlockIdx));
while (1) {
+ // only check here, since iterating over in-memory data is fast
+ if (pReader->flag == READER_STATUS_SHOULD_STOP) {
+ tsdbWarn("tsdb reader is stopped ASAP, %s", pReader->idStr);
+ return TSDB_CODE_SUCCESS;
+ }
+
bool hasNext = false;
int32_t code = filesetIteratorNext(&pStatus->fileIter, pReader, &hasNext);
- if (code) {
+ if (code != TSDB_CODE_SUCCESS) {
taosArrayDestroy(pIndexList);
return code;
}
@@ -2933,7 +3019,7 @@ static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) {
}
if (taosArrayGetSize(pIndexList) > 0 || pReader->pFileReader->pSet->nSttF > 0) {
- code = doLoadFileBlock(pReader, pIndexList, pBlockNum);
+ code = doLoadFileBlock(pReader, pIndexList, pBlockNum, pTableList);
if (code != TSDB_CODE_SUCCESS) {
taosArrayDestroy(pIndexList);
return code;
@@ -2979,18 +3065,18 @@ static void resetTableListIndex(SReaderStatus* pStatus) {
pList->currentIndex = 0;
uint64_t uid = pList->tableUidList[0];
- pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
+ pStatus->pTableIter = tSimpleHashGet(pStatus->pTableMap, &uid, sizeof(uid));
}
static bool moveToNextTable(STableUidList* pOrderedCheckInfo, SReaderStatus* pStatus) {
pOrderedCheckInfo->currentIndex += 1;
- if (pOrderedCheckInfo->currentIndex >= taosHashGetSize(pStatus->pTableMap)) {
+ if (pOrderedCheckInfo->currentIndex >= tSimpleHashGetSize(pStatus->pTableMap)) {
pStatus->pTableIter = NULL;
return false;
}
uint64_t uid = pOrderedCheckInfo->tableUidList[pOrderedCheckInfo->currentIndex];
- pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
+ pStatus->pTableIter = tSimpleHashGet(pStatus->pTableMap, &uid, sizeof(uid));
return (pStatus->pTableIter != NULL);
}
@@ -3000,18 +3086,27 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
STableUidList* pUidList = &pStatus->uidList;
int32_t code = TSDB_CODE_SUCCESS;
- if (taosHashGetSize(pStatus->pTableMap) == 0) {
+ if (tSimpleHashGetSize(pStatus->pTableMap) == 0) {
return TSDB_CODE_SUCCESS;
}
- SSDataBlock* pResBlock = pReader->pResBlock;
+ SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
while (1) {
+ if (pReader->flag == READER_STATUS_SHOULD_STOP) {
+ tsdbWarn("tsdb reader is stopped ASAP, %s", pReader->idStr);
+ return TSDB_CODE_SUCCESS;
+ }
+
// load the last data block of current table
STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)pStatus->pTableIter;
- bool hasVal = initLastBlockReader(pLastBlockReader, pScanInfo, pReader);
- if (!hasVal) {
+ // reset the index in the last block when handling a new file
+ doCleanupTableScanInfo(pScanInfo);
+ pStatus->mapDataCleaned = true;
+
+ bool hasDataInLastFile = initLastBlockReader(pLastBlockReader, pScanInfo, pReader);
+ if (!hasDataInLastFile) {
bool hasNexTable = moveToNextTable(pUidList, pStatus);
if (!hasNexTable) {
return TSDB_CODE_SUCCESS;
@@ -3034,7 +3129,7 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
return code;
}
- if (pResBlock->info.rows >= pReader->capacity) {
+ if (pResBlock->info.rows >= pReader->resBlockInfo.capacity) {
break;
}
}
@@ -3100,7 +3195,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
SBlockData* pBData = &pReader->status.fileBlockData;
tBlockDataReset(pBData);
- SSDataBlock* pResBlock = pReader->pResBlock;
+ SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
tsdbDebug("load data in last block firstly, due to desc scan data, %s", pReader->idStr);
int64_t st = taosGetTimestampUs();
@@ -3118,7 +3213,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
return code;
}
- if (pResBlock->info.rows >= pReader->capacity) {
+ if (pResBlock->info.rows >= pReader->resBlockInfo.capacity) {
break;
}
}
@@ -3133,7 +3228,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
pResBlock->info.rows, el, pReader->idStr);
}
} else { // whole block is required, return it directly
- SDataBlockInfo* pInfo = &pReader->pResBlock->info;
+ SDataBlockInfo* pInfo = &pReader->resBlockInfo.pResBlock->info;
pInfo->rows = pBlock->nRow;
pInfo->id.uid = pScanInfo->uid;
pInfo->dataLoad = 0;
@@ -3162,7 +3257,7 @@ static int32_t doSumFileBlockRows(STsdbReader* pReader, SDataFReader* pFileReade
goto _end;
}
- int32_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
+ int32_t numOfTables = tSimpleHashGetSize(pReader->status.pTableMap);
SArray* aBlockIdx = (SArray*)taosLRUCacheValue(pFileReader->pTsdb->biCache, handle);
size_t num = taosArrayGetSize(aBlockIdx);
@@ -3172,14 +3267,13 @@ static int32_t doSumFileBlockRows(STsdbReader* pReader, SDataFReader* pFileReade
}
SBlockIdx* pBlockIdx = NULL;
- int32_t i = 0;
for (int32_t i = 0; i < num; ++i) {
pBlockIdx = (SBlockIdx*)taosArrayGet(aBlockIdx, i);
if (pBlockIdx->suid != pReader->suid) {
continue;
}
- STableBlockScanInfo** p = taosHashGet(pReader->status.pTableMap, &pBlockIdx->uid, sizeof(pBlockIdx->uid));
+ STableBlockScanInfo** p = tSimpleHashGet(pReader->status.pTableMap, &pBlockIdx->uid, sizeof(pBlockIdx->uid));
if (p == NULL) {
continue;
}
@@ -3225,13 +3319,13 @@ static int32_t doSumSttBlockRows(STsdbReader* pReader) {
taosArrayClear(pBlockLoadInfo->aSttBlk);
continue;
}
- for (int32_t i = 0; i < size; ++i) {
- SSttBlk* p = taosArrayGet(pBlockLoadInfo->aSttBlk, i);
+ for (int32_t j = 0; j < size; ++j) {
+ SSttBlk* p = taosArrayGet(pBlockLoadInfo->aSttBlk, j);
pReader->rowsNum += p->nRow;
}
} else {
- for (int32_t i = 0; i < size; ++i) {
- SSttBlk* p = taosArrayGet(pBlockLoadInfo->aSttBlk, i);
+ for (int32_t j = 0; j < size; ++j) {
+ SSttBlk* p = taosArrayGet(pBlockLoadInfo->aSttBlk, j);
uint64_t s = p->suid;
if (s < pReader->suid) {
continue;
@@ -3301,13 +3395,6 @@ static int32_t buildBlockFromBufferSequentially(STsdbReader* pReader) {
STableUidList* pUidList = &pStatus->uidList;
while (1) {
- // if (pStatus->pTableIter == NULL) {
- // pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, NULL);
- // if (pStatus->pTableIter == NULL) {
- // return TSDB_CODE_SUCCESS;
- // }
- // }
-
STableBlockScanInfo** pBlockScanInfo = pStatus->pTableIter;
initMemDataIterator(*pBlockScanInfo, pReader);
@@ -3317,7 +3404,7 @@ static int32_t buildBlockFromBufferSequentially(STsdbReader* pReader) {
return code;
}
- if (pReader->pResBlock->info.rows > 0) {
+ if (pReader->resBlockInfo.pResBlock->info.rows > 0) {
return TSDB_CODE_SUCCESS;
}
@@ -3335,7 +3422,7 @@ static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter)
SDataBlk* pBlock = getCurrentBlock(pBlockIter);
SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter);
if (pBlockInfo) {
- STableBlockScanInfo* pScanInfo = taosHashGet(pBlockIter->pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid));
+ STableBlockScanInfo* pScanInfo = tSimpleHashGet(pBlockIter->pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid));
if (pScanInfo) {
lastKey = pScanInfo->lastKey;
}
@@ -3352,20 +3439,24 @@ static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter)
static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBlockIter) {
SBlockNumber num = {0};
- int32_t code = moveToNextFile(pReader, &num);
+ SArray* pTableList = taosArrayInit(40, POINTER_BYTES);
+
+ int32_t code = moveToNextFile(pReader, &num, pTableList);
if (code != TSDB_CODE_SUCCESS) {
+ taosArrayDestroy(pTableList);
return code;
}
// all data files are consumed, try data in buffer
if (num.numOfBlocks + num.numOfLastFiles == 0) {
pReader->status.loadFromFile = false;
+ taosArrayDestroy(pTableList);
return code;
}
// initialize the block iterator for a new fileset
if (num.numOfBlocks > 0) {
- code = initBlockIterator(pReader, pBlockIter, num.numOfBlocks);
+ code = initBlockIterator(pReader, pBlockIter, num.numOfBlocks, pTableList);
} else { // no block data, only last block exists
tBlockDataReset(&pReader->status.fileBlockData);
resetDataBlockIterator(pBlockIter, pReader->order);
@@ -3374,6 +3465,7 @@ static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBl
// set the correct start position according to the query time window
initBlockDumpInfo(pReader, pBlockIter);
+ taosArrayDestroy(pTableList);
return code;
}
@@ -3395,7 +3487,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
return code;
}
- if (pReader->pResBlock->info.rows > 0) {
+ if (pReader->resBlockInfo.pResBlock->info.rows > 0) {
return TSDB_CODE_SUCCESS;
}
@@ -3420,7 +3512,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
return code;
}
- if (pReader->pResBlock->info.rows > 0) {
+ if (pReader->resBlockInfo.pResBlock->info.rows > 0) {
return TSDB_CODE_SUCCESS;
}
}
@@ -3473,7 +3565,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
return code;
}
- if (pReader->pResBlock->info.rows > 0) {
+ if (pReader->resBlockInfo.pResBlock->info.rows > 0) {
return TSDB_CODE_SUCCESS;
}
}
@@ -3537,7 +3629,7 @@ SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_
return (SVersionRange){.minVer = startVer, .maxVer = endVer};
}
-bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order, SVersionRange* pVerRange) {
+bool hasBeenDropped(const SArray* pDelList, int32_t* index, int64_t key, int64_t ver, int32_t order, SVersionRange* pVerRange) {
if (pDelList == NULL) {
return false;
}
@@ -3549,29 +3641,29 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32
if (asc) {
if (*index >= num - 1) {
TSDBKEY* last = taosArrayGetLast(pDelList);
- ASSERT(pKey->ts >= last->ts);
+ ASSERT(key >= last->ts);
- if (pKey->ts > last->ts) {
+ if (key > last->ts) {
return false;
- } else if (pKey->ts == last->ts) {
+ } else if (key == last->ts) {
TSDBKEY* prev = taosArrayGet(pDelList, num - 2);
- return (prev->version >= pKey->version && prev->version <= pVerRange->maxVer &&
+ return (prev->version >= ver && prev->version <= pVerRange->maxVer &&
prev->version >= pVerRange->minVer);
}
} else {
TSDBKEY* pCurrent = taosArrayGet(pDelList, *index);
TSDBKEY* pNext = taosArrayGet(pDelList, (*index) + 1);
- if (pKey->ts < pCurrent->ts) {
+ if (key < pCurrent->ts) {
return false;
}
- if (pCurrent->ts <= pKey->ts && pNext->ts >= pKey->ts && pCurrent->version >= pKey->version &&
+ if (pCurrent->ts <= key && pNext->ts >= key && pCurrent->version >= ver &&
pVerRange->maxVer >= pCurrent->version) {
return true;
}
- while (pNext->ts <= pKey->ts && (*index) < num - 1) {
+ while (pNext->ts <= key && (*index) < num - 1) {
(*index) += 1;
if ((*index) < num - 1) {
@@ -3583,7 +3675,7 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32
continue;
}
- if (pCurrent->ts <= pKey->ts && pNext->ts >= pKey->ts && pCurrent->version >= pKey->version &&
+ if (pCurrent->ts <= key && pNext->ts >= key && pCurrent->version >= ver &&
pVerRange->maxVer >= pCurrent->version) {
return true;
}
@@ -3596,10 +3688,10 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32
if (*index <= 0) {
TSDBKEY* pFirst = taosArrayGet(pDelList, 0);
- if (pKey->ts < pFirst->ts) {
+ if (key < pFirst->ts) {
return false;
- } else if (pKey->ts == pFirst->ts) {
- return pFirst->version >= pKey->version;
+ } else if (key == pFirst->ts) {
+ return pFirst->version >= ver;
} else {
ASSERT(0);
}
@@ -3607,15 +3699,15 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32
TSDBKEY* pCurrent = taosArrayGet(pDelList, *index);
TSDBKEY* pPrev = taosArrayGet(pDelList, (*index) - 1);
- if (pKey->ts > pCurrent->ts) {
+ if (key > pCurrent->ts) {
return false;
}
- if (pPrev->ts <= pKey->ts && pCurrent->ts >= pKey->ts && pPrev->version >= pKey->version) {
+ if (pPrev->ts <= key && pCurrent->ts >= key && pPrev->version >= ver) {
return true;
}
- while (pPrev->ts >= pKey->ts && (*index) > 1) {
+ while (pPrev->ts >= key && (*index) > 1) {
(*index) += step;
if ((*index) >= 1) {
@@ -3627,7 +3719,7 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32
continue;
}
- if (pPrev->ts <= pKey->ts && pCurrent->ts >= pKey->ts && pPrev->version >= pKey->version) {
+ if (pPrev->ts <= key && pCurrent->ts >= key && pPrev->version >= ver) {
return true;
}
}
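
The hunks above change hasBeenDropped() to take the row timestamp and version as plain int64_t parameters instead of a TSDBKEY pointer, so callers such as getValidMemRow() and isValidFileBlockRow() (see the call-site hunks below) no longer need to build a temporary key. A minimal standalone sketch of the same kind of drop check follows; the SDelRange layout and isDroppedSimple() are hypothetical stand-ins, not the real delete-skyline traversal:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  int64_t startTs;
  int64_t endTs;
  int64_t version;  // a delete issued at this version covers rows whose version is <= it
} SDelRange;

// a row (ts, ver) is considered dropped if some delete range covers its timestamp
// and was issued at a version not older than the row itself
static bool isDroppedSimple(const SDelRange* pRanges, int32_t num, int64_t ts, int64_t ver) {
  for (int32_t i = 0; i < num; ++i) {
    if (ts >= pRanges[i].startTs && ts <= pRanges[i].endTs && ver <= pRanges[i].version) {
      return true;
    }
  }
  return false;
}

int main(void) {
  SDelRange ranges[] = {{.startTs = 100, .endTs = 200, .version = 5}};
  printf("%d\n", isDroppedSimple(ranges, 1, 150, 3));  // 1: covered by the delete
  printf("%d\n", isDroppedSimple(ranges, 1, 150, 9));  // 0: written after the delete
  return 0;
}
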
@@ -3655,7 +3747,7 @@ TSDBROW* getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* p
// it is a valid data version
if ((key.version <= pReader->verRange.maxVer && key.version >= pReader->verRange.minVer) &&
- (!hasBeenDropped(pDelList, &pIter->index, &key, pReader->order, &pReader->verRange))) {
+ (!hasBeenDropped(pDelList, &pIter->index, key.ts, key.version, pReader->order, &pReader->verRange))) {
return pRow;
}
@@ -3674,14 +3766,15 @@ TSDBROW* getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* p
}
if (key.version <= pReader->verRange.maxVer && key.version >= pReader->verRange.minVer &&
- (!hasBeenDropped(pDelList, &pIter->index, &key, pReader->order, &pReader->verRange))) {
+ (!hasBeenDropped(pDelList, &pIter->index, key.ts, key.version, pReader->order, &pReader->verRange))) {
return pRow;
}
}
}
-int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
- STsdbReader* pReader) {
+int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, STsdbReader* pReader) {
+ SRowMerger* pMerger = &pReader->status.merger;
+
while (1) {
pIter->hasVal = tsdbTbDataIterNext(pIter->iter);
if (!pIter->hasVal) {
@@ -3760,10 +3853,10 @@ static int32_t checkForNeighborFileBlock(STsdbReader* pReader, STableBlockScanIn
return code;
}
-int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pScanInfo, STsdbReader* pReader,
- SRowMerger* pMerger) {
+int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pScanInfo, STsdbReader* pReader) {
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
+ SRowMerger* pMerger = &pReader->status.merger;
bool asc = ASCENDING_TRAVERSE(pReader->order);
int64_t key = pBlockData->aTSKEY[pDumpInfo->rowIndex];
int32_t step = asc ? 1 : -1;
@@ -3801,8 +3894,8 @@ int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockSc
while (nextRowFromLastBlocks(pLastBlockReader, pScanInfo, pVerRange)) {
int64_t next1 = getCurrentKeyInLastBlock(pLastBlockReader);
if (next1 == ts) {
- TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
- tsdbRowMergerAdd(pMerger, &fRow1, NULL);
+ TSDBROW* pRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
+ tsdbRowMergerAdd(pMerger, pRow1, NULL);
} else {
tsdbTrace("uid:%" PRIu64 " last del index:%d, del range:%d, lastKeyInStt:%" PRId64 ", %s", pScanInfo->uid,
pScanInfo->lastBlockDelIndex, (int32_t)taosArrayGetSize(pScanInfo->delSkyline), pScanInfo->lastKeyInStt,
@@ -3842,7 +3935,6 @@ int32_t doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter,
}
}
- SRowMerger merge = {0};
terrno = 0;
int32_t code = 0;
@@ -3854,8 +3946,7 @@ int32_t doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter,
return terrno;
}
- STSchema* ps = (pReader->pSchema != NULL)? pReader->pSchema:pTSchema;
- code = tsdbRowMergerInit(&merge, ps, &current, pTSchema);
+ code = tsdbRowMergerAdd(&pReader->status.merger, &current, pTSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -3865,28 +3956,28 @@ int32_t doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter,
return terrno;
}
- tsdbRowMergerAdd(&merge, pNextRow, pTSchema1);
+ tsdbRowMergerAdd(&pReader->status.merger, pNextRow, pTSchema1);
} else { // let's merge rows in file block
- code = tsdbRowMergerInit(&merge, NULL, &current, pReader->pSchema);
+ code = tsdbRowMergerAdd(&pReader->status.merger, &current, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- tsdbRowMergerAdd(&merge, pNextRow, NULL);
+ tsdbRowMergerAdd(&pReader->status.merger, pNextRow, NULL);
}
- code = doMergeRowsInBuf(pIter, uid, TSDBROW_TS(&current), pDelList, &merge, pReader);
+ code = doMergeRowsInBuf(pIter, uid, TSDBROW_TS(&current), pDelList, pReader);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- code = tsdbRowMergerGetRow(&merge, &pResRow->pTSRow);
+ code = tsdbRowMergerGetRow(&pReader->status.merger, &pResRow->pTSRow);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
pResRow->type = TSDBROW_ROW_FMT;
- tsdbRowMergerClear(&merge);
+ tsdbRowMergerClear(&pReader->status.merger);
*freeTSRow = true;
return TSDB_CODE_SUCCESS;
@@ -3894,7 +3985,7 @@ int32_t doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter,
int32_t doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
SRow** pTSRow) {
- SRowMerger merge = {0};
+ SRowMerger* pMerger = &pReader->status.merger;
TSDBKEY k = TSDBROW_KEY(pRow);
TSDBKEY ik = TSDBROW_KEY(piRow);
@@ -3909,46 +4000,43 @@ int32_t doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* p
}
if (ASCENDING_TRAVERSE(pReader->order)) { // ascending order imem --> mem
- int32_t code = tsdbRowMergerInit(&merge, pSchema, piRow, piSchema);
+ int32_t code = tsdbRowMergerAdd(&pReader->status.merger, piRow, piSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- code = doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, &merge,
- pReader);
+ code = doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, pReader);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- tsdbRowMergerAdd(&merge, pRow, pSchema);
+ tsdbRowMergerAdd(&pReader->status.merger, pRow, pSchema);
code =
- doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
+ doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, pReader);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
} else {
- int32_t code = tsdbRowMergerInit(&merge, NULL, pRow, pSchema);
- if (code != TSDB_CODE_SUCCESS || merge.pTSchema == NULL) {
+ int32_t code = tsdbRowMergerAdd(&pReader->status.merger, pRow, pSchema);
+ if (code != TSDB_CODE_SUCCESS || pMerger->pTSchema == NULL) {
return code;
}
- code =
- doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
+ code = doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, pReader);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- tsdbRowMergerAdd(&merge, piRow, piSchema);
- code = doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, &merge,
- pReader);
+ tsdbRowMergerAdd(&pReader->status.merger, piRow, piSchema);
+ code = doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, pReader);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
- int32_t code = tsdbRowMergerGetRow(&merge, pTSRow);
- tsdbRowMergerClear(&merge);
+ int32_t code = tsdbRowMergerGetRow(pMerger, pTSRow);
+ tsdbRowMergerClear(pMerger);
return code;
}
@@ -4073,11 +4161,8 @@ int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, S
int32_t code = TSDB_CODE_SUCCESS;
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
- if (pReader->suppInfo.colId[i] == PRIMARYKEY_TIMESTAMP_COL_ID) {
- SColumnInfoData* pColData = taosArrayGet(pResBlock->pDataBlock, pSupInfo->slotId[i]);
- ((int64_t*)pColData->pData)[outputRowIndex] = pBlockData->aTSKEY[rowIndex];
- i += 1;
- }
+ ((int64_t*)pReader->status.pPrimaryTsCol->pData)[outputRowIndex] = pBlockData->aTSKEY[rowIndex];
+ i += 1;
SColVal cv = {0};
int32_t numOfInputCols = pBlockData->nColData;
@@ -4119,7 +4204,7 @@ int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, S
int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t endKey, int32_t capacity,
STsdbReader* pReader) {
- SSDataBlock* pBlock = pReader->pResBlock;
+ SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock;
int32_t code = TSDB_CODE_SUCCESS;
do {
@@ -4163,10 +4248,12 @@ int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t e
// TODO refactor: with createDataBlockScanInfo
int32_t tsdbSetTableList(STsdbReader* pReader, const void* pTableList, int32_t num) {
- int32_t size = taosHashGetSize(pReader->status.pTableMap);
+ int32_t size = tSimpleHashGetSize(pReader->status.pTableMap);
STableBlockScanInfo** p = NULL;
- while ((p = taosHashIterate(pReader->status.pTableMap, p)) != NULL) {
+ int32_t iter = 0;
+
+ while ((p = tSimpleHashIterate(pReader->status.pTableMap, p, &iter)) != NULL) {
clearBlockScanInfo(*p);
}
@@ -4184,7 +4271,7 @@ int32_t tsdbSetTableList(STsdbReader* pReader, const void* pTableList, int32_t n
pReader->status.uidList.tableUidList = (uint64_t*)p1;
}
- taosHashClear(pReader->status.pTableMap);
+ tSimpleHashClear(pReader->status.pTableMap);
STableUidList* pUidList = &pReader->status.uidList;
pUidList->currentIndex = 0;
@@ -4205,7 +4292,7 @@ int32_t tsdbSetTableList(STsdbReader* pReader, const void* pTableList, int32_t n
pInfo->lastKeyInStt = ekey;
}
- taosHashPut(pReader->status.pTableMap, &pInfo->uid, sizeof(uint64_t), &pInfo, POINTER_BYTES);
+ tSimpleHashPut(pReader->status.pTableMap, &pInfo->uid, sizeof(uint64_t), &pInfo, POINTER_BYTES);
}
return TDB_CODE_SUCCESS;
@@ -4225,7 +4312,7 @@ void* tsdbGetIvtIdx(SMeta* pMeta) {
return metaGetIvtIdx(pMeta);
}
-uint64_t getReaderMaxVersion(STsdbReader* pReader) { return pReader->verRange.maxVer; }
+uint64_t tsdbGetReaderMaxVersion(STsdbReader* pReader) { return pReader->verRange.maxVer; }
static int32_t doOpenReaderImpl(STsdbReader* pReader) {
SReaderStatus* pStatus = &pReader->status;
@@ -4327,6 +4414,10 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
}
}
+ if (pReader->pSchema != NULL) {
+ tsdbRowMergerInit(&pReader->status.merger, pReader->pSchema);
+ }
+
pReader->pSchemaMap = tSimpleHashInit(8, taosFastHash);
if (pReader->pSchemaMap == NULL) {
tsdbError("failed init schema hash for reader %s", pReader->idStr);
@@ -4351,7 +4442,13 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
goto _err;
}
- pReader->suspended = true;
+ pReader->status.pLDataIter = taosMemoryCalloc(pVnode->config.sttTrigger, sizeof(SLDataIter));
+ if (pReader->status.pLDataIter == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+
+ pReader->flag = READER_STATUS_SUSPEND;
if (countOnly) {
pReader->readMode = READ_MODE_COUNT_ONLY;
@@ -4367,29 +4464,42 @@ _err:
return code;
}
+static void clearSharedPtr(STsdbReader* p) {
+ p->status.pLDataIter = NULL;
+ p->status.pTableMap = NULL;
+ p->status.uidList.tableUidList = NULL;
+ p->pReadSnap = NULL;
+ p->pSchema = NULL;
+ p->pSchemaMap = NULL;
+}
+
+static void setSharedPtr(STsdbReader* pDst, const STsdbReader* pSrc) {
+ pDst->status.pTableMap = pSrc->status.pTableMap;
+ pDst->status.pLDataIter = pSrc->status.pLDataIter;
+ pDst->status.uidList = pSrc->status.uidList;
+ pDst->pSchema = pSrc->pSchema;
+ pDst->pSchemaMap = pSrc->pSchemaMap;
+ pDst->pReadSnap = pSrc->pReadSnap;
+
+ if (pDst->pSchema) {
+ tsdbRowMergerInit(&pDst->status.merger, pDst->pSchema);
+ }
+}
+
void tsdbReaderClose(STsdbReader* pReader) {
if (pReader == NULL) {
return;
}
tsdbAcquireReader(pReader);
+
{
if (pReader->innerReader[0] != NULL || pReader->innerReader[1] != NULL) {
STsdbReader* p = pReader->innerReader[0];
-
- p->status.pTableMap = NULL;
- p->status.uidList.tableUidList = NULL;
- p->pReadSnap = NULL;
- p->pSchema = NULL;
- p->pSchemaMap = NULL;
+ clearSharedPtr(p);
p = pReader->innerReader[1];
-
- p->status.pTableMap = NULL;
- p->status.uidList.tableUidList = NULL;
- p->pReadSnap = NULL;
- p->pSchema = NULL;
- p->pSchemaMap = NULL;
+ clearSharedPtr(p);
tsdbReaderClose(pReader->innerReader[0]);
tsdbReaderClose(pReader->innerReader[1]);
@@ -4405,15 +4515,15 @@ void tsdbReaderClose(STsdbReader* pReader) {
}
}
- if (pReader->freeBlock) {
- pReader->pResBlock = blockDataDestroy(pReader->pResBlock);
+ if (pReader->resBlockInfo.freeBlock) {
+ pReader->resBlockInfo.pResBlock = blockDataDestroy(pReader->resBlockInfo.pResBlock);
}
taosMemoryFree(pSupInfo->colId);
tBlockDataDestroy(&pReader->status.fileBlockData);
cleanupDataBlockIterator(&pReader->status.blockIter);
- size_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
+ size_t numOfTables = tSimpleHashGetSize(pReader->status.pTableMap);
if (pReader->status.pTableMap != NULL) {
destroyAllBlockScanInfo(pReader->status.pTableMap);
clearBlockScanInfoBuf(&pReader->blockInfoBuf);
@@ -4440,7 +4550,8 @@ void tsdbReaderClose(STsdbReader* pReader) {
tsdbUninitReaderLock(pReader);
- taosMemoryFree(pReader->status.uidList.tableUidList);
+ taosMemoryFreeClear(pReader->status.pLDataIter);
+ taosMemoryFreeClear(pReader->status.uidList.tableUidList);
SIOCostSummary* pCost = &pReader->cost;
SFilesetIter* pFilesetIter = &pReader->status.fileIter;
@@ -4467,6 +4578,8 @@ void tsdbReaderClose(STsdbReader* pReader) {
pCost->initDelSkylineIterTime, pReader->idStr);
taosMemoryFree(pReader->idStr);
+
+ tsdbRowMergerCleanup(&pReader->status.merger);
taosMemoryFree(pReader->pSchema);
tSimpleHashCleanup(pReader->pSchemaMap);
@@ -4495,8 +4608,9 @@ int32_t tsdbReaderSuspend(STsdbReader* pReader) {
// resetDataBlockScanInfo excluding lastKey
STableBlockScanInfo** p = NULL;
+ int32_t iter = 0;
- while ((p = taosHashIterate(pStatus->pTableMap, p)) != NULL) {
+ while ((p = tSimpleHashIterate(pStatus->pTableMap, p, &iter)) != NULL) {
STableBlockScanInfo* pInfo = *(STableBlockScanInfo**)p;
pInfo->iterInit = false;
@@ -4512,13 +4626,13 @@ int32_t tsdbReaderSuspend(STsdbReader* pReader) {
}
pInfo->delSkyline = taosArrayDestroy(pInfo->delSkyline);
- // pInfo->lastKey = ts;
}
} else {
// resetDataBlockScanInfo excluding lastKey
STableBlockScanInfo** p = NULL;
+ int32_t iter = 0;
- while ((p = taosHashIterate(pStatus->pTableMap, p)) != NULL) {
+ while ((p = tSimpleHashIterate(pStatus->pTableMap, p, &iter)) != NULL) {
STableBlockScanInfo* pInfo = *(STableBlockScanInfo**)p;
pInfo->iterInit = false;
@@ -4534,13 +4648,12 @@ int32_t tsdbReaderSuspend(STsdbReader* pReader) {
}
pInfo->delSkyline = taosArrayDestroy(pInfo->delSkyline);
- // pInfo->lastKey = ts;
}
pBlockScanInfo = pStatus->pTableIter == NULL ? NULL : *pStatus->pTableIter;
if (pBlockScanInfo) {
// save lastKey to restore memory iterator
- STimeWindow w = pReader->pResBlock->info.window;
+ STimeWindow w = pReader->resBlockInfo.pResBlock->info.window;
pBlockScanInfo->lastKey = ASCENDING_TRAVERSE(pReader->order) ? w.ekey : w.skey;
// reset current current table's data block scan info,
@@ -4564,8 +4677,7 @@ int32_t tsdbReaderSuspend(STsdbReader* pReader) {
tsdbUntakeReadSnap(pReader, pReader->pReadSnap, false);
pReader->pReadSnap = NULL;
-
- pReader->suspended = true;
+ pReader->flag = READER_STATUS_SUSPEND;
tsdbDebug("reader: %p suspended uid %" PRIu64 " in this query %s", pReader, pBlockScanInfo ? pBlockScanInfo->uid : 0,
pReader->idStr);
@@ -4582,7 +4694,7 @@ static int32_t tsdbSetQueryReseek(void* pQHandle) {
code = tsdbTryAcquireReader(pReader);
if (code == 0) {
- if (pReader->suspended) {
+ if (pReader->flag == READER_STATUS_SUSPEND) {
tsdbReleaseReader(pReader);
return code;
}
@@ -4607,7 +4719,7 @@ int32_t tsdbReaderResume(STsdbReader* pReader) {
// restore reader's state
// task snapshot
- int32_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
+ int32_t numOfTables = tSimpleHashGetSize(pReader->status.pTableMap);
if (numOfTables > 0) {
qTrace("tsdb/reader: %p, take snapshot", pReader);
code = tsdbTakeReadSnap(pReader, tsdbSetQueryReseek, &pReader->pReadSnap);
@@ -4625,19 +4737,11 @@ int32_t tsdbReaderResume(STsdbReader* pReader) {
STsdbReader* pNextReader = pReader->innerReader[1];
// we need only one row
- pPrevReader->capacity = 1;
- pPrevReader->status.pTableMap = pReader->status.pTableMap;
- pPrevReader->status.uidList = pReader->status.uidList;
- pPrevReader->pSchema = pReader->pSchema;
- pPrevReader->pSchemaMap = pReader->pSchemaMap;
- pPrevReader->pReadSnap = pReader->pReadSnap;
+ pPrevReader->resBlockInfo.capacity = 1;
+ setSharedPtr(pPrevReader, pReader);
- pNextReader->capacity = 1;
- pNextReader->status.pTableMap = pReader->status.pTableMap;
- pNextReader->status.uidList = pReader->status.uidList;
- pNextReader->pSchema = pReader->pSchema;
- pNextReader->pSchemaMap = pReader->pSchemaMap;
- pNextReader->pReadSnap = pReader->pReadSnap;
+ pNextReader->resBlockInfo.capacity = 1;
+ setSharedPtr(pNextReader, pReader);
code = doOpenReaderImpl(pPrevReader);
if (code != TSDB_CODE_SUCCESS) {
@@ -4646,8 +4750,7 @@ int32_t tsdbReaderResume(STsdbReader* pReader) {
}
}
- pReader->suspended = false;
-
+ pReader->flag = READER_STATUS_NORMAL;
tsdbDebug("reader: %p resumed uid %" PRIu64 ", numOfTable:%" PRId32 ", in this query %s", pReader,
pBlockScanInfo ? (*pBlockScanInfo)->uid : 0, numOfTables, pReader->idStr);
return code;
@@ -4659,7 +4762,7 @@ _err:
static bool tsdbReadRowsCountOnly(STsdbReader* pReader) {
int32_t code = TSDB_CODE_SUCCESS;
- SSDataBlock* pBlock = pReader->pResBlock;
+ SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock;
if (pReader->status.loadFromFile == false) {
return false;
@@ -4688,13 +4791,13 @@ static int32_t doTsdbNextDataBlock(STsdbReader* pReader, bool* hasNext) {
int32_t code = TSDB_CODE_SUCCESS;
// cleanup the data that belongs to the previous data block
- SSDataBlock* pBlock = pReader->pResBlock;
+ SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock;
blockDataCleanup(pBlock);
*hasNext = false;
SReaderStatus* pStatus = &pReader->status;
- if (taosHashGetSize(pStatus->pTableMap) == 0) {
+ if (tSimpleHashGetSize(pStatus->pTableMap) == 0) {
return code;
}
@@ -4735,7 +4838,7 @@ int32_t tsdbNextDataBlock(STsdbReader* pReader, bool* hasNext) {
code = tsdbAcquireReader(pReader);
qTrace("tsdb/read: %p, take read mutex, code: %d", pReader, code);
- if (pReader->suspended) {
+ if (pReader->flag == READER_STATUS_SUSPEND) {
tsdbReaderResume(pReader);
}
@@ -4873,7 +4976,7 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SSDataBlock* pDataBlock,
SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(&pReader->status.blockIter);
SBlockLoadSuppInfo* pSup = &pReader->suppInfo;
- if (pReader->pResBlock->info.id.uid != pFBlock->uid) {
+ if (pReader->resBlockInfo.pResBlock->info.id.uid != pFBlock->uid) {
return TSDB_CODE_SUCCESS;
}
@@ -4899,8 +5002,8 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SSDataBlock* pDataBlock,
pTsAgg->numOfNull = 0;
pTsAgg->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
- pTsAgg->min = pReader->pResBlock->info.window.skey;
- pTsAgg->max = pReader->pResBlock->info.window.ekey;
+ pTsAgg->min = pReader->resBlockInfo.pResBlock->info.window.skey;
+ pTsAgg->max = pReader->resBlockInfo.pResBlock->info.window.ekey;
// update the number of NULL data rows
size_t numOfCols = pSup->numOfCols;
@@ -4911,7 +5014,7 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SSDataBlock* pDataBlock,
taosArrayEnsureCap(pSup->pColAgg, colsNum);
}
- SSDataBlock* pResBlock = pReader->pResBlock;
+ SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
if (pResBlock->pBlockAgg == NULL) {
size_t num = taosArrayGetSize(pResBlock->pDataBlock);
pResBlock->pBlockAgg = taosMemoryCalloc(num, POINTER_BYTES);
@@ -4947,11 +5050,11 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SSDataBlock* pDataBlock,
return code;
}
-STableBlockScanInfo* getTableBlockScanInfo(SHashObj* pTableMap, uint64_t uid, const char* id) {
- STableBlockScanInfo** p = taosHashGet(pTableMap, &uid, sizeof(uid));
+STableBlockScanInfo* getTableBlockScanInfo(SSHashObj* pTableMap, uint64_t uid, const char* id) {
+ STableBlockScanInfo** p = tSimpleHashGet(pTableMap, &uid, sizeof(uid));
if (p == NULL || *p == NULL) {
terrno = TSDB_CODE_INVALID_PARA;
- int32_t size = taosHashGetSize(pTableMap);
+ int32_t size = tSimpleHashGetSize(pTableMap);
tsdbError("failed to locate the uid:%" PRIu64 " in query table uid list, total tables:%d, %s", uid, size, id);
return NULL;
}
@@ -4982,7 +5085,7 @@ static SSDataBlock* doRetrieveDataBlock(STsdbReader* pReader) {
return NULL;
}
- return pReader->pResBlock;
+ return pReader->resBlockInfo.pResBlock;
}
SSDataBlock* tsdbRetrieveDataBlock(STsdbReader* pReader, SArray* pIdList) {
@@ -4997,7 +5100,7 @@ SSDataBlock* tsdbRetrieveDataBlock(STsdbReader* pReader, SArray* pIdList) {
SReaderStatus* pStatus = &pTReader->status;
if (pStatus->composedDataBlock) {
- return pTReader->pResBlock;
+ return pTReader->resBlockInfo.pResBlock;
}
SSDataBlock* ret = doRetrieveDataBlock(pTReader);
@@ -5012,7 +5115,7 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
qTrace("tsdb/reader-reset: %p, take read mutex", pReader);
tsdbAcquireReader(pReader);
- if (pReader->suspended) {
+ if (pReader->flag == READER_STATUS_SUSPEND) {
tsdbReaderResume(pReader);
}
@@ -5037,7 +5140,7 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
pReader->suppInfo.tsColAgg.colId = PRIMARYKEY_TIMESTAMP_COL_ID;
tsdbDataFReaderClose(&pReader->pFileReader);
- int32_t numOfTables = taosHashGetSize(pStatus->pTableMap);
+ int32_t numOfTables = tSimpleHashGetSize(pStatus->pTableMap);
initFilesetIterator(&pStatus->fileIter, pReader->pReadSnap->fs.aDFileSet, pReader);
resetDataBlockIterator(pBlockIter, pReader->order);
@@ -5091,11 +5194,9 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa
const int32_t numOfBuckets = 20.0;
- // find the start data block in file
-
// find the start data block in file
tsdbAcquireReader(pReader);
- if (pReader->suspended) {
+ if (pReader->flag == READER_STATUS_SUSPEND) {
tsdbReaderResume(pReader);
}
SReaderStatus* pStatus = &pReader->status;
@@ -5108,7 +5209,7 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa
pTableBlockInfo->numOfFiles += 1;
- int32_t numOfTables = (int32_t)taosHashGetSize(pStatus->pTableMap);
+ int32_t numOfTables = (int32_t)tSimpleHashGetSize(pStatus->pTableMap);
int defaultRows = 4096;
SDataBlockIter* pBlockIter = &pStatus->blockIter;
@@ -5168,11 +5269,12 @@ int64_t tsdbGetNumOfRowsInMemTable(STsdbReader* pReader) {
SReaderStatus* pStatus = &pReader->status;
tsdbAcquireReader(pReader);
- if (pReader->suspended) {
+ if (pReader->flag == READER_STATUS_SUSPEND) {
tsdbReaderResume(pReader);
}
- pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, NULL);
+ int32_t iter = 0;
+ pStatus->pTableIter = tSimpleHashIterate(pStatus->pTableMap, NULL, &iter);
while (pStatus->pTableIter != NULL) {
STableBlockScanInfo* pBlockScanInfo = *(STableBlockScanInfo**)pStatus->pTableIter;
@@ -5194,7 +5296,7 @@ int64_t tsdbGetNumOfRowsInMemTable(STsdbReader* pReader) {
}
// current table is exhausted, let's try the next table
- pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, pStatus->pTableIter);
+ pStatus->pTableIter = tSimpleHashIterate(pStatus->pTableMap, pStatus->pTableIter, &iter);
}
tsdbReleaseReader(pReader);
@@ -5333,3 +5435,5 @@ void tsdbReaderSetId(STsdbReader* pReader, const char* idstr) {
taosMemoryFreeClear(pReader->idStr);
pReader->idStr = taosStrdup(idstr);
}
+
+void tsdbReaderSetCloseFlag(STsdbReader* pReader) { pReader->flag = READER_STATUS_SHOULD_STOP; }
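
Taken together, the tsdbRead.c hunks above replace the per-key `SRowMerger merge = {0}` locals (re-initialized and cleared for every merged key) with a single merger owned by the reader in pReader->status.merger: it is set up once with tsdbRowMergerInit when the schema is known, rows are accumulated with tsdbRowMergerAdd, the merged row is produced with tsdbRowMergerGetRow, tsdbRowMergerClear only resets it between keys, and tsdbRowMergerCleanup releases it in tsdbReaderClose. A minimal standalone sketch of that reuse pattern, using a hypothetical accumulator rather than the real SRowMerger API:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int64_t* buf;  // scratch space reused across keys
  int32_t  cap;
  int32_t  num;
} SAcc;

static int32_t accInit(SAcc* p, int32_t cap) {  // once, when the schema/capacity is known
  p->buf = calloc(cap, sizeof(int64_t));
  if (p->buf == NULL) return -1;
  p->cap = cap;
  p->num = 0;
  return 0;
}

static void accAdd(SAcc* p, int64_t v) {  // per source row of the current key
  if (p->num < p->cap) p->buf[p->num++] = v;
}

static int64_t accGet(const SAcc* p) {  // produce the merged result for the key
  int64_t s = 0;
  for (int32_t i = 0; i < p->num; ++i) s += p->buf[i];
  return s;
}

static void accClear(SAcc* p) { p->num = 0; }  // reset between keys, keep the allocation

static void accCleanup(SAcc* p) {  // once, when the reader closes
  free(p->buf);
  p->buf = NULL;
  p->cap = p->num = 0;
}

int main(void) {
  SAcc acc;
  if (accInit(&acc, 8) != 0) return 1;
  for (int key = 0; key < 3; ++key) {  // one pass per merged key
    accAdd(&acc, key);
    accAdd(&acc, 10 * key);
    printf("key %d -> %lld\n", key, (long long)accGet(&acc));
    accClear(&acc);  // cheap reset instead of free/re-init
  }
  accCleanup(&acc);
  return 0;
}
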
diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
index aed863d194..dfea125cc1 100644
--- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
+++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
@@ -70,10 +70,11 @@ static int32_t tsdbSnapReadFileDataStart(STsdbSnapReader* pReader) {
if (pReader->pIter) {
// iter to next with filter info (sver, ever)
- code = tsdbDataIterNext2(pReader->pIter,
- &(STsdbFilterInfo){.flag = TSDB_FILTER_FLAG_BY_VERSION, // flag
- .sver = pReader->sver,
- .ever = pReader->ever});
+ code = tsdbDataIterNext2(
+ pReader->pIter,
+ &(STsdbFilterInfo){.flag = TSDB_FILTER_FLAG_BY_VERSION | TSDB_FILTER_FLAG_IGNORE_DROPPED_TABLE, // flag
+ .sver = pReader->sver,
+ .ever = pReader->ever});
TSDB_CHECK_CODE(code, lino, _exit);
if (pReader->pIter->rowInfo.suid || pReader->pIter->rowInfo.uid) {
@@ -94,10 +95,11 @@ static int32_t tsdbSnapReadFileDataStart(STsdbSnapReader* pReader) {
if (pReader->pIter) {
// iter to valid row
- code = tsdbDataIterNext2(pReader->pIter,
- &(STsdbFilterInfo){.flag = TSDB_FILTER_FLAG_BY_VERSION, // flag
- .sver = pReader->sver,
- .ever = pReader->ever});
+ code = tsdbDataIterNext2(
+ pReader->pIter,
+ &(STsdbFilterInfo){.flag = TSDB_FILTER_FLAG_BY_VERSION | TSDB_FILTER_FLAG_IGNORE_DROPPED_TABLE, // flag
+ .sver = pReader->sver,
+ .ever = pReader->ever});
TSDB_CHECK_CODE(code, lino, _exit);
if (pReader->pIter->rowInfo.suid || pReader->pIter->rowInfo.uid) {
@@ -139,7 +141,8 @@ static int32_t tsdbSnapReadNextRow(STsdbSnapReader* pReader, SRowInfo** ppRowInf
int32_t lino = 0;
if (pReader->pIter) {
- code = tsdbDataIterNext2(pReader->pIter, &(STsdbFilterInfo){.flag = TSDB_FILTER_FLAG_BY_VERSION, // flag
+ code = tsdbDataIterNext2(pReader->pIter, &(STsdbFilterInfo){.flag = TSDB_FILTER_FLAG_BY_VERSION |
+ TSDB_FILTER_FLAG_IGNORE_DROPPED_TABLE, // flag
.sver = pReader->sver,
.ever = pReader->ever});
TSDB_CHECK_CODE(code, lino, _exit);
@@ -346,8 +349,9 @@ static int32_t tsdbSnapReadNextTombData(STsdbSnapReader* pReader, SDelInfo** ppD
int32_t lino = 0;
code = tsdbDataIterNext2(
- pReader->pTIter,
- &(STsdbFilterInfo){.flag = TSDB_FILTER_FLAG_BY_VERSION, .sver = pReader->sver, .ever = pReader->ever});
+ pReader->pTIter, &(STsdbFilterInfo){.flag = TSDB_FILTER_FLAG_BY_VERSION | TSDB_FILTER_FLAG_IGNORE_DROPPED_TABLE,
+ .sver = pReader->sver,
+ .ever = pReader->ever});
TSDB_CHECK_CODE(code, lino, _exit);
if (ppDelInfo) {
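
The tsdbSnapshot.c hunks above extend the iterator filter from TSDB_FILTER_FLAG_BY_VERSION alone to TSDB_FILTER_FLAG_BY_VERSION | TSDB_FILTER_FLAG_IGNORE_DROPPED_TABLE, so rows belonging to dropped tables are skipped while a version range is exported. A minimal sketch of this bit-flag filter pattern; the flag values and the SFilter type here are hypothetical, only the OR-composition mirrors the change:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum {
  FILTER_FLAG_BY_VERSION           = 1 << 0,  // hypothetical values for illustration
  FILTER_FLAG_IGNORE_DROPPED_TABLE = 1 << 1,
};

typedef struct {
  int32_t flag;
  int64_t sver;
  int64_t ever;
} SFilter;

static bool keepRow(const SFilter* f, int64_t rowVer, bool tableDropped) {
  if ((f->flag & FILTER_FLAG_BY_VERSION) && (rowVer < f->sver || rowVer > f->ever)) return false;
  if ((f->flag & FILTER_FLAG_IGNORE_DROPPED_TABLE) && tableDropped) return false;
  return true;
}

int main(void) {
  SFilter f = {.flag = FILTER_FLAG_BY_VERSION | FILTER_FLAG_IGNORE_DROPPED_TABLE, .sver = 10, .ever = 20};
  printf("%d %d %d\n", keepRow(&f, 15, false), keepRow(&f, 15, true), keepRow(&f, 5, false));  // 1 0 0
  return 0;
}
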
diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c
index 8e778da877..556ec33526 100644
--- a/source/dnode/vnode/src/tsdb/tsdbUtil.c
+++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c
@@ -637,201 +637,157 @@ SColVal *tsdbRowIterNext(STSDBRowIter *pIter) {
}
// SRowMerger ======================================================
-
-int32_t tsdbRowMergerInit(SRowMerger *pMerger, STSchema *pResTSchema, TSDBROW *pRow, STSchema *pTSchema) {
- int32_t code = 0;
- TSDBKEY key = TSDBROW_KEY(pRow);
- SColVal *pColVal = &(SColVal){0};
- STColumn *pTColumn;
- int32_t iCol, jCol = 0;
-
- if (NULL == pResTSchema) {
- pResTSchema = pTSchema;
- }
-
- pMerger->pTSchema = pResTSchema;
- pMerger->version = key.version;
-
- pMerger->pArray = taosArrayInit(pResTSchema->numOfCols, sizeof(SColVal));
- if (pMerger->pArray == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
-
- // ts
- pTColumn = &pTSchema->columns[jCol++];
-
- ASSERT(pTColumn->type == TSDB_DATA_TYPE_TIMESTAMP);
-
- *pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, (SValue){.val = key.ts});
- if (taosArrayPush(pMerger->pArray, pColVal) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
-
- // other
- for (iCol = 1; jCol < pTSchema->numOfCols && iCol < pResTSchema->numOfCols; ++iCol) {
- pTColumn = &pResTSchema->columns[iCol];
- if (pTSchema->columns[jCol].colId < pTColumn->colId) {
- ++jCol;
- --iCol;
- continue;
- } else if (pTSchema->columns[jCol].colId > pTColumn->colId) {
- taosArrayPush(pMerger->pArray, &COL_VAL_NONE(pTColumn->colId, pTColumn->type));
- continue;
- }
-
- tsdbRowGetColVal(pRow, pTSchema, jCol++, pColVal);
- if ((!COL_VAL_IS_NONE(pColVal)) && (!COL_VAL_IS_NULL(pColVal)) && IS_VAR_DATA_TYPE(pColVal->type)) {
- uint8_t *pVal = pColVal->value.pData;
-
- pColVal->value.pData = NULL;
- code = tRealloc(&pColVal->value.pData, pColVal->value.nData);
- if (code) goto _exit;
-
- if (pColVal->value.nData) {
- memcpy(pColVal->value.pData, pVal, pColVal->value.nData);
- }
- }
-
- if (taosArrayPush(pMerger->pArray, pColVal) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
- }
-
- for (; iCol < pResTSchema->numOfCols; ++iCol) {
- pTColumn = &pResTSchema->columns[iCol];
- taosArrayPush(pMerger->pArray, &COL_VAL_NONE(pTColumn->colId, pTColumn->type));
- }
-
-_exit:
- return code;
-}
-
int32_t tsdbRowMergerAdd(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema) {
int32_t code = 0;
TSDBKEY key = TSDBROW_KEY(pRow);
- SColVal *pColVal = &(SColVal){0};
+  SColVal  *pColVal = &(SColVal){0};
STColumn *pTColumn;
int32_t iCol, jCol = 1;
if (NULL == pTSchema) {
pTSchema = pMerger->pTSchema;
}
- ASSERT(((SColVal *)pMerger->pArray->pData)->value.val == key.ts);
- for (iCol = 1; iCol < pMerger->pTSchema->numOfCols && jCol < pTSchema->numOfCols; ++iCol) {
- pTColumn = &pMerger->pTSchema->columns[iCol];
- if (pTSchema->columns[jCol].colId < pTColumn->colId) {
- ++jCol;
- --iCol;
- continue;
- } else if (pTSchema->columns[jCol].colId > pTColumn->colId) {
- continue;
- }
+ if (taosArrayGetSize(pMerger->pArray) == 0) {
+ // ts
+ jCol = 0;
+ pTColumn = &pTSchema->columns[jCol++];
- tsdbRowGetColVal(pRow, pTSchema, jCol++, pColVal);
-
- if (key.version > pMerger->version) {
- if (!COL_VAL_IS_NONE(pColVal)) {
- if (IS_VAR_DATA_TYPE(pColVal->type)) {
- SColVal *pTColVal = taosArrayGet(pMerger->pArray, iCol);
- if (!COL_VAL_IS_NULL(pColVal)) {
- code = tRealloc(&pTColVal->value.pData, pColVal->value.nData);
- if (code) return code;
-
- pTColVal->value.nData = pColVal->value.nData;
- if (pTColVal->value.nData) {
- memcpy(pTColVal->value.pData, pColVal->value.pData, pTColVal->value.nData);
- }
- pTColVal->flag = 0;
- } else {
- tFree(pTColVal->value.pData);
- taosArraySet(pMerger->pArray, iCol, pColVal);
- }
- } else {
- taosArraySet(pMerger->pArray, iCol, pColVal);
- }
- }
- } else if (key.version < pMerger->version) {
- SColVal *tColVal = (SColVal *)taosArrayGet(pMerger->pArray, iCol);
- if (COL_VAL_IS_NONE(tColVal) && !COL_VAL_IS_NONE(pColVal)) {
- if ((!COL_VAL_IS_NULL(pColVal)) && IS_VAR_DATA_TYPE(pColVal->type)) {
- code = tRealloc(&tColVal->value.pData, pColVal->value.nData);
- if (code) return code;
-
- tColVal->value.nData = pColVal->value.nData;
- if (pColVal->value.nData) {
- memcpy(tColVal->value.pData, pColVal->value.pData, pColVal->value.nData);
- }
- tColVal->flag = 0;
- } else {
- taosArraySet(pMerger->pArray, iCol, pColVal);
- }
- }
- } else {
- ASSERT(0 && "dup versions not allowed");
- }
- }
-
- pMerger->version = key.version;
- return code;
-}
-/*
-int32_t tsdbRowMergerInit(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema) {
- int32_t code = 0;
- TSDBKEY key = TSDBROW_KEY(pRow);
- SColVal *pColVal = &(SColVal){0};
- STColumn *pTColumn;
-
- pMerger->pTSchema = pTSchema;
- pMerger->version = key.version;
-
- pMerger->pArray = taosArrayInit(pTSchema->numOfCols, sizeof(SColVal));
- if (pMerger->pArray == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
-
- // ts
- pTColumn = &pTSchema->columns[0];
-
- ASSERT(pTColumn->type == TSDB_DATA_TYPE_TIMESTAMP);
-
- *pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, (SValue){.val = key.ts});
- if (taosArrayPush(pMerger->pArray, pColVal) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
-
- // other
- for (int16_t iCol = 1; iCol < pTSchema->numOfCols; iCol++) {
- tsdbRowGetColVal(pRow, pTSchema, iCol, pColVal);
- if ((!COL_VAL_IS_NONE(pColVal)) && (!COL_VAL_IS_NULL(pColVal)) && IS_VAR_DATA_TYPE(pColVal->type)) {
- uint8_t *pVal = pColVal->value.pData;
-
- pColVal->value.pData = NULL;
- code = tRealloc(&pColVal->value.pData, pColVal->value.nData);
- if (code) goto _exit;
-
- if (pColVal->value.nData) {
- memcpy(pColVal->value.pData, pVal, pColVal->value.nData);
- }
- }
+ ASSERT(pTColumn->type == TSDB_DATA_TYPE_TIMESTAMP);
+ *pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, (SValue){.val = key.ts});
if (taosArrayPush(pMerger->pArray, pColVal) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
+ return code;
+ }
+
+ // other
+ for (iCol = 1; jCol < pTSchema->numOfCols && iCol < pMerger->pTSchema->numOfCols; ++iCol) {
+ pTColumn = &pMerger->pTSchema->columns[iCol];
+ if (pTSchema->columns[jCol].colId < pTColumn->colId) {
+ ++jCol;
+ --iCol;
+ continue;
+ } else if (pTSchema->columns[jCol].colId > pTColumn->colId) {
+ taosArrayPush(pMerger->pArray, &COL_VAL_NONE(pTColumn->colId, pTColumn->type));
+ continue;
+ }
+
+ tsdbRowGetColVal(pRow, pTSchema, jCol++, pColVal);
+ if ((!COL_VAL_IS_NONE(pColVal)) && (!COL_VAL_IS_NULL(pColVal)) && IS_VAR_DATA_TYPE(pColVal->type)) {
+ uint8_t *pVal = pColVal->value.pData;
+
+ pColVal->value.pData = NULL;
+ code = tRealloc(&pColVal->value.pData, pColVal->value.nData);
+ if (code) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ if (pColVal->value.nData) {
+ memcpy(pColVal->value.pData, pVal, pColVal->value.nData);
+ }
+ }
+
+ if (taosArrayPush(pMerger->pArray, pColVal) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ return code;
+ }
+ }
+
+ for (; iCol < pMerger->pTSchema->numOfCols; ++iCol) {
+ pTColumn = &pMerger->pTSchema->columns[iCol];
+ taosArrayPush(pMerger->pArray, &COL_VAL_NONE(pTColumn->colId, pTColumn->type));
+ }
+
+ pMerger->version = key.version;
+ return 0;
+ } else {
+ ASSERT(((SColVal *)pMerger->pArray->pData)->value.val == key.ts);
+
+ for (iCol = 1; iCol < pMerger->pTSchema->numOfCols && jCol < pTSchema->numOfCols; ++iCol) {
+ pTColumn = &pMerger->pTSchema->columns[iCol];
+ if (pTSchema->columns[jCol].colId < pTColumn->colId) {
+ ++jCol;
+ --iCol;
+ continue;
+ } else if (pTSchema->columns[jCol].colId > pTColumn->colId) {
+ continue;
+ }
+
+ tsdbRowGetColVal(pRow, pTSchema, jCol++, pColVal);
+
+ if (key.version > pMerger->version) {
+ if (!COL_VAL_IS_NONE(pColVal)) {
+ if (IS_VAR_DATA_TYPE(pColVal->type)) {
+ SColVal *pTColVal = taosArrayGet(pMerger->pArray, iCol);
+ if (!COL_VAL_IS_NULL(pColVal)) {
+ code = tRealloc(&pTColVal->value.pData, pColVal->value.nData);
+ if (code) return code;
+
+ pTColVal->value.nData = pColVal->value.nData;
+ if (pTColVal->value.nData) {
+ memcpy(pTColVal->value.pData, pColVal->value.pData, pTColVal->value.nData);
+ }
+ pTColVal->flag = 0;
+ } else {
+ tFree(pTColVal->value.pData);
+ taosArraySet(pMerger->pArray, iCol, pColVal);
+ }
+ } else {
+ taosArraySet(pMerger->pArray, iCol, pColVal);
+ }
+ }
+ } else if (key.version < pMerger->version) {
+ SColVal *tColVal = (SColVal *)taosArrayGet(pMerger->pArray, iCol);
+ if (COL_VAL_IS_NONE(tColVal) && !COL_VAL_IS_NONE(pColVal)) {
+ if ((!COL_VAL_IS_NULL(pColVal)) && IS_VAR_DATA_TYPE(pColVal->type)) {
+ code = tRealloc(&tColVal->value.pData, pColVal->value.nData);
+ if (code) return code;
+
+ tColVal->value.nData = pColVal->value.nData;
+ if (pColVal->value.nData) {
+ memcpy(tColVal->value.pData, pColVal->value.pData, pColVal->value.nData);
+ }
+ tColVal->flag = 0;
+ } else {
+ taosArraySet(pMerger->pArray, iCol, pColVal);
+ }
+ }
+ } else {
+ ASSERT(0 && "dup versions not allowed");
+ }
+ }
+
+ pMerger->version = key.version;
+ return code;
+ }
+}
+
+int32_t tsdbRowMergerInit(SRowMerger *pMerger, STSchema *pSchema) {
+ pMerger->pTSchema = pSchema;
+ pMerger->pArray = taosArrayInit(pSchema->numOfCols, sizeof(SColVal));
+ if (pMerger->pArray == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ } else {
+ return TSDB_CODE_SUCCESS;
+ }
+}
+
+void tsdbRowMergerClear(SRowMerger* pMerger) {
+ for (int32_t iCol = 1; iCol < pMerger->pTSchema->numOfCols; iCol++) {
+ SColVal *pTColVal = taosArrayGet(pMerger->pArray, iCol);
+ if (IS_VAR_DATA_TYPE(pTColVal->type)) {
+ tFree(pTColVal->value.pData);
}
}
-_exit:
- return code;
+ taosArrayClear(pMerger->pArray);
}
-*/
-void tsdbRowMergerClear(SRowMerger *pMerger) {
- for (int32_t iCol = 1; iCol < pMerger->pTSchema->numOfCols; iCol++) {
+
+void tsdbRowMergerCleanup(SRowMerger* pMerger) {
+ int32_t numOfCols = taosArrayGetSize(pMerger->pArray);
+ for (int32_t iCol = 1; iCol < numOfCols; iCol++) {
SColVal *pTColVal = taosArrayGet(pMerger->pArray, iCol);
if (IS_VAR_DATA_TYPE(pTColVal->type)) {
tFree(pTColVal->value.pData);
@@ -840,72 +796,7 @@ void tsdbRowMergerClear(SRowMerger *pMerger) {
taosArrayDestroy(pMerger->pArray);
}
-/*
-int32_t tsdbRowMerge(SRowMerger *pMerger, TSDBROW *pRow) {
- int32_t code = 0;
- TSDBKEY key = TSDBROW_KEY(pRow);
- SColVal *pColVal = &(SColVal){0};
- ASSERT(((SColVal *)pMerger->pArray->pData)->value.val == key.ts);
-
- for (int32_t iCol = 1; iCol < pMerger->pTSchema->numOfCols; iCol++) {
- tsdbRowGetColVal(pRow, pMerger->pTSchema, iCol, pColVal);
-
- if (key.version > pMerger->version) {
- if (!COL_VAL_IS_NONE(pColVal)) {
- if (IS_VAR_DATA_TYPE(pColVal->type)) {
- SColVal *pTColVal = taosArrayGet(pMerger->pArray, iCol);
- if (!COL_VAL_IS_NULL(pColVal)) {
- code = tRealloc(&pTColVal->value.pData, pColVal->value.nData);
- if (code) goto _exit;
-
- pTColVal->value.nData = pColVal->value.nData;
- if (pTColVal->value.nData) {
- memcpy(pTColVal->value.pData, pColVal->value.pData, pTColVal->value.nData);
- }
- pTColVal->flag = 0;
- } else {
- tFree(pTColVal->value.pData);
- pTColVal->value.pData = NULL;
- taosArraySet(pMerger->pArray, iCol, pColVal);
- }
- } else {
- taosArraySet(pMerger->pArray, iCol, pColVal);
- }
- }
- } else if (key.version < pMerger->version) {
- SColVal *tColVal = (SColVal *)taosArrayGet(pMerger->pArray, iCol);
- if (COL_VAL_IS_NONE(tColVal) && !COL_VAL_IS_NONE(pColVal)) {
- if (IS_VAR_DATA_TYPE(pColVal->type)) {
- if (!COL_VAL_IS_NULL(pColVal)) {
- code = tRealloc(&tColVal->value.pData, pColVal->value.nData);
- if (code) goto _exit;
-
- tColVal->value.nData = pColVal->value.nData;
- if (tColVal->value.nData) {
- memcpy(tColVal->value.pData, pColVal->value.pData, tColVal->value.nData);
- }
- tColVal->flag = 0;
- } else {
- tFree(tColVal->value.pData);
- tColVal->value.pData = NULL;
- taosArraySet(pMerger->pArray, iCol, pColVal);
- }
- } else {
- taosArraySet(pMerger->pArray, iCol, pColVal);
- }
- }
- } else {
- ASSERT(0);
- }
- }
-
- pMerger->version = key.version;
-
-_exit:
- return code;
-}
-*/
int32_t tsdbRowMergerGetRow(SRowMerger *pMerger, SRow **ppRow) {
return tRowBuild(pMerger->pArray, pMerger->pTSchema, ppRow);
}
@@ -1160,8 +1051,6 @@ int32_t tBlockDataCreate(SBlockData *pBlockData) {
pBlockData->aTSKEY = NULL;
pBlockData->nColData = 0;
pBlockData->aColData = NULL;
-
-_exit:
return code;
}
@@ -1218,9 +1107,10 @@ int32_t tBlockDataInit(SBlockData *pBlockData, TABLEID *pId, STSchema *pTSchema,
int32_t iColumn = 1;
STColumn *pTColumn = &pTSchema->columns[iColumn];
for (int32_t iCid = 0; iCid < nCid; iCid++) {
- if (ASSERTS(pTColumn != NULL, "invalid input param")) {
- code = TSDB_CODE_INVALID_PARA;
- goto _exit;
+
+    // the aCid array (from the taos client catalog) may contain columns that do not exist in pTSchema, since pTSchema is newer
+ if (pTColumn == NULL) {
+ continue;
}
while (pTColumn->colId < aCid[iCid]) {
@@ -1229,9 +1119,8 @@ int32_t tBlockDataInit(SBlockData *pBlockData, TABLEID *pId, STSchema *pTSchema,
pTColumn = &pTSchema->columns[iColumn];
}
- if (ASSERTS(pTColumn->colId == aCid[iCid], "invalid input param")) {
- code = TSDB_CODE_INVALID_PARA;
- goto _exit;
+ if (pTColumn->colId != aCid[iCid]) {
+ continue;
}
tColDataInit(&pBlockData->aColData[iCid], pTColumn->colId, pTColumn->type,
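The hunks above replace the old tsdbRowMergerInit(pMerger, pResTSchema, pRow, pTSchema) flow with a schema-only init plus per-row adds: tsdbRowMergerAdd() now seeds the timestamp and columns on its first call and merges by version afterwards, tsdbRowMergerClear() resets the merger between keys, and tsdbRowMergerCleanup() destroys it. A minimal caller-side sketch of that lifecycle, assuming a hypothetical nextRow() iterator as the row source:

    SRowMerger merger = {0};
    int32_t    code = tsdbRowMergerInit(&merger, pTSchema);  // bind the result schema once
    if (code) return code;

    TSDBROW *pRow = NULL;
    while ((pRow = nextRow()) != NULL) {                     // hypothetical row source for one key
      code = tsdbRowMergerAdd(&merger, pRow, pTSchema);      // first add seeds ts/columns, later adds merge by version
      if (code) break;
    }

    SRow *pMerged = NULL;
    if (code == 0) code = tsdbRowMergerGetRow(&merger, &pMerged);  // build the merged SRow
    tsdbRowMergerClear(&merger);                             // reset var-data buffers between keys
    tsdbRowMergerCleanup(&merger);                           // destroy the merger once the read is done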
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index 0dfde8f579..2e6d452e95 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -1388,7 +1388,7 @@ _exit:
// clear
taosArrayDestroy(newTbUids);
- tDestroySSubmitReq2(pSubmitReq, 0 == pMsg->version ? TSDB_MSG_FLG_CMPT : TSDB_MSG_FLG_DECODE);
+ tDestroySSubmitReq(pSubmitReq, 0 == pMsg->version ? TSDB_MSG_FLG_CMPT : TSDB_MSG_FLG_DECODE);
tDestroySSubmitRsp2(pSubmitRsp, TSDB_MSG_FLG_ENCODE);
if (code) terrno = code;
diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c
index dc2d709d76..1f958c569f 100644
--- a/source/dnode/vnode/src/vnd/vnodeSync.c
+++ b/source/dnode/vnode/src/vnd/vnodeSync.c
@@ -555,7 +555,12 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx)
vInfo("vgId:%d, sync restore finished, start to restore stream tasks by replay wal", pVnode->config.vgId);
// start to restore all stream tasks
- tqStartStreamTasks(pVnode->pTq);
+ if (tsDisableStream) {
+    vInfo("vgId:%d, stream tasks are not restored since stream is disabled", pVnode->config.vgId);
+ } else {
+ vInfo("vgId:%d start to restore stream tasks", pVnode->config.vgId);
+ tqStartStreamTasks(pVnode->pTq);
+ }
}
static void vnodeBecomeFollower(const SSyncFSM *pFsm) {
diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h
index 85a130d293..c548a6c696 100644
--- a/source/libs/catalog/inc/catalogInt.h
+++ b/source/libs/catalog/inc/catalogInt.h
@@ -58,6 +58,7 @@ typedef enum {
CTG_CI_OTHERTABLE_META,
CTG_CI_TBL_SMA,
CTG_CI_TBL_CFG,
+ CTG_CI_TBL_TAG,
CTG_CI_INDEX_INFO,
CTG_CI_USER,
CTG_CI_UDF,
@@ -110,6 +111,7 @@ typedef enum {
CTG_TASK_GET_SVR_VER,
CTG_TASK_GET_TB_META_BATCH,
CTG_TASK_GET_TB_HASH_BATCH,
+ CTG_TASK_GET_TB_TAG,
} CTG_TASK_TYPE;
typedef enum {
@@ -152,6 +154,11 @@ typedef struct SCtgTbCacheInfo {
int32_t tbType;
} SCtgTbCacheInfo;
+typedef struct SCtgTbMetaParam {
+ SName* pName;
+ int32_t flag;
+} SCtgTbMetaParam;
+
typedef struct SCtgTbMetaCtx {
SCtgTbCacheInfo tbInfo;
int32_t vgId;
@@ -186,6 +193,11 @@ typedef struct SCtgTbCfgCtx {
SVgroupInfo* pVgInfo;
} SCtgTbCfgCtx;
+typedef struct SCtgTbTagCtx {
+ SName* pName;
+ SVgroupInfo* pVgInfo;
+} SCtgTbTagCtx;
+
typedef struct SCtgDbVgCtx {
char dbFName[TSDB_DB_FNAME_LEN];
} SCtgDbVgCtx;
@@ -304,6 +316,7 @@ typedef struct SCtgJob {
catalogCallback userFp;
int32_t tbMetaNum;
int32_t tbHashNum;
+ int32_t tbTagNum;
int32_t dbVgNum;
int32_t udfNum;
int32_t qnodeNum;
@@ -346,6 +359,7 @@ typedef struct SCtgSubRes {
struct SCtgTask {
CTG_TASK_TYPE type;
+ bool subTask;
int32_t taskId;
SCtgJob* pJob;
void* taskCtx;
@@ -623,6 +637,7 @@ typedef struct SCtgCacheItemInfo {
#define CTG_FLAG_SYS_DB 0x8
#define CTG_FLAG_FORCE_UPDATE 0x10
#define CTG_FLAG_ONLY_CACHE 0x20
+#define CTG_FLAG_SYNC_OP 0x40
#define CTG_FLAG_SET(_flag, _v) ((_flag) |= (_v))
@@ -925,6 +940,10 @@ void ctgReleaseVgMetaToCache(SCatalog* pCtg, SCtgDBCache* dbCache, SCtgTbCach
void ctgReleaseTbMetaToCache(SCatalog* pCtg, SCtgDBCache* dbCache, SCtgTbCache* pCache);
void ctgGetGlobalCacheStat(SCtgCacheStat* pStat);
int32_t ctgChkSetAuthRes(SCatalog* pCtg, SCtgAuthReq* req, SCtgAuthRsp* res);
+int32_t ctgGetTbMeta(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta);
+int32_t ctgGetCachedStbNameFromSuid(SCatalog* pCtg, char* dbFName, uint64_t suid, char **stbName);
+int32_t ctgGetTbTagCb(SCtgTask* pTask);
+int32_t ctgGetUserCb(SCtgTask* pTask);
extern SCatalogMgmt gCtgMgmt;
extern SCtgDebug gCTGDebug;
diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c
index bddc6c01a7..976a38c03d 100644
--- a/source/libs/catalog/src/catalog.c
+++ b/source/libs/catalog/src/catalog.c
@@ -208,7 +208,7 @@ int32_t ctgGetTbMeta(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgTbMetaCtx* ctx
}
while (true) {
- CTG_ERR_JRET(ctgRefreshTbMeta(pCtg, pConn, ctx, &output, false));
+ CTG_ERR_JRET(ctgRefreshTbMeta(pCtg, pConn, ctx, &output, ctx->flag & CTG_FLAG_SYNC_OP));
if (CTG_IS_META_TABLE(output->metaType)) {
*pTableMeta = output->tbMeta;
@@ -429,6 +429,48 @@ int32_t ctgGetTbCfg(SCatalog* pCtg, SRequestConnInfo* pConn, SName* pTableName,
CTG_RET(TSDB_CODE_SUCCESS);
}
+int32_t ctgGetTbTag(SCatalog* pCtg, SRequestConnInfo* pConn, SName* pTableName, SArray** pRes) {
+ SVgroupInfo vgroupInfo = {0};
+ STableCfg* pCfg = NULL;
+ int32_t code = 0;
+
+ CTG_ERR_RET(ctgGetTbHashVgroup(pCtg, pConn, pTableName, &vgroupInfo, NULL));
+ CTG_ERR_RET(ctgGetTableCfgFromVnode(pCtg, pConn, pTableName, &vgroupInfo, &pCfg, NULL));
+
+ if (NULL == pCfg->pTags || pCfg->tagsLen <= 0) {
+ ctgError("invalid tag in tbCfg rsp, pTags:%p, len:%d", pCfg->pTags, pCfg->tagsLen);
+ CTG_ERR_JRET(TSDB_CODE_INVALID_MSG);
+ }
+
+ SArray* pTagVals = NULL;
+ STag* pTag = (STag*)pCfg->pTags;
+
+ if (tTagIsJson(pTag)) {
+ pTagVals = taosArrayInit(1, sizeof(STagVal));
+ if (NULL == pTagVals) {
+ CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+
+ char* pJson = parseTagDatatoJson(pTag);
+ STagVal tagVal;
+ tagVal.cid = 0;
+ tagVal.type = TSDB_DATA_TYPE_JSON;
+ tagVal.pData = pJson;
+ tagVal.nData = strlen(pJson);
+ taosArrayPush(pTagVals, &tagVal);
+ } else {
+ CTG_ERR_JRET(tTagToValArray((const STag*)pCfg->pTags, &pTagVals));
+ }
+
+ *pRes = pTagVals;
+
+_return:
+
+ tFreeSTableCfgRsp((STableCfgRsp*)pCfg);
+
+ CTG_RET(code);
+}
+
int32_t ctgGetTbDistVgInfo(SCatalog* pCtg, SRequestConnInfo* pConn, SName* pTableName, SArray** pVgList) {
STableMeta* tbMeta = NULL;
int32_t code = 0;
@@ -1414,6 +1456,21 @@ _return:
CTG_API_LEAVE(code);
}
+int32_t catalogGetTableTag(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, SArray** pRes) {
+ CTG_API_ENTER();
+
+ if (NULL == pCtg || NULL == pConn || NULL == pTableName || NULL == pRes) {
+ CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
+ }
+
+ int32_t code = 0;
+ CTG_ERR_JRET(ctgGetTbTag(pCtg, pConn, (SName*)pTableName, pRes));
+
+_return:
+
+ CTG_API_LEAVE(code);
+}
+
int32_t catalogRefreshGetTableCfg(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, STableCfg** pCfg) {
CTG_API_ENTER();
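catalogGetTableTag() added above is the synchronous counterpart of the new CTG_TASK_GET_TB_TAG async task. A brief caller-side sketch, assuming pCtg, connInfo and tbName are prepared as for the other catalogGet* calls; the result is an SArray of STagVal (a single JSON-typed value holding the rendered JSON string for JSON tags, one value per tag column otherwise), which the caller is assumed to release:

    SArray *pTagVals = NULL;
    int32_t code = catalogGetTableTag(pCtg, &connInfo, &tbName, &pTagVals);
    if (TSDB_CODE_SUCCESS == code) {
      int32_t num = taosArrayGetSize(pTagVals);
      for (int32_t i = 0; i < num; ++i) {
        STagVal *pVal = taosArrayGet(pTagVals, i);  // cid/type and val or pData come from the table cfg rsp
        // ... consume the tag value ...
      }
      taosArrayDestroy(pTagVals);                   // assumption: the caller owns the returned array
    }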
diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c
index f2a354997d..56c79eac1f 100644
--- a/source/libs/catalog/src/ctgAsync.c
+++ b/source/libs/catalog/src/ctgAsync.c
@@ -21,7 +21,8 @@
#include "trpc.h"
int32_t ctgInitGetTbMetaTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
- SName* name = (SName*)param;
+ SCtgTbMetaParam* pParam = (SCtgTbMetaParam*)param;
+ SName* name = pParam->pName;
SCtgTask task = {0};
task.type = CTG_TASK_GET_TB_META;
@@ -41,7 +42,7 @@ int32_t ctgInitGetTbMetaTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
}
memcpy(ctx->pName, name, sizeof(*name));
- ctx->flag = CTG_FLAG_UNKNOWN_STB;
+ ctx->flag = pParam->flag | CTG_FLAG_UNKNOWN_STB;
taosArrayPush(pJob->pTasks, &task);
@@ -386,6 +387,37 @@ int32_t ctgInitGetTbCfgTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
return TSDB_CODE_SUCCESS;
}
+int32_t ctgInitGetTbTagTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
+ SName* name = (SName*)param;
+ SCtgTask task = {0};
+
+ task.type = CTG_TASK_GET_TB_TAG;
+ task.taskId = taskIdx;
+ task.pJob = pJob;
+
+ task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgTbTagCtx));
+ if (NULL == task.taskCtx) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+
+ SCtgTbTagCtx* ctx = task.taskCtx;
+ ctx->pName = taosMemoryMalloc(sizeof(*name));
+ if (NULL == ctx->pName) {
+ taosMemoryFree(task.taskCtx);
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+
+ memcpy(ctx->pName, name, sizeof(*name));
+
+ taosArrayPush(pJob->pTasks, &task);
+
+ qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx,
+ ctgTaskTypeStr(task.type), name->tname);
+
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t ctgHandleForceUpdate(SCatalog* pCtg, int32_t taskNum, SCtgJob* pJob, const SCatalogReq* pReq) {
SHashObj* pDb = taosHashInit(taskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
SHashObj* pTb = taosHashInit(taskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
@@ -437,6 +469,15 @@ int32_t ctgHandleForceUpdate(SCatalog* pCtg, int32_t taskNum, SCtgJob* pJob, con
char dbFName[TSDB_DB_FNAME_LEN];
tNameGetFullDbName(name, dbFName);
taosHashPut(pDb, dbFName, strlen(dbFName), dbFName, TSDB_DB_FNAME_LEN);
+ taosHashPut(pTb, name, sizeof(SName), name, sizeof(SName));
+ }
+
+ for (int32_t i = 0; i < pJob->tbTagNum; ++i) {
+ SName* name = taosArrayGet(pReq->pTableTag, i);
+ char dbFName[TSDB_DB_FNAME_LEN];
+ tNameGetFullDbName(name, dbFName);
+ taosHashPut(pDb, dbFName, strlen(dbFName), dbFName, TSDB_DB_FNAME_LEN);
+ taosHashPut(pTb, name, sizeof(SName), name, sizeof(SName));
}
char* dbFName = taosHashIterate(pDb, NULL);
@@ -505,9 +546,10 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgJob** job, const
int32_t dbInfoNum = (int32_t)taosArrayGetSize(pReq->pDbInfo);
int32_t tbIndexNum = (int32_t)taosArrayGetSize(pReq->pTableIndex);
int32_t tbCfgNum = (int32_t)taosArrayGetSize(pReq->pTableCfg);
+ int32_t tbTagNum = (int32_t)taosArrayGetSize(pReq->pTableTag);
int32_t taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dnodeNum + svrVerNum + dbCfgNum + indexNum +
- userNum + dbInfoNum + tbIndexNum + tbCfgNum;
+ userNum + dbInfoNum + tbIndexNum + tbCfgNum + tbTagNum;
*job = taosMemoryCalloc(1, sizeof(SCtgJob));
if (NULL == *job) {
@@ -537,6 +579,7 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgJob** job, const
pJob->tbIndexNum = tbIndexNum;
pJob->tbCfgNum = tbCfgNum;
pJob->svrVerNum = svrVerNum;
+ pJob->tbTagNum = tbTagNum;
#if CTG_BATCH_FETCH
pJob->pBatchs =
@@ -604,6 +647,12 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgJob** job, const
CTG_ERR_JRET(ctgInitTask(pJob, CTG_TASK_GET_TB_CFG, name, NULL));
}
+ for (int32_t i = 0; i < tbTagNum; ++i) {
+ SName* name = taosArrayGet(pReq->pTableTag, i);
+ CTG_ERR_JRET(ctgInitTask(pJob, CTG_TASK_GET_TB_TAG, name, NULL));
+ }
+
for (int32_t i = 0; i < indexNum; ++i) {
char* indexName = taosArrayGet(pReq->pIndex, i);
CTG_ERR_JRET(ctgInitTask(pJob, CTG_TASK_GET_INDEX_INFO, indexName, NULL));
@@ -650,6 +699,10 @@ _return:
}
int32_t ctgDumpTbMetaRes(SCtgTask* pTask) {
+ if (pTask->subTask) {
+ return TSDB_CODE_SUCCESS;
+ }
+
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pTableMeta) {
pJob->jobRes.pTableMeta = taosArrayInit(pJob->tbMetaNum, sizeof(SMetaRes));
@@ -665,6 +718,10 @@ int32_t ctgDumpTbMetaRes(SCtgTask* pTask) {
}
int32_t ctgDumpTbMetasRes(SCtgTask* pTask) {
+ if (pTask->subTask) {
+ return TSDB_CODE_SUCCESS;
+ }
+
SCtgJob* pJob = pTask->pJob;
pJob->jobRes.pTableMeta = pTask->res;
@@ -673,6 +730,10 @@ int32_t ctgDumpTbMetasRes(SCtgTask* pTask) {
}
int32_t ctgDumpDbVgRes(SCtgTask* pTask) {
+ if (pTask->subTask) {
+ return TSDB_CODE_SUCCESS;
+ }
+
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pDbVgroup) {
pJob->jobRes.pDbVgroup = taosArrayInit(pJob->dbVgNum, sizeof(SMetaRes));
@@ -688,6 +749,10 @@ int32_t ctgDumpDbVgRes(SCtgTask* pTask) {
}
int32_t ctgDumpTbHashRes(SCtgTask* pTask) {
+ if (pTask->subTask) {
+ return TSDB_CODE_SUCCESS;
+ }
+
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pTableHash) {
pJob->jobRes.pTableHash = taosArrayInit(pJob->tbHashNum, sizeof(SMetaRes));
@@ -703,6 +768,10 @@ int32_t ctgDumpTbHashRes(SCtgTask* pTask) {
}
int32_t ctgDumpTbHashsRes(SCtgTask* pTask) {
+ if (pTask->subTask) {
+ return TSDB_CODE_SUCCESS;
+ }
+
SCtgJob* pJob = pTask->pJob;
pJob->jobRes.pTableHash = pTask->res;
@@ -711,9 +780,17 @@ int32_t ctgDumpTbHashsRes(SCtgTask* pTask) {
}
int32_t ctgDumpTbIndexRes(SCtgTask* pTask) {
+ if (pTask->subTask) {
+ return TSDB_CODE_SUCCESS;
+ }
+
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pTableIndex) {
- pJob->jobRes.pTableIndex = taosArrayInit(pJob->tbIndexNum, sizeof(SMetaRes));
+ SArray* pRes = taosArrayInit(pJob->tbIndexNum, sizeof(SMetaRes));
+ if (atomic_val_compare_exchange_ptr(&pJob->jobRes.pTableIndex, NULL, pRes)) {
+ taosArrayDestroy(pRes);
+ }
+
if (NULL == pJob->jobRes.pTableIndex) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
@@ -726,9 +803,17 @@ int32_t ctgDumpTbIndexRes(SCtgTask* pTask) {
}
int32_t ctgDumpTbCfgRes(SCtgTask* pTask) {
+ if (pTask->subTask) {
+ return TSDB_CODE_SUCCESS;
+ }
+
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pTableCfg) {
- pJob->jobRes.pTableCfg = taosArrayInit(pJob->tbCfgNum, sizeof(SMetaRes));
+ SArray* pRes = taosArrayInit(pJob->tbCfgNum, sizeof(SMetaRes));
+ if (atomic_val_compare_exchange_ptr(&pJob->jobRes.pTableCfg, NULL, pRes)) {
+ taosArrayDestroy(pRes);
+ }
+
if (NULL == pJob->jobRes.pTableCfg) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
@@ -740,7 +825,35 @@ int32_t ctgDumpTbCfgRes(SCtgTask* pTask) {
return TSDB_CODE_SUCCESS;
}
+int32_t ctgDumpTbTagRes(SCtgTask* pTask) {
+ if (pTask->subTask) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ SCtgJob* pJob = pTask->pJob;
+ if (NULL == pJob->jobRes.pTableTag) {
+ SArray* pRes = taosArrayInit(pJob->tbTagNum, sizeof(SMetaRes));
+ if (atomic_val_compare_exchange_ptr(&pJob->jobRes.pTableTag, NULL, pRes)) {
+ taosArrayDestroy(pRes);
+ }
+
+ if (NULL == pJob->jobRes.pTableTag) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+ }
+
+ SMetaRes res = {.code = pTask->code, .pRes = pTask->res};
+ taosArrayPush(pJob->jobRes.pTableTag, &res);
+
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t ctgDumpIndexRes(SCtgTask* pTask) {
+ if (pTask->subTask) {
+ return TSDB_CODE_SUCCESS;
+ }
+
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pIndex) {
pJob->jobRes.pIndex = taosArrayInit(pJob->indexNum, sizeof(SMetaRes));
@@ -756,6 +869,10 @@ int32_t ctgDumpIndexRes(SCtgTask* pTask) {
}
int32_t ctgDumpQnodeRes(SCtgTask* pTask) {
+ if (pTask->subTask) {
+ return TSDB_CODE_SUCCESS;
+ }
+
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pQnodeList) {
pJob->jobRes.pQnodeList = taosArrayInit(1, sizeof(SMetaRes));
@@ -771,6 +888,10 @@ int32_t ctgDumpQnodeRes(SCtgTask* pTask) {
}
int32_t ctgDumpDnodeRes(SCtgTask* pTask) {
+ if (pTask->subTask) {
+ return TSDB_CODE_SUCCESS;
+ }
+
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pDnodeList) {
pJob->jobRes.pDnodeList = taosArrayInit(1, sizeof(SMetaRes));
@@ -786,6 +907,10 @@ int32_t ctgDumpDnodeRes(SCtgTask* pTask) {
}
int32_t ctgDumpDbCfgRes(SCtgTask* pTask) {
+ if (pTask->subTask) {
+ return TSDB_CODE_SUCCESS;
+ }
+
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pDbCfg) {
pJob->jobRes.pDbCfg = taosArrayInit(pJob->dbCfgNum, sizeof(SMetaRes));
@@ -801,6 +926,10 @@ int32_t ctgDumpDbCfgRes(SCtgTask* pTask) {
}
int32_t ctgDumpDbInfoRes(SCtgTask* pTask) {
+ if (pTask->subTask) {
+ return TSDB_CODE_SUCCESS;
+ }
+
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pDbInfo) {
pJob->jobRes.pDbInfo = taosArrayInit(pJob->dbInfoNum, sizeof(SMetaRes));
@@ -816,6 +945,10 @@ int32_t ctgDumpDbInfoRes(SCtgTask* pTask) {
}
int32_t ctgDumpUdfRes(SCtgTask* pTask) {
+ if (pTask->subTask) {
+ return TSDB_CODE_SUCCESS;
+ }
+
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pUdfList) {
pJob->jobRes.pUdfList = taosArrayInit(pJob->udfNum, sizeof(SMetaRes));
@@ -831,6 +964,10 @@ int32_t ctgDumpUdfRes(SCtgTask* pTask) {
}
int32_t ctgDumpUserRes(SCtgTask* pTask) {
+ if (pTask->subTask) {
+ return TSDB_CODE_SUCCESS;
+ }
+
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pUser) {
pJob->jobRes.pUser = taosArrayInit(pJob->userNum, sizeof(SMetaRes));
@@ -846,6 +983,10 @@ int32_t ctgDumpUserRes(SCtgTask* pTask) {
}
int32_t ctgDumpSvrVer(SCtgTask* pTask) {
+ if (pTask->subTask) {
+ return TSDB_CODE_SUCCESS;
+ }
+
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pSvrVer) {
pJob->jobRes.pSvrVer = taosMemoryCalloc(1, sizeof(SMetaRes));
@@ -1075,7 +1216,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out;
- ctgUpdateTbMetaToCache(pCtg, pOut, false);
+ ctgUpdateTbMetaToCache(pCtg, pOut, flag & CTG_FLAG_SYNC_OP);
if (CTG_IS_META_BOTH(pOut->metaType)) {
memcpy(pOut->tbMeta, &pOut->ctbMeta, sizeof(pOut->ctbMeta));
@@ -1473,6 +1614,49 @@ _return:
CTG_RET(code);
}
+
+int32_t ctgHandleGetTbTagRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf* pMsg, int32_t rspCode) {
+ int32_t code = 0;
+ SCtgTask* pTask = tReq->pTask;
+ SCatalog* pCtg = pTask->pJob->pCtg;
+ CTG_ERR_JRET(ctgProcessRspMsg(&pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
+
+ STableCfgRsp* pRsp = (STableCfgRsp*)pTask->msgCtx.out;
+ if (NULL == pRsp->pTags || pRsp->tagsLen <= 0) {
+ ctgError("invalid tag in tbCfg rsp, pTags:%p, len:%d", pRsp->pTags, pRsp->tagsLen);
+ CTG_ERR_JRET(TSDB_CODE_INVALID_MSG);
+ }
+
+ SArray* pTagVals = NULL;
+ STag* pTag = (STag*)pRsp->pTags;
+
+ if (tTagIsJson(pTag)) {
+ pTagVals = taosArrayInit(1, sizeof(STagVal));
+ if (NULL == pTagVals) {
+ CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+
+ char* pJson = parseTagDatatoJson(pTag);
+ STagVal tagVal;
+ tagVal.cid = 0;
+ tagVal.type = TSDB_DATA_TYPE_JSON;
+ tagVal.pData = pJson;
+ tagVal.nData = strlen(pJson);
+ taosArrayPush(pTagVals, &tagVal);
+ } else {
+ CTG_ERR_JRET(tTagToValArray((const STag*)pRsp->pTags, &pTagVals));
+ }
+
+ pTask->res = pTagVals;
+
+_return:
+
+ ctgHandleTaskEnd(pTask, code);
+
+ CTG_RET(code);
+}
+
int32_t ctgHandleGetDbCfgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf* pMsg, int32_t rspCode) {
int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
@@ -1905,7 +2089,10 @@ int32_t ctgLaunchGetTbCfgTask(SCtgTask* pTask) {
if (pCtx->tbType <= 0) {
CTG_ERR_JRET(ctgReadTbTypeFromCache(pCtg, dbFName, pCtx->pName->tname, &pCtx->tbType));
if (pCtx->tbType <= 0) {
- CTG_ERR_JRET(ctgLaunchSubTask(pTask, CTG_TASK_GET_TB_META, ctgGetTbCfgCb, pCtx->pName));
+ SCtgTbMetaParam param;
+ param.pName = pCtx->pName;
+ param.flag = 0;
+      CTG_ERR_JRET(ctgLaunchSubTask(pTask, CTG_TASK_GET_TB_META, ctgGetTbCfgCb, &param));
return TSDB_CODE_SUCCESS;
}
}
@@ -1935,6 +2122,45 @@ _return:
CTG_RET(code);
}
+
+int32_t ctgLaunchGetTbTagTask(SCtgTask* pTask) {
+ int32_t code = 0;
+ SCatalog* pCtg = pTask->pJob->pCtg;
+ SRequestConnInfo* pConn = &pTask->pJob->conn;
+ SCtgTbTagCtx* pCtx = (SCtgTbTagCtx*)pTask->taskCtx;
+ SArray* pRes = NULL;
+ char dbFName[TSDB_DB_FNAME_LEN];
+ tNameGetFullDbName(pCtx->pName, dbFName);
+ SCtgJob* pJob = pTask->pJob;
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
+ if (NULL == pMsgCtx->pBatchs) {
+ pMsgCtx->pBatchs = pJob->pBatchs;
+ }
+
+ if (NULL == pCtx->pVgInfo) {
+ CTG_ERR_JRET(ctgGetTbHashVgroupFromCache(pCtg, pCtx->pName, &pCtx->pVgInfo));
+ if (NULL == pCtx->pVgInfo) {
+ CTG_ERR_JRET(ctgLaunchSubTask(pTask, CTG_TASK_GET_DB_VGROUP, ctgGetTbTagCb, dbFName));
+ return TSDB_CODE_SUCCESS;
+ }
+ }
+
+ CTG_CACHE_NHIT_INC(CTG_CI_TBL_TAG, 1);
+
+ CTG_ERR_JRET(ctgGetTableCfgFromVnode(pCtg, pConn, pCtx->pName, pCtx->pVgInfo, NULL, pTask));
+
+ return TSDB_CODE_SUCCESS;
+
+_return:
+
+ if (CTG_TASK_LAUNCHED == pTask->status) {
+ ctgHandleTaskEnd(pTask, code);
+ }
+
+ CTG_RET(code);
+}
+
int32_t ctgLaunchGetQnodeTask(SCtgTask* pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
@@ -2077,6 +2303,8 @@ int32_t ctgLaunchGetUserTask(SCtgTask* pTask) {
if (inCache) {
pTask->res = rsp.pRawRes;
+ ctgTaskDebug("Final res got, pass:%d, pCond:%p", rsp.pRawRes->pass, rsp.pRawRes->pCond);
+
CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0));
return TSDB_CODE_SUCCESS;
}
@@ -2084,7 +2312,10 @@ int32_t ctgLaunchGetUserTask(SCtgTask* pTask) {
taosMemoryFreeClear(rsp.pRawRes);
if (rsp.metaNotExists) {
- CTG_ERR_RET(ctgLaunchSubTask(pTask, CTG_TASK_GET_TB_META, ctgGetTbCfgCb, &pCtx->user.tbName));
+ SCtgTbMetaParam param;
+ param.pName = &pCtx->user.tbName;
+ param.flag = CTG_FLAG_SYNC_OP;
+    CTG_ERR_RET(ctgLaunchSubTask(pTask, CTG_TASK_GET_TB_META, ctgGetUserCb, &param));
} else {
CTG_ERR_RET(ctgGetUserDbAuthFromMnode(pCtg, pConn, pCtx->user.user, NULL, pTask));
}
@@ -2138,6 +2369,27 @@ _return:
CTG_RET(ctgHandleTaskEnd(pTask, pTask->subRes.code));
}
+int32_t ctgGetTbTagCb(SCtgTask* pTask) {
+ int32_t code = 0;
+
+ CTG_ERR_JRET(pTask->subRes.code);
+
+ SCtgTbTagCtx* pCtx = (SCtgTbTagCtx*)pTask->taskCtx;
+ SDBVgInfo* pDb = (SDBVgInfo*)pTask->subRes.res;
+
+ if (NULL == pCtx->pVgInfo) {
+ pCtx->pVgInfo = taosMemoryCalloc(1, sizeof(SVgroupInfo));
+ CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, pDb, pCtx->pName, pCtx->pVgInfo));
+ }
+
+ CTG_RET(ctgLaunchGetTbTagTask(pTask));
+
+_return:
+
+ CTG_RET(ctgHandleTaskEnd(pTask, pTask->subRes.code));
+}
+
int32_t ctgGetUserCb(SCtgTask* pTask) {
int32_t code = 0;
@@ -2162,8 +2414,12 @@ int32_t ctgCompDbVgTasks(SCtgTask* pTask, void* param, bool* equal) {
int32_t ctgCompTbMetaTasks(SCtgTask* pTask, void* param, bool* equal) {
SCtgTbMetaCtx* ctx = pTask->taskCtx;
+ SCtgTbMetaParam* pParam = (SCtgTbMetaParam*)param;
- *equal = tNameTbNameEqual(ctx->pName, (SName*)param);
+ *equal = tNameTbNameEqual(ctx->pName, (SName*)pParam->pName);
+ if (*equal) {
+ ctx->flag |= pParam->flag;
+ }
return TSDB_CODE_SUCCESS;
}
@@ -2197,6 +2453,7 @@ SCtgAsyncFps gCtgAsyncFps[] = {
{ctgInitGetSvrVerTask, ctgLaunchGetSvrVerTask, ctgHandleGetSvrVerRsp, ctgDumpSvrVer, NULL, NULL},
{ctgInitGetTbMetasTask, ctgLaunchGetTbMetasTask, ctgHandleGetTbMetasRsp, ctgDumpTbMetasRes, NULL, NULL},
{ctgInitGetTbHashsTask, ctgLaunchGetTbHashsTask, ctgHandleGetTbHashsRsp, ctgDumpTbHashsRes, NULL, NULL},
+ {ctgInitGetTbTagTask, ctgLaunchGetTbTagTask, ctgHandleGetTbTagRsp, ctgDumpTbTagRes, NULL, NULL},
};
int32_t ctgMakeAsyncRes(SCtgJob* pJob) {
@@ -2284,6 +2541,9 @@ int32_t ctgLaunchSubTask(SCtgTask* pTask, CTG_TASK_TYPE type, ctgSubTaskCbFp fp,
}
SCtgTask* pSub = taosArrayGet(pJob->pTasks, subTaskId);
+ if (newTask) {
+ pSub->subTask = true;
+ }
CTG_ERR_RET(ctgSetSubTaskCb(pSub, pTask));
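Several of the dump callbacks above (ctgDumpTbIndexRes, ctgDumpTbCfgRes, ctgDumpTbTagRes) now create the per-job result array through a compare-and-swap, presumably so a parent task and the sub-tasks it launches agree on a single array. A condensed sketch of that pattern, reusing the patch's atomic_val_compare_exchange_ptr helper, with pSlot and resNum as hypothetical stand-ins for a jobRes field and its expected size:

    SArray *pRes = taosArrayInit(resNum, sizeof(SMetaRes));
    if (atomic_val_compare_exchange_ptr(&pSlot, NULL, pRes)) {
      taosArrayDestroy(pRes);          // another task already installed an array; drop the local copy
    }
    if (NULL == pSlot) {
      return TSDB_CODE_OUT_OF_MEMORY;  // initialization failed on every contender
    }
    // append this task's SMetaRes into the shared pSlot, as the callbacks above do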
diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c
index 592b6e9c72..ec087c1168 100644
--- a/source/libs/catalog/src/ctgCache.c
+++ b/source/libs/catalog/src/ctgCache.c
@@ -703,7 +703,31 @@ _return:
CTG_RET(code);
}
+int32_t ctgGetCachedStbNameFromSuid(SCatalog* pCtg, char* dbFName, uint64_t suid, char **stbName) {
+ *stbName = NULL;
+
+ SCtgDBCache *dbCache = NULL;
+ ctgAcquireDBCache(pCtg, dbFName, &dbCache);
+ if (NULL == dbCache) {
+ ctgDebug("db %s not in cache", dbFName);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ char *stb = taosHashAcquire(dbCache->stbCache, &suid, sizeof(suid));
+ if (NULL == stb) {
+ ctgDebug("stb 0x%" PRIx64 " not in cache, dbFName:%s", suid, dbFName);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ *stbName = taosStrdup(stb);
+
+ taosHashRelease(dbCache->stbCache, stb);
+
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t ctgChkAuthFromCache(SCatalog *pCtg, SUserAuthInfo *pReq, bool *inCache, SCtgAuthRsp *pRes) {
+ int32_t code = 0;
if (IS_SYS_DBNAME(pReq->tbName.dbname)) {
*inCache = true;
pRes->pRawRes->pass = true;
@@ -728,7 +752,7 @@ int32_t ctgChkAuthFromCache(SCatalog *pCtg, SUserAuthInfo *pReq, bool *inCache,
CTG_LOCK(CTG_READ, &pUser->lock);
memcpy(&req.authInfo, &pUser->userAuth, sizeof(pUser->userAuth));
- int32_t code = ctgChkSetAuthRes(pCtg, &req, pRes);
+ code = ctgChkSetAuthRes(pCtg, &req, pRes);
CTG_UNLOCK(CTG_READ, &pUser->lock);
CTG_ERR_JRET(code);
@@ -742,8 +766,9 @@ _return:
*inCache = false;
CTG_CACHE_NHIT_INC(CTG_CI_USER, 1);
+ ctgDebug("Get user from cache failed, user:%s, metaNotExists:%d, code:%d", pReq->user, pRes->metaNotExists, code);
- return TSDB_CODE_SUCCESS;
+ return code;
}
void ctgDequeue(SCtgCacheOperation **op) {
diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c
index b2b2b5a87e..9274c1ef92 100644
--- a/source/libs/catalog/src/ctgUtil.c
+++ b/source/libs/catalog/src/ctgUtil.c
@@ -170,6 +170,9 @@ void ctgFreeSMetaData(SMetaData* pData) {
taosArrayDestroy(pData->pTableCfg);
pData->pTableCfg = NULL;
+ taosArrayDestroy(pData->pTableTag);
+ pData->pTableTag = NULL;
+
taosMemoryFreeClear(pData->pSvrVer);
}
@@ -486,6 +489,18 @@ void ctgFreeBatchHash(void* hash) {
taosMemoryFreeClear(pRes->pRes);
}
+void ctgFreeJsonTagVal(void* val) {
+ if (NULL == val) {
+ return;
+ }
+
+ STagVal* pVal = (STagVal*)val;
+
+ if (TSDB_DATA_TYPE_JSON == pVal->type) {
+ taosMemoryFree(pVal->pData);
+ }
+}
+
void ctgFreeTaskRes(CTG_TASK_TYPE type, void** pRes) {
switch (type) {
case CTG_TASK_GET_QNODE:
@@ -516,16 +531,32 @@ void ctgFreeTaskRes(CTG_TASK_TYPE type, void** pRes) {
}
break;
}
+ case CTG_TASK_GET_USER: {
+ if (*pRes) {
+ SUserAuthRes* pAuth = (SUserAuthRes*)*pRes;
+ nodesDestroyNode(pAuth->pCond);
+ taosMemoryFreeClear(*pRes);
+ }
+ break;
+ }
case CTG_TASK_GET_TB_HASH:
case CTG_TASK_GET_DB_INFO:
case CTG_TASK_GET_INDEX_INFO:
case CTG_TASK_GET_UDF:
- case CTG_TASK_GET_USER:
case CTG_TASK_GET_SVR_VER:
case CTG_TASK_GET_TB_META: {
taosMemoryFreeClear(*pRes);
break;
}
+ case CTG_TASK_GET_TB_TAG: {
+ if (1 == taosArrayGetSize(*pRes)) {
+ taosArrayDestroyEx(*pRes, ctgFreeJsonTagVal);
+ } else {
+ taosArrayDestroy(*pRes);
+ }
+ *pRes = NULL;
+ break;
+ }
case CTG_TASK_GET_TB_META_BATCH: {
SArray* pArray = (SArray*)*pRes;
int32_t num = taosArrayGetSize(pArray);
@@ -679,6 +710,13 @@ void ctgFreeTaskCtx(SCtgTask* pTask) {
taosMemoryFreeClear(pTask->taskCtx);
break;
}
+ case CTG_TASK_GET_TB_TAG: {
+ SCtgTbTagCtx* taskCtx = (SCtgTbTagCtx*)pTask->taskCtx;
+ taosMemoryFreeClear(taskCtx->pName);
+ taosMemoryFreeClear(taskCtx->pVgInfo);
+ taosMemoryFreeClear(taskCtx);
+ break;
+ }
case CTG_TASK_GET_DB_VGROUP:
case CTG_TASK_GET_DB_CFG:
case CTG_TASK_GET_DB_INFO:
@@ -1336,57 +1374,75 @@ int32_t ctgChkSetTbAuthRes(SCatalog* pCtg, SCtgAuthReq* req, SCtgAuthRsp* res) {
STableMeta* pMeta = NULL;
SGetUserAuthRsp* pInfo = &req->authInfo;
SHashObj* pTbs = (AUTH_TYPE_READ == req->singleType) ? pInfo->readTbs : pInfo->writeTbs;
+ char* stbName = NULL;
- char tbFullName[TSDB_TABLE_FNAME_LEN];
- tNameExtractFullName(&req->pRawReq->tbName, tbFullName);
- char* pCond = taosHashGet(pTbs, tbFullName, strlen(tbFullName));
- if (pCond) {
- if (strlen(pCond) > 1) {
- CTG_ERR_RET(nodesStringToNode(pCond, &res->pRawRes->pCond));
+ char tbFName[TSDB_TABLE_FNAME_LEN];
+ char dbFName[TSDB_DB_FNAME_LEN];
+ tNameExtractFullName(&req->pRawReq->tbName, tbFName);
+ tNameGetFullDbName(&req->pRawReq->tbName, dbFName);
+
+ while (true) {
+ taosMemoryFreeClear(pMeta);
+
+ char* pCond = taosHashGet(pTbs, tbFName, strlen(tbFName));
+ if (pCond) {
+ if (strlen(pCond) > 1) {
+ CTG_ERR_JRET(nodesStringToNode(pCond, &res->pRawRes->pCond));
+ }
+
+ res->pRawRes->pass = true;
+ goto _return;
}
- res->pRawRes->pass = true;
- return TSDB_CODE_SUCCESS;
+ if (stbName) {
+ res->pRawRes->pass = false;
+ goto _return;
+ }
+
+ CTG_ERR_JRET(catalogGetCachedTableMeta(pCtg, &req->pRawReq->tbName, &pMeta));
+ if (NULL == pMeta) {
+ if (req->onlyCache) {
+ res->metaNotExists = true;
+ ctgDebug("db %s tb %s meta not in cache for auth", req->pRawReq->tbName.dbname, req->pRawReq->tbName.tname);
+ goto _return;
+ }
+
+ SCtgTbMetaCtx ctx = {0};
+ ctx.pName = (SName*)&req->pRawReq->tbName;
+ ctx.flag = CTG_FLAG_UNKNOWN_STB | CTG_FLAG_SYNC_OP;
+
+ CTG_ERR_JRET(ctgGetTbMeta(pCtg, req->pConn, &ctx, &pMeta));
+ }
+
+ if (TSDB_SUPER_TABLE == pMeta->tableType || TSDB_NORMAL_TABLE == pMeta->tableType) {
+ res->pRawRes->pass = false;
+ goto _return;
+ }
+
+ if (TSDB_CHILD_TABLE == pMeta->tableType) {
+ CTG_ERR_JRET(ctgGetCachedStbNameFromSuid(pCtg, dbFName, pMeta->suid, &stbName));
+ if (NULL == stbName) {
+ if (req->onlyCache) {
+ res->metaNotExists = true;
+ ctgDebug("suid %" PRIu64 " name not in cache for auth", pMeta->suid);
+ goto _return;
+ }
+
+ continue;
+ }
+
+ sprintf(tbFName, "%s.%s", dbFName, stbName);
+ continue;
+ }
+
+ ctgError("Invalid table type %d for %s", pMeta->tableType, tbFName);
+ CTG_ERR_JRET(TSDB_CODE_INVALID_PARA);
}
- res->pRawRes->pass = false;
-
- // CTG_ERR_RET(catalogGetCachedTableMeta(pCtg, &req->pRawReq->tbName, &pMeta));
- // if (NULL == pMeta) {
- // if (req->onlyCache) {
- // res->metaNotExists = true;
- // ctgDebug("db %s tb %s meta not in cache for auth", req->pRawReq->tbName.dbname, req->pRawReq->tbName.tname);
- // return TSDB_CODE_SUCCESS;
- // }
-
- // CTG_ERR_RET(catalogGetTableMeta(pCtg, req->pConn, &req->pRawReq->tbName, &pMeta));
- // }
-
- // if (TSDB_SUPER_TABLE == pMeta->tableType || TSDB_NORMAL_TABLE == pMeta->tableType) {
- // res->pRawRes->pass = false;
- // goto _return;
- // }
-
- // if (TSDB_CHILD_TABLE == pMeta->tableType) {
- // res->pRawRes->pass = true;
-
- // /*
- // char stbName[TSDB_TABLE_NAME_LEN] = {0};
- // CTG_ERR_JRET(ctgGetCachedStbNameFromSuid(pCtg, pMeta->suid, stbName));
- // if (0 == stbName[0]) {
- // if (req->onlyCache) {
- // res->notExists = true;
- // return TSDB_CODE_SUCCESS;
- // }
-
- // CTG_ERR_RET(catalogRefreshTableMeta(pCtg, req->pConn, &req->pRawReq->tbName, 0));
- // }
- // */
- // }
-
_return:
taosMemoryFree(pMeta);
+ taosMemoryFree(stbName);
CTG_RET(code);
}
@@ -1423,7 +1479,7 @@ int32_t ctgChkSetAuthRes(SCatalog* pCtg, SCtgAuthReq* req, SCtgAuthRsp* res) {
if (pInfo->readTbs && taosHashGetSize(pInfo->readTbs) > 0) {
req->singleType = AUTH_TYPE_READ;
CTG_ERR_RET(ctgChkSetTbAuthRes(pCtg, req, res));
- if (pRes->pass) {
+ if (pRes->pass || res->metaNotExists) {
return TSDB_CODE_SUCCESS;
}
}
@@ -1439,7 +1495,7 @@ int32_t ctgChkSetAuthRes(SCatalog* pCtg, SCtgAuthReq* req, SCtgAuthRsp* res) {
if (pInfo->writeTbs && taosHashGetSize(pInfo->writeTbs) > 0) {
req->singleType = AUTH_TYPE_WRITE;
CTG_ERR_RET(ctgChkSetTbAuthRes(pCtg, req, res));
- if (pRes->pass) {
+ if (pRes->pass || res->metaNotExists) {
return TSDB_CODE_SUCCESS;
}
}
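The rewritten ctgChkSetTbAuthRes above resolves table-level privileges for a child table by retrying the lookup with its super table's full name, fetched from the cache via the new ctgGetCachedStbNameFromSuid(). For illustration with hypothetical names (acct id 1, db db1, child table ct1 under super table st1), the lookup keys evolve as follows:

    tNameExtractFullName(&req->pRawReq->tbName, tbFName);  // tbFName = "1.db1.ct1", first key into readTbs/writeTbs
    tNameGetFullDbName(&req->pRawReq->tbName, dbFName);    // dbFName = "1.db1"
    // child table: resolve the cached super table name from the suid, then retry
    sprintf(tbFName, "%s.%s", dbFName, stbName);           // tbFName = "1.db1.st1", loop checks the stable privilege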
diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h
index 2e92f9e396..5a9f079c15 100644
--- a/source/libs/executor/inc/executil.h
+++ b/source/libs/executor/inc/executil.h
@@ -39,8 +39,6 @@
#define GET_RES_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t))
-#define GET_TASKID(_t) (((SExecTaskInfo*)(_t))->id.str)
-
typedef struct SGroupResInfo {
int32_t index;
SArray* pRows; // SArray
diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h
index d22a7460bb..a4f1e2ef94 100644
--- a/source/libs/executor/inc/executorInt.h
+++ b/source/libs/executor/inc/executorInt.h
@@ -12,14 +12,75 @@
* You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-
-#ifndef _TD_EXECUTOR_INT_H
-#define _TD_EXECUTOR_INT_H
+#ifndef TDENGINE_EXECUTORINT_H
+#define TDENGINE_EXECUTORINT_H
#ifdef __cplusplus
extern "C" {
#endif
+#include "os.h"
+#include "tcommon.h"
+#include "tlosertree.h"
+#include "tsort.h"
+#include "ttszip.h"
+#include "tvariant.h"
+
+#include "dataSinkMgt.h"
+#include "executil.h"
+#include "executor.h"
+#include "planner.h"
+#include "scalar.h"
+#include "taosdef.h"
+#include "tarray.h"
+#include "tfill.h"
+#include "thash.h"
+#include "tlockfree.h"
+#include "tmsg.h"
+#include "tpagedbuf.h"
+#include "tstream.h"
+#include "tstreamUpdate.h"
+
+#include "vnode.h"
+
+typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int32_t order);
+
+#define IS_VALID_SESSION_WIN(winInfo) ((winInfo).sessionWin.win.skey > 0)
+#define SET_SESSION_WIN_INVALID(winInfo) ((winInfo).sessionWin.win.skey = INT64_MIN)
+#define IS_INVALID_SESSION_WIN_KEY(winKey) ((winKey).win.skey <= 0)
+#define SET_SESSION_WIN_KEY_INVALID(pWinKey) ((pWinKey)->win.skey = INT64_MIN)
+
+/**
+ * If the number of generated results is greater than this value,
+ * the query will be halted and results returned to the client immediately.
+ */
+typedef struct SResultInfo { // TODO refactor
+ int64_t totalRows; // total generated result size in rows
+ int64_t totalBytes; // total results in bytes.
+ int32_t capacity; // capacity of current result output buffer
+ int32_t threshold; // result size threshold in rows.
+} SResultInfo;
+
+typedef struct STableQueryInfo {
+ TSKEY lastKey; // last check ts, todo remove it later
+ SResultRowPosition pos; // current active time window
+} STableQueryInfo;
+
+typedef struct SLimit {
+ int64_t limit;
+ int64_t offset;
+} SLimit;
+
+typedef struct STableScanAnalyzeInfo SFileBlockLoadRecorder;
+
+enum {
+ STREAM_RECOVER_STEP__NONE = 0,
+ STREAM_RECOVER_STEP__PREPARE1,
+ STREAM_RECOVER_STEP__PREPARE2,
+ STREAM_RECOVER_STEP__SCAN1,
+ STREAM_RECOVER_STEP__SCAN2,
+};
+
extern int32_t exchangeObjRefPool;
typedef struct {
@@ -29,9 +90,584 @@ typedef struct {
int32_t bytes;
} SGroupKeys, SStateKeys;
+typedef struct {
+ char* tablename;
+ char* dbname;
+ int32_t tversion;
+ SSchemaWrapper* sw;
+ SSchemaWrapper* qsw;
+} SSchemaInfo;
+
+typedef struct SExchangeOpStopInfo {
+ int32_t operatorType;
+ int64_t refId;
+} SExchangeOpStopInfo;
+
+typedef struct SExprSupp {
+ SExprInfo* pExprInfo;
+  int32_t numOfExprs; // the number of scalar expressions in the group operator
+ SqlFunctionCtx* pCtx;
+ int32_t* rowEntryInfoOffset; // offset value for each row result cell info
+ SFilterInfo* pFilterInfo;
+} SExprSupp;
+
+typedef enum {
+ EX_SOURCE_DATA_NOT_READY = 0x1,
+ EX_SOURCE_DATA_READY = 0x2,
+ EX_SOURCE_DATA_EXHAUSTED = 0x3,
+} EX_SOURCE_STATUS;
+
+#define COL_MATCH_FROM_COL_ID 0x1
+#define COL_MATCH_FROM_SLOT_ID 0x2
+
+typedef struct SLoadRemoteDataInfo {
+ uint64_t totalSize; // total load bytes from remote
+ uint64_t totalRows; // total number of rows
+ uint64_t totalElapsed; // total elapsed time
+} SLoadRemoteDataInfo;
+
+typedef struct SLimitInfo {
+ SLimit limit;
+ SLimit slimit;
+ uint64_t currentGroupId;
+ int64_t remainGroupOffset;
+ int64_t numOfOutputGroups;
+ int64_t remainOffset;
+ int64_t numOfOutputRows;
+} SLimitInfo;
+
+typedef struct SExchangeInfo {
+ SArray* pSources;
+ SArray* pSourceDataInfo;
+ tsem_t ready;
+ void* pTransporter;
+
+ // SArray, result block list, used to keep the multi-block that
+ // passed by downstream operator
+ SArray* pResultBlockList;
+  SArray* pRecycledBlocks; // a pool of small data blocks, to avoid repeatedly creating and destroying them
+ SSDataBlock* pDummyBlock; // dummy block, not keep data
+ bool seqLoadData; // sequential load data or not, false by default
+ int32_t current;
+ SLoadRemoteDataInfo loadInfo;
+ uint64_t self;
+ SLimitInfo limitInfo;
+ int64_t openedTs; // start exec time stamp, todo: move to SLoadRemoteDataInfo
+} SExchangeInfo;
+
+typedef struct SScanInfo {
+ int32_t numOfAsc;
+ int32_t numOfDesc;
+} SScanInfo;
+
+typedef struct SSampleExecInfo {
+ double sampleRatio; // data block sample ratio, 1 by default
+ uint32_t seed; // random seed value
+} SSampleExecInfo;
+
+enum {
+ TABLE_SCAN__TABLE_ORDER = 1,
+ TABLE_SCAN__BLOCK_ORDER = 2,
+};
+
+typedef struct SAggSupporter {
+ SSHashObj* pResultRowHashTable; // quick locate the window object for each result
+ char* keyBuf; // window key buffer
+ SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
+ int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+ int32_t currentPageId; // current write page id
+} SAggSupporter;
+
+typedef struct {
+ // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if
+ // current data block needs to be loaded.
+ SInterval interval;
+ SAggSupporter* pAggSup;
+ SExprSupp* pExprSup; // expr supporter of aggregate operator
+} SAggOptrPushDownInfo;
+
+typedef struct STableMetaCacheInfo {
+ SLRUCache* pTableMetaEntryCache; // 100 by default
+ uint64_t metaFetch;
+ uint64_t cacheHit;
+} STableMetaCacheInfo;
+
+typedef struct STableScanBase {
+ STsdbReader* dataReader;
+ SFileBlockLoadRecorder readRecorder;
+ SQueryTableDataCond cond;
+ SAggOptrPushDownInfo pdInfo;
+ SColMatchInfo matchInfo;
+ SReadHandle readHandle;
+ SExprSupp pseudoSup;
+ STableMetaCacheInfo metaCache;
+ int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
+ int32_t dataBlockLoadFlag;
+ SLimitInfo limitInfo;
+  // more than one table list may exist in one task when only one vnode is involved.
+ STableListInfo* pTableListInfo;
+} STableScanBase;
+
+typedef struct STableScanInfo {
+ STableScanBase base;
+ SScanInfo scanInfo;
+ int32_t scanTimes;
+ SSDataBlock* pResBlock;
+ SSampleExecInfo sample; // sample execution info
+ int32_t currentGroupId;
+ int32_t currentTable;
+ int8_t scanMode;
+ int8_t assignBlockUid;
+ bool hasGroupByTag;
+ bool countOnly;
+} STableScanInfo;
+
+typedef struct STableMergeScanInfo {
+ int32_t tableStartIndex;
+ int32_t tableEndIndex;
+ bool hasGroupId;
+ uint64_t groupId;
+ SArray* queryConds; // array of queryTableDataCond
+ STableScanBase base;
+ int32_t bufPageSize;
+ uint32_t sortBufSize; // max buffer size for in-memory sort
+ SArray* pSortInfo;
+ SSortHandle* pSortHandle;
+ SSDataBlock* pSortInputBlock;
+ int64_t startTs; // sort start time
+ SArray* sortSourceParams;
+ SLimitInfo limitInfo;
+ int64_t numOfRows;
+ SScanInfo scanInfo;
+ int32_t scanTimes;
+ SSDataBlock* pResBlock;
+ SSampleExecInfo sample; // sample execution info
+ SSortExecInfo sortExecInfo;
+} STableMergeScanInfo;
+
+typedef struct STagScanInfo {
+ SColumnInfo* pCols;
+ SSDataBlock* pRes;
+ SColMatchInfo matchInfo;
+ int32_t curPos;
+ SReadHandle readHandle;
+ STableListInfo* pTableListInfo;
+} STagScanInfo;
+
+typedef enum EStreamScanMode {
+ STREAM_SCAN_FROM_READERHANDLE = 1,
+ STREAM_SCAN_FROM_RES,
+ STREAM_SCAN_FROM_UPDATERES,
+ STREAM_SCAN_FROM_DELETE_DATA,
+ STREAM_SCAN_FROM_DATAREADER_RETRIEVE,
+ STREAM_SCAN_FROM_DATAREADER_RANGE,
+} EStreamScanMode;
+
+enum {
+ PROJECT_RETRIEVE_CONTINUE = 0x1,
+ PROJECT_RETRIEVE_DONE = 0x2,
+};
+
+typedef struct SStreamAggSupporter {
+ int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+ SSDataBlock* pScanBlock;
+ SStreamState* pState;
+ int64_t gap; // stream session window gap
+ SqlFunctionCtx* pDummyCtx; // for combine
+ SSHashObj* pResultRows;
+ int32_t stateKeySize;
+ int16_t stateKeyType;
+ SDiskbasedBuf* pResultBuf;
+} SStreamAggSupporter;
+
+typedef struct SWindowSupporter {
+ SStreamAggSupporter* pStreamAggSup;
+ int64_t gap;
+ uint16_t parentType;
+ SAggSupporter* pIntervalAggSup;
+} SWindowSupporter;
+
+typedef struct SPartitionBySupporter {
+ SArray* pGroupCols; // group by columns, SArray
+ SArray* pGroupColVals; // current group column values, SArray
+ char* keyBuf; // group by keys for hash
+ bool needCalc; // partition by column
+} SPartitionBySupporter;
+
+typedef struct SPartitionDataInfo {
+ uint64_t groupId;
+ char* tbname;
+ SArray* tags;
+ SArray* rowIds;
+} SPartitionDataInfo;
+
+typedef struct STimeWindowAggSupp {
+ int8_t calTrigger;
+ int8_t calTriggerSaved;
+ int64_t deleteMark;
+ int64_t deleteMarkSaved;
+ int64_t waterMark;
+ TSKEY maxTs;
+ TSKEY minTs;
+ SColumnInfoData timeWindowData; // query time window info for scalar function execution.
+} STimeWindowAggSupp;
+
+typedef struct SStreamScanInfo {
+ SExprInfo* pPseudoExpr;
+ int32_t numOfPseudoExpr;
+ SExprSupp tbnameCalSup;
+ SExprSupp tagCalSup;
+ int32_t primaryTsIndex; // primary time stamp slot id
+ SReadHandle readHandle;
+ SInterval interval; // if the upstream is an interval operator, the interval info is also kept here.
+ SColMatchInfo matchInfo;
+
+ SArray* pBlockLists; // multiple SSDatablock.
+ SSDataBlock* pRes; // result SSDataBlock
+ SSDataBlock* pUpdateRes; // update SSDataBlock
+ int32_t updateResIndex;
+ int32_t blockType; // current block type
+  int32_t validBlockIndex; // has the current data block been returned?
+ uint64_t numOfExec; // execution times
+ STqReader* tqReader;
+
+ uint64_t groupId;
+ SUpdateInfo* pUpdateInfo;
+
+ EStreamScanMode scanMode;
+ struct SOperatorInfo* pStreamScanOp;
+ struct SOperatorInfo* pTableScanOp;
+ SArray* childIds;
+ SWindowSupporter windowSup;
+ SPartitionBySupporter partitionSup;
+ SExprSupp* pPartScalarSup;
+ bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA.
+ int32_t scanWinIndex; // for state operator
+ int32_t pullDataResIndex;
+ SSDataBlock* pPullDataRes; // pull data SSDataBlock
+ SSDataBlock* pDeleteDataRes; // delete data SSDataBlock
+ int32_t deleteDataIndex;
+ STimeWindow updateWin;
+ STimeWindowAggSupp twAggSup;
+ SSDataBlock* pUpdateDataRes;
+ // status for tmq
+ SNodeList* pGroupTags;
+ SNode* pTagCond;
+ SNode* pTagIndexCond;
+
+ // recover
+ int32_t blockRecoverContiCnt;
+ int32_t blockRecoverTotCnt;
+ SSDataBlock* pRecoverRes;
+
+ SSDataBlock* pCreateTbRes;
+ int8_t igCheckUpdate;
+ int8_t igExpired;
+} SStreamScanInfo;
+
+typedef struct {
+ SVnode* vnode;
+ SSDataBlock pRes; // result SSDataBlock
+ STsdbReader* dataReader;
+ SSnapContext* sContext;
+ STableListInfo* pTableListInfo;
+} SStreamRawScanInfo;
+
+typedef struct STableCountScanSupp {
+ int16_t dbNameSlotId;
+ int16_t stbNameSlotId;
+ int16_t tbCountSlotId;
+ bool groupByDbName;
+ bool groupByStbName;
+ char dbNameFilter[TSDB_DB_NAME_LEN];
+ char stbNameFilter[TSDB_TABLE_NAME_LEN];
+} STableCountScanSupp;
+
+typedef struct SOptrBasicInfo {
+ SResultRowInfo resultRowInfo;
+ SSDataBlock* pRes;
+ bool mergeResultBlock;
+} SOptrBasicInfo;
+
+typedef struct SIntervalAggOperatorInfo {
+ SOptrBasicInfo binfo; // basic info
+ SAggSupporter aggSup; // aggregate supporter
+  SExprSupp scalarSupp; // supporter for performing scalar functions
+ SGroupResInfo groupResInfo; // multiple results build supporter
+ SInterval interval; // interval info
+ int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator.
+ STimeWindow win; // query time range
+ bool timeWindowInterpo; // interpolation needed or not
+ SArray* pInterpCols; // interpolation columns
+ int32_t resultTsOrder; // result timestamp order
+ int32_t inputOrder; // input data ts order
+ EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
+ STimeWindowAggSupp twAggSup;
+ SArray* pPrevValues; // SArray used to keep the previous not null value for interpolation.
+} SIntervalAggOperatorInfo;
+
+typedef struct SMergeAlignedIntervalAggOperatorInfo {
+ SIntervalAggOperatorInfo* intervalAggOperatorInfo;
+
+ uint64_t groupId; // current groupId
+ int64_t curTs; // current ts
+ SSDataBlock* prefetchedBlock;
+ SResultRow* pResultRow;
+} SMergeAlignedIntervalAggOperatorInfo;
+
+typedef struct SStreamIntervalOperatorInfo {
+ SOptrBasicInfo binfo; // basic info
+ SAggSupporter aggSup; // aggregate supporter
+  SExprSupp scalarSupp; // supporter for performing scalar functions
+ SGroupResInfo groupResInfo; // multiple results build supporter
+ SInterval interval; // interval info
+ int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator.
+ STimeWindowAggSupp twAggSup;
+ bool invertible;
+ bool ignoreExpiredData;
+ bool ignoreExpiredDataSaved;
+ SArray* pDelWins; // SWinRes
+ int32_t delIndex;
+ SSDataBlock* pDelRes;
+ SPhysiNode* pPhyNode; // create new child
+ SHashObj* pPullDataMap;
+ SArray* pPullWins; // SPullWindowInfo
+ int32_t pullIndex;
+ SSDataBlock* pPullDataRes;
+ bool isFinal;
+ SArray* pChildren;
+ SStreamState* pState;
+ SWinKey delKey;
+ uint64_t numOfDatapack;
+ SArray* pUpdated;
+ SSHashObj* pUpdatedMap;
+ int64_t dataVersion;
+} SStreamIntervalOperatorInfo;
+
+typedef struct SDataGroupInfo {
+ uint64_t groupId;
+ int64_t numOfRows;
+ SArray* pPageList;
+} SDataGroupInfo;
+
+typedef struct SWindowRowsSup {
+ STimeWindow win;
+ TSKEY prevTs;
+ int32_t startRowIndex;
+ int32_t numOfRows;
+ uint64_t groupId;
+} SWindowRowsSup;
+
+typedef struct SResultWindowInfo {
+ void* pOutputBuf;
+ SSessionKey sessionWin;
+ bool isOutput;
+} SResultWindowInfo;
+
+typedef struct SStreamSessionAggOperatorInfo {
+ SOptrBasicInfo binfo;
+ SStreamAggSupporter streamAggSup;
+  SExprSupp scalarSupp; // supporter for performing scalar functions
+ SGroupResInfo groupResInfo;
+ int32_t primaryTsIndex; // primary timestamp slot id
+ int32_t endTsIndex; // window end timestamp slot id
+ int32_t order; // current SSDataBlock scan order
+ STimeWindowAggSupp twAggSup;
+ SSDataBlock* pWinBlock; // window result
+ SSDataBlock* pDelRes; // delete result
+ SSDataBlock* pUpdateRes; // update window
+ bool returnUpdate;
+ SSHashObj* pStDeleted;
+ void* pDelIterator;
+ SArray* pChildren; // cache for children's result; final stream operator
+ SPhysiNode* pPhyNode; // create new child
+ bool isFinal;
+ bool ignoreExpiredData;
+ bool ignoreExpiredDataSaved;
+ SArray* pUpdated;
+ SSHashObj* pStUpdated;
+ int64_t dataVersion;
+} SStreamSessionAggOperatorInfo;
+
+typedef struct SStreamStateAggOperatorInfo {
+ SOptrBasicInfo binfo;
+ SStreamAggSupporter streamAggSup;
+  SExprSupp scalarSupp; // supporter for performing scalar functions
+ SGroupResInfo groupResInfo;
+ int32_t primaryTsIndex; // primary timestamp slot id
+ STimeWindowAggSupp twAggSup;
+ SColumn stateCol;
+ SSDataBlock* pDelRes;
+ SSHashObj* pSeDeleted;
+ void* pDelIterator;
+ SArray* pChildren; // cache for children's results
+ bool ignoreExpiredData;
+ bool ignoreExpiredDataSaved;
+ SArray* pUpdated;
+ SSHashObj* pSeUpdated;
+ int64_t dataVersion;
+} SStreamStateAggOperatorInfo;
+
+typedef struct SStreamPartitionOperatorInfo {
+ SOptrBasicInfo binfo;
+ SPartitionBySupporter partitionSup;
+ SExprSupp scalarSup;
+ SExprSupp tbnameCalSup;
+ SExprSupp tagCalSup;
+ SHashObj* pPartitions;
+ void* parIte;
+ void* pTbNameIte;
+ SSDataBlock* pInputDataBlock;
+ int32_t tsColIndex;
+ SSDataBlock* pDelRes;
+ SSDataBlock* pCreateTbRes;
+} SStreamPartitionOperatorInfo;
+
+typedef struct SStreamFillSupporter {
+ int32_t type; // fill type
+ SInterval interval;
+ SResultRowData prev;
+ SResultRowData cur;
+ SResultRowData next;
+ SResultRowData nextNext;
+ SFillColInfo* pAllColInfo; // fill exprs and non-fill exprs
+ SExprSupp notFillExprSup;
+ int32_t numOfAllCols; // number of all exprs, including the tag columns
+ int32_t numOfFillCols;
+ int32_t numOfNotFillCols;
+ int32_t rowSize;
+ SSHashObj* pResMap;
+ bool hasDelete;
+} SStreamFillSupporter;
+
+typedef struct SStreamFillOperatorInfo {
+ SStreamFillSupporter* pFillSup;
+ SSDataBlock* pRes;
+ SSDataBlock* pSrcBlock;
+ int32_t srcRowIndex;
+ SSDataBlock* pSrcDelBlock;
+ int32_t srcDelRowIndex;
+ SSDataBlock* pDelRes;
+ SColMatchInfo matchInfo;
+ int32_t primaryTsCol;
+ int32_t primarySrcSlotId;
+ SStreamFillInfo* pFillInfo;
+} SStreamFillOperatorInfo;
+
+#define OPTR_IS_OPENED(_optr) (((_optr)->status & OP_OPENED) == OP_OPENED)
+#define OPTR_SET_OPENED(_optr) ((_optr)->status |= OP_OPENED)
+
+SSchemaWrapper* extractQueriedColumnSchema(SScanPhysiNode* pScanNode);
+int32_t initQueriedTableSchemaInfo(SReadHandle* pHandle, SScanPhysiNode* pScanNode, const char* dbName, SExecTaskInfo* pTaskInfo);
+void cleanupQueriedTableScanInfo(SSchemaInfo* pSchemaInfo);
+
+void initBasicInfo(SOptrBasicInfo* pInfo, SSDataBlock* pBlock);
+void cleanupBasicInfo(SOptrBasicInfo* pInfo);
+
+int32_t initExprSupp(SExprSupp* pSup, SExprInfo* pExprInfo, int32_t numOfExpr);
+void cleanupExprSupp(SExprSupp* pSup);
+
+void destroyExprInfo(SExprInfo* pExpr, int32_t numOfExprs);
+
+int32_t initAggSup(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInfo, int32_t numOfCols, size_t keyBufSize,
+ const char* pkey, void* pState);
+void cleanupAggSup(SAggSupporter* pAggSup);
+
+void initResultSizeInfo(SResultInfo* pResultInfo, int32_t numOfRows);
+
+void doBuildStreamResBlock(struct SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo,
+ SDiskbasedBuf* pBuf);
+void doBuildResultDatablock(struct SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo,
+ SDiskbasedBuf* pBuf);
+
+bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo);
+bool hasSlimitOffsetInfo(SLimitInfo* pLimitInfo);
+void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimitInfo);
+void resetLimitInfoForNextGroup(SLimitInfo* pLimitInfo);
+bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);
+
+void applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfoData* pTimeWindowData,
+ int32_t offset, int32_t forwardStep, int32_t numOfTotal, int32_t numOfOutput);
+
+int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, SArray* pColList, char** pNextStart);
+void updateLoadRemoteInfo(SLoadRemoteDataInfo* pInfo, int64_t numOfRows, int32_t dataLen, int64_t startTs,
+ struct SOperatorInfo* pOperator);
+
+STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInterval* pInterval, int32_t order);
+int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz);
+
+extern void doDestroyExchangeOperatorInfo(void* param);
+
+void doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pColMatchInfo);
+int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int32_t numOfExpr, SSDataBlock* pBlock,
+ int32_t rows, const char* idStr, STableMetaCacheInfo* pCache);
+
+void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle);
+void setTbNameColData(const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, int32_t functionId, const char* name);
+
+void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset);
+void clearResultRowInitFlag(SqlFunctionCtx* pCtx, int32_t numOfOutput);
+
+SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, char* pData,
+ int16_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
+ bool isIntervalQuery, SAggSupporter* pSup, bool keepGroup);
+
+int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock* pSrcBlock, SqlFunctionCtx* pCtx,
+ int32_t numOfOutput, SArray* pPseudoList);
+
+void setInputDataBlock(SExprSupp* pExprSupp, SSDataBlock* pBlock, int32_t order, int32_t scanFlag, bool createDummyCol);
+
+int32_t checkForQueryBuf(size_t numOfTables);
+
+int32_t createDataSinkParam(SDataSinkNode* pNode, void** pParam, SExecTaskInfo* pTask, SReadHandle* readHandle);
+
+STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowInfo, int64_t ts, SInterval* pInterval,
+ int32_t order);
+int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn, int32_t startPos, TSKEY ekey,
+ __block_search_fn_t searchFn, STableQueryInfo* item, int32_t order);
+int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
+SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize);
+void getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endTs, uint64_t groupId, SSessionKey* pKey);
+bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap);
+bool functionNeedToExecute(SqlFunctionCtx* pCtx);
+bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup);
+bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup);
+bool isDeletedStreamWindow(STimeWindow* pWin, uint64_t groupId, SStreamState* pState, STimeWindowAggSupp* pTwSup);
+void appendOneRowToStreamSpecialBlock(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid,
+ uint64_t* pGp, void* pTbName);
+uint64_t calGroupIdByData(SPartitionBySupporter* pParSup, SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t rowId);
+
+int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, SExprSupp* pSup,
+ SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);
+
+bool groupbyTbname(SNodeList* pGroupList);
+int32_t buildDataBlockFromGroupRes(struct SOperatorInfo* pOperator, SStreamState* pState, SSDataBlock* pBlock, SExprSupp* pSup,
+ SGroupResInfo* pGroupResInfo);
+int32_t saveSessionDiscBuf(SStreamState* pState, SSessionKey* key, void* buf, int32_t size);
+int32_t buildSessionResultDataBlock(struct SOperatorInfo* pOperator, SStreamState* pState, SSDataBlock* pBlock,
+ SExprSupp* pSup, SGroupResInfo* pGroupResInfo);
+int32_t setOutputBuf(SStreamState* pState, STimeWindow* win, SResultRow** pResult, int64_t tableGroupId,
+ SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset, SAggSupporter* pAggSup);
+int32_t releaseOutputBuf(SStreamState* pState, SWinKey* pKey, SResultRow* pResult);
+int32_t saveOutputBuf(SStreamState* pState, SWinKey* pKey, SResultRow* pResult, int32_t resSize);
+void getNextIntervalWindow(SInterval* pInterval, STimeWindow* tw, int32_t order);
+int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey, int32_t pos, int32_t order,
+ int64_t* pData);
+void appendCreateTableRow(SStreamState* pState, SExprSupp* pTableSup, SExprSupp* pTagSup, uint64_t groupId,
+ SSDataBlock* pSrcBlock, int32_t rowId, SSDataBlock* pDestBlock);
+
+SSDataBlock* buildCreateTableBlock(SExprSupp* tbName, SExprSupp* tag);
+SExprInfo* createExpr(SNodeList* pNodeList, int32_t* numOfExprs);
+
+void copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResultRow* pRow, SqlFunctionCtx* pCtx,
+ SSDataBlock* pBlock, const int32_t* rowEntryOffset, SExecTaskInfo* pTaskInfo);
+void doUpdateNumOfRows(SqlFunctionCtx* pCtx, SResultRow* pRow, int32_t numOfExprs, const int32_t* rowEntryOffset);
+void doClearBufferedBlocks(SStreamScanInfo* pInfo);
+
uint64_t calcGroupId(char* pData, int32_t len);
+
#ifdef __cplusplus
}
#endif
-#endif /*_TD_EXECUTOR_INT_H*/
\ No newline at end of file
+#endif // TDENGINE_EXECUTORINT_H
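
To show how the init/cleanup pairs declared in this header are typically combined when an operator is constructed, here is a minimal sketch. It assumes, as the signatures above suggest, that initAggSup also prepares the SExprSupp it receives; the names initMyOperatorSupport/cleanupMyOperatorSupport and the key-buffer size are illustrative only, not part of this change.

#include "executorInt.h"

// Hypothetical helper pairing the setup/teardown routines declared in executorInt.h.
static int32_t initMyOperatorSupport(SExprSupp* pExprSup, SAggSupporter* pAggSup, SOptrBasicInfo* pBasicInfo,
                                     SResultInfo* pResultInfo, SExprInfo* pExprInfo, int32_t numOfExprs,
                                     SSDataBlock* pResBlock, const char* pKey) {
  initResultSizeInfo(pResultInfo, 4096);    // default output capacity, in rows
  size_t  keyBufSize = sizeof(int64_t) * 3; // assumed key-buffer size, for illustration only
  int32_t code = initAggSup(pExprSup, pAggSup, pExprInfo, numOfExprs, keyBufSize, pKey, NULL /* no stream state */);
  if (code != TSDB_CODE_SUCCESS) {
    return code;
  }
  initBasicInfo(pBasicInfo, pResBlock);     // bind the result block to the operator's basic info
  return TSDB_CODE_SUCCESS;
}

// Teardown mirrors the setup above.
static void cleanupMyOperatorSupport(SExprSupp* pExprSup, SAggSupporter* pAggSup, SOptrBasicInfo* pBasicInfo) {
  cleanupBasicInfo(pBasicInfo);
  cleanupAggSup(pAggSup);
  cleanupExprSupp(pExprSup);
}
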
diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h
deleted file mode 100644
index 85424fd7de..0000000000
--- a/source/libs/executor/inc/executorimpl.h
+++ /dev/null
@@ -1,885 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-#ifndef TDENGINE_EXECUTORIMPL_H
-#define TDENGINE_EXECUTORIMPL_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "os.h"
-#include "tcommon.h"
-#include "tlosertree.h"
-#include "tsort.h"
-#include "ttszip.h"
-#include "tvariant.h"
-
-#include "dataSinkMgt.h"
-#include "executil.h"
-#include "executor.h"
-#include "planner.h"
-#include "scalar.h"
-#include "taosdef.h"
-#include "tarray.h"
-#include "tfill.h"
-#include "thash.h"
-#include "tlockfree.h"
-#include "tmsg.h"
-#include "tpagedbuf.h"
-#include "tstream.h"
-#include "tstreamUpdate.h"
-
-#include "executorInt.h"
-#include "vnode.h"
-
-typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int32_t order);
-
-#define IS_VALID_SESSION_WIN(winInfo) ((winInfo).sessionWin.win.skey > 0)
-#define SET_SESSION_WIN_INVALID(winInfo) ((winInfo).sessionWin.win.skey = INT64_MIN)
-#define IS_INVALID_SESSION_WIN_KEY(winKey) ((winKey).win.skey <= 0)
-#define SET_SESSION_WIN_KEY_INVALID(pWinKey) ((pWinKey)->win.skey = INT64_MIN)
-
-enum {
- // when this task starts to execute, this status will set
- TASK_NOT_COMPLETED = 0x1u,
-
- /* Task is over
- * 1. this status is used in one row result query process, e.g., count/sum/first/last/ avg...etc.
- * 2. when all data within queried time window, it is also denoted as query_completed
- */
- TASK_COMPLETED = 0x2u,
-};
-
-/**
- * If the number of generated results is greater than this value,
- * query query will be halt and return results to client immediate.
- */
-typedef struct SResultInfo { // TODO refactor
- int64_t totalRows; // total generated result size in rows
- int64_t totalBytes; // total results in bytes.
- int32_t capacity; // capacity of current result output buffer
- int32_t threshold; // result size threshold in rows.
-} SResultInfo;
-
-typedef struct STableQueryInfo {
- TSKEY lastKey; // last check ts, todo remove it later
- SResultRowPosition pos; // current active time window
-} STableQueryInfo;
-
-typedef struct SLimit {
- int64_t limit;
- int64_t offset;
-} SLimit;
-
-typedef struct STableScanAnalyzeInfo SFileBlockLoadRecorder;
-
-typedef struct STaskCostInfo {
- int64_t created;
- int64_t start;
- uint64_t elapsedTime;
- double extractListTime;
- double groupIdMapTime;
- SFileBlockLoadRecorder* pRecoder;
-} STaskCostInfo;
-
-typedef struct SOperatorCostInfo {
- double openCost;
- double totalCost;
-} SOperatorCostInfo;
-
-struct SOperatorInfo;
-
-typedef int32_t (*__optr_encode_fn_t)(struct SOperatorInfo* pOperator, char** result, int32_t* length);
-typedef int32_t (*__optr_decode_fn_t)(struct SOperatorInfo* pOperator, char* result);
-
-typedef int32_t (*__optr_open_fn_t)(struct SOperatorInfo* pOptr);
-typedef SSDataBlock* (*__optr_fn_t)(struct SOperatorInfo* pOptr);
-typedef void (*__optr_close_fn_t)(void* param);
-typedef int32_t (*__optr_explain_fn_t)(struct SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len);
-typedef int32_t (*__optr_reqBuf_fn_t)(struct SOperatorInfo* pOptr);
-
-typedef struct STaskIdInfo {
- uint64_t queryId; // this is also a request id
- uint64_t subplanId;
- uint64_t templateId;
- char* str;
- int32_t vgId;
-} STaskIdInfo;
-
-enum {
- STREAM_RECOVER_STEP__NONE = 0,
- STREAM_RECOVER_STEP__PREPARE1,
- STREAM_RECOVER_STEP__PREPARE2,
- STREAM_RECOVER_STEP__SCAN1,
- STREAM_RECOVER_STEP__SCAN2,
-};
-
-typedef struct {
- STqOffsetVal currentOffset; // for tmq
- SMqMetaRsp metaRsp; // for tmq fetching meta
- int64_t snapshotVer;
- SPackedData submit;
- SSchemaWrapper* schema;
- char tbName[TSDB_TABLE_NAME_LEN];
- int8_t recoverStep;
- int8_t recoverScanFinished;
- SQueryTableDataCond tableCond;
- int64_t fillHistoryVer1;
- int64_t fillHistoryVer2;
- SStreamState* pState;
- int64_t dataVersion;
- int64_t checkPointId;
-} SStreamTaskInfo;
-
-typedef struct {
- char* tablename;
- char* dbname;
- int32_t tversion;
- SSchemaWrapper* sw;
- SSchemaWrapper* qsw;
-} SSchemaInfo;
-
-typedef struct SExchangeOpStopInfo {
- int32_t operatorType;
- int64_t refId;
-} SExchangeOpStopInfo;
-
-typedef struct STaskStopInfo {
- SRWLatch lock;
- SArray* pStopInfo;
-} STaskStopInfo;
-
-struct SExecTaskInfo {
- STaskIdInfo id;
- uint32_t status;
- STimeWindow window;
- STaskCostInfo cost;
- int64_t owner; // if it is in execution
- int32_t code;
- int32_t qbufQuota; // total available buffer (in KB) during execution query
- int64_t version; // used for stream to record wal version, why not move to sschemainfo
- SStreamTaskInfo streamInfo;
- SSchemaInfo schemaInfo;
- const char* sql; // query sql string
- jmp_buf env; // jump to this position when error happens.
- EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
- SSubplan* pSubplan;
- struct SOperatorInfo* pRoot;
- SLocalFetch localFetch;
- SArray* pResultBlockList; // result block list
- STaskStopInfo stopInfo;
- SRWLatch lock; // secure the access of STableListInfo
-};
-
-enum {
- OP_NOT_OPENED = 0x0,
- OP_OPENED = 0x1,
- OP_RES_TO_RETURN = 0x5,
- OP_EXEC_DONE = 0x9,
-};
-
-typedef struct SOperatorFpSet {
- __optr_open_fn_t _openFn; // DO NOT invoke this function directly
- __optr_fn_t getNextFn;
- __optr_fn_t cleanupFn; // call this function to release the allocated resources ASAP
- __optr_close_fn_t closeFn;
- __optr_reqBuf_fn_t reqBufFn; // total used buffer for blocking operator
- __optr_encode_fn_t encodeResultRow;
- __optr_decode_fn_t decodeResultRow;
- __optr_explain_fn_t getExplainFn;
-} SOperatorFpSet;
-
-typedef struct SExprSupp {
- SExprInfo* pExprInfo;
- int32_t numOfExprs; // the number of scalar expression in group operator
- SqlFunctionCtx* pCtx;
- int32_t* rowEntryInfoOffset; // offset value for each row result cell info
- SFilterInfo* pFilterInfo;
-} SExprSupp;
-
-typedef struct SOperatorInfo {
- uint16_t operatorType;
- int16_t resultDataBlockId;
- bool blocking; // block operator or not
- uint8_t status; // denote if current operator is completed
- char* name; // name, for debug purpose
- void* info; // extension attribution
- SExprSupp exprSupp;
- SExecTaskInfo* pTaskInfo;
- SOperatorCostInfo cost;
- SResultInfo resultInfo;
- struct SOperatorInfo** pDownstream; // downstram pointer list
- int32_t numOfDownstream; // number of downstream. The value is always ONE expect for join operator
- SOperatorFpSet fpSet;
-} SOperatorInfo;
-
-typedef enum {
- EX_SOURCE_DATA_NOT_READY = 0x1,
- EX_SOURCE_DATA_READY = 0x2,
- EX_SOURCE_DATA_EXHAUSTED = 0x3,
-} EX_SOURCE_STATUS;
-
-#define COL_MATCH_FROM_COL_ID 0x1
-#define COL_MATCH_FROM_SLOT_ID 0x2
-
-typedef struct SLoadRemoteDataInfo {
- uint64_t totalSize; // total load bytes from remote
- uint64_t totalRows; // total number of rows
- uint64_t totalElapsed; // total elapsed time
-} SLoadRemoteDataInfo;
-
-typedef struct SLimitInfo {
- SLimit limit;
- SLimit slimit;
- uint64_t currentGroupId;
- int64_t remainGroupOffset;
- int64_t numOfOutputGroups;
- int64_t remainOffset;
- int64_t numOfOutputRows;
-} SLimitInfo;
-
-typedef struct SExchangeInfo {
- SArray* pSources;
- SArray* pSourceDataInfo;
- tsem_t ready;
- void* pTransporter;
-
- // SArray, result block list, used to keep the multi-block that
- // passed by downstream operator
- SArray* pResultBlockList;
- SArray* pRecycledBlocks; // build a pool for small data block to avoid to repeatly create and then destroy.
- SSDataBlock* pDummyBlock; // dummy block, not keep data
- bool seqLoadData; // sequential load data or not, false by default
- int32_t current;
- SLoadRemoteDataInfo loadInfo;
- uint64_t self;
- SLimitInfo limitInfo;
- int64_t openedTs; // start exec time stamp, todo: move to SLoadRemoteDataInfo
-} SExchangeInfo;
-
-typedef struct SScanInfo {
- int32_t numOfAsc;
- int32_t numOfDesc;
-} SScanInfo;
-
-typedef struct SSampleExecInfo {
- double sampleRatio; // data block sample ratio, 1 by default
- uint32_t seed; // random seed value
-} SSampleExecInfo;
-
-enum {
- TABLE_SCAN__TABLE_ORDER = 1,
- TABLE_SCAN__BLOCK_ORDER = 2,
-};
-
-typedef struct SAggSupporter {
- SSHashObj* pResultRowHashTable; // quick locate the window object for each result
- char* keyBuf; // window key buffer
- SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
- int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
- int32_t currentPageId; // current write page id
-} SAggSupporter;
-
-typedef struct {
- // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if
- // current data block needs to be loaded.
- SInterval interval;
- SAggSupporter* pAggSup;
- SExprSupp* pExprSup; // expr supporter of aggregate operator
-} SAggOptrPushDownInfo;
-
-typedef struct STableMetaCacheInfo {
- SLRUCache* pTableMetaEntryCache; // 100 by default
- uint64_t metaFetch;
- uint64_t cacheHit;
-} STableMetaCacheInfo;
-
-typedef struct STableScanBase {
- STsdbReader* dataReader;
- SFileBlockLoadRecorder readRecorder;
- SQueryTableDataCond cond;
- SAggOptrPushDownInfo pdInfo;
- SColMatchInfo matchInfo;
- SReadHandle readHandle;
- SExprSupp pseudoSup;
- STableMetaCacheInfo metaCache;
- int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
- int32_t dataBlockLoadFlag;
- SLimitInfo limitInfo;
- // there are more than one table list exists in one task, if only one vnode exists.
- STableListInfo* pTableListInfo;
-} STableScanBase;
-
-typedef struct STableScanInfo {
- STableScanBase base;
- SScanInfo scanInfo;
- int32_t scanTimes;
- SSDataBlock* pResBlock;
- SSampleExecInfo sample; // sample execution info
- int32_t currentGroupId;
- int32_t currentTable;
- int8_t scanMode;
- int8_t assignBlockUid;
- bool hasGroupByTag;
- bool countOnly;
-} STableScanInfo;
-
-typedef struct STableMergeScanInfo {
- int32_t tableStartIndex;
- int32_t tableEndIndex;
- bool hasGroupId;
- uint64_t groupId;
- SArray* queryConds; // array of queryTableDataCond
- STableScanBase base;
- int32_t bufPageSize;
- uint32_t sortBufSize; // max buffer size for in-memory sort
- SArray* pSortInfo;
- SSortHandle* pSortHandle;
- SSDataBlock* pSortInputBlock;
- int64_t startTs; // sort start time
- SArray* sortSourceParams;
- SLimitInfo limitInfo;
- int64_t numOfRows;
- SScanInfo scanInfo;
- int32_t scanTimes;
- SSDataBlock* pResBlock;
- SSampleExecInfo sample; // sample execution info
- SSortExecInfo sortExecInfo;
-} STableMergeScanInfo;
-
-typedef struct STagScanInfo {
- SColumnInfo* pCols;
- SSDataBlock* pRes;
- SColMatchInfo matchInfo;
- int32_t curPos;
- SReadHandle readHandle;
- STableListInfo* pTableListInfo;
-} STagScanInfo;
-
-typedef enum EStreamScanMode {
- STREAM_SCAN_FROM_READERHANDLE = 1,
- STREAM_SCAN_FROM_RES,
- STREAM_SCAN_FROM_UPDATERES,
- STREAM_SCAN_FROM_DELETE_DATA,
- STREAM_SCAN_FROM_DATAREADER_RETRIEVE,
- STREAM_SCAN_FROM_DATAREADER_RANGE,
-} EStreamScanMode;
-
-enum {
- PROJECT_RETRIEVE_CONTINUE = 0x1,
- PROJECT_RETRIEVE_DONE = 0x2,
-};
-
-typedef struct SStreamAggSupporter {
- int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
- SSDataBlock* pScanBlock;
- SStreamState* pState;
- int64_t gap; // stream session window gap
- SqlFunctionCtx* pDummyCtx; // for combine
- SSHashObj* pResultRows;
- int32_t stateKeySize;
- int16_t stateKeyType;
- SDiskbasedBuf* pResultBuf;
-} SStreamAggSupporter;
-
-typedef struct SWindowSupporter {
- SStreamAggSupporter* pStreamAggSup;
- int64_t gap;
- uint16_t parentType;
- SAggSupporter* pIntervalAggSup;
-} SWindowSupporter;
-
-typedef struct SPartitionBySupporter {
- SArray* pGroupCols; // group by columns, SArray
- SArray* pGroupColVals; // current group column values, SArray
- char* keyBuf; // group by keys for hash
- bool needCalc; // partition by column
-} SPartitionBySupporter;
-
-typedef struct SPartitionDataInfo {
- uint64_t groupId;
- char* tbname;
- SArray* tags;
- SArray* rowIds;
-} SPartitionDataInfo;
-
-typedef struct STimeWindowAggSupp {
- int8_t calTrigger;
- int8_t calTriggerSaved;
- int64_t deleteMark;
- int64_t deleteMarkSaved;
- int64_t waterMark;
- TSKEY maxTs;
- TSKEY minTs;
- SColumnInfoData timeWindowData; // query time window info for scalar function execution.
-} STimeWindowAggSupp;
-
-typedef struct SStreamScanInfo {
- SExprInfo* pPseudoExpr;
- int32_t numOfPseudoExpr;
- SExprSupp tbnameCalSup;
- SExprSupp tagCalSup;
- int32_t primaryTsIndex; // primary time stamp slot id
- SReadHandle readHandle;
- SInterval interval; // if the upstream is an interval operator, the interval info is also kept here.
- SColMatchInfo matchInfo;
-
- SArray* pBlockLists; // multiple SSDatablock.
- SSDataBlock* pRes; // result SSDataBlock
- SSDataBlock* pUpdateRes; // update SSDataBlock
- int32_t updateResIndex;
- int32_t blockType; // current block type
- int32_t validBlockIndex; // Is current data has returned?
- uint64_t numOfExec; // execution times
- STqReader* tqReader;
-
- uint64_t groupId;
- SUpdateInfo* pUpdateInfo;
-
- EStreamScanMode scanMode;
- SOperatorInfo* pStreamScanOp;
- SOperatorInfo* pTableScanOp;
- SArray* childIds;
- SWindowSupporter windowSup;
- SPartitionBySupporter partitionSup;
- SExprSupp* pPartScalarSup;
- bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA.
- int32_t scanWinIndex; // for state operator
- int32_t pullDataResIndex;
- SSDataBlock* pPullDataRes; // pull data SSDataBlock
- SSDataBlock* pDeleteDataRes; // delete data SSDataBlock
- int32_t deleteDataIndex;
- STimeWindow updateWin;
- STimeWindowAggSupp twAggSup;
- SSDataBlock* pUpdateDataRes;
- // status for tmq
- SNodeList* pGroupTags;
- SNode* pTagCond;
- SNode* pTagIndexCond;
-
- // recover
- int32_t blockRecoverContiCnt;
- int32_t blockRecoverTotCnt;
- SSDataBlock* pRecoverRes;
-
- SSDataBlock* pCreateTbRes;
- int8_t igCheckUpdate;
- int8_t igExpired;
-} SStreamScanInfo;
-
-typedef struct {
- SVnode* vnode;
- SSDataBlock pRes; // result SSDataBlock
- STsdbReader* dataReader;
- SSnapContext* sContext;
- STableListInfo* pTableListInfo;
-} SStreamRawScanInfo;
-
-typedef struct STableCountScanSupp {
- int16_t dbNameSlotId;
- int16_t stbNameSlotId;
- int16_t tbCountSlotId;
- bool groupByDbName;
- bool groupByStbName;
- char dbNameFilter[TSDB_DB_NAME_LEN];
- char stbNameFilter[TSDB_TABLE_NAME_LEN];
-} STableCountScanSupp;
-
-typedef struct SOptrBasicInfo {
- SResultRowInfo resultRowInfo;
- SSDataBlock* pRes;
- bool mergeResultBlock;
-} SOptrBasicInfo;
-
-typedef struct SIntervalAggOperatorInfo {
- SOptrBasicInfo binfo; // basic info
- SAggSupporter aggSup; // aggregate supporter
- SExprSupp scalarSupp; // supporter for perform scalar function
- SGroupResInfo groupResInfo; // multiple results build supporter
- SInterval interval; // interval info
- int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator.
- STimeWindow win; // query time range
- bool timeWindowInterpo; // interpolation needed or not
- SArray* pInterpCols; // interpolation columns
- int32_t resultTsOrder; // result timestamp order
- int32_t inputOrder; // input data ts order
- EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
- STimeWindowAggSupp twAggSup;
- SArray* pPrevValues; // SArray used to keep the previous not null value for interpolation.
-} SIntervalAggOperatorInfo;
-
-typedef struct SMergeAlignedIntervalAggOperatorInfo {
- SIntervalAggOperatorInfo* intervalAggOperatorInfo;
-
- uint64_t groupId; // current groupId
- int64_t curTs; // current ts
- SSDataBlock* prefetchedBlock;
- SResultRow* pResultRow;
-} SMergeAlignedIntervalAggOperatorInfo;
-
-typedef struct SStreamIntervalOperatorInfo {
- SOptrBasicInfo binfo; // basic info
- SAggSupporter aggSup; // aggregate supporter
- SExprSupp scalarSupp; // supporter for perform scalar function
- SGroupResInfo groupResInfo; // multiple results build supporter
- SInterval interval; // interval info
- int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator.
- STimeWindowAggSupp twAggSup;
- bool invertible;
- bool ignoreExpiredData;
- bool ignoreExpiredDataSaved;
- SArray* pDelWins; // SWinRes
- int32_t delIndex;
- SSDataBlock* pDelRes;
- SPhysiNode* pPhyNode; // create new child
- SHashObj* pPullDataMap;
- SArray* pPullWins; // SPullWindowInfo
- int32_t pullIndex;
- SSDataBlock* pPullDataRes;
- bool isFinal;
- SArray* pChildren;
- SStreamState* pState;
- SWinKey delKey;
- uint64_t numOfDatapack;
- SArray* pUpdated;
- SSHashObj* pUpdatedMap;
- int64_t dataVersion;
-} SStreamIntervalOperatorInfo;
-
-typedef struct SDataGroupInfo {
- uint64_t groupId;
- int64_t numOfRows;
- SArray* pPageList;
-} SDataGroupInfo;
-
-typedef struct SWindowRowsSup {
- STimeWindow win;
- TSKEY prevTs;
- int32_t startRowIndex;
- int32_t numOfRows;
- uint64_t groupId;
-} SWindowRowsSup;
-
-typedef struct SResultWindowInfo {
- void* pOutputBuf;
- SSessionKey sessionWin;
- bool isOutput;
-} SResultWindowInfo;
-
-typedef struct SStateWindowInfo {
- SResultWindowInfo winInfo;
- SStateKeys* pStateKey;
-} SStateWindowInfo;
-
-typedef struct SStreamSessionAggOperatorInfo {
- SOptrBasicInfo binfo;
- SStreamAggSupporter streamAggSup;
- SExprSupp scalarSupp; // supporter for perform scalar function
- SGroupResInfo groupResInfo;
- int32_t primaryTsIndex; // primary timestamp slot id
- int32_t endTsIndex; // window end timestamp slot id
- int32_t order; // current SSDataBlock scan order
- STimeWindowAggSupp twAggSup;
- SSDataBlock* pWinBlock; // window result
- SSDataBlock* pDelRes; // delete result
- SSDataBlock* pUpdateRes; // update window
- bool returnUpdate;
- SSHashObj* pStDeleted;
- void* pDelIterator;
- SArray* pChildren; // cache for children's result; final stream operator
- SPhysiNode* pPhyNode; // create new child
- bool isFinal;
- bool ignoreExpiredData;
- bool ignoreExpiredDataSaved;
- SArray* pUpdated;
- SSHashObj* pStUpdated;
- int64_t dataVersion;
-} SStreamSessionAggOperatorInfo;
-
-typedef struct SStreamStateAggOperatorInfo {
- SOptrBasicInfo binfo;
- SStreamAggSupporter streamAggSup;
- SExprSupp scalarSupp; // supporter for perform scalar function
- SGroupResInfo groupResInfo;
- int32_t primaryTsIndex; // primary timestamp slot id
- STimeWindowAggSupp twAggSup;
- SColumn stateCol;
- SSDataBlock* pDelRes;
- SSHashObj* pSeDeleted;
- void* pDelIterator;
- SArray* pChildren; // cache for children's result;
- bool ignoreExpiredData;
- bool ignoreExpiredDataSaved;
- SArray* pUpdated;
- SSHashObj* pSeUpdated;
- int64_t dataVersion;
-} SStreamStateAggOperatorInfo;
-
-typedef struct SStreamPartitionOperatorInfo {
- SOptrBasicInfo binfo;
- SPartitionBySupporter partitionSup;
- SExprSupp scalarSup;
- SExprSupp tbnameCalSup;
- SExprSupp tagCalSup;
- SHashObj* pPartitions;
- void* parIte;
- void* pTbNameIte;
- SSDataBlock* pInputDataBlock;
- int32_t tsColIndex;
- SSDataBlock* pDelRes;
- SSDataBlock* pCreateTbRes;
-} SStreamPartitionOperatorInfo;
-
-typedef struct SStreamFillSupporter {
- int32_t type; // fill type
- SInterval interval;
- SResultRowData prev;
- SResultRowData cur;
- SResultRowData next;
- SResultRowData nextNext;
- SFillColInfo* pAllColInfo; // fill exprs and not fill exprs
- SExprSupp notFillExprSup;
- int32_t numOfAllCols; // number of all exprs, including the tags columns
- int32_t numOfFillCols;
- int32_t numOfNotFillCols;
- int32_t rowSize;
- SSHashObj* pResMap;
- bool hasDelete;
-} SStreamFillSupporter;
-
-typedef struct SStreamFillOperatorInfo {
- SStreamFillSupporter* pFillSup;
- SSDataBlock* pRes;
- SSDataBlock* pSrcBlock;
- int32_t srcRowIndex;
- SSDataBlock* pSrcDelBlock;
- int32_t srcDelRowIndex;
- SSDataBlock* pDelRes;
- SColMatchInfo matchInfo;
- int32_t primaryTsCol;
- int32_t primarySrcSlotId;
- SStreamFillInfo* pFillInfo;
-} SStreamFillOperatorInfo;
-
-#define OPTR_IS_OPENED(_optr) (((_optr)->status & OP_OPENED) == OP_OPENED)
-#define OPTR_SET_OPENED(_optr) ((_optr)->status |= OP_OPENED)
-
-SExecTaskInfo* doCreateExecTaskInfo(uint64_t queryId, uint64_t taskId, int32_t vgId, EOPTR_EXEC_MODEL model,
- char* dbFName);
-
-SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, __optr_fn_t cleanup,
- __optr_close_fn_t closeFn, __optr_reqBuf_fn_t reqBufFn, __optr_explain_fn_t explain);
-int32_t optrDummyOpenFn(SOperatorInfo* pOperator);
-int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t num);
-void setOperatorCompleted(SOperatorInfo* pOperator);
-void setOperatorInfo(SOperatorInfo* pOperator, const char* name, int32_t type, bool blocking, int32_t status,
- void* pInfo, SExecTaskInfo* pTaskInfo);
-void destroyOperatorInfo(SOperatorInfo* pOperator);
-int32_t optrDefaultBufFn(SOperatorInfo* pOperator);
-
-void initBasicInfo(SOptrBasicInfo* pInfo, SSDataBlock* pBlock);
-void cleanupBasicInfo(SOptrBasicInfo* pInfo);
-
-int32_t initExprSupp(SExprSupp* pSup, SExprInfo* pExprInfo, int32_t numOfExpr);
-void cleanupExprSupp(SExprSupp* pSup);
-
-void destroyExprInfo(SExprInfo* pExpr, int32_t numOfExprs);
-
-int32_t initAggSup(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInfo, int32_t numOfCols, size_t keyBufSize,
- const char* pkey, void* pState);
-void cleanupAggSup(SAggSupporter* pAggSup);
-
-void initResultSizeInfo(SResultInfo* pResultInfo, int32_t numOfRows);
-
-void doBuildStreamResBlock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo,
- SDiskbasedBuf* pBuf);
-void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo,
- SDiskbasedBuf* pBuf);
-
-bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo);
-bool hasSlimitOffsetInfo(SLimitInfo* pLimitInfo);
-void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimitInfo);
-void resetLimitInfoForNextGroup(SLimitInfo* pLimitInfo);
-bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);
-
-void applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfoData* pTimeWindowData,
- int32_t offset, int32_t forwardStep, int32_t numOfTotal, int32_t numOfOutput);
-
-int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, SArray* pColList, char** pNextStart);
-void updateLoadRemoteInfo(SLoadRemoteDataInfo* pInfo, int64_t numOfRows, int32_t dataLen, int64_t startTs,
- SOperatorInfo* pOperator);
-
-STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInterval* pInterval, int32_t order);
-
-SOperatorInfo* extractOperatorInTree(SOperatorInfo* pOperator, int32_t type, const char* id);
-int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag, bool inheritUsOrder);
-int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz);
-
-extern void doDestroyExchangeOperatorInfo(void* param);
-
-void doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pColMatchInfo);
-int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int32_t numOfExpr, SSDataBlock* pBlock,
- int32_t rows, const char* idStr, STableMetaCacheInfo* pCache);
-
-void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle);
-void setTbNameColData(const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, int32_t functionId, const char* name);
-
-void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset);
-void clearResultRowInitFlag(SqlFunctionCtx* pCtx, int32_t numOfOutput);
-
-SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, char* pData,
- int16_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
- bool isIntervalQuery, SAggSupporter* pSup, bool keepGroup);
-// operator creater functions
-// clang-format off
-SOperatorInfo* createExchangeOperatorInfo(void* pTransporter, SExchangePhysiNode* pExNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, STableListInfo* pTableList, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode* pScanPhyNode, const char* pUser, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createTableCountScanOperatorInfo(SReadHandle* handle, STableCountScanPhysiNode* pNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* pNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pProjPhyNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** dowStreams, size_t numStreams, SMergePhysiNode* pMergePhysiNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pTableScanNode, SReadHandle* readHandle, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMergeIntervalPhysiNode* pIntervalPhyNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SMergeAlignedIntervalPhysiNode* pNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild);
-
-SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionWinodwPhysiNode* pSessionNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* pAggNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createDataBlockInfoScanOperator(SReadHandle* readHandle, SBlockDistScanPhysiNode* pBlockScanNode, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode, SNode* pTagCond, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFillNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWinodwPhysiNode* pStateNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNode* pPartNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStreamPartitionPhysiNode* pPartNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SSortMergeJoinPhysiNode* pJoinNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild);
-
-SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysiNode* pPhyFillNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SGroupSortPhysiNode* pSortPhyNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createEventwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* physiNode, SExecTaskInfo* pTaskInfo);
-// clang-format on
-
-int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock* pSrcBlock, SqlFunctionCtx* pCtx,
- int32_t numOfOutput, SArray* pPseudoList);
-
-void setInputDataBlock(SExprSupp* pExprSupp, SSDataBlock* pBlock, int32_t order, int32_t scanFlag, bool createDummyCol);
-
-int32_t checkForQueryBuf(size_t numOfTables);
-
-bool isTaskKilled(SExecTaskInfo* pTaskInfo);
-void setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode);
-void doDestroyTask(SExecTaskInfo* pTaskInfo);
-void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status);
-
-void buildTaskId(uint64_t taskId, uint64_t queryId, char* dst);
-
-SArray* getTableListInfo(const SExecTaskInfo* pTaskInfo);
-
-int32_t createExecTaskInfo(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
- int32_t vgId, char* sql, EOPTR_EXEC_MODEL model);
-int32_t createDataSinkParam(SDataSinkNode* pNode, void** pParam, SExecTaskInfo* pTask, SReadHandle* readHandle);
-int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInfoList);
-
-STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowInfo, int64_t ts, SInterval* pInterval,
- int32_t order);
-int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn, int32_t startPos, TSKEY ekey,
- __block_search_fn_t searchFn, STableQueryInfo* item, int32_t order);
-int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
-SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize);
-void getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endTs, uint64_t groupId, SSessionKey* pKey);
-bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap);
-bool functionNeedToExecute(SqlFunctionCtx* pCtx);
-bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup);
-bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup);
-bool isDeletedStreamWindow(STimeWindow* pWin, uint64_t groupId, SStreamState* pState, STimeWindowAggSupp* pTwSup);
-void appendOneRowToStreamSpecialBlock(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid,
- uint64_t* pGp, void* pTbName);
-uint64_t calGroupIdByData(SPartitionBySupporter* pParSup, SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t rowId);
-
-int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, SExprSupp* pSup,
- SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);
-
-bool groupbyTbname(SNodeList* pGroupList);
-int32_t buildDataBlockFromGroupRes(SOperatorInfo* pOperator, SStreamState* pState, SSDataBlock* pBlock, SExprSupp* pSup,
- SGroupResInfo* pGroupResInfo);
-int32_t saveSessionDiscBuf(SStreamState* pState, SSessionKey* key, void* buf, int32_t size);
-int32_t buildSessionResultDataBlock(SOperatorInfo* pOperator, SStreamState* pState, SSDataBlock* pBlock,
- SExprSupp* pSup, SGroupResInfo* pGroupResInfo);
-int32_t setOutputBuf(SStreamState* pState, STimeWindow* win, SResultRow** pResult, int64_t tableGroupId,
- SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset, SAggSupporter* pAggSup);
-int32_t releaseOutputBuf(SStreamState* pState, SWinKey* pKey, SResultRow* pResult);
-int32_t saveOutputBuf(SStreamState* pState, SWinKey* pKey, SResultRow* pResult, int32_t resSize);
-void getNextIntervalWindow(SInterval* pInterval, STimeWindow* tw, int32_t order);
-int32_t qAppendTaskStopInfo(SExecTaskInfo* pTaskInfo, SExchangeOpStopInfo* pInfo);
-int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey, int32_t pos, int32_t order,
- int64_t* pData);
-void appendCreateTableRow(SStreamState* pState, SExprSupp* pTableSup, SExprSupp* pTagSup, uint64_t groupId,
- SSDataBlock* pSrcBlock, int32_t rowId, SSDataBlock* pDestBlock);
-
-SSDataBlock* buildCreateTableBlock(SExprSupp* tbName, SExprSupp* tag);
-SExprInfo* createExpr(SNodeList* pNodeList, int32_t* numOfExprs);
-
-void copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResultRow* pRow, SqlFunctionCtx* pCtx,
- SSDataBlock* pBlock, const int32_t* rowEntryOffset, SExecTaskInfo* pTaskInfo);
-void doUpdateNumOfRows(SqlFunctionCtx* pCtx, SResultRow* pRow, int32_t numOfExprs, const int32_t* rowEntryOffset);
-void doClearBufferedBlocks(SStreamScanInfo* pInfo);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // TDENGINE_EXECUTORIMPL_H
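
With executorimpl.h removed, the follow-up hunks in this patch switch each dependent source file over to the split headers. As a rough guide (which of the three headers a given file actually needs depends on what it references), the replacement include set looks like this:

// #include "executorimpl.h"   // removed: this header no longer exists
#include "executorInt.h"       // operator-info structs and shared executor helpers
#include "operator.h"          // SOperatorInfo, SOperatorFpSet, operator constructors
#include "querytask.h"         // SExecTaskInfo and the task lifecycle helpers
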
diff --git a/source/libs/executor/inc/operator.h b/source/libs/executor/inc/operator.h
new file mode 100644
index 0000000000..632b817a07
--- /dev/null
+++ b/source/libs/executor/inc/operator.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#ifndef TDENGINE_OPERATOR_H
+#define TDENGINE_OPERATOR_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct SOperatorCostInfo {
+ double openCost;
+ double totalCost;
+} SOperatorCostInfo;
+
+struct SOperatorInfo;
+
+typedef int32_t (*__optr_encode_fn_t)(struct SOperatorInfo* pOperator, char** result, int32_t* length);
+typedef int32_t (*__optr_decode_fn_t)(struct SOperatorInfo* pOperator, char* result);
+
+typedef int32_t (*__optr_open_fn_t)(struct SOperatorInfo* pOptr);
+typedef SSDataBlock* (*__optr_fn_t)(struct SOperatorInfo* pOptr);
+typedef void (*__optr_close_fn_t)(void* param);
+typedef int32_t (*__optr_explain_fn_t)(struct SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len);
+typedef int32_t (*__optr_reqBuf_fn_t)(struct SOperatorInfo* pOptr);
+
+typedef struct SOperatorFpSet {
+ __optr_open_fn_t _openFn; // DO NOT invoke this function directly
+ __optr_fn_t getNextFn;
+ __optr_fn_t cleanupFn; // call this function to release the allocated resources ASAP
+ __optr_close_fn_t closeFn;
+ __optr_reqBuf_fn_t reqBufFn; // total used buffer for blocking operator
+ __optr_encode_fn_t encodeResultRow;
+ __optr_decode_fn_t decodeResultRow;
+ __optr_explain_fn_t getExplainFn;
+} SOperatorFpSet;
+
+enum {
+ OP_NOT_OPENED = 0x0,
+ OP_OPENED = 0x1,
+ OP_RES_TO_RETURN = 0x5,
+ OP_EXEC_DONE = 0x9,
+};
+
+typedef struct SOperatorInfo {
+ uint16_t operatorType;
+ int16_t resultDataBlockId;
+ bool blocking; // block operator or not
+ uint8_t status; // denotes whether the current operator is completed
+ char* name; // name, for debugging purposes
+ void* info; // operator-specific extension info
+ SExprSupp exprSupp;
+ SExecTaskInfo* pTaskInfo;
+ SOperatorCostInfo cost;
+ SResultInfo resultInfo;
+ struct SOperatorInfo** pDownstream; // downstream pointer list
+ int32_t numOfDownstream; // number of downstream operators. The value is always ONE except for the join operator
+ SOperatorFpSet fpSet;
+} SOperatorInfo;
+
+// operator creator functions
+// clang-format off
+SOperatorInfo* createExchangeOperatorInfo(void* pTransporter, SExchangePhysiNode* pExNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, STableListInfo* pTableList, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode* pScanPhyNode, const char* pUser, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createTableCountScanOperatorInfo(SReadHandle* handle, STableCountScanPhysiNode* pNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* pNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pProjPhyNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** dowStreams, size_t numStreams, SMergePhysiNode* pMergePhysiNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pTableScanNode, SReadHandle* readHandle, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMergeIntervalPhysiNode* pIntervalPhyNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SMergeAlignedIntervalPhysiNode* pNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild);
+
+SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionWinodwPhysiNode* pSessionNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* pAggNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createDataBlockInfoScanOperator(SReadHandle* readHandle, SBlockDistScanPhysiNode* pBlockScanNode, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode, SNode* pTagCond, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFillNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWinodwPhysiNode* pStateNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNode* pPartNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStreamPartitionPhysiNode* pPartNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SSortMergeJoinPhysiNode* pJoinNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild);
+
+SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysiNode* pPhyFillNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SGroupSortPhysiNode* pSortPhyNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createEventwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* physiNode, SExecTaskInfo* pTaskInfo);
+// clang-format on
+
+SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, __optr_fn_t cleanup,
+ __optr_close_fn_t closeFn, __optr_reqBuf_fn_t reqBufFn, __optr_explain_fn_t explain);
+int32_t optrDummyOpenFn(SOperatorInfo* pOperator);
+int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t num);
+void setOperatorCompleted(SOperatorInfo* pOperator);
+void setOperatorInfo(SOperatorInfo* pOperator, const char* name, int32_t type, bool blocking, int32_t status,
+ void* pInfo, SExecTaskInfo* pTaskInfo);
+int32_t optrDefaultBufFn(SOperatorInfo* pOperator);
+
+SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle, SNode* pTagCond,
+ SNode* pTagIndexCond, const char* pUser, const char* dbname);
+void destroyOperator(SOperatorInfo* pOperator);
+
+SOperatorInfo* extractOperatorInTree(SOperatorInfo* pOperator, int32_t type, const char* id);
+int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag, bool inheritUsOrder);
+int32_t stopTableScanOperator(SOperatorInfo* pOperator, const char* pIdStr);
+int32_t getOperatorExplainExecInfo(struct SOperatorInfo* operatorInfo, SArray* pExecInfoList);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // TDENGINE_OPERATOR_H
\ No newline at end of file
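
As a sketch of how an operator is assembled against this interface, the fragment below wires the callback table and the downstream link. doMyScanNext, destroyMyOperatorInfo, createMyOperatorInfo and the zero operator-type placeholder are illustrative names, not symbols defined by this header; error handling is trimmed.

#include "operator.h"

// Next-block callback: returns the next result block, or NULL when exhausted.
static SSDataBlock* doMyScanNext(struct SOperatorInfo* pOperator) {
  (void)pOperator;
  return NULL;
}

// Close callback: releases the operator-specific state.
static void destroyMyOperatorInfo(void* param) { taosMemoryFreeClear(param); }

SOperatorInfo* createMyOperatorInfo(SOperatorInfo* downstream, SExecTaskInfo* pTaskInfo) {
  SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
  void*          pInfo = taosMemoryCalloc(1, 32);  // placeholder for operator-specific state
  if (pOperator == NULL || pInfo == NULL) {
    taosMemoryFreeClear(pOperator);
    taosMemoryFreeClear(pInfo);
    return NULL;
  }

  setOperatorInfo(pOperator, "MyOperator", 0 /* operator type from the physical-plan enum */, false,
                  OP_NOT_OPENED, pInfo, pTaskInfo);
  pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doMyScanNext, NULL, destroyMyOperatorInfo,
                                         optrDefaultBufFn, NULL);
  appendDownstream(pOperator, &downstream, 1);
  return pOperator;
}
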
diff --git a/source/libs/executor/inc/querytask.h b/source/libs/executor/inc/querytask.h
new file mode 100644
index 0000000000..8852265da0
--- /dev/null
+++ b/source/libs/executor/inc/querytask.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#ifndef TDENGINE_QUERYTASK_H
+#define TDENGINE_QUERYTASK_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define GET_TASKID(_t) (((SExecTaskInfo*)(_t))->id.str)
+
+enum {
+ // this status is set when the task starts to execute
+ TASK_NOT_COMPLETED = 0x1u,
+
+ /* Task is over
+ * 1. this status is used in one-row-result query processing, e.g., count/sum/first/last/avg, etc.
+ * 2. when all data within the queried time window has been processed, it is also denoted as query_completed
+ */
+ TASK_COMPLETED = 0x2u,
+};
+
+typedef struct STaskIdInfo {
+ uint64_t queryId; // this is also a request id
+ uint64_t subplanId;
+ uint64_t templateId;
+ char* str;
+ int32_t vgId;
+} STaskIdInfo;
+
+typedef struct STaskCostInfo {
+ int64_t created;
+ int64_t start;
+ uint64_t elapsedTime;
+ double extractListTime;
+ double groupIdMapTime;
+ SFileBlockLoadRecorder* pRecoder;
+} STaskCostInfo;
+
+typedef struct STaskStopInfo {
+ SRWLatch lock;
+ SArray* pStopInfo;
+} STaskStopInfo;
+
+typedef struct {
+ STqOffsetVal currentOffset; // for tmq
+ SMqMetaRsp metaRsp; // for tmq fetching meta
+ int64_t snapshotVer;
+ SPackedData submit; // todo remove it
+ SSchemaWrapper* schema;
+ char tbName[TSDB_TABLE_NAME_LEN]; // this is the current scan table: todo refactor
+ int8_t recoverStep;
+ int8_t recoverScanFinished;
+ SQueryTableDataCond tableCond;
+ int64_t fillHistoryVer1;
+ int64_t fillHistoryVer2;
+ SStreamState* pState;
+ int64_t dataVersion;
+ int64_t checkPointId;
+} SStreamTaskInfo;
+
+struct SExecTaskInfo {
+ STaskIdInfo id;
+ uint32_t status;
+ STimeWindow window;
+ STaskCostInfo cost;
+ int64_t owner; // if it is in execution
+ int32_t code;
+ int32_t qbufQuota; // total available buffer (in KB) during query execution
+ int64_t version; // used by stream tasks to record the wal version; todo: consider moving it to SSchemaInfo
+ SStreamTaskInfo streamInfo;
+ SSchemaInfo schemaInfo;
+ const char* sql; // query sql string
+ jmp_buf env; // jump to this position when an error happens.
+ EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
+ SSubplan* pSubplan;
+ struct SOperatorInfo* pRoot;
+ SLocalFetch localFetch;
+ SArray* pResultBlockList; // result block list
+ STaskStopInfo stopInfo;
+ SRWLatch lock; // protects access to STableListInfo
+};
+
+void buildTaskId(uint64_t taskId, uint64_t queryId, char* dst);
+SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOPTR_EXEC_MODEL model);
+void doDestroyTask(SExecTaskInfo* pTaskInfo);
+bool isTaskKilled(SExecTaskInfo* pTaskInfo);
+void setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode);
+void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status);
+int32_t createExecTaskInfo(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
+ int32_t vgId, char* sql, EOPTR_EXEC_MODEL model);
+int32_t qAppendTaskStopInfo(SExecTaskInfo* pTaskInfo, SExchangeOpStopInfo* pInfo);
+SArray* getTableListInfo(const SExecTaskInfo* pTaskInfo);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // TDENGINE_QUERYTASK_H
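
A rough sketch of the task lifecycle exposed by this header: create the task from a subplan, drive the root operator until it is exhausted or the task is killed (a concurrent cancel path would call setTaskKilled), then destroy it. runBatchTaskOnce and the direct fpSet call are for illustration only; real callers go through the public executor entry points, and OPTR_EXEC_MODEL_BATCH is assumed to come from executor.h.

#include "querytask.h"

// Illustrative driver, with error handling trimmed.
int32_t runBatchTaskOnce(SSubplan* pPlan, SReadHandle* pHandle, uint64_t taskId, int32_t vgId, char* sql) {
  SExecTaskInfo* pTaskInfo = NULL;
  int32_t code = createExecTaskInfo(pPlan, &pTaskInfo, pHandle, taskId, vgId, sql, OPTR_EXEC_MODEL_BATCH);
  if (code != TSDB_CODE_SUCCESS) {
    return code;
  }

  while (!isTaskKilled(pTaskInfo)) {
    SSDataBlock* pBlock = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot);
    if (pBlock == NULL) {
      break;  // root operator exhausted
    }
    // hand pBlock to the data sink / consumer here
  }

  doDestroyTask(pTaskInfo);
  return TSDB_CODE_SUCCESS;
}
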
diff --git a/source/libs/executor/src/aggregateoperator.c b/source/libs/executor/src/aggregateoperator.c
index ec8060348d..9b463a3dee 100644
--- a/source/libs/executor/src/aggregateoperator.c
+++ b/source/libs/executor/src/aggregateoperator.c
@@ -15,24 +15,21 @@
#include "filter.h"
#include "function.h"
-#include "functionMgt.h"
#include "os.h"
#include "querynodes.h"
#include "tfill.h"
#include "tname.h"
+#include "executorInt.h"
+#include "index.h"
+#include "operator.h"
+#include "query.h"
+#include "querytask.h"
+#include "tcompare.h"
#include "tdatablock.h"
#include "tglobal.h"
-#include "tmsg.h"
-#include "ttime.h"
-
-#include "executorimpl.h"
-#include "index.h"
-#include "query.h"
-#include "tcompare.h"
#include "thash.h"
#include "ttypes.h"
-#include "vnode.h"
typedef struct {
bool hasAgg;
diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c
index f6fc332b37..eec34a6406 100644
--- a/source/libs/executor/src/cachescanoperator.c
+++ b/source/libs/executor/src/cachescanoperator.c
@@ -20,7 +20,9 @@
#include "tdatablock.h"
#include "tmsg.h"
-#include "executorimpl.h"
+#include "executorInt.h"
+#include "operator.h"
+#include "querytask.h"
#include "tcompare.h"
#include "thash.h"
#include "ttypes.h"
diff --git a/source/libs/executor/src/dataDeleter.c b/source/libs/executor/src/dataDeleter.c
index d693faf7f1..11074b0e94 100644
--- a/source/libs/executor/src/dataDeleter.c
+++ b/source/libs/executor/src/dataDeleter.c
@@ -15,7 +15,7 @@
#include "dataSinkInt.h"
#include "dataSinkMgt.h"
-#include "executorimpl.h"
+#include "executorInt.h"
#include "planner.h"
#include "tcompression.h"
#include "tdatablock.h"
@@ -179,7 +179,7 @@ static void getDataLength(SDataSinkHandle* pHandle, int64_t* pLen, bool* pQueryE
static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) {
SDataDeleterHandle* pDeleter = (SDataDeleterHandle*)pHandle;
if (NULL == pDeleter->nextOutput.pData) {
- assert(pDeleter->queryEnd);
+ ASSERT(pDeleter->queryEnd);
pOutput->useconds = pDeleter->useconds;
pOutput->precision = pDeleter->pSchema->precision;
pOutput->bufStatus = DS_BUF_EMPTY;
diff --git a/source/libs/executor/src/dataDispatcher.c b/source/libs/executor/src/dataDispatcher.c
index d8efcf50ca..ce8dc898a5 100644
--- a/source/libs/executor/src/dataDispatcher.c
+++ b/source/libs/executor/src/dataDispatcher.c
@@ -15,7 +15,7 @@
#include "dataSinkInt.h"
#include "dataSinkMgt.h"
-#include "executorimpl.h"
+#include "executorInt.h"
#include "planner.h"
#include "tcompression.h"
#include "tdatablock.h"
@@ -181,7 +181,7 @@ static void getDataLength(SDataSinkHandle* pHandle, int64_t* pLen, bool* pQueryE
static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) {
SDataDispatchHandle* pDispatcher = (SDataDispatchHandle*)pHandle;
if (NULL == pDispatcher->nextOutput.pData) {
- assert(pDispatcher->queryEnd);
+ ASSERT(pDispatcher->queryEnd);
pOutput->useconds = pDispatcher->useconds;
pOutput->precision = pDispatcher->pSchema->precision;
pOutput->bufStatus = DS_BUF_EMPTY;
diff --git a/source/libs/executor/src/dataInserter.c b/source/libs/executor/src/dataInserter.c
index 90d740bebd..33eccf4759 100644
--- a/source/libs/executor/src/dataInserter.c
+++ b/source/libs/executor/src/dataInserter.c
@@ -15,7 +15,7 @@
#include "dataSinkInt.h"
#include "dataSinkMgt.h"
-#include "executorimpl.h"
+#include "executorInt.h"
#include "planner.h"
#include "tcompression.h"
#include "tdatablock.h"
@@ -301,7 +301,7 @@ _end:
if (terrno != 0) {
*ppReq = NULL;
if (pReq) {
- tDestroySSubmitReq2(pReq, TSDB_MSG_FLG_ENCODE);
+ tDestroySSubmitReq(pReq, TSDB_MSG_FLG_ENCODE);
taosMemoryFree(pReq);
}
return terrno;
@@ -326,7 +326,7 @@ int32_t dataBlocksToSubmitReq(SDataInserterHandle* pInserter, void** pMsg, int32
code = buildSubmitReqFromBlock(pInserter, &pReq, pDataBlock, pTSchema, uid, vgId, suid);
if (code) {
if (pReq) {
- tDestroySSubmitReq2(pReq, TSDB_MSG_FLG_ENCODE);
+ tDestroySSubmitReq(pReq, TSDB_MSG_FLG_ENCODE);
taosMemoryFree(pReq);
}
@@ -335,7 +335,7 @@ int32_t dataBlocksToSubmitReq(SDataInserterHandle* pInserter, void** pMsg, int32
}
code = submitReqToMsg(vgId, pReq, pMsg, msgLen);
- tDestroySSubmitReq2(pReq, TSDB_MSG_FLG_ENCODE);
+ tDestroySSubmitReq(pReq, TSDB_MSG_FLG_ENCODE);
taosMemoryFree(pReq);
return code;
diff --git a/source/libs/executor/src/eventwindowoperator.c b/source/libs/executor/src/eventwindowoperator.c
index 559fce5224..956d5b714d 100644
--- a/source/libs/executor/src/eventwindowoperator.c
+++ b/source/libs/executor/src/eventwindowoperator.c
@@ -13,10 +13,12 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "executorimpl.h"
+#include "executorInt.h"
#include "filter.h"
#include "function.h"
#include "functionMgt.h"
+#include "operator.h"
+#include "querytask.h"
#include "tcommon.h"
#include "tcompare.h"
#include "tdatablock.h"
diff --git a/source/libs/executor/src/exchangeoperator.c b/source/libs/executor/src/exchangeoperator.c
index c855a104b2..94041140d4 100644
--- a/source/libs/executor/src/exchangeoperator.c
+++ b/source/libs/executor/src/exchangeoperator.c
@@ -13,17 +13,19 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include "executorInt.h"
#include "filter.h"
#include "function.h"
+#include "index.h"
+#include "operator.h"
#include "os.h"
+#include "query.h"
+#include "querytask.h"
+#include "tdatablock.h"
+#include "thash.h"
+#include "tmsg.h"
#include "tname.h"
#include "tref.h"
-#include "tdatablock.h"
-#include "tmsg.h"
-#include "executorimpl.h"
-#include "index.h"
-#include "query.h"
-#include "thash.h"
typedef struct SFetchRspHandleWrapper {
uint32_t exchangeId;
diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c
index f61fd1ae01..c51dc39b5b 100644
--- a/source/libs/executor/src/executil.c
+++ b/source/libs/executor/src/executil.c
@@ -24,7 +24,8 @@
#include "ttime.h"
#include "executil.h"
-#include "executorimpl.h"
+#include "executorInt.h"
+#include "querytask.h"
#include "tcompression.h"
typedef struct STableListIdInfo {
@@ -88,7 +89,6 @@ void resetResultRow(SResultRow* pResultRow, size_t entrySize) {
// TODO refactor: use macro
SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t index, const int32_t* offset) {
- assert(index >= 0 && offset != NULL);
return (SResultRowEntryInfo*)((char*)pRow->pEntryInfo + offset[index]);
}
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index 546cd18cda..2d991a14f5 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -14,9 +14,10 @@
*/
#include "executor.h"
-#include
-#include "executorimpl.h"
+#include "executorInt.h"
+#include "operator.h"
#include "planner.h"
+#include "querytask.h"
#include "tdatablock.h"
#include "tref.h"
#include "tudf.h"
@@ -138,7 +139,6 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
}
pInfo->blockType = STREAM_INPUT__DATA_SUBMIT;
} else if (type == STREAM_INPUT__DATA_SUBMIT) {
- ASSERT(numOfBlocks == 1);
taosArrayPush(pInfo->pBlockLists, input);
pInfo->blockType = STREAM_INPUT__DATA_SUBMIT;
} else if (type == STREAM_INPUT__DATA_BLOCK) {
@@ -249,7 +249,7 @@ int32_t qSetSMAInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks,
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* pReaderHandle, int32_t vgId, int32_t* numOfCols,
uint64_t id) {
if (msg == NULL) { // create raw scan
- SExecTaskInfo* pTaskInfo = doCreateExecTaskInfo(0, id, vgId, OPTR_EXEC_MODEL_QUEUE, "");
+ SExecTaskInfo* pTaskInfo = doCreateTask(0, id, vgId, OPTR_EXEC_MODEL_QUEUE);
if (NULL == pTaskInfo) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
@@ -312,7 +312,6 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers, int32_t v
qTaskInfo_t pTaskInfo = NULL;
code = qCreateExecTask(readers, vgId, 0, pPlan, &pTaskInfo, NULL, NULL, OPTR_EXEC_MODEL_STREAM);
if (code != TSDB_CODE_SUCCESS) {
- nodesDestroyNode((SNode*)pPlan);
qDestroyTask(pTaskInfo);
terrno = code;
return NULL;
@@ -717,8 +716,6 @@ void qRemoveTaskStopInfo(SExecTaskInfo* pTaskInfo, SExchangeOpStopInfo* pInfo) {
taosArrayRemove(pTaskInfo->stopInfo.pStopInfo, idx);
}
taosWUnLockLatch(&pTaskInfo->stopInfo.lock);
-
- return;
}
void qStopTaskOperators(SExecTaskInfo* pTaskInfo) {
@@ -802,7 +799,11 @@ void qDestroyTask(qTaskInfo_t qTaskHandle) {
return;
}
- qDebug("%s execTask completed, numOfRows:%" PRId64, GET_TASKID(pTaskInfo), pTaskInfo->pRoot->resultInfo.totalRows);
+ if (pTaskInfo->pRoot != NULL) {
+ qDebug("%s execTask completed, numOfRows:%" PRId64, GET_TASKID(pTaskInfo), pTaskInfo->pRoot->resultInfo.totalRows);
+ } else {
+ qDebug("%s execTask completed", GET_TASKID(pTaskInfo));
+ }
printTaskExecCostInLog(pTaskInfo); // print the query cost summary
doDestroyTask(pTaskInfo);
@@ -855,15 +856,6 @@ int32_t qExtractStreamScanner(qTaskInfo_t tinfo, void** scanner) {
}
}
-#if 0
-int32_t qStreamInput(qTaskInfo_t tinfo, void* pItem) {
- SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
- ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM);
- taosWriteQitem(pTaskInfo->streamInfo.inputQueue->queue, pItem);
- return 0;
-}
-#endif
-
int32_t qStreamSourceRecoverStep1(qTaskInfo_t tinfo, int64_t ver) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM);
@@ -898,8 +890,7 @@ int32_t qStreamSetParamForRecover(qTaskInfo_t tinfo) {
SStreamIntervalOperatorInfo* pInfo = pOperator->info;
ASSERT(pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE ||
pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE);
- ASSERT(pInfo->twAggSup.calTriggerSaved == 0);
- ASSERT(pInfo->twAggSup.deleteMarkSaved == 0);
+ ASSERT(pInfo->twAggSup.calTriggerSaved == 0 && pInfo->twAggSup.deleteMarkSaved == 0);
qInfo("save stream param for interval: %d, %" PRId64, pInfo->twAggSup.calTrigger, pInfo->twAggSup.deleteMark);
@@ -915,9 +906,8 @@ int32_t qStreamSetParamForRecover(qTaskInfo_t tinfo) {
SStreamSessionAggOperatorInfo* pInfo = pOperator->info;
ASSERT(pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE ||
pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE);
- ASSERT(pInfo->twAggSup.calTriggerSaved == 0);
- ASSERT(pInfo->twAggSup.deleteMarkSaved == 0);
+ ASSERT(pInfo->twAggSup.calTriggerSaved == 0 && pInfo->twAggSup.deleteMarkSaved == 0);
qInfo("save stream param for session: %d, %" PRId64, pInfo->twAggSup.calTrigger, pInfo->twAggSup.deleteMark);
pInfo->twAggSup.calTriggerSaved = pInfo->twAggSup.calTrigger;
@@ -930,8 +920,7 @@ int32_t qStreamSetParamForRecover(qTaskInfo_t tinfo) {
SStreamStateAggOperatorInfo* pInfo = pOperator->info;
ASSERT(pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE ||
pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE);
- ASSERT(pInfo->twAggSup.calTriggerSaved == 0);
- ASSERT(pInfo->twAggSup.deleteMarkSaved == 0);
+ ASSERT(pInfo->twAggSup.calTriggerSaved == 0 && pInfo->twAggSup.deleteMarkSaved == 0);
qInfo("save stream param for state: %d, %" PRId64, pInfo->twAggSup.calTrigger, pInfo->twAggSup.deleteMark);
@@ -992,7 +981,6 @@ int32_t qStreamRestoreParam(qTaskInfo_t tinfo) {
if (pOperator->numOfDownstream != 1 || pOperator->pDownstream[0] == NULL) {
if (pOperator->numOfDownstream > 1) {
qError("unexpected stream, multiple downstream");
- /*ASSERT(0);*/
return -1;
}
return 0;
@@ -1303,3 +1291,25 @@ SArray* qGetQueriedTableListInfo(qTaskInfo_t tinfo) {
taosArrayDestroy(plist);
return pUidList;
}
+
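+// collect the STableListInfo pointers of the (stream) table scan operators found in the operator tree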
+static void extractTableList(SArray* pList, const SOperatorInfo* pOperator) {
+ if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
+ SStreamScanInfo* pScanInfo = pOperator->info;
+ STableScanInfo* pTableScanInfo = pScanInfo->pTableScanOp->info;
+ taosArrayPush(pList, &pTableScanInfo->base.pTableListInfo);
+ } else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
+ STableScanInfo* pScanInfo = pOperator->info;
+ taosArrayPush(pList, &pScanInfo->base.pTableListInfo);
+ } else {
+ if (pOperator->pDownstream != NULL && pOperator->pDownstream[0] != NULL) {
+ extractTableList(pList, pOperator->pDownstream[0]);
+ }
+ }
+}
+
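+// build an array holding the table list info of every scan operator below the root operator of this task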
+SArray* getTableListInfo(const SExecTaskInfo* pTaskInfo) {
+ SArray* pArray = taosArrayInit(0, POINTER_BYTES);
+ SOperatorInfo* pOperator = pTaskInfo->pRoot;
+ extractTableList(pArray, pOperator);
+ return pArray;
+}
\ No newline at end of file
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorInt.c
similarity index 61%
rename from source/libs/executor/src/executorimpl.c
rename to source/libs/executor/src/executorInt.c
index 7594079cfb..f525f6728c 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorInt.c
@@ -22,13 +22,14 @@
#include "tname.h"
#include "tdatablock.h"
-#include "tglobal.h"
#include "tmsg.h"
#include "ttime.h"
-#include "executorimpl.h"
+#include "executorInt.h"
#include "index.h"
+#include "operator.h"
#include "query.h"
+#include "querytask.h"
#include "tcompare.h"
#include "thash.h"
#include "ttypes.h"
@@ -71,12 +72,8 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
#define realloc u_realloc
#endif
-#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st)))
-
static void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* pBlock);
-static void releaseQueryBuf(size_t numOfTables);
-
static void initCtxOutputBuffer(SqlFunctionCtx* pCtx, int32_t size);
static void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t order, int32_t scanFlag);
@@ -86,44 +83,6 @@ static int32_t doSetInputDataBlock(SExprSupp* pExprSup, SSDataBlock* pBlock, int
bool createDummyCol);
static int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprSupp* pSup, SDiskbasedBuf* pBuf,
SGroupResInfo* pGroupResInfo);
-static SSchemaWrapper* extractQueriedColumnSchema(SScanPhysiNode* pScanNode);
-
-void setOperatorCompleted(SOperatorInfo* pOperator) {
- pOperator->status = OP_EXEC_DONE;
- pOperator->cost.totalCost = (taosGetTimestampUs() - pOperator->pTaskInfo->cost.start) / 1000.0;
- setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED);
-}
-
-void setOperatorInfo(SOperatorInfo* pOperator, const char* name, int32_t type, bool blocking, int32_t status,
- void* pInfo, SExecTaskInfo* pTaskInfo) {
- pOperator->name = (char*)name;
- pOperator->operatorType = type;
- pOperator->blocking = blocking;
- pOperator->status = status;
- pOperator->info = pInfo;
- pOperator->pTaskInfo = pTaskInfo;
-}
-
-int32_t optrDummyOpenFn(SOperatorInfo* pOperator) {
- OPTR_SET_OPENED(pOperator);
- pOperator->cost.openCost = 0;
- return TSDB_CODE_SUCCESS;
-}
-
-SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, __optr_fn_t cleanup,
- __optr_close_fn_t closeFn, __optr_reqBuf_fn_t reqBufFn,
- __optr_explain_fn_t explain) {
- SOperatorFpSet fpSet = {
- ._openFn = openFn,
- .getNextFn = nextFn,
- .cleanupFn = cleanup,
- .closeFn = closeFn,
- .reqBufFn = reqBufFn,
- .getExplainFn = explain,
- };
-
- return fpSet;
-}
SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize) {
SFilePage* pData = NULL;
@@ -482,10 +441,6 @@ void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, SSDataBlock* pB
}
}
-bool isTaskKilled(SExecTaskInfo* pTaskInfo) { return (0 != pTaskInfo->code); }
-
-void setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode) { pTaskInfo->code = rspCode; }
-
/////////////////////////////////////////////////////////////////////////////////////////////
STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key) {
STimeWindow win = {0};
@@ -503,16 +458,6 @@ STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int
return win;
}
-void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status) {
- if (status == TASK_NOT_COMPLETED) {
- pTaskInfo->status = status;
- } else {
- // QUERY_NOT_COMPLETED is not compatible with any other status, so clear its position first
- CLEAR_QUERY_STATUS(pTaskInfo, TASK_NOT_COMPLETED);
- pTaskInfo->status |= status;
- }
-}
-
void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset) {
bool init = false;
for (int32_t i = 0; i < numOfOutput; ++i) {
@@ -949,72 +894,6 @@ void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SG
}
}
-int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t num) {
- p->pDownstream = taosMemoryCalloc(1, num * POINTER_BYTES);
- if (p->pDownstream == NULL) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
-
- memcpy(p->pDownstream, pDownstream, num * POINTER_BYTES);
- p->numOfDownstream = num;
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag, bool inheritUsOrder) {
- // todo add more information about exchange operation
- int32_t type = pOperator->operatorType;
- if (type == QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN ||
- type == QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN ||
- type == QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN) {
- *order = TSDB_ORDER_ASC;
- *scanFlag = MAIN_SCAN;
- return TSDB_CODE_SUCCESS;
- } else if (type == QUERY_NODE_PHYSICAL_PLAN_EXCHANGE) {
- if (!inheritUsOrder) {
- *order = TSDB_ORDER_ASC;
- }
- *scanFlag = MAIN_SCAN;
- return TSDB_CODE_SUCCESS;
- } else if (type == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
- STableScanInfo* pTableScanInfo = pOperator->info;
- *order = pTableScanInfo->base.cond.order;
- *scanFlag = pTableScanInfo->base.scanFlag;
- return TSDB_CODE_SUCCESS;
- } else if (type == QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN) {
- STableMergeScanInfo* pTableScanInfo = pOperator->info;
- *order = pTableScanInfo->base.cond.order;
- *scanFlag = pTableScanInfo->base.scanFlag;
- return TSDB_CODE_SUCCESS;
- } else {
- if (pOperator->pDownstream == NULL || pOperator->pDownstream[0] == NULL) {
- return TSDB_CODE_INVALID_PARA;
- } else {
- return getTableScanInfo(pOperator->pDownstream[0], order, scanFlag, inheritUsOrder);
- }
- }
-}
-
-// QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN
-SOperatorInfo* extractOperatorInTree(SOperatorInfo* pOperator, int32_t type, const char* id) {
- if (pOperator == NULL) {
- qError("invalid operator, failed to find tableScanOperator %s", id);
- terrno = TSDB_CODE_PAR_INTERNAL_ERROR;
- return NULL;
- }
-
- if (pOperator->operatorType == type) {
- return pOperator;
- } else {
- if (pOperator->pDownstream == NULL || pOperator->pDownstream[0] == NULL) {
- qError("invalid operator, failed to find tableScanOperator %s", id);
- terrno = TSDB_CODE_PAR_INTERNAL_ERROR;
- return NULL;
- }
-
- return extractOperatorInTree(pOperator->pDownstream[0], type, id);
- }
-}
-
void destroyExprInfo(SExprInfo* pExpr, int32_t numOfExprs) {
for (int32_t i = 0; i < numOfExprs; ++i) {
SExprInfo* pExprInfo = &pExpr[i];
@@ -1031,37 +910,6 @@ void destroyExprInfo(SExprInfo* pExpr, int32_t numOfExprs) {
}
}
-void destroyOperatorInfo(SOperatorInfo* pOperator) {
- if (pOperator == NULL) {
- return;
- }
-
- if (pOperator->fpSet.closeFn != NULL) {
- pOperator->fpSet.closeFn(pOperator->info);
- }
-
- if (pOperator->pDownstream != NULL) {
- for (int32_t i = 0; i < pOperator->numOfDownstream; ++i) {
- destroyOperatorInfo(pOperator->pDownstream[i]);
- }
-
- taosMemoryFreeClear(pOperator->pDownstream);
- pOperator->numOfDownstream = 0;
- }
-
- cleanupExprSupp(&pOperator->exprSupp);
- taosMemoryFreeClear(pOperator);
-}
-
-// each operator should be set their own function to return total cost buffer
-int32_t optrDefaultBufFn(SOperatorInfo* pOperator) {
- if (pOperator->blocking) {
- return -1;
- } else {
- return 0;
- }
-}
-
int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz) {
*defaultPgsz = 4096;
while (*defaultPgsz < rowSize * 4) {
@@ -1151,136 +999,6 @@ void cleanupExprSupp(SExprSupp* pSupp) {
void cleanupBasicInfo(SOptrBasicInfo* pInfo) { pInfo->pRes = blockDataDestroy(pInfo->pRes); }
-void buildTaskId(uint64_t taskId, uint64_t queryId, char* dst) {
- char* p = dst;
-
- int32_t offset = 6;
- memcpy(p, "TID:0x", offset);
- offset += tintToHex(taskId, &p[offset]);
-
- memcpy(&p[offset], " QID:0x", 7);
- offset += 7;
- offset += tintToHex(queryId, &p[offset]);
-
- p[offset] = 0;
-}
-
-SExecTaskInfo* doCreateExecTaskInfo(uint64_t queryId, uint64_t taskId, int32_t vgId, EOPTR_EXEC_MODEL model,
- char* dbFName) {
- SExecTaskInfo* pTaskInfo = taosMemoryCalloc(1, sizeof(SExecTaskInfo));
- if (pTaskInfo == NULL) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return NULL;
- }
-
- setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
- pTaskInfo->cost.created = taosGetTimestampUs();
-
- pTaskInfo->schemaInfo.dbname = taosStrdup(dbFName);
- pTaskInfo->execModel = model;
- pTaskInfo->stopInfo.pStopInfo = taosArrayInit(4, sizeof(SExchangeOpStopInfo));
- pTaskInfo->pResultBlockList = taosArrayInit(128, POINTER_BYTES);
-
- taosInitRWLatch(&pTaskInfo->lock);
- pTaskInfo->id.vgId = vgId;
- pTaskInfo->id.queryId = queryId;
-
- pTaskInfo->id.str = taosMemoryMalloc(64);
- buildTaskId(taskId, queryId, pTaskInfo->id.str);
- return pTaskInfo;
-}
-
-int32_t extractTableSchemaInfo(SReadHandle* pHandle, SScanPhysiNode* pScanNode, SExecTaskInfo* pTaskInfo) {
- SMetaReader mr = {0};
- if (pHandle == NULL) {
- terrno = TSDB_CODE_INVALID_PARA;
- return terrno;
- }
-
- metaReaderInit(&mr, pHandle->meta, 0);
- int32_t code = metaGetTableEntryByUidCache(&mr, pScanNode->uid);
- if (code != TSDB_CODE_SUCCESS) {
- qError("failed to get the table meta, uid:0x%" PRIx64 ", suid:0x%" PRIx64 ", %s", pScanNode->uid, pScanNode->suid,
- GET_TASKID(pTaskInfo));
-
- metaReaderClear(&mr);
- return terrno;
- }
-
- SSchemaInfo* pSchemaInfo = &pTaskInfo->schemaInfo;
- pSchemaInfo->tablename = taosStrdup(mr.me.name);
-
- if (mr.me.type == TSDB_SUPER_TABLE) {
- pSchemaInfo->sw = tCloneSSchemaWrapper(&mr.me.stbEntry.schemaRow);
- pSchemaInfo->tversion = mr.me.stbEntry.schemaTag.version;
- } else if (mr.me.type == TSDB_CHILD_TABLE) {
- tDecoderClear(&mr.coder);
-
- tb_uid_t suid = mr.me.ctbEntry.suid;
- code = metaGetTableEntryByUidCache(&mr, suid);
- if (code != TSDB_CODE_SUCCESS) {
- metaReaderClear(&mr);
- return terrno;
- }
-
- pSchemaInfo->sw = tCloneSSchemaWrapper(&mr.me.stbEntry.schemaRow);
- pSchemaInfo->tversion = mr.me.stbEntry.schemaTag.version;
- } else {
- pSchemaInfo->sw = tCloneSSchemaWrapper(&mr.me.ntbEntry.schemaRow);
- }
-
- metaReaderClear(&mr);
-
- pSchemaInfo->qsw = extractQueriedColumnSchema(pScanNode);
- return TSDB_CODE_SUCCESS;
-}
-
-SSchemaWrapper* extractQueriedColumnSchema(SScanPhysiNode* pScanNode) {
- int32_t numOfCols = LIST_LENGTH(pScanNode->pScanCols);
- int32_t numOfTags = LIST_LENGTH(pScanNode->pScanPseudoCols);
-
- SSchemaWrapper* pqSw = taosMemoryCalloc(1, sizeof(SSchemaWrapper));
- pqSw->pSchema = taosMemoryCalloc(numOfCols + numOfTags, sizeof(SSchema));
-
- for (int32_t i = 0; i < numOfCols; ++i) {
- STargetNode* pNode = (STargetNode*)nodesListGetNode(pScanNode->pScanCols, i);
- SColumnNode* pColNode = (SColumnNode*)pNode->pExpr;
-
- SSchema* pSchema = &pqSw->pSchema[pqSw->nCols++];
- pSchema->colId = pColNode->colId;
- pSchema->type = pColNode->node.resType.type;
- pSchema->bytes = pColNode->node.resType.bytes;
- tstrncpy(pSchema->name, pColNode->colName, tListLen(pSchema->name));
- }
-
- // this the tags and pseudo function columns, we only keep the tag columns
- for (int32_t i = 0; i < numOfTags; ++i) {
- STargetNode* pNode = (STargetNode*)nodesListGetNode(pScanNode->pScanPseudoCols, i);
-
- int32_t type = nodeType(pNode->pExpr);
- if (type == QUERY_NODE_COLUMN) {
- SColumnNode* pColNode = (SColumnNode*)pNode->pExpr;
-
- SSchema* pSchema = &pqSw->pSchema[pqSw->nCols++];
- pSchema->colId = pColNode->colId;
- pSchema->type = pColNode->node.resType.type;
- pSchema->bytes = pColNode->node.resType.bytes;
- tstrncpy(pSchema->name, pColNode->colName, tListLen(pSchema->name));
- }
- }
-
- return pqSw;
-}
-
-static void cleanupTableSchemaInfo(SSchemaInfo* pSchemaInfo) {
- taosMemoryFreeClear(pSchemaInfo->dbname);
- taosMemoryFreeClear(pSchemaInfo->tablename);
- tDeleteSSchemaWrapper(pSchemaInfo->sw);
- tDeleteSSchemaWrapper(pSchemaInfo->qsw);
-}
-
-static void cleanupStreamInfo(SStreamTaskInfo* pStreamInfo) { tDeleteSSchemaWrapper(pStreamInfo->schema); }
-
bool groupbyTbname(SNodeList* pGroupList) {
bool bytbname = false;
if (LIST_LENGTH(pGroupList) == 1) {
@@ -1294,306 +1012,6 @@ bool groupbyTbname(SNodeList* pGroupList) {
return bytbname;
}
-SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle, SNode* pTagCond,
- SNode* pTagIndexCond, const char* pUser) {
- int32_t type = nodeType(pPhyNode);
- const char* idstr = GET_TASKID(pTaskInfo);
-
- if (pPhyNode->pChildren == NULL || LIST_LENGTH(pPhyNode->pChildren) == 0) {
- SOperatorInfo* pOperator = NULL;
- if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == type) {
- STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode;
-
- // NOTE: this is an patch to fix the physical plan
- // TODO remove it later
- if (pTableScanNode->scan.node.pLimit != NULL) {
- pTableScanNode->groupSort = true;
- }
-
- STableListInfo* pTableListInfo = tableListCreate();
- int32_t code =
- createScanTableListInfo(&pTableScanNode->scan, pTableScanNode->pGroupTags, pTableScanNode->groupSort, pHandle,
- pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo);
- if (code) {
- pTaskInfo->code = code;
- tableListDestroy(pTableListInfo);
- qError("failed to createScanTableListInfo, code:%s, %s", tstrerror(code), idstr);
- return NULL;
- }
-
- code = extractTableSchemaInfo(pHandle, &pTableScanNode->scan, pTaskInfo);
- if (code) {
- pTaskInfo->code = terrno;
- tableListDestroy(pTableListInfo);
- return NULL;
- }
-
- pOperator = createTableScanOperatorInfo(pTableScanNode, pHandle, pTableListInfo, pTaskInfo);
- if (NULL == pOperator) {
- pTaskInfo->code = terrno;
- return NULL;
- }
-
- STableScanInfo* pScanInfo = pOperator->info;
- pTaskInfo->cost.pRecoder = &pScanInfo->base.readRecorder;
- } else if (QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN == type) {
- STableMergeScanPhysiNode* pTableScanNode = (STableMergeScanPhysiNode*)pPhyNode;
- STableListInfo* pTableListInfo = tableListCreate();
-
- int32_t code = createScanTableListInfo(&pTableScanNode->scan, pTableScanNode->pGroupTags, true, pHandle,
- pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo);
- if (code) {
- pTaskInfo->code = code;
- tableListDestroy(pTableListInfo);
- qError("failed to createScanTableListInfo, code: %s", tstrerror(code));
- return NULL;
- }
-
- code = extractTableSchemaInfo(pHandle, &pTableScanNode->scan, pTaskInfo);
- if (code) {
- pTaskInfo->code = terrno;
- tableListDestroy(pTableListInfo);
- return NULL;
- }
-
- pOperator = createTableMergeScanOperatorInfo(pTableScanNode, pHandle, pTableListInfo, pTaskInfo);
- if (NULL == pOperator) {
- pTaskInfo->code = terrno;
- tableListDestroy(pTableListInfo);
- return NULL;
- }
-
- STableScanInfo* pScanInfo = pOperator->info;
- pTaskInfo->cost.pRecoder = &pScanInfo->base.readRecorder;
- } else if (QUERY_NODE_PHYSICAL_PLAN_EXCHANGE == type) {
- pOperator = createExchangeOperatorInfo(pHandle ? pHandle->pMsgCb->clientRpc : NULL, (SExchangePhysiNode*)pPhyNode,
- pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN == type) {
- STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode;
- STableListInfo* pTableListInfo = tableListCreate();
-
- if (pHandle->vnode) {
- int32_t code =
- createScanTableListInfo(&pTableScanNode->scan, pTableScanNode->pGroupTags, pTableScanNode->groupSort,
- pHandle, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo);
- if (code) {
- pTaskInfo->code = code;
- tableListDestroy(pTableListInfo);
- qError("failed to createScanTableListInfo, code: %s", tstrerror(code));
- return NULL;
- }
- }
-
- pTaskInfo->schemaInfo.qsw = extractQueriedColumnSchema(&pTableScanNode->scan);
- pOperator = createStreamScanOperatorInfo(pHandle, pTableScanNode, pTagCond, pTableListInfo, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN == type) {
- SSystemTableScanPhysiNode* pSysScanPhyNode = (SSystemTableScanPhysiNode*)pPhyNode;
- pOperator = createSysTableScanOperatorInfo(pHandle, pSysScanPhyNode, pUser, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN == type) {
- STableCountScanPhysiNode* pTblCountScanNode = (STableCountScanPhysiNode*)pPhyNode;
- pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) {
- STagScanPhysiNode* pScanPhyNode = (STagScanPhysiNode*)pPhyNode;
- STableListInfo* pTableListInfo = tableListCreate();
- int32_t code = createScanTableListInfo(pScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond,
- pTagIndexCond, pTaskInfo);
- if (code != TSDB_CODE_SUCCESS) {
- pTaskInfo->code = code;
- qError("failed to getTableList, code: %s", tstrerror(code));
- return NULL;
- }
-
- pOperator = createTagScanOperatorInfo(pHandle, pScanPhyNode, pTableListInfo, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN == type) {
- SBlockDistScanPhysiNode* pBlockNode = (SBlockDistScanPhysiNode*)pPhyNode;
- STableListInfo* pTableListInfo = tableListCreate();
-
- if (pBlockNode->tableType == TSDB_SUPER_TABLE) {
- SArray* pList = taosArrayInit(4, sizeof(STableKeyInfo));
- int32_t code = vnodeGetAllTableList(pHandle->vnode, pBlockNode->uid, pList);
- if (code != TSDB_CODE_SUCCESS) {
- pTaskInfo->code = terrno;
- return NULL;
- }
-
- size_t num = taosArrayGetSize(pList);
- for (int32_t i = 0; i < num; ++i) {
- STableKeyInfo* p = taosArrayGet(pList, i);
- tableListAddTableInfo(pTableListInfo, p->uid, 0);
- }
-
- taosArrayDestroy(pList);
- } else { // Create group with only one table
- tableListAddTableInfo(pTableListInfo, pBlockNode->uid, 0);
- }
-
- pOperator = createDataBlockInfoScanOperator(pHandle, pBlockNode, pTableListInfo, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN == type) {
- SLastRowScanPhysiNode* pScanNode = (SLastRowScanPhysiNode*)pPhyNode;
- STableListInfo* pTableListInfo = tableListCreate();
-
- int32_t code = createScanTableListInfo(&pScanNode->scan, pScanNode->pGroupTags, true, pHandle, pTableListInfo,
- pTagCond, pTagIndexCond, pTaskInfo);
- if (code != TSDB_CODE_SUCCESS) {
- pTaskInfo->code = code;
- return NULL;
- }
-
- code = extractTableSchemaInfo(pHandle, &pScanNode->scan, pTaskInfo);
- if (code != TSDB_CODE_SUCCESS) {
- pTaskInfo->code = code;
- return NULL;
- }
-
- pOperator = createCacherowsScanOperator(pScanNode, pHandle, pTableListInfo, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_PROJECT == type) {
- pOperator = createProjectOperatorInfo(NULL, (SProjectPhysiNode*)pPhyNode, pTaskInfo);
- } else {
- terrno = TSDB_CODE_INVALID_PARA;
- return NULL;
- }
-
- if (pOperator != NULL) { // todo moved away
- pOperator->resultDataBlockId = pPhyNode->pOutputDataBlockDesc->dataBlockId;
- }
-
- return pOperator;
- }
-
- size_t size = LIST_LENGTH(pPhyNode->pChildren);
- SOperatorInfo** ops = taosMemoryCalloc(size, POINTER_BYTES);
- if (ops == NULL) {
- return NULL;
- }
-
- for (int32_t i = 0; i < size; ++i) {
- SPhysiNode* pChildNode = (SPhysiNode*)nodesListGetNode(pPhyNode->pChildren, i);
- ops[i] = createOperatorTree(pChildNode, pTaskInfo, pHandle, pTagCond, pTagIndexCond, pUser);
- if (ops[i] == NULL) {
- taosMemoryFree(ops);
- return NULL;
- }
- }
-
- SOperatorInfo* pOptr = NULL;
- if (QUERY_NODE_PHYSICAL_PLAN_PROJECT == type) {
- pOptr = createProjectOperatorInfo(ops[0], (SProjectPhysiNode*)pPhyNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_HASH_AGG == type) {
- SAggPhysiNode* pAggNode = (SAggPhysiNode*)pPhyNode;
- if (pAggNode->pGroupKeys != NULL) {
- pOptr = createGroupOperatorInfo(ops[0], pAggNode, pTaskInfo);
- } else {
- pOptr = createAggregateOperatorInfo(ops[0], pAggNode, pTaskInfo);
- }
- } else if (QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL == type) {
- SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;
- pOptr = createIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) {
- pOptr = createStreamIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == type) {
- SMergeAlignedIntervalPhysiNode* pIntervalPhyNode = (SMergeAlignedIntervalPhysiNode*)pPhyNode;
- pOptr = createMergeAlignedIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL == type) {
- SMergeIntervalPhysiNode* pIntervalPhyNode = (SMergeIntervalPhysiNode*)pPhyNode;
- pOptr = createMergeIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL == type) {
- int32_t children = 0;
- pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
- } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL == type) {
- int32_t children = pHandle->numOfVgroups;
- pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
- } else if (QUERY_NODE_PHYSICAL_PLAN_SORT == type) {
- pOptr = createSortOperatorInfo(ops[0], (SSortPhysiNode*)pPhyNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT == type) {
- pOptr = createGroupSortOperatorInfo(ops[0], (SGroupSortPhysiNode*)pPhyNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE == type) {
- SMergePhysiNode* pMergePhyNode = (SMergePhysiNode*)pPhyNode;
- pOptr = createMultiwayMergeOperatorInfo(ops, size, pMergePhyNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION == type) {
- SSessionWinodwPhysiNode* pSessionNode = (SSessionWinodwPhysiNode*)pPhyNode;
- pOptr = createSessionAggOperatorInfo(ops[0], pSessionNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION == type) {
- pOptr = createStreamSessionAggOperatorInfo(ops[0], pPhyNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION == type) {
- int32_t children = 0;
- pOptr = createStreamFinalSessionAggOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
- } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION == type) {
- int32_t children = pHandle->numOfVgroups;
- pOptr = createStreamFinalSessionAggOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
- } else if (QUERY_NODE_PHYSICAL_PLAN_PARTITION == type) {
- pOptr = createPartitionOperatorInfo(ops[0], (SPartitionPhysiNode*)pPhyNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION == type) {
- pOptr = createStreamPartitionOperatorInfo(ops[0], (SStreamPartitionPhysiNode*)pPhyNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE == type) {
- SStateWinodwPhysiNode* pStateNode = (SStateWinodwPhysiNode*)pPhyNode;
- pOptr = createStatewindowOperatorInfo(ops[0], pStateNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE == type) {
- pOptr = createStreamStateAggOperatorInfo(ops[0], pPhyNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN == type) {
- pOptr = createMergeJoinOperatorInfo(ops, size, (SSortMergeJoinPhysiNode*)pPhyNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_FILL == type) {
- pOptr = createFillOperatorInfo(ops[0], (SFillPhysiNode*)pPhyNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL == type) {
- pOptr = createStreamFillOperatorInfo(ops[0], (SStreamFillPhysiNode*)pPhyNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC == type) {
- pOptr = createIndefinitOutputOperatorInfo(ops[0], pPhyNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC == type) {
- pOptr = createTimeSliceOperatorInfo(ops[0], pPhyNode, pTaskInfo);
- } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT == type) {
- pOptr = createEventwindowOperatorInfo(ops[0], pPhyNode, pTaskInfo);
- } else {
- terrno = TSDB_CODE_INVALID_PARA;
- taosMemoryFree(ops);
- return NULL;
- }
-
- taosMemoryFree(ops);
- if (pOptr) {
- pOptr->resultDataBlockId = pPhyNode->pOutputDataBlockDesc->dataBlockId;
- }
-
- return pOptr;
-}
-
-static int32_t extractTbscanInStreamOpTree(SOperatorInfo* pOperator, STableScanInfo** ppInfo) {
- if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
- if (pOperator->numOfDownstream == 0) {
- qError("failed to find stream scan operator");
- return TSDB_CODE_APP_ERROR;
- }
-
- if (pOperator->numOfDownstream > 1) {
- qError("join not supported for stream block scan");
- return TSDB_CODE_APP_ERROR;
- }
- return extractTbscanInStreamOpTree(pOperator->pDownstream[0], ppInfo);
- } else {
- SStreamScanInfo* pInfo = pOperator->info;
- *ppInfo = pInfo->pTableScanOp->info;
- return 0;
- }
-}
-
-int32_t extractTableScanNode(SPhysiNode* pNode, STableScanPhysiNode** ppNode) {
- if (pNode->pChildren == NULL || LIST_LENGTH(pNode->pChildren) == 0) {
- if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == pNode->type) {
- *ppNode = (STableScanPhysiNode*)pNode;
- return 0;
- } else {
- terrno = TSDB_CODE_APP_ERROR;
- return -1;
- }
- } else {
- if (LIST_LENGTH(pNode->pChildren) != 1) {
- terrno = TSDB_CODE_APP_ERROR;
- return -1;
- }
- SPhysiNode* pChildNode = (SPhysiNode*)nodesListGetNode(pNode->pChildren, 0);
- return extractTableScanNode(pChildNode, ppNode);
- }
- return -1;
-}
-
int32_t createDataSinkParam(SDataSinkNode* pNode, void** pParam, SExecTaskInfo* pTask, SReadHandle* readHandle) {
switch (pNode->type) {
case QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT: {
@@ -1641,131 +1059,6 @@ int32_t createDataSinkParam(SDataSinkNode* pNode, void** pParam, SExecTaskInfo*
return TSDB_CODE_SUCCESS;
}
-int32_t createExecTaskInfo(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
- int32_t vgId, char* sql, EOPTR_EXEC_MODEL model) {
- *pTaskInfo = doCreateExecTaskInfo(pPlan->id.queryId, taskId, vgId, model, pPlan->dbFName);
- if (*pTaskInfo == NULL) {
- goto _complete;
- }
-
- if (pHandle) {
- if (pHandle->pStateBackend) {
- (*pTaskInfo)->streamInfo.pState = pHandle->pStateBackend;
- }
- }
-
- (*pTaskInfo)->sql = sql;
- sql = NULL;
-
- (*pTaskInfo)->pSubplan = pPlan;
- (*pTaskInfo)->pRoot =
- createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, pPlan->pTagCond, pPlan->pTagIndexCond, pPlan->user);
-
- if (NULL == (*pTaskInfo)->pRoot) {
- terrno = (*pTaskInfo)->code;
- goto _complete;
- }
-
- return TSDB_CODE_SUCCESS;
-
-_complete:
- taosMemoryFree(sql);
- doDestroyTask(*pTaskInfo);
- return terrno;
-}
-
-static void freeBlock(void* pParam) {
- SSDataBlock* pBlock = *(SSDataBlock**)pParam;
- blockDataDestroy(pBlock);
-}
-
-void doDestroyTask(SExecTaskInfo* pTaskInfo) {
- qDebug("%s execTask is freed", GET_TASKID(pTaskInfo));
- destroyOperatorInfo(pTaskInfo->pRoot);
- cleanupTableSchemaInfo(&pTaskInfo->schemaInfo);
- cleanupStreamInfo(&pTaskInfo->streamInfo);
-
- if (!pTaskInfo->localFetch.localExec) {
- nodesDestroyNode((SNode*)pTaskInfo->pSubplan);
- }
-
- taosArrayDestroyEx(pTaskInfo->pResultBlockList, freeBlock);
- taosArrayDestroy(pTaskInfo->stopInfo.pStopInfo);
- taosMemoryFreeClear(pTaskInfo->sql);
- taosMemoryFreeClear(pTaskInfo->id.str);
- taosMemoryFreeClear(pTaskInfo);
-}
-
-static int64_t getQuerySupportBufSize(size_t numOfTables) {
- size_t s1 = sizeof(STableQueryInfo);
- // size_t s3 = sizeof(STableCheckInfo); buffer consumption in tsdb
- return (int64_t)(s1 * 1.5 * numOfTables);
-}
-
-int32_t checkForQueryBuf(size_t numOfTables) {
- int64_t t = getQuerySupportBufSize(numOfTables);
- if (tsQueryBufferSizeBytes < 0) {
- return TSDB_CODE_SUCCESS;
- } else if (tsQueryBufferSizeBytes > 0) {
- while (1) {
- int64_t s = tsQueryBufferSizeBytes;
- int64_t remain = s - t;
- if (remain >= 0) {
- if (atomic_val_compare_exchange_64(&tsQueryBufferSizeBytes, s, remain) == s) {
- return TSDB_CODE_SUCCESS;
- }
- } else {
- return TSDB_CODE_QRY_NOT_ENOUGH_BUFFER;
- }
- }
- }
-
- // disable query processing if the value of tsQueryBufferSize is zero.
- return TSDB_CODE_QRY_NOT_ENOUGH_BUFFER;
-}
-
-void releaseQueryBuf(size_t numOfTables) {
- if (tsQueryBufferSizeBytes < 0) {
- return;
- }
-
- int64_t t = getQuerySupportBufSize(numOfTables);
-
- // restore value is not enough buffer available
- atomic_add_fetch_64(&tsQueryBufferSizeBytes, t);
-}
-
-int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInfoList) {
- SExplainExecInfo execInfo = {0};
- SExplainExecInfo* pExplainInfo = taosArrayPush(pExecInfoList, &execInfo);
-
- pExplainInfo->numOfRows = operatorInfo->resultInfo.totalRows;
- pExplainInfo->startupCost = operatorInfo->cost.openCost;
- pExplainInfo->totalCost = operatorInfo->cost.totalCost;
- pExplainInfo->verboseLen = 0;
- pExplainInfo->verboseInfo = NULL;
-
- if (operatorInfo->fpSet.getExplainFn) {
- int32_t code =
- operatorInfo->fpSet.getExplainFn(operatorInfo, &pExplainInfo->verboseInfo, &pExplainInfo->verboseLen);
- if (code) {
- qError("%s operator getExplainFn failed, code:%s", GET_TASKID(operatorInfo->pTaskInfo), tstrerror(code));
- return code;
- }
- }
-
- int32_t code = 0;
- for (int32_t i = 0; i < operatorInfo->numOfDownstream; ++i) {
- code = getOperatorExplainExecInfo(operatorInfo->pDownstream[i], pExecInfoList);
- if (code != TSDB_CODE_SUCCESS) {
- // taosMemoryFreeClear(*pRes);
- return TSDB_CODE_OUT_OF_MEMORY;
- }
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
int32_t setOutputBuf(SStreamState* pState, STimeWindow* win, SResultRow** pResult, int64_t tableGroupId,
SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset, SAggSupporter* pAggSup) {
SWinKey key = {
@@ -2007,25 +1300,3 @@ void qStreamCloseTsdbReader(void* task) {
}
}
}
-
-static void extractTableList(SArray* pList, const SOperatorInfo* pOperator) {
- if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
- SStreamScanInfo* pScanInfo = pOperator->info;
- STableScanInfo* pTableScanInfo = pScanInfo->pTableScanOp->info;
- taosArrayPush(pList, &pTableScanInfo->base.pTableListInfo);
- } else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
- STableScanInfo* pScanInfo = pOperator->info;
- taosArrayPush(pList, &pScanInfo->base.pTableListInfo);
- } else {
- if (pOperator->pDownstream != NULL && pOperator->pDownstream[0] != NULL) {
- extractTableList(pList, pOperator->pDownstream[0]);
- }
- }
-}
-
-SArray* getTableListInfo(const SExecTaskInfo* pTaskInfo) {
- SArray* pArray = taosArrayInit(0, POINTER_BYTES);
- SOperatorInfo* pOperator = pTaskInfo->pRoot;
- extractTableList(pArray, pOperator);
- return pArray;
-}
\ No newline at end of file
diff --git a/source/libs/executor/src/filloperator.c b/source/libs/executor/src/filloperator.c
index 234f1a666c..0ac9e6097f 100644
--- a/source/libs/executor/src/filloperator.c
+++ b/source/libs/executor/src/filloperator.c
@@ -20,16 +20,18 @@
#include "tmsg.h"
#include "ttypes.h"
-#include "executorimpl.h"
+#include "executorInt.h"
#include "tcommon.h"
#include "thash.h"
#include "ttime.h"
-#include "executorInt.h"
#include "function.h"
#include "querynodes.h"
#include "tdatablock.h"
#include "tfill.h"
+#include "operator.h"
+#include "querytask.h"
+
#define FILL_POS_INVALID 0
#define FILL_POS_START 1
diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c
index 3d9bacf39f..47338d4469 100644
--- a/source/libs/executor/src/groupoperator.c
+++ b/source/libs/executor/src/groupoperator.c
@@ -22,7 +22,8 @@
#include "tmsg.h"
#include "executorInt.h"
-#include "executorimpl.h"
+#include "operator.h"
+#include "querytask.h"
#include "tcompare.h"
#include "thash.h"
#include "ttypes.h"
@@ -926,7 +927,6 @@ int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo,
SResultRow* pResultRow =
doSetResultOutBufByKey(pBuf, pResultRowInfo, (char*)pData, bytes, true, groupId, pTaskInfo, false, pAggSup, false);
- assert(pResultRow != NULL);
setResultRowInitCtx(pResultRow, pCtx, numOfCols, pOperator->exprSupp.rowEntryInfoOffset);
return TSDB_CODE_SUCCESS;
diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c
index 31ff11eec5..754b5f4737 100644
--- a/source/libs/executor/src/joinoperator.c
+++ b/source/libs/executor/src/joinoperator.c
@@ -13,11 +13,13 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include "executorInt.h"
#include "filter.h"
-#include "executorimpl.h"
#include "function.h"
+#include "operator.h"
#include "os.h"
#include "querynodes.h"
+#include "querytask.h"
#include "tcompare.h"
#include "tdatablock.h"
#include "thash.h"
diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c
new file mode 100644
index 0000000000..729178dc60
--- /dev/null
+++ b/source/libs/executor/src/operator.c
@@ -0,0 +1,578 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "filter.h"
+#include "function.h"
+#include "os.h"
+#include "tname.h"
+
+#include "tglobal.h"
+
+#include "executorInt.h"
+#include "index.h"
+#include "operator.h"
+#include "query.h"
+#include "querytask.h"
+#include "vnode.h"
+
+SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, __optr_fn_t cleanup,
+ __optr_close_fn_t closeFn, __optr_reqBuf_fn_t reqBufFn,
+ __optr_explain_fn_t explain) {
+ SOperatorFpSet fpSet = {
+ ._openFn = openFn,
+ .getNextFn = nextFn,
+ .cleanupFn = cleanup,
+ .closeFn = closeFn,
+ .reqBufFn = reqBufFn,
+ .getExplainFn = explain,
+ };
+
+ return fpSet;
+}
+
+int32_t optrDummyOpenFn(SOperatorInfo* pOperator) {
+ OPTR_SET_OPENED(pOperator);
+ pOperator->cost.openCost = 0;
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t num) {
+ p->pDownstream = taosMemoryCalloc(1, num * POINTER_BYTES);
+ if (p->pDownstream == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ memcpy(p->pDownstream, pDownstream, num * POINTER_BYTES);
+ p->numOfDownstream = num;
+ return TSDB_CODE_SUCCESS;
+}
+
+void setOperatorCompleted(SOperatorInfo* pOperator) {
+ pOperator->status = OP_EXEC_DONE;
+ pOperator->cost.totalCost = (taosGetTimestampUs() - pOperator->pTaskInfo->cost.start) / 1000.0;
+ setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED);
+}
+
+void setOperatorInfo(SOperatorInfo* pOperator, const char* name, int32_t type, bool blocking, int32_t status,
+ void* pInfo, SExecTaskInfo* pTaskInfo) {
+ pOperator->name = (char*)name;
+ pOperator->operatorType = type;
+ pOperator->blocking = blocking;
+ pOperator->status = status;
+ pOperator->info = pInfo;
+ pOperator->pTaskInfo = pTaskInfo;
+}
+
+// each operator should set its own function to return the total cost buffer
+int32_t optrDefaultBufFn(SOperatorInfo* pOperator) {
+ if (pOperator->blocking) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+static int64_t getQuerySupportBufSize(size_t numOfTables) {
+ size_t s1 = sizeof(STableQueryInfo);
+ // size_t s3 = sizeof(STableCheckInfo); buffer consumption in tsdb
+ return (int64_t)(s1 * 1.5 * numOfTables);
+}
+
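+// reserve query buffer quota from tsQueryBufferSizeBytes for the given number of tables; fails with TSDB_CODE_QRY_NOT_ENOUGH_BUFFER when the quota is exhausted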
+int32_t checkForQueryBuf(size_t numOfTables) {
+ int64_t t = getQuerySupportBufSize(numOfTables);
+ if (tsQueryBufferSizeBytes < 0) {
+ return TSDB_CODE_SUCCESS;
+ } else if (tsQueryBufferSizeBytes > 0) {
+ while (1) {
+ int64_t s = tsQueryBufferSizeBytes;
+ int64_t remain = s - t;
+ if (remain >= 0) {
+ if (atomic_val_compare_exchange_64(&tsQueryBufferSizeBytes, s, remain) == s) {
+ return TSDB_CODE_SUCCESS;
+ }
+ } else {
+ return TSDB_CODE_QRY_NOT_ENOUGH_BUFFER;
+ }
+ }
+ }
+
+ // disable query processing if the value of tsQueryBufferSize is zero.
+ return TSDB_CODE_QRY_NOT_ENOUGH_BUFFER;
+}
+
+void releaseQueryBuf(size_t numOfTables) {
+ if (tsQueryBufferSizeBytes < 0) {
+ return;
+ }
+
+ int64_t t = getQuerySupportBufSize(numOfTables);
+
+ // give the reserved buffer size back to the global query buffer quota
+ atomic_add_fetch_64(&tsQueryBufferSizeBytes, t);
+}
+
+typedef enum {
+ OPTR_FN_RET_CONTINUE = 0x1,
+ OPTR_FN_RET_ABORT = 0x2,
+} ERetType;
+
+typedef struct STraverParam {
+ void* pRet;
+ int32_t code;
+ void* pParam;
+} STraverParam;
+
+// helper for iterating over the operator tree
+typedef ERetType (*optr_fn_t)(SOperatorInfo *pOperator, STraverParam *pParam, const char* pIdstr);
+
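+// pre-order traversal of the operator tree; stops once fn returns OPTR_FN_RET_ABORT or an error code is recorded in pParam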
+void traverseOperatorTree(SOperatorInfo* pOperator, optr_fn_t fn, STraverParam* pParam, const char* id) {
+ if (pOperator == NULL) {
+ return;
+ }
+
+ ERetType ret = fn(pOperator, pParam, id);
+ if (ret == OPTR_FN_RET_ABORT || pParam->code != TSDB_CODE_SUCCESS) {
+ return;
+ }
+
+ for (int32_t i = 0; i < pOperator->numOfDownstream; ++i) {
+ traverseOperatorTree(pOperator->pDownstream[i], fn, pParam, id);
+ if (pParam->code != 0) {
+ break;
+ }
+ }
+}
+
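+// traversal callback: stop at the first operator whose type matches the type passed via pParam->pParam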
+ERetType extractOperatorInfo(SOperatorInfo* pOperator, STraverParam* pParam, const char* pIdStr) {
+ STraverParam* p = pParam;
+ if (pOperator->operatorType == *(int32_t*)p->pParam) {
+ p->pRet = pOperator;
+ return OPTR_FN_RET_ABORT;
+ } else {
+ return OPTR_FN_RET_CONTINUE;
+ }
+}
+
+// locate the first operator of the given type (e.g. QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) in the operator tree
+SOperatorInfo* extractOperatorInTree(SOperatorInfo* pOperator, int32_t type, const char* id) {
+ if (pOperator == NULL) {
+ qError("invalid operator, failed to find tableScanOperator %s", id);
+ terrno = TSDB_CODE_PAR_INTERNAL_ERROR;
+ return NULL;
+ }
+
+ STraverParam p = {.pParam = &type, .pRet = NULL};
+ traverseOperatorTree(pOperator, extractOperatorInfo, &p, id);
+ if (p.code != 0) {
+ terrno = p.code;
+ return NULL;
+ } else {
+ return p.pRet;
+ }
+}
+
+typedef struct SExtScanInfo {
+ int32_t order;
+ int32_t scanFlag;
+ int32_t inheritUsOrder;
+} SExtScanInfo;
+
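+// traversal callback: derive the scan order and scan flag from the first scan or exchange operator encountered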
+static ERetType extractScanInfo(SOperatorInfo* pOperator, STraverParam* pParam, const char* pIdStr) {
+ int32_t type = pOperator->operatorType;
+ SExtScanInfo* pInfo = pParam->pParam;
+
+ if (type == QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN ||
+ type == QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN ||
+ type == QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN) {
+ pInfo->order = TSDB_ORDER_ASC;
+ pInfo->scanFlag = MAIN_SCAN;
+ return OPTR_FN_RET_ABORT;
+ } else if (type == QUERY_NODE_PHYSICAL_PLAN_EXCHANGE) {
+ if (!pInfo->inheritUsOrder) {
+ pInfo->order = TSDB_ORDER_ASC;
+ }
+ pInfo->scanFlag = MAIN_SCAN;
+ return OPTR_FN_RET_ABORT;
+ } else if (type == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
+ STableScanInfo* pTableScanInfo = pOperator->info;
+ pInfo->order = pTableScanInfo->base.cond.order;
+ pInfo->scanFlag = pTableScanInfo->base.scanFlag;
+ return OPTR_FN_RET_ABORT;
+ } else if (type == QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN) {
+ STableMergeScanInfo* pTableScanInfo = pOperator->info;
+ pInfo->order = pTableScanInfo->base.cond.order;
+ pInfo->scanFlag = pTableScanInfo->base.scanFlag;
+ return OPTR_FN_RET_ABORT;
+ } else {
+ return OPTR_FN_RET_CONTINUE;
+ }
+}
+
+int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag, bool inheritUsOrder) {
+ SExtScanInfo info = {.inheritUsOrder = inheritUsOrder, .order = *order};
+ STraverParam p = {.pParam = &info};
+
+ traverseOperatorTree(pOperator, extractScanInfo, &p, NULL);
+ *order = info.order;
+ *scanFlag = info.scanFlag;
+
+ ASSERT(*order == TSDB_ORDER_ASC || *order == TSDB_ORDER_DESC);
+ return p.code;
+}
+
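+// traversal callback: set the close flag on the tsdb reader of the underlying table scan (or stream scan) operator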
+static ERetType doStopDataReader(SOperatorInfo* pOperator, STraverParam* pParam, const char* pIdStr) {
+ if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
+ STableScanInfo* pInfo = pOperator->info;
+
+ if (pInfo->base.dataReader != NULL) {
+ tsdbReaderSetCloseFlag(pInfo->base.dataReader);
+ }
+ return OPTR_FN_RET_ABORT;
+ } else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
+ SStreamScanInfo* pInfo = pOperator->info;
+
+ if (pInfo->pTableScanOp != NULL) {
+ STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
+ if (pTableScanInfo != NULL && pTableScanInfo->base.dataReader != NULL) {
+ tsdbReaderSetCloseFlag(pTableScanInfo->base.dataReader);
+ }
+ }
+
+ return OPTR_FN_RET_ABORT;
+ }
+
+ return OPTR_FN_RET_CONTINUE;
+}
+
+int32_t stopTableScanOperator(SOperatorInfo* pOperator, const char* pIdStr) {
+ STraverParam p = {0};
+ traverseOperatorTree(pOperator, doStopDataReader, &p, pIdStr);
+ return p.code;
+}
+
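+// recursively create the operator tree from the physical plan: leaf nodes become scan/exchange operators, inner nodes are built on top of their children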
+SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle, SNode* pTagCond,
+ SNode* pTagIndexCond, const char* pUser, const char* dbname) {
+ int32_t type = nodeType(pPhyNode);
+ const char* idstr = GET_TASKID(pTaskInfo);
+
+ if (pPhyNode->pChildren == NULL || LIST_LENGTH(pPhyNode->pChildren) == 0) {
+ SOperatorInfo* pOperator = NULL;
+ if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == type) {
+ STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode;
+
+ // NOTE: this is a patch to fix the physical plan
+ // TODO remove it later
+ if (pTableScanNode->scan.node.pLimit != NULL) {
+ pTableScanNode->groupSort = true;
+ }
+
+ STableListInfo* pTableListInfo = tableListCreate();
+ int32_t code =
+ createScanTableListInfo(&pTableScanNode->scan, pTableScanNode->pGroupTags, pTableScanNode->groupSort, pHandle,
+ pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo);
+ if (code) {
+ pTaskInfo->code = code;
+ tableListDestroy(pTableListInfo);
+ qError("failed to createScanTableListInfo, code:%s, %s", tstrerror(code), idstr);
+ return NULL;
+ }
+
+ code = initQueriedTableSchemaInfo(pHandle, &pTableScanNode->scan, dbname, pTaskInfo);
+ if (code) {
+ pTaskInfo->code = code;
+ tableListDestroy(pTableListInfo);
+ return NULL;
+ }
+
+ pOperator = createTableScanOperatorInfo(pTableScanNode, pHandle, pTableListInfo, pTaskInfo);
+ if (NULL == pOperator) {
+ pTaskInfo->code = terrno;
+ tableListDestroy(pTableListInfo);
+ return NULL;
+ }
+
+ STableScanInfo* pScanInfo = pOperator->info;
+ pTaskInfo->cost.pRecoder = &pScanInfo->base.readRecorder;
+ } else if (QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN == type) {
+ STableMergeScanPhysiNode* pTableScanNode = (STableMergeScanPhysiNode*)pPhyNode;
+ STableListInfo* pTableListInfo = tableListCreate();
+
+ int32_t code = createScanTableListInfo(&pTableScanNode->scan, pTableScanNode->pGroupTags, true, pHandle,
+ pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo);
+ if (code) {
+ pTaskInfo->code = code;
+ tableListDestroy(pTableListInfo);
+ qError("failed to createScanTableListInfo, code: %s", tstrerror(code));
+ return NULL;
+ }
+
+ code = initQueriedTableSchemaInfo(pHandle, &pTableScanNode->scan, dbname, pTaskInfo);
+ if (code) {
+ pTaskInfo->code = terrno;
+ tableListDestroy(pTableListInfo);
+ return NULL;
+ }
+
+ pOperator = createTableMergeScanOperatorInfo(pTableScanNode, pHandle, pTableListInfo, pTaskInfo);
+ if (NULL == pOperator) {
+ pTaskInfo->code = terrno;
+ tableListDestroy(pTableListInfo);
+ return NULL;
+ }
+
+ STableScanInfo* pScanInfo = pOperator->info;
+ pTaskInfo->cost.pRecoder = &pScanInfo->base.readRecorder;
+ } else if (QUERY_NODE_PHYSICAL_PLAN_EXCHANGE == type) {
+ pOperator = createExchangeOperatorInfo(pHandle ? pHandle->pMsgCb->clientRpc : NULL, (SExchangePhysiNode*)pPhyNode,
+ pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN == type) {
+ STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode;
+ STableListInfo* pTableListInfo = tableListCreate();
+
+ if (pHandle->vnode) {
+ int32_t code =
+ createScanTableListInfo(&pTableScanNode->scan, pTableScanNode->pGroupTags, pTableScanNode->groupSort,
+ pHandle, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo);
+ if (code) {
+ pTaskInfo->code = code;
+ tableListDestroy(pTableListInfo);
+ qError("failed to createScanTableListInfo, code: %s", tstrerror(code));
+ return NULL;
+ }
+ }
+
+ pTaskInfo->schemaInfo.qsw = extractQueriedColumnSchema(&pTableScanNode->scan);
+ pOperator = createStreamScanOperatorInfo(pHandle, pTableScanNode, pTagCond, pTableListInfo, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN == type) {
+ SSystemTableScanPhysiNode* pSysScanPhyNode = (SSystemTableScanPhysiNode*)pPhyNode;
+ pOperator = createSysTableScanOperatorInfo(pHandle, pSysScanPhyNode, pUser, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN == type) {
+ STableCountScanPhysiNode* pTblCountScanNode = (STableCountScanPhysiNode*)pPhyNode;
+ pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) {
+ STagScanPhysiNode* pScanPhyNode = (STagScanPhysiNode*)pPhyNode;
+ STableListInfo* pTableListInfo = tableListCreate();
+ int32_t code = createScanTableListInfo(pScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond,
+ pTagIndexCond, pTaskInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ pTaskInfo->code = code;
+ qError("failed to getTableList, code: %s", tstrerror(code));
+ return NULL;
+ }
+
+ pOperator = createTagScanOperatorInfo(pHandle, pScanPhyNode, pTableListInfo, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN == type) {
+ SBlockDistScanPhysiNode* pBlockNode = (SBlockDistScanPhysiNode*)pPhyNode;
+ STableListInfo* pTableListInfo = tableListCreate();
+
+ if (pBlockNode->tableType == TSDB_SUPER_TABLE) {
+ SArray* pList = taosArrayInit(4, sizeof(STableKeyInfo));
+ int32_t code = vnodeGetAllTableList(pHandle->vnode, pBlockNode->uid, pList);
+ if (code != TSDB_CODE_SUCCESS) {
+ pTaskInfo->code = terrno;
+ return NULL;
+ }
+
+ size_t num = taosArrayGetSize(pList);
+ for (int32_t i = 0; i < num; ++i) {
+ STableKeyInfo* p = taosArrayGet(pList, i);
+ tableListAddTableInfo(pTableListInfo, p->uid, 0);
+ }
+
+ taosArrayDestroy(pList);
+ } else { // Create group with only one table
+ tableListAddTableInfo(pTableListInfo, pBlockNode->uid, 0);
+ }
+
+ pOperator = createDataBlockInfoScanOperator(pHandle, pBlockNode, pTableListInfo, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN == type) {
+ SLastRowScanPhysiNode* pScanNode = (SLastRowScanPhysiNode*)pPhyNode;
+ STableListInfo* pTableListInfo = tableListCreate();
+
+ int32_t code = createScanTableListInfo(&pScanNode->scan, pScanNode->pGroupTags, true, pHandle, pTableListInfo,
+ pTagCond, pTagIndexCond, pTaskInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ pTaskInfo->code = code;
+ return NULL;
+ }
+
+ code = initQueriedTableSchemaInfo(pHandle, &pScanNode->scan, dbname, pTaskInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ pTaskInfo->code = code;
+ return NULL;
+ }
+
+ pOperator = createCacherowsScanOperator(pScanNode, pHandle, pTableListInfo, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_PROJECT == type) {
+ pOperator = createProjectOperatorInfo(NULL, (SProjectPhysiNode*)pPhyNode, pTaskInfo);
+ } else {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return NULL;
+ }
+
+    if (pOperator != NULL) {  // TODO: move this assignment elsewhere
+ pOperator->resultDataBlockId = pPhyNode->pOutputDataBlockDesc->dataBlockId;
+ }
+
+ return pOperator;
+ }
+
+ size_t size = LIST_LENGTH(pPhyNode->pChildren);
+ SOperatorInfo** ops = taosMemoryCalloc(size, POINTER_BYTES);
+ if (ops == NULL) {
+ return NULL;
+ }
+
+ for (int32_t i = 0; i < size; ++i) {
+ SPhysiNode* pChildNode = (SPhysiNode*)nodesListGetNode(pPhyNode->pChildren, i);
+ ops[i] = createOperator(pChildNode, pTaskInfo, pHandle, pTagCond, pTagIndexCond, pUser, dbname);
+ if (ops[i] == NULL) {
+ taosMemoryFree(ops);
+ return NULL;
+ }
+ }
+
+ SOperatorInfo* pOptr = NULL;
+ if (QUERY_NODE_PHYSICAL_PLAN_PROJECT == type) {
+ pOptr = createProjectOperatorInfo(ops[0], (SProjectPhysiNode*)pPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_HASH_AGG == type) {
+ SAggPhysiNode* pAggNode = (SAggPhysiNode*)pPhyNode;
+ if (pAggNode->pGroupKeys != NULL) {
+ pOptr = createGroupOperatorInfo(ops[0], pAggNode, pTaskInfo);
+ } else {
+ pOptr = createAggregateOperatorInfo(ops[0], pAggNode, pTaskInfo);
+ }
+ } else if (QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL == type) {
+ SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;
+ pOptr = createIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) {
+ pOptr = createStreamIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == type) {
+ SMergeAlignedIntervalPhysiNode* pIntervalPhyNode = (SMergeAlignedIntervalPhysiNode*)pPhyNode;
+ pOptr = createMergeAlignedIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL == type) {
+ SMergeIntervalPhysiNode* pIntervalPhyNode = (SMergeIntervalPhysiNode*)pPhyNode;
+ pOptr = createMergeIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL == type) {
+ int32_t children = 0;
+ pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL == type) {
+ int32_t children = pHandle->numOfVgroups;
+ pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_SORT == type) {
+ pOptr = createSortOperatorInfo(ops[0], (SSortPhysiNode*)pPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT == type) {
+ pOptr = createGroupSortOperatorInfo(ops[0], (SGroupSortPhysiNode*)pPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE == type) {
+ SMergePhysiNode* pMergePhyNode = (SMergePhysiNode*)pPhyNode;
+ pOptr = createMultiwayMergeOperatorInfo(ops, size, pMergePhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION == type) {
+ SSessionWinodwPhysiNode* pSessionNode = (SSessionWinodwPhysiNode*)pPhyNode;
+ pOptr = createSessionAggOperatorInfo(ops[0], pSessionNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION == type) {
+ pOptr = createStreamSessionAggOperatorInfo(ops[0], pPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION == type) {
+ int32_t children = 0;
+ pOptr = createStreamFinalSessionAggOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION == type) {
+ int32_t children = pHandle->numOfVgroups;
+ pOptr = createStreamFinalSessionAggOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_PARTITION == type) {
+ pOptr = createPartitionOperatorInfo(ops[0], (SPartitionPhysiNode*)pPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION == type) {
+ pOptr = createStreamPartitionOperatorInfo(ops[0], (SStreamPartitionPhysiNode*)pPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE == type) {
+ SStateWinodwPhysiNode* pStateNode = (SStateWinodwPhysiNode*)pPhyNode;
+ pOptr = createStatewindowOperatorInfo(ops[0], pStateNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE == type) {
+ pOptr = createStreamStateAggOperatorInfo(ops[0], pPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN == type) {
+ pOptr = createMergeJoinOperatorInfo(ops, size, (SSortMergeJoinPhysiNode*)pPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_FILL == type) {
+ pOptr = createFillOperatorInfo(ops[0], (SFillPhysiNode*)pPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL == type) {
+ pOptr = createStreamFillOperatorInfo(ops[0], (SStreamFillPhysiNode*)pPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC == type) {
+ pOptr = createIndefinitOutputOperatorInfo(ops[0], pPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC == type) {
+ pOptr = createTimeSliceOperatorInfo(ops[0], pPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT == type) {
+ pOptr = createEventwindowOperatorInfo(ops[0], pPhyNode, pTaskInfo);
+ } else {
+ terrno = TSDB_CODE_INVALID_PARA;
+ taosMemoryFree(ops);
+ return NULL;
+ }
+
+ taosMemoryFree(ops);
+ if (pOptr) {
+ pOptr->resultDataBlockId = pPhyNode->pOutputDataBlockDesc->dataBlockId;
+ }
+
+ return pOptr;
+}
+
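+// close this operator, recursively destroy its downstream operators, then release the expression support and the operator itself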
+void destroyOperator(SOperatorInfo* pOperator) {
+ if (pOperator == NULL) {
+ return;
+ }
+
+ if (pOperator->fpSet.closeFn != NULL) {
+ pOperator->fpSet.closeFn(pOperator->info);
+ }
+
+ if (pOperator->pDownstream != NULL) {
+ for (int32_t i = 0; i < pOperator->numOfDownstream; ++i) {
+ destroyOperator(pOperator->pDownstream[i]);
+ }
+
+ taosMemoryFreeClear(pOperator->pDownstream);
+ pOperator->numOfDownstream = 0;
+ }
+
+ cleanupExprSupp(&pOperator->exprSupp);
+ taosMemoryFreeClear(pOperator);
+}
+
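+// collect explain statistics (row count, open/total cost, operator-specific verbose info) for this operator and, recursively, for all downstream operators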
+int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInfoList) {
+ SExplainExecInfo execInfo = {0};
+ SExplainExecInfo* pExplainInfo = taosArrayPush(pExecInfoList, &execInfo);
+
+ pExplainInfo->numOfRows = operatorInfo->resultInfo.totalRows;
+ pExplainInfo->startupCost = operatorInfo->cost.openCost;
+ pExplainInfo->totalCost = operatorInfo->cost.totalCost;
+ pExplainInfo->verboseLen = 0;
+ pExplainInfo->verboseInfo = NULL;
+
+ if (operatorInfo->fpSet.getExplainFn) {
+ int32_t code =
+ operatorInfo->fpSet.getExplainFn(operatorInfo, &pExplainInfo->verboseInfo, &pExplainInfo->verboseLen);
+ if (code) {
+ qError("%s operator getExplainFn failed, code:%s", GET_TASKID(operatorInfo->pTaskInfo), tstrerror(code));
+ return code;
+ }
+ }
+
+ int32_t code = 0;
+ for (int32_t i = 0; i < operatorInfo->numOfDownstream; ++i) {
+ code = getOperatorExplainExecInfo(operatorInfo->pDownstream[i], pExecInfoList);
+ if (code != TSDB_CODE_SUCCESS) {
+ // taosMemoryFreeClear(*pRes);
+      return code;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c
index 86c49e0fc8..02f504bef0 100644
--- a/source/libs/executor/src/projectoperator.c
+++ b/source/libs/executor/src/projectoperator.c
@@ -13,9 +13,11 @@
* along with this program. If not, see .
*/
-#include "executorimpl.h"
+#include "executorInt.h"
#include "filter.h"
#include "functionMgt.h"
+#include "operator.h"
+#include "querytask.h"
typedef struct SProjectOperatorInfo {
SOptrBasicInfo binfo;
diff --git a/source/libs/executor/src/querytask.c b/source/libs/executor/src/querytask.c
new file mode 100644
index 0000000000..a4d8327b6a
--- /dev/null
+++ b/source/libs/executor/src/querytask.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "filter.h"
+#include "function.h"
+#include "functionMgt.h"
+#include "os.h"
+#include "querynodes.h"
+#include "tfill.h"
+#include "tname.h"
+
+#include "tdatablock.h"
+#include "tmsg.h"
+
+#include "executorInt.h"
+#include "index.h"
+#include "operator.h"
+#include "query.h"
+#include "querytask.h"
+#include "thash.h"
+#include "ttypes.h"
+#include "vnode.h"
+
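+// clear the status bit(s) given by st from the task's status bitmask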
+#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st)))
+
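+// allocate a bare SExecTaskInfo (id string, status, stop-info and result-block lists); the operator tree is attached later by createExecTaskInfo()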
+SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOPTR_EXEC_MODEL model) {
+ SExecTaskInfo* pTaskInfo = taosMemoryCalloc(1, sizeof(SExecTaskInfo));
+ if (pTaskInfo == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
+
+ setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
+ pTaskInfo->cost.created = taosGetTimestampUs();
+
+ pTaskInfo->execModel = model;
+ pTaskInfo->stopInfo.pStopInfo = taosArrayInit(4, sizeof(SExchangeOpStopInfo));
+ pTaskInfo->pResultBlockList = taosArrayInit(128, POINTER_BYTES);
+
+ taosInitRWLatch(&pTaskInfo->lock);
+
+ pTaskInfo->id.vgId = vgId;
+ pTaskInfo->id.queryId = queryId;
+ pTaskInfo->id.str = taosMemoryMalloc(64);
+ buildTaskId(taskId, queryId, pTaskInfo->id.str);
+
+ return pTaskInfo;
+}
+
+bool isTaskKilled(SExecTaskInfo* pTaskInfo) { return (0 != pTaskInfo->code); }
+
+void setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode) {
+ pTaskInfo->code = rspCode;
+ stopTableScanOperator(pTaskInfo->pRoot, pTaskInfo->id.str);
+}
+
+void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status) {
+ if (status == TASK_NOT_COMPLETED) {
+ pTaskInfo->status = status;
+ } else {
+    // TASK_NOT_COMPLETED is not compatible with any other status, so clear its bit first
+ CLEAR_QUERY_STATUS(pTaskInfo, TASK_NOT_COMPLETED);
+ pTaskInfo->status |= status;
+ }
+}
+
+int32_t createExecTaskInfo(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
+ int32_t vgId, char* sql, EOPTR_EXEC_MODEL model) {
+ *pTaskInfo = doCreateTask(pPlan->id.queryId, taskId, vgId, model);
+ if (*pTaskInfo == NULL) {
+ taosMemoryFree(sql);
+ return terrno;
+ }
+
+ if (pHandle) {
+ if (pHandle->pStateBackend) {
+ (*pTaskInfo)->streamInfo.pState = pHandle->pStateBackend;
+ }
+ }
+
+ TSWAP((*pTaskInfo)->sql, sql);
+
+ (*pTaskInfo)->pSubplan = pPlan;
+ (*pTaskInfo)->pRoot = createOperator(pPlan->pNode, *pTaskInfo, pHandle, pPlan->pTagCond, pPlan->pTagIndexCond,
+ pPlan->user, pPlan->dbFName);
+
+ if (NULL == (*pTaskInfo)->pRoot) {
+ int32_t code = (*pTaskInfo)->code;
+ doDestroyTask(*pTaskInfo);
+ (*pTaskInfo) = NULL;
+ return code;
+ } else {
+ return TSDB_CODE_SUCCESS;
+ }
+}
+
+void cleanupQueriedTableScanInfo(SSchemaInfo* pSchemaInfo) {
+ taosMemoryFreeClear(pSchemaInfo->dbname);
+ taosMemoryFreeClear(pSchemaInfo->tablename);
+ tDeleteSSchemaWrapper(pSchemaInfo->sw);
+ tDeleteSSchemaWrapper(pSchemaInfo->qsw);
+}
+
+int32_t initQueriedTableSchemaInfo(SReadHandle* pHandle, SScanPhysiNode* pScanNode, const char* dbName, SExecTaskInfo* pTaskInfo) {
+ SMetaReader mr = {0};
+ if (pHandle == NULL) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ return terrno;
+ }
+
+ metaReaderInit(&mr, pHandle->meta, 0);
+ int32_t code = metaGetTableEntryByUidCache(&mr, pScanNode->uid);
+ if (code != TSDB_CODE_SUCCESS) {
+ qError("failed to get the table meta, uid:0x%" PRIx64 ", suid:0x%" PRIx64 ", %s", pScanNode->uid, pScanNode->suid,
+ GET_TASKID(pTaskInfo));
+
+ metaReaderClear(&mr);
+ return terrno;
+ }
+
+ SSchemaInfo* pSchemaInfo = &pTaskInfo->schemaInfo;
+
+ pSchemaInfo->tablename = taosStrdup(mr.me.name);
+ pSchemaInfo->dbname = taosStrdup(dbName);
+
+ if (mr.me.type == TSDB_SUPER_TABLE) {
+ pSchemaInfo->sw = tCloneSSchemaWrapper(&mr.me.stbEntry.schemaRow);
+ pSchemaInfo->tversion = mr.me.stbEntry.schemaTag.version;
+ } else if (mr.me.type == TSDB_CHILD_TABLE) {
+ tDecoderClear(&mr.coder);
+
+ tb_uid_t suid = mr.me.ctbEntry.suid;
+ code = metaGetTableEntryByUidCache(&mr, suid);
+ if (code != TSDB_CODE_SUCCESS) {
+ metaReaderClear(&mr);
+ return terrno;
+ }
+
+ pSchemaInfo->sw = tCloneSSchemaWrapper(&mr.me.stbEntry.schemaRow);
+ pSchemaInfo->tversion = mr.me.stbEntry.schemaTag.version;
+ } else {
+ pSchemaInfo->sw = tCloneSSchemaWrapper(&mr.me.ntbEntry.schemaRow);
+ }
+
+ metaReaderClear(&mr);
+
+ pSchemaInfo->qsw = extractQueriedColumnSchema(pScanNode);
+ return TSDB_CODE_SUCCESS;
+}
+
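+// build the schema wrapper of the queried columns: every scan column plus the tag columns among the pseudo columns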
+SSchemaWrapper* extractQueriedColumnSchema(SScanPhysiNode* pScanNode) {
+ int32_t numOfCols = LIST_LENGTH(pScanNode->pScanCols);
+ int32_t numOfTags = LIST_LENGTH(pScanNode->pScanPseudoCols);
+
+ SSchemaWrapper* pqSw = taosMemoryCalloc(1, sizeof(SSchemaWrapper));
+ pqSw->pSchema = taosMemoryCalloc(numOfCols + numOfTags, sizeof(SSchema));
+
+ for (int32_t i = 0; i < numOfCols; ++i) {
+ STargetNode* pNode = (STargetNode*)nodesListGetNode(pScanNode->pScanCols, i);
+ SColumnNode* pColNode = (SColumnNode*)pNode->pExpr;
+
+ SSchema* pSchema = &pqSw->pSchema[pqSw->nCols++];
+ pSchema->colId = pColNode->colId;
+ pSchema->type = pColNode->node.resType.type;
+ pSchema->bytes = pColNode->node.resType.bytes;
+ tstrncpy(pSchema->name, pColNode->colName, tListLen(pSchema->name));
+ }
+
+  // these are the tag and pseudo-function columns; only the tag columns are kept
+ for (int32_t i = 0; i < numOfTags; ++i) {
+ STargetNode* pNode = (STargetNode*)nodesListGetNode(pScanNode->pScanPseudoCols, i);
+
+ int32_t type = nodeType(pNode->pExpr);
+ if (type == QUERY_NODE_COLUMN) {
+ SColumnNode* pColNode = (SColumnNode*)pNode->pExpr;
+
+ SSchema* pSchema = &pqSw->pSchema[pqSw->nCols++];
+ pSchema->colId = pColNode->colId;
+ pSchema->type = pColNode->node.resType.type;
+ pSchema->bytes = pColNode->node.resType.bytes;
+ tstrncpy(pSchema->name, pColNode->colName, tListLen(pSchema->name));
+ }
+ }
+
+ return pqSw;
+}
+
+static void cleanupStreamInfo(SStreamTaskInfo* pStreamInfo) { tDeleteSSchemaWrapper(pStreamInfo->schema); }
+
+static void freeBlock(void* pParam) {
+ SSDataBlock* pBlock = *(SSDataBlock**)pParam;
+ blockDataDestroy(pBlock);
+}
+
+void doDestroyTask(SExecTaskInfo* pTaskInfo) {
+ qDebug("%s execTask is freed", GET_TASKID(pTaskInfo));
+ destroyOperator(pTaskInfo->pRoot);
+ pTaskInfo->pRoot = NULL;
+
+ cleanupQueriedTableScanInfo(&pTaskInfo->schemaInfo);
+ cleanupStreamInfo(&pTaskInfo->streamInfo);
+
+ if (!pTaskInfo->localFetch.localExec) {
+ nodesDestroyNode((SNode*)pTaskInfo->pSubplan);
+ pTaskInfo->pSubplan = NULL;
+ }
+
+ taosArrayDestroyEx(pTaskInfo->pResultBlockList, freeBlock);
+ taosArrayDestroy(pTaskInfo->stopInfo.pStopInfo);
+ taosMemoryFreeClear(pTaskInfo->sql);
+ taosMemoryFreeClear(pTaskInfo->id.str);
+ taosMemoryFreeClear(pTaskInfo);
+}
+
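+// format the task identifier as "TID:0x<taskId> QID:0x<queryId>" into dst; doCreateTask() allocates 64 bytes for it, enough for two 64-bit hex values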
+void buildTaskId(uint64_t taskId, uint64_t queryId, char* dst) {
+ char* p = dst;
+
+ int32_t offset = 6;
+ memcpy(p, "TID:0x", offset);
+ offset += tintToHex(taskId, &p[offset]);
+
+ memcpy(&p[offset], " QID:0x", 7);
+ offset += 7;
+ offset += tintToHex(queryId, &p[offset]);
+
+ p[offset] = 0;
+}
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index 2389c7252e..130cca9cbb 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -13,7 +13,7 @@
* along with this program. If not, see .
*/
-#include "executorimpl.h"
+#include "executorInt.h"
#include "filter.h"
#include "function.h"
#include "functionMgt.h"
@@ -30,6 +30,8 @@
#include "tcompare.h"
#include "thash.h"
#include "ttypes.h"
+#include "operator.h"
+#include "querytask.h"
int32_t scanDebug = 0;
@@ -156,7 +158,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn
break;
}
- assert(w.skey < pBlockInfo->window.skey);
+ ASSERT(w.skey < pBlockInfo->window.skey);
if (pBlockInfo->window.skey <= TMIN(w.ekey, pBlockInfo->window.ekey)) {
return true;
}
@@ -1646,10 +1648,10 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
blockDataCleanup(pInfo->pRes);
SDataBlockInfo* pBlockInfo = &pInfo->pRes->info;
- while (tqNextDataBlock(pInfo->tqReader)) {
+ while (tqNextBlockImpl(pInfo->tqReader)) {
SSDataBlock block = {0};
- int32_t code = tqRetrieveDataBlock2(&block, pInfo->tqReader, NULL);
+ int32_t code = tqRetrieveDataBlock(&block, pInfo->tqReader, NULL);
if (code != TSDB_CODE_SUCCESS || block.info.rows == 0) {
continue;
}
@@ -1687,23 +1689,23 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
if (pTaskInfo->streamInfo.currentOffset.type == TMQ_OFFSET__LOG) {
while (1) {
- SFetchRet ret = {0};
- tqNextBlock(pInfo->tqReader, &ret);
- tqOffsetResetToLog(
- &pTaskInfo->streamInfo.currentOffset,
- pInfo->tqReader->pWalReader->curVersion - 1); // curVersion move to next, so currentOffset = curVersion - 1
+ SSDataBlock block = {0};
+ int32_t type = tqNextBlock(pInfo->tqReader, &block);
- if (ret.fetchType == FETCH_TYPE__DATA) {
- qDebug("doQueueScan get data from log %" PRId64 " rows, version:%" PRId64, ret.data.info.rows,
+      // curVersion has moved to the next entry, so currentOffset = curVersion - 1
+ tqOffsetResetToLog(&pTaskInfo->streamInfo.currentOffset, pInfo->tqReader->pWalReader->curVersion - 1);
+
+ if (type == FETCH_TYPE__DATA) {
+ qDebug("doQueueScan get data from log %" PRId64 " rows, version:%" PRId64, block.info.rows,
pTaskInfo->streamInfo.currentOffset.version);
blockDataCleanup(pInfo->pRes);
- setBlockIntoRes(pInfo, &ret.data, true);
+ setBlockIntoRes(pInfo, &block, true);
if (pInfo->pRes->info.rows > 0) {
qDebug("doQueueScan get data from log %" PRId64 " rows, return, version:%" PRId64, pInfo->pRes->info.rows,
pTaskInfo->streamInfo.currentOffset.version);
return pInfo->pRes;
}
- } else if (ret.fetchType == FETCH_TYPE__NONE) {
+ } else if (type == FETCH_TYPE__NONE) {
qDebug("doQueueScan get none from log, return, version:%" PRId64, pTaskInfo->streamInfo.currentOffset.version);
return NULL;
}
@@ -1855,7 +1857,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
SSDataBlock* pSDB = doRangeScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
if (pSDB) {
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
- uint64_t version = getReaderMaxVersion(pTableScanInfo->base.dataReader);
+ uint64_t version = tsdbGetReaderMaxVersion(pTableScanInfo->base.dataReader);
updateInfoSetScanRange(pInfo->pUpdateInfo, &pTableScanInfo->base.cond.twindows, pInfo->groupId, version);
pSDB->info.type = pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE ? STREAM_NORMAL : STREAM_PULL_DATA;
checkUpdateData(pInfo, true, pSDB, false);
@@ -2021,7 +2023,7 @@ FETCH_NEXT_BLOCK:
SSDataBlock* pSDB = doRangeScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
if (pSDB) {
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
- uint64_t version = getReaderMaxVersion(pTableScanInfo->base.dataReader);
+ uint64_t version = tsdbGetReaderMaxVersion(pTableScanInfo->base.dataReader);
updateInfoSetScanRange(pInfo->pUpdateInfo, &pTableScanInfo->base.cond.twindows, pInfo->groupId, version);
pSDB->info.type = pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE ? STREAM_NORMAL : STREAM_PULL_DATA;
checkUpdateData(pInfo, true, pSDB, false);
@@ -2072,11 +2074,10 @@ FETCH_NEXT_BLOCK:
blockDataCleanup(pInfo->pRes);
- while (tqNextDataBlock(pInfo->tqReader)) {
+ while (tqNextBlockImpl(pInfo->tqReader)) {
SSDataBlock block = {0};
- int32_t code = tqRetrieveDataBlock2(&block, pInfo->tqReader, NULL);
-
+ int32_t code = tqRetrieveDataBlock(&block, pInfo->tqReader, NULL);
if (code != TSDB_CODE_SUCCESS || block.info.rows == 0) {
continue;
}
@@ -2305,7 +2306,7 @@ static void destroyStreamScanOperatorInfo(void* param) {
SStreamScanInfo* pStreamScan = (SStreamScanInfo*)param;
if (pStreamScan->pTableScanOp && pStreamScan->pTableScanOp->info) {
- destroyOperatorInfo(pStreamScan->pTableScanOp);
+ destroyOperator(pStreamScan->pTableScanOp);
}
if (pStreamScan->tqReader) {
@@ -2427,7 +2428,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
if (pHandle->initTqReader) {
ASSERT(pHandle->tqReader == NULL);
- pInfo->tqReader = tqOpenReader(pHandle->vnode);
+ pInfo->tqReader = tqReaderOpen(pHandle->vnode);
ASSERT(pInfo->tqReader);
} else {
ASSERT(pHandle->tqReader);
diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c
index cb0f1aa068..10933f285c 100644
--- a/source/libs/executor/src/sortoperator.c
+++ b/source/libs/executor/src/sortoperator.c
@@ -13,8 +13,10 @@
* along with this program. If not, see .
*/
+#include "executorInt.h"
#include "filter.h"
-#include "executorimpl.h"
+#include "operator.h"
+#include "querytask.h"
#include "tdatablock.h"
typedef struct SSortOperatorInfo {
diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c
index c78e6002cd..c75c49fe77 100644
--- a/source/libs/executor/src/sysscanoperator.c
+++ b/source/libs/executor/src/sysscanoperator.c
@@ -13,7 +13,7 @@
* along with this program. If not, see .
*/
-#include "executorimpl.h"
+#include "executorInt.h"
#include "filter.h"
#include "function.h"
#include "functionMgt.h"
@@ -31,6 +31,9 @@
#include "thash.h"
#include "ttypes.h"
#include "vnode.h"
+#include "operator.h"
+#include "querytask.h"
+
typedef int (*__optSysFilter)(void* a, void* b, int16_t dtype);
typedef int32_t (*__sys_filte)(void* pMeta, SNode* cond, SArray* result);
diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c
index e59ea253cc..fc4e82b57f 100644
--- a/source/libs/executor/src/tfill.c
+++ b/source/libs/executor/src/tfill.c
@@ -20,7 +20,7 @@
#include "tmsg.h"
#include "ttypes.h"
-#include "executorimpl.h"
+#include "executorInt.h"
#include "tcommon.h"
#include "thash.h"
#include "ttime.h"
@@ -408,7 +408,7 @@ static int64_t appendFilledResult(SFillInfo* pFillInfo, SSDataBlock* pBlock, int
pFillInfo->numOfTotal += pFillInfo->numOfCurrent;
- assert(pFillInfo->numOfCurrent == resultCapacity);
+ ASSERT(pFillInfo->numOfCurrent == resultCapacity);
return resultCapacity;
}
@@ -558,7 +558,7 @@ int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t ma
numOfRes = taosTimeCountInterval(lastKey, pFillInfo->currentKey, pFillInfo->interval.sliding,
pFillInfo->interval.slidingUnit, pFillInfo->interval.precision);
numOfRes += 1;
- assert(numOfRes >= numOfRows);
+ ASSERT(numOfRes >= numOfRows);
} else { // reach the end of data
if ((ekey1 < pFillInfo->currentKey && FILL_IS_ASC_FILL(pFillInfo)) ||
(ekey1 >= pFillInfo->currentKey && !FILL_IS_ASC_FILL(pFillInfo))) {
@@ -593,14 +593,14 @@ int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, SSDataBlock* p, int32_t ca
int32_t remain = taosNumOfRemainRows(pFillInfo);
int64_t numOfRes = getNumOfResultsAfterFillGap(pFillInfo, pFillInfo->end, capacity);
- assert(numOfRes <= capacity);
+ ASSERT(numOfRes <= capacity);
// no data existed for fill operation now, append result according to the fill strategy
if (remain == 0) {
appendFilledResult(pFillInfo, p, numOfRes);
} else {
fillResultImpl(pFillInfo, p, (int32_t)numOfRes);
- assert(numOfRes == pFillInfo->numOfCurrent);
+ ASSERT(numOfRes == pFillInfo->numOfCurrent);
}
qDebug("fill:%p, generated fill result, src block:%d, index:%d, brange:%" PRId64 "-%" PRId64 ", currentKey:%" PRId64
diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c
index f0e25d8cc5..29e3668ec4 100644
--- a/source/libs/executor/src/timesliceoperator.c
+++ b/source/libs/executor/src/timesliceoperator.c
@@ -12,10 +12,12 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see .
*/
-#include "executorimpl.h"
+#include "executorInt.h"
#include "filter.h"
#include "function.h"
#include "functionMgt.h"
+#include "operator.h"
+#include "querytask.h"
#include "tcommon.h"
#include "tcompare.h"
#include "tdatablock.h"
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index be2bd0e6e2..bea01fa0d8 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -12,10 +12,12 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see .
*/
-#include "executorimpl.h"
+#include "executorInt.h"
#include "filter.h"
#include "function.h"
#include "functionMgt.h"
+#include "operator.h"
+#include "querytask.h"
#include "tcommon.h"
#include "tcompare.h"
#include "tdatablock.h"
@@ -25,6 +27,12 @@
#define IS_FINAL_OP(op) ((op)->isFinal)
#define DEAULT_DELETE_MARK (1000LL * 60LL * 60LL * 24LL * 365LL * 10LL);
+typedef struct SStateWindowInfo {
+ SResultWindowInfo winInfo;
+ SStateKeys* pStateKey;
+} SStateWindowInfo;
+
+
typedef struct SSessionAggOperatorInfo {
SOptrBasicInfo binfo;
SAggSupporter aggSup;
@@ -153,7 +161,7 @@ FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn
// }
}
- assert(forwardRows >= 0);
+ ASSERT(forwardRows >= 0);
return forwardRows;
}
@@ -165,8 +173,6 @@ int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order) {
return -1;
}
- assert(order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC);
-
TSKEY* keyList = (TSKEY*)pValue;
int32_t firstPos = 0;
int32_t lastPos = num - 1;
@@ -230,7 +236,7 @@ int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order) {
int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn, int32_t startPos, TSKEY ekey,
__block_search_fn_t searchFn, STableQueryInfo* item, int32_t order) {
- assert(startPos >= 0 && startPos < pDataBlockInfo->rows);
+ ASSERT(startPos >= 0 && startPos < pDataBlockInfo->rows);
int32_t num = -1;
int32_t step = GET_FORWARD_DIRECTION_FACTOR(order);
@@ -261,7 +267,6 @@ int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimary
}
}
- assert(num >= 0);
return num;
}
@@ -433,7 +438,7 @@ static bool setTimeWindowInterpolationEndTs(SIntervalAggOperatorInfo* pInfo, SEx
}
int32_t nextRowIndex = endRowIndex + 1;
- assert(nextRowIndex >= 0);
+ ASSERT(nextRowIndex >= 0);
TSKEY nextKey = tsCols[nextRowIndex];
doTimeWindowInterpolation(pInfo->pPrevValues, pDataBlock, actualEndKey, endRowIndex, nextKey, nextRowIndex, key,
@@ -494,9 +499,9 @@ static int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext,
*/
if (primaryKeys == NULL) {
if (ascQuery) {
- assert(pDataBlockInfo->window.skey <= pNext->ekey);
+ ASSERT(pDataBlockInfo->window.skey <= pNext->ekey);
} else {
- assert(pDataBlockInfo->window.ekey >= pNext->skey);
+ ASSERT(pDataBlockInfo->window.ekey >= pNext->skey);
}
} else {
if (ascQuery && primaryKeys[startPos] > pNext->ekey) {
@@ -533,7 +538,6 @@ static bool isResultRowInterpolated(SResultRow* pResult, SResultTsInterpType typ
}
static void setResultRowInterpo(SResultRow* pResult, SResultTsInterpType type) {
- assert(pResult != NULL && (type == RESULT_ROW_START_INTERP || type == RESULT_ROW_END_INTERP));
if (type == RESULT_ROW_START_INTERP) {
pResult->startInterp = true;
} else {
@@ -1610,7 +1614,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) {
int32_t size = taosArrayGetSize(pInfo->pChildren);
for (int32_t i = 0; i < size; i++) {
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, i);
- destroyOperatorInfo(pChildOp);
+ destroyOperator(pChildOp);
}
taosArrayDestroy(pInfo->pChildren);
}
@@ -2839,7 +2843,7 @@ void destroyStreamSessionAggOperatorInfo(void* param) {
int32_t size = taosArrayGetSize(pInfo->pChildren);
for (int32_t i = 0; i < size; i++) {
SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i);
- destroyOperatorInfo(pChild);
+ destroyOperator(pChild);
}
taosArrayDestroy(pInfo->pChildren);
}
@@ -3816,7 +3820,7 @@ void destroyStreamStateOperatorInfo(void* param) {
int32_t size = taosArrayGetSize(pInfo->pChildren);
for (int32_t i = 0; i < size; i++) {
SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i);
- destroyOperatorInfo(pChild);
+ destroyOperator(pChild);
}
taosArrayDestroy(pInfo->pChildren);
}
diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c
index 6c8e581b3f..36e41b0c5d 100644
--- a/source/libs/executor/src/tsort.c
+++ b/source/libs/executor/src/tsort.c
@@ -155,7 +155,7 @@ void tsortDestroySortHandle(SSortHandle* pSortHandle) {
int64_t fetchUs = 0, fetchNum = 0;
tsortClearOrderdSource(pSortHandle->pOrderedSource, &fetchUs, &fetchNum);
- qError("all source fetch time: %" PRId64 "us num:%" PRId64 " %s", fetchUs, fetchNum, pSortHandle->idStr);
+ qDebug("all source fetch time: %" PRId64 "us num:%" PRId64 " %s", fetchUs, fetchNum, pSortHandle->idStr);
taosArrayDestroy(pSortHandle->pOrderedSource);
taosMemoryFreeClear(pSortHandle);
@@ -229,7 +229,7 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) {
taosArrayPush(pPageIdList, &pageId);
int32_t size = blockDataGetSize(p) + sizeof(int32_t) + taosArrayGetSize(p->pDataBlock) * sizeof(int32_t);
- assert(size <= getBufPageSize(pHandle->pBuf));
+ ASSERT(size <= getBufPageSize(pHandle->pBuf));
blockDataToBuf(pPage, p);
@@ -316,7 +316,7 @@ static int32_t sortComparInit(SMsortComparParam* pParam, SArray* pSources, int32
}
int64_t et = taosGetTimestampUs();
- qError("init for merge sort completed, elapsed time:%.2f ms, %s", (et - st) / 1000.0, pHandle->idStr);
+ qDebug("init for merge sort completed, elapsed time:%.2f ms, %s", (et - st) / 1000.0, pHandle->idStr);
}
return code;
@@ -592,7 +592,7 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
int32_t size =
blockDataGetSize(pDataBlock) + sizeof(int32_t) + taosArrayGetSize(pDataBlock->pDataBlock) * sizeof(int32_t);
- assert(size <= getBufPageSize(pHandle->pBuf));
+ ASSERT(size <= getBufPageSize(pHandle->pBuf));
blockDataToBuf(pPage, pDataBlock);
diff --git a/source/libs/executor/test/executorTests.cpp b/source/libs/executor/test/executorTests.cpp
index b9a696170a..cefe12990d 100644
--- a/source/libs/executor/test/executorTests.cpp
+++ b/source/libs/executor/test/executorTests.cpp
@@ -24,15 +24,12 @@
#include "os.h"
#include "executor.h"
-#include "executorimpl.h"
+#include "executorInt.h"
#include "function.h"
+#include "operator.h"
#include "taos.h"
#include "tdatablock.h"
#include "tdef.h"
-#include "tglobal.h"
-#include "tmsg.h"
-#include "tname.h"
-#include "trpc.h"
#include "tvariant.h"
namespace {
diff --git a/source/libs/executor/test/lhashTests.cpp b/source/libs/executor/test/lhashTests.cpp
index 24570ff788..92f7652d8d 100644
--- a/source/libs/executor/test/lhashTests.cpp
+++ b/source/libs/executor/test/lhashTests.cpp
@@ -15,7 +15,7 @@
#include
#include
-#include "executorimpl.h"
+#include "executorInt.h"
#include "tlinearhash.h"
#pragma GCC diagnostic push
diff --git a/source/libs/executor/test/sortTests.cpp b/source/libs/executor/test/sortTests.cpp
index f35d07804e..8122d7d6a9 100644
--- a/source/libs/executor/test/sortTests.cpp
+++ b/source/libs/executor/test/sortTests.cpp
@@ -26,7 +26,7 @@
#include "os.h"
#include "executor.h"
-#include "executorimpl.h"
+#include "executorInt.h"
#include "taos.h"
#include "tcompare.h"
#include "tdatablock.h"
diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c
index ec5fc5ad2a..02ed0d2d05 100644
--- a/source/libs/index/src/indexFilter.c
+++ b/source/libs/index/src/indexFilter.c
@@ -221,6 +221,71 @@ static FORCE_INLINE int32_t sifInitJsonParam(SNode *node, SIFParam *param, SIFCt
param->status = SFLT_COARSE_INDEX;
return 0;
}
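+// returns 1 when a fixed-size column is compared against a var-type value, i.e. the value must be converted before it can be used for index filtering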
+static int32_t sifNeedConvertCond(SNode *l, SNode *r) {
+ if (nodeType(l) != QUERY_NODE_COLUMN || nodeType(r) != QUERY_NODE_VALUE) {
+ return 0;
+ }
+ SColumnNode *c = (SColumnNode *)l;
+ SValueNode *v = (SValueNode *)r;
+ int32_t ctype = c->node.resType.type;
+ int32_t vtype = v->node.resType.type;
+ if (!IS_VAR_DATA_TYPE(ctype) && IS_VAR_DATA_TYPE(vtype)) {
+ return 1;
+ }
+ return 0;
+}
+static int32_t sifInitParamValByCol(SNode *r, SNode *l, SIFParam *param, SIFCtx *ctx) {
+ param->status = SFLT_COARSE_INDEX;
+ SColumnNode *cn = (SColumnNode *)r;
+ SValueNode *vn = (SValueNode *)l;
+ if (vn->typeData == TSDB_DATA_TYPE_NULL && (vn->literal == NULL || strlen(vn->literal) == 0)) {
+ param->status = SFLT_NOT_INDEX;
+ return 0;
+ }
+ SDataType *pType = &cn->node.resType;
+ int32_t type = pType->type;
+
+ SDataType *pVType = &vn->node.resType;
+ int32_t vtype = pVType->type;
+ char *pData = nodesGetValueFromNode(vn);
+ int32_t valLen = 0;
+  char **value = &param->condValue;
+
+ if (IS_VAR_DATA_TYPE(type)) {
+ int32_t dataLen = varDataTLen(pData);
+ if (type == TSDB_DATA_TYPE_JSON) {
+ if (*pData == TSDB_DATA_TYPE_NULL) {
+ dataLen = 0;
+ } else if (*pData == TSDB_DATA_TYPE_NCHAR) {
+ dataLen = varDataTLen(pData);
+ } else if (*pData == TSDB_DATA_TYPE_DOUBLE) {
+ dataLen = LONG_BYTES;
+ } else if (*pData == TSDB_DATA_TYPE_BOOL) {
+ dataLen = CHAR_BYTES;
+ }
+ dataLen += CHAR_BYTES;
+ }
+ valLen = dataLen;
+ } else {
+ valLen = pType->bytes;
+ }
+ char *tv = taosMemoryCalloc(1, valLen + 1);
+ if (tv == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ memcpy(tv, pData, valLen);
+ *value = tv;
+
+ param->colId = -1;
+ param->colValType = (uint8_t)(vn->node.resType.type);
+ if (vn->literal != NULL && strlen(vn->literal) <= sizeof(param->colName)) {
+ memcpy(param->colName, vn->literal, strlen(vn->literal));
+ } else {
+ param->status = SFLT_NOT_INDEX;
+ }
+ return 0;
+}
static int32_t sifInitParam(SNode *node, SIFParam *param, SIFCtx *ctx) {
param->status = SFLT_COARSE_INDEX;
switch (nodeType(node)) {
@@ -317,8 +382,13 @@ static int32_t sifInitOperParams(SIFParam **params, SOperatorNode *node, SIFCtx
return TSDB_CODE_SUCCESS;
} else {
SIF_ERR_JRET(sifInitParam(node->pLeft, &paramList[0], ctx));
+
if (nParam > 1) {
+ // if (sifNeedConvertCond(node->pLeft, node->pRight)) {
+      // SIF_ERR_JRET(sifInitParamValByCol(node->pLeft, node->pRight, &paramList[1], ctx));
+ // } else {
SIF_ERR_JRET(sifInitParam(node->pRight, &paramList[1], ctx));
+ // }
// if (paramList[0].colValType == TSDB_DATA_TYPE_JSON &&
// ((SOperatorNode *)(node))->opType == OP_TYPE_JSON_CONTAINS) {
// return TSDB_CODE_OUT_OF_MEMORY;
@@ -404,60 +474,149 @@ static FORCE_INLINE FilterFunc sifGetFilterFunc(EIndexQueryType type, bool *reve
}
return NULL;
}
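+// parse the numeric literal in buf (len bytes) into val according to the given TSDB data type; returns 0 on success, -1 if the string cannot be parsed or the type is not numeric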
+int32_t sifStr2Num(char *buf, int32_t len, int8_t type, void *val) {
+ // signed/unsigned/float
+ if (IS_SIGNED_NUMERIC_TYPE(type)) {
+ int64_t v = 0;
+ if (0 != toInteger(buf, len, 10, &v)) {
+ return -1;
+ }
+ if (type == TSDB_DATA_TYPE_BIGINT) {
+ *(int64_t *)val = v;
+ } else if (type == TSDB_DATA_TYPE_INT) {
+ *(int32_t *)val = v;
+ } else if (type == TSDB_DATA_TYPE_TINYINT) {
+ *(int8_t *)val = v;
+ } else if (type == TSDB_DATA_TYPE_SMALLINT) {
+ *(int16_t *)val = v;
+ }
+ } else if (IS_FLOAT_TYPE(type)) {
+ if (type == TSDB_DATA_TYPE_FLOAT) {
+ *(float *)val = taosStr2Float(buf, NULL);
+ } else {
+ *(double *)val = taosStr2Double(buf, NULL);
+ }
+ } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
+ uint64_t v = 0;
+ if (0 != toUInteger(buf, len, 10, &v)) {
+ return -1;
+ }
+ if (type == TSDB_DATA_TYPE_UBIGINT) {
+ *(uint64_t *)val = v;
+ } else if (type == TSDB_DATA_TYPE_UINT) {
+ *(uint32_t *)val = v;
+ } else if (type == TSDB_DATA_TYPE_UTINYINT) {
+ *(uint8_t *)val = v;
+ } else if (type == TSDB_DATA_TYPE_USMALLINT) {
+ *(uint16_t *)val = v;
+ }
+ } else {
+ return -1;
+ }
+ return 0;
+}
-static void sifSetFltParam(SIFParam *left, SIFParam *right, SDataTypeBuf *typedata, SMetaFltParam *param) {
- int8_t ltype = left->colValType, rtype = right->colValType;
+static int32_t sifSetFltParam(SIFParam *left, SIFParam *right, SDataTypeBuf *typedata, SMetaFltParam *param) {
+ int32_t code = 0;
+ int8_t ltype = left->colValType, rtype = right->colValType;
+ if (!IS_NUMERIC_TYPE(ltype) || !((IS_NUMERIC_TYPE(rtype)) || rtype == TSDB_DATA_TYPE_VARCHAR)) {
+ return -1;
+ }
if (ltype == TSDB_DATA_TYPE_FLOAT) {
float f = 0;
- SIF_DATA_CONVERT(rtype, right->condValue, f);
+ if (IS_NUMERIC_TYPE(rtype)) {
+ SIF_DATA_CONVERT(rtype, right->condValue, f);
+ } else {
+ SIF_ERR_RET(sifStr2Num(varDataVal(right->condValue), varDataLen(right->condValue), TSDB_DATA_TYPE_FLOAT, &f));
+ }
typedata->f = f;
param->val = &typedata->f;
} else if (ltype == TSDB_DATA_TYPE_DOUBLE) {
double d = 0;
- SIF_DATA_CONVERT(rtype, right->condValue, d);
+ if (IS_NUMERIC_TYPE(rtype)) {
+ SIF_DATA_CONVERT(rtype, right->condValue, d);
+ } else {
+ SIF_ERR_RET(sifStr2Num(varDataVal(right->condValue), varDataLen(right->condValue), TSDB_DATA_TYPE_DOUBLE, &d));
+ }
typedata->d = d;
param->val = &typedata->d;
} else if (ltype == TSDB_DATA_TYPE_BIGINT) {
int64_t i64 = 0;
- SIF_DATA_CONVERT(rtype, right->condValue, i64);
+ if (IS_NUMERIC_TYPE(rtype)) {
+ SIF_DATA_CONVERT(rtype, right->condValue, i64);
+ } else {
+ SIF_ERR_RET(sifStr2Num(varDataVal(right->condValue), varDataLen(right->condValue), TSDB_DATA_TYPE_BIGINT, &i64));
+ }
typedata->i64 = i64;
param->val = &typedata->i64;
} else if (ltype == TSDB_DATA_TYPE_INT) {
int32_t i32 = 0;
- SIF_DATA_CONVERT(rtype, right->condValue, i32);
+ if (IS_NUMERIC_TYPE(rtype)) {
+ SIF_DATA_CONVERT(rtype, right->condValue, i32);
+ } else {
+ SIF_ERR_RET(sifStr2Num(varDataVal(right->condValue), varDataLen(right->condValue), TSDB_DATA_TYPE_INT, &i32));
+ }
typedata->i32 = i32;
param->val = &typedata->i32;
} else if (ltype == TSDB_DATA_TYPE_SMALLINT) {
int16_t i16 = 0;
- SIF_DATA_CONVERT(rtype, right->condValue, i16);
+ if (IS_NUMERIC_TYPE(rtype)) {
+ SIF_DATA_CONVERT(rtype, right->condValue, i16);
+ } else {
+ SIF_ERR_RET(
+ sifStr2Num(varDataVal(right->condValue), varDataLen(right->condValue), TSDB_DATA_TYPE_SMALLINT, &i16));
+ }
+
typedata->i16 = i16;
param->val = &typedata->i16;
} else if (ltype == TSDB_DATA_TYPE_TINYINT) {
int8_t i8 = 0;
- SIF_DATA_CONVERT(rtype, right->condValue, i8)
+ if (IS_NUMERIC_TYPE(rtype)) {
+ SIF_DATA_CONVERT(rtype, right->condValue, i8);
+ } else {
+ SIF_ERR_RET(sifStr2Num(varDataVal(right->condValue), varDataLen(right->condValue), TSDB_DATA_TYPE_TINYINT, &i8));
+ }
typedata->i8 = i8;
param->val = &typedata->i8;
} else if (ltype == TSDB_DATA_TYPE_UBIGINT) {
uint64_t u64 = 0;
- SIF_DATA_CONVERT(rtype, right->condValue, u64);
+ if (IS_NUMERIC_TYPE(rtype)) {
+ SIF_DATA_CONVERT(rtype, right->condValue, u64);
+ } else {
+ SIF_ERR_RET(sifStr2Num(varDataVal(right->condValue), varDataLen(right->condValue), TSDB_DATA_TYPE_UBIGINT, &u64));
+ }
typedata->u64 = u64;
param->val = &typedata->u64;
} else if (ltype == TSDB_DATA_TYPE_UINT) {
uint32_t u32 = 0;
- SIF_DATA_CONVERT(rtype, right->condValue, u32);
+ if (IS_NUMERIC_TYPE(rtype)) {
+ SIF_DATA_CONVERT(rtype, right->condValue, u32);
+ } else {
+ SIF_ERR_RET(sifStr2Num(varDataVal(right->condValue), varDataLen(right->condValue), TSDB_DATA_TYPE_UINT, &u32));
+ }
typedata->u32 = u32;
param->val = &typedata->u32;
} else if (ltype == TSDB_DATA_TYPE_USMALLINT) {
uint16_t u16 = 0;
- SIF_DATA_CONVERT(rtype, right->condValue, u16);
+ if (IS_NUMERIC_TYPE(rtype)) {
+ SIF_DATA_CONVERT(rtype, right->condValue, u16);
+ } else {
+ SIF_ERR_RET(
+ sifStr2Num(varDataVal(right->condValue), varDataLen(right->condValue), TSDB_DATA_TYPE_USMALLINT, &u16));
+ }
typedata->u16 = u16;
param->val = &typedata->u16;
} else if (ltype == TSDB_DATA_TYPE_UTINYINT) {
uint8_t u8 = 0;
- SIF_DATA_CONVERT(rtype, right->condValue, u8);
+ if (IS_NUMERIC_TYPE(rtype)) {
+ SIF_DATA_CONVERT(rtype, right->condValue, u8);
+ } else {
+ SIF_ERR_RET(sifStr2Num(varDataVal(right->condValue), varDataLen(right->condValue), TSDB_DATA_TYPE_UTINYINT, &u8));
+ }
typedata->u8 = u8;
param->val = &typedata->u8;
}
+ return 0;
}
static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFParam *output) {
int ret = 0;
@@ -498,7 +657,7 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP
param.val = buf;
}
} else {
-      sifSetFltParam(left, right, &typedata, &param);
+      if (sifSetFltParam(left, right, &typedata, &param) != 0) return -1;
}
ret = metaFilterTableIds(arg->metaEx, &param, output->result);
}
diff --git a/source/libs/index/src/indexFstDfa.c b/source/libs/index/src/indexFstDfa.c
index 4d348e76f2..a3e26d8518 100644
--- a/source/libs/index/src/indexFstDfa.c
+++ b/source/libs/index/src/indexFstDfa.c
@@ -188,7 +188,6 @@ void dfaAdd(FstDfa *dfa, FstSparseSet *set, uint32_t ip) {
return;
}
bool succ = sparSetAdd(set, ip, NULL);
- // assert(succ == true);
Inst *inst = taosArrayGet(dfa->insts, ip);
if (inst->ty == MATCH || inst->ty == RANGE) {
// do nothing
diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c
index 3f571e22ae..f71eef7969 100644
--- a/source/libs/nodes/src/nodesUtilFuncs.c
+++ b/source/libs/nodes/src/nodesUtilFuncs.c
@@ -827,6 +827,8 @@ void nodesDestroyNode(SNode* pNode) {
SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)pNode;
destroyVgDataBlockArray(pStmt->pDataBlocks);
taosMemoryFreeClear(pStmt->pTableMeta);
+ nodesDestroyNode(pStmt->pTagCond);
+ taosArrayDestroy(pStmt->pTableTag);
taosHashCleanup(pStmt->pVgroupsHashObj);
taosHashCleanup(pStmt->pSubTableHashObj);
taosHashCleanup(pStmt->pTableNameHashObj);
@@ -953,8 +955,12 @@ void nodesDestroyNode(SNode* pNode) {
break;
case QUERY_NODE_SPLIT_VGROUP_STMT: // no pointer field
case QUERY_NODE_SYNCDB_STMT: // no pointer field
- case QUERY_NODE_GRANT_STMT: // no pointer field
- case QUERY_NODE_REVOKE_STMT: // no pointer field
+ break;
+ case QUERY_NODE_GRANT_STMT:
+ nodesDestroyNode(((SGrantStmt*)pNode)->pTagCond);
+ break;
+ case QUERY_NODE_REVOKE_STMT:
+ nodesDestroyNode(((SRevokeStmt*)pNode)->pTagCond);
break;
case QUERY_NODE_SHOW_DNODES_STMT:
case QUERY_NODE_SHOW_MNODES_STMT:
diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c
index b06d48a690..1586d8128b 100644
--- a/source/libs/parser/src/parAuthenticator.c
+++ b/source/libs/parser/src/parAuthenticator.c
@@ -70,7 +70,7 @@ static EDealRes authSubquery(SAuthCxt* pCxt, SNode* pStmt) {
return TSDB_CODE_SUCCESS == authQuery(pCxt, pStmt) ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
}
-static int32_t mergeStableTagCond(SNode** pWhere, SNode** pTagCond) {
+static int32_t mergeStableTagCond(SNode** pWhere, SNode* pTagCond) {
SLogicConditionNode* pLogicCond = (SLogicConditionNode*)nodesMakeNode(QUERY_NODE_LOGIC_CONDITION);
if (NULL == pLogicCond) {
return TSDB_CODE_OUT_OF_MEMORY;
@@ -78,7 +78,7 @@ static int32_t mergeStableTagCond(SNode** pWhere, SNode** pTagCond) {
pLogicCond->node.resType.type = TSDB_DATA_TYPE_BOOL;
pLogicCond->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_BOOL].bytes;
pLogicCond->condType = LOGIC_COND_TYPE_AND;
- int32_t code = nodesListMakeStrictAppend(&pLogicCond->pParameterList, *pTagCond);
+ int32_t code = nodesListMakeStrictAppend(&pLogicCond->pParameterList, pTagCond);
if (TSDB_CODE_SUCCESS == code) {
code = nodesListMakeAppend(&pLogicCond->pParameterList, *pWhere);
}
@@ -106,7 +106,7 @@ static int32_t appendStableTagCond(SNode** pWhere, SNode* pTagCond) {
return nodesListStrictAppend(((SLogicConditionNode*)*pWhere)->pParameterList, pTagCondCopy);
}
- return mergeStableTagCond(pWhere, &pTagCondCopy);
+ return mergeStableTagCond(pWhere, pTagCondCopy);
}
static EDealRes authSelectImpl(SNode* pNode, void* pContext) {
diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c
index eb2efd573d..02de9f227d 100644
--- a/source/libs/parser/src/parInsertSql.c
+++ b/source/libs/parser/src/parInsertSql.c
@@ -53,6 +53,7 @@ typedef struct SInsertParseContext {
bool missCache;
bool usingDuplicateTable;
bool forceUpdate;
+ bool needTableTagVal;
} SInsertParseContext;
typedef int32_t (*_row_append_fn_t)(SMsgBuf* pMsgBuf, const void* value, int32_t len, void* param);
@@ -577,28 +578,39 @@ static int32_t rewriteTagCondColumnImpl(STagVal* pVal, SNode** pNode) {
if (NULL == pValue) {
return TSDB_CODE_OUT_OF_MEMORY;
}
- pValue->node.resType.type = pVal->type;
+
+ pValue->node.resType = ((SColumnNode*)*pNode)->node.resType;
+ nodesDestroyNode(*pNode);
+ *pNode = (SNode*)pValue;
+
switch (pVal->type) {
case TSDB_DATA_TYPE_BOOL:
pValue->datum.b = *(int8_t*)(&pVal->i64);
+ *(bool*)&pValue->typeData = pValue->datum.b;
break;
case TSDB_DATA_TYPE_TINYINT:
pValue->datum.i = *(int8_t*)(&pVal->i64);
+ *(int8_t*)&pValue->typeData = pValue->datum.i;
break;
case TSDB_DATA_TYPE_SMALLINT:
pValue->datum.i = *(int16_t*)(&pVal->i64);
+ *(int16_t*)&pValue->typeData = pValue->datum.i;
break;
case TSDB_DATA_TYPE_INT:
pValue->datum.i = *(int32_t*)(&pVal->i64);
+ *(int32_t*)&pValue->typeData = pValue->datum.i;
break;
case TSDB_DATA_TYPE_BIGINT:
pValue->datum.i = pVal->i64;
+ pValue->typeData = pValue->datum.i;
break;
case TSDB_DATA_TYPE_FLOAT:
pValue->datum.d = *(float*)(&pVal->i64);
+ *(float*)&pValue->typeData = pValue->datum.d;
break;
case TSDB_DATA_TYPE_DOUBLE:
pValue->datum.d = *(double*)(&pVal->i64);
+ *(double*)&pValue->typeData = pValue->datum.d;
break;
case TSDB_DATA_TYPE_VARCHAR:
case TSDB_DATA_TYPE_NCHAR:
@@ -611,18 +623,23 @@ static int32_t rewriteTagCondColumnImpl(STagVal* pVal, SNode** pNode) {
break;
case TSDB_DATA_TYPE_TIMESTAMP:
pValue->datum.i = pVal->i64;
+ pValue->typeData = pValue->datum.i;
break;
case TSDB_DATA_TYPE_UTINYINT:
pValue->datum.i = *(uint8_t*)(&pVal->i64);
+ *(uint8_t*)&pValue->typeData = pValue->datum.i;
break;
case TSDB_DATA_TYPE_USMALLINT:
pValue->datum.i = *(uint16_t*)(&pVal->i64);
+ *(uint16_t*)&pValue->typeData = pValue->datum.i;
break;
case TSDB_DATA_TYPE_UINT:
pValue->datum.i = *(uint32_t*)(&pVal->i64);
+ *(uint32_t*)&pValue->typeData = pValue->datum.i;
break;
case TSDB_DATA_TYPE_UBIGINT:
pValue->datum.i = *(uint64_t*)(&pVal->i64);
+ *(uint64_t*)&pValue->typeData = pValue->datum.i;
break;
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_VARBINARY:
@@ -667,16 +684,15 @@ static int32_t checkTagCondResult(SNode* pResult) {
: TSDB_CODE_PAR_PERMISSION_DENIED;
}
-int32_t checkSubtablePrivilege(SArray* pTagVals, SArray* pTagName, SNode* pCond) {
- int32_t code = setTagVal(pTagVals, pTagName, pCond);
- SNode* pNew = NULL;
+static int32_t checkSubtablePrivilege(SArray* pTagVals, SArray* pTagName, SNode** pCond) {
+ int32_t code = setTagVal(pTagVals, pTagName, *pCond);
if (TSDB_CODE_SUCCESS == code) {
- code = scalarCalculateConstants(pCond, &pNew);
+ code = scalarCalculateConstants(*pCond, pCond);
}
if (TSDB_CODE_SUCCESS == code) {
- code = checkTagCondResult(pNew);
+ code = checkTagCondResult(*pCond);
}
- nodesDestroyNode(pNew);
+ NODES_DESTORY_NODE(*pCond);
return code;
}
@@ -716,6 +732,10 @@ static int32_t parseTagsClauseImpl(SInsertParseContext* pCxt, SVnodeModifyOpStmt
}
}
+ if (TSDB_CODE_SUCCESS == code && NULL != pStmt->pTagCond) {
+ code = checkSubtablePrivilege(pTagVals, pTagName, &pStmt->pTagCond);
+ }
+
if (TSDB_CODE_SUCCESS == code && !isParseBindParam && !isJson) {
code = tTagNew(pTagVals, 1, false, &pTag);
}
@@ -843,7 +863,7 @@ static void setUserAuthInfo(SParseContext* pCxt, SName* pTbName, SUserAuthInfo*
pInfo->type = AUTH_TYPE_WRITE;
}
-static int32_t checkAuth(SParseContext* pCxt, SName* pTbName, bool* pMissCache) {
+static int32_t checkAuth(SParseContext* pCxt, SName* pTbName, bool* pMissCache, SNode** pTagCond) {
int32_t code = TSDB_CODE_SUCCESS;
SUserAuthInfo authInfo = {0};
setUserAuthInfo(pCxt, pTbName, &authInfo);
@@ -863,11 +883,28 @@ static int32_t checkAuth(SParseContext* pCxt, SName* pTbName, bool* pMissCache)
*pMissCache = true;
} else if (!authRes.pass) {
code = TSDB_CODE_PAR_PERMISSION_DENIED;
+ } else if (NULL != authRes.pCond) {
+ *pTagCond = authRes.pCond;
}
}
return code;
}
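+// auth check for the insert target table: when the privilege carries a tag condition, the subtable check needs the table's tag values, so report a cache miss and request them through the catalog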
+static int32_t checkAuthForTable(SParseContext* pCxt, SName* pTbName, bool* pMissCache, bool* pNeedTableTagVal) {
+ SNode* pTagCond = NULL;
+ int32_t code = checkAuth(pCxt, pTbName, pMissCache, &pTagCond);
+ if (TSDB_CODE_SUCCESS == code) {
+ *pNeedTableTagVal = ((*pMissCache) || (NULL != pTagCond));
+ *pMissCache = (NULL != pTagCond);
+ }
+ nodesDestroyNode(pTagCond);
+ return code;
+}
+
+static int32_t checkAuthForStable(SParseContext* pCxt, SName* pTbName, bool* pMissCache, SNode** pTagCond) {
+ return checkAuth(pCxt, pTbName, pMissCache, pTagCond);
+}
+
static int32_t getTableMeta(SInsertParseContext* pCxt, SName* pTbName, bool isStb, STableMeta** pTableMeta,
bool* pMissCache) {
SParseContext* pComCxt = pCxt->pComCxt;
@@ -970,7 +1007,7 @@ static int32_t getTargetTableSchema(SInsertParseContext* pCxt, SVnodeModifyOpStm
return TSDB_CODE_SUCCESS;
}
- int32_t code = checkAuth(pCxt->pComCxt, &pStmt->targetTableName, &pCxt->missCache);
+ int32_t code = checkAuthForTable(pCxt->pComCxt, &pStmt->targetTableName, &pCxt->missCache, &pCxt->needTableTagVal);
if (TSDB_CODE_SUCCESS == code && !pCxt->missCache) {
code = getTableMetaAndVgroup(pCxt, pStmt, &pCxt->missCache);
}
@@ -993,7 +1030,7 @@ static int32_t getUsingTableSchema(SInsertParseContext* pCxt, SVnodeModifyOpStmt
return TSDB_CODE_SUCCESS;
}
- int32_t code = checkAuth(pCxt->pComCxt, &pStmt->targetTableName, &pCxt->missCache);
+ int32_t code = checkAuthForStable(pCxt->pComCxt, &pStmt->usingTableName, &pCxt->missCache, &pStmt->pTagCond);
if (TSDB_CODE_SUCCESS == code && !pCxt->missCache) {
code = getTableMeta(pCxt, &pStmt->usingTableName, true, &pStmt->pTableMeta, &pCxt->missCache);
}
@@ -1606,6 +1643,8 @@ static int32_t parseInsertTableClauseBottom(SInsertParseContext* pCxt, SVnodeMod
static void resetEnvPreTable(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) {
insDestroyBoundColInfo(&pCxt->tags);
taosMemoryFreeClear(pStmt->pTableMeta);
+ nodesDestroyNode(pStmt->pTagCond);
+ taosArrayDestroy(pStmt->pTableTag);
tdDestroySVCreateTbReq(pStmt->pCreateTblReq);
taosMemoryFreeClear(pStmt->pCreateTblReq);
pCxt->missCache = false;
@@ -1780,14 +1819,18 @@ static int32_t createInsertQuery(SInsertParseContext* pCxt, SQuery** pOutput) {
return code;
}
-static int32_t checkAuthFromMetaData(const SArray* pUsers) {
+static int32_t checkAuthFromMetaData(const SArray* pUsers, SNode** pTagCond) {
if (1 != taosArrayGetSize(pUsers)) {
return TSDB_CODE_FAILED;
}
SMetaRes* pRes = taosArrayGet(pUsers, 0);
if (TSDB_CODE_SUCCESS == pRes->code) {
- return (*(bool*)pRes->pRes) ? TSDB_CODE_SUCCESS : TSDB_CODE_PAR_PERMISSION_DENIED;
+ SUserAuthRes* pAuth = pRes->pRes;
+ if (NULL != pAuth->pCond) {
+ *pTagCond = nodesCloneNode(pAuth->pCond);
+ }
+ return pAuth->pass ? TSDB_CODE_SUCCESS : TSDB_CODE_PAR_PERMISSION_DENIED;
}
return pRes->code;
}
@@ -1826,9 +1869,40 @@ static int32_t getTableVgroupFromMetaData(const SArray* pTables, SVnodeModifyOpS
sizeof(SVgroupInfo));
}
+static int32_t buildTagNameFromMeta(STableMeta* pMeta, SArray** pTagName) {
+ *pTagName = taosArrayInit(pMeta->tableInfo.numOfTags, TSDB_COL_NAME_LEN);
+ if (NULL == *pTagName) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ SSchema* pSchema = getTableTagSchema(pMeta);
+ for (int32_t i = 0; i < pMeta->tableInfo.numOfTags; ++i) {
+ taosArrayPush(*pTagName, pSchema[i].name);
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
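+// verify that the target child table's cached tag values satisfy the tag condition attached to the user's table privilege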
+static int32_t checkSubtablePrivilegeForTable(const SArray* pTables, SVnodeModifyOpStmt* pStmt) {
+ if (1 != taosArrayGetSize(pTables)) {
+ return TSDB_CODE_FAILED;
+ }
+
+ SMetaRes* pRes = taosArrayGet(pTables, 0);
+ if (TSDB_CODE_SUCCESS != pRes->code) {
+ return pRes->code;
+ }
+
+ SArray* pTagName = NULL;
+ int32_t code = buildTagNameFromMeta(pStmt->pTableMeta, &pTagName);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = checkSubtablePrivilege((SArray*)pRes->pRes, pTagName, &pStmt->pTagCond);
+ }
+ taosArrayDestroy(pTagName);
+ return code;
+}
+
static int32_t getTableSchemaFromMetaData(SInsertParseContext* pCxt, const SMetaData* pMetaData,
SVnodeModifyOpStmt* pStmt, bool isStb) {
- int32_t code = checkAuthFromMetaData(pMetaData->pUser);
+ int32_t code = checkAuthFromMetaData(pMetaData->pUser, &pStmt->pTagCond);
if (TSDB_CODE_SUCCESS == code) {
code = getTableMetaFromMetaData(pMetaData->pTableMeta, &pStmt->pTableMeta);
}
@@ -1841,6 +1915,9 @@ static int32_t getTableSchemaFromMetaData(SInsertParseContext* pCxt, const SMeta
if (TSDB_CODE_SUCCESS == code) {
code = getTableVgroupFromMetaData(pMetaData->pTableHash, pStmt, isStb);
}
+ if (TSDB_CODE_SUCCESS == code && !isStb && NULL != pStmt->pTagCond) {
+ code = checkSubtablePrivilegeForTable(pMetaData->pTableTag, pStmt);
+ }
return code;
}
@@ -1860,6 +1937,8 @@ static void clearCatalogReq(SCatalogReq* pCatalogReq) {
pCatalogReq->pTableHash = NULL;
taosArrayDestroy(pCatalogReq->pUser);
pCatalogReq->pUser = NULL;
+ taosArrayDestroy(pCatalogReq->pTableTag);
+ pCatalogReq->pTableTag = NULL;
}
static int32_t setVnodeModifOpStmt(SInsertParseContext* pCxt, SCatalogReq* pCatalogReq, const SMetaData* pMetaData,
@@ -2033,8 +2112,15 @@ static int32_t buildInsertUserAuthReq(const char* pUser, SName* pName, SArray**
return TSDB_CODE_SUCCESS;
}
+static int32_t buildInsertTableTagReq(SName* pName, SArray** pTables) { return buildInsertTableReq(pName, pTables); }
+
static int32_t buildInsertCatalogReq(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, SCatalogReq* pCatalogReq) {
- int32_t code = buildInsertUserAuthReq(pCxt->pComCxt->pUser, &pStmt->targetTableName, &pCatalogReq->pUser);
+ int32_t code = buildInsertUserAuthReq(
+ pCxt->pComCxt->pUser, (0 == pStmt->usingTableName.type ? &pStmt->targetTableName : &pStmt->usingTableName),
+ &pCatalogReq->pUser);
+ if (TSDB_CODE_SUCCESS == code && pCxt->needTableTagVal) {
+ code = buildInsertTableTagReq(&pStmt->targetTableName, &pCatalogReq->pTableTag);
+ }
if (TSDB_CODE_SUCCESS == code) {
if (0 == pStmt->usingTableName.type) {
code = buildInsertDbReq(&pStmt->targetTableName, &pCatalogReq->pTableMeta);
diff --git a/source/libs/parser/src/parInsertUtil.c b/source/libs/parser/src/parInsertUtil.c
index ac504b9809..a3b067b94d 100644
--- a/source/libs/parser/src/parInsertUtil.c
+++ b/source/libs/parser/src/parInsertUtil.c
@@ -324,7 +324,7 @@ void insDestroyVgroupDataCxt(SVgroupDataCxt* pVgCxt) {
return;
}
- tDestroySSubmitReq2(pVgCxt->pData, TSDB_MSG_FLG_ENCODE);
+ tDestroySSubmitReq(pVgCxt->pData, TSDB_MSG_FLG_ENCODE);
taosMemoryFree(pVgCxt->pData);
taosMemoryFree(pVgCxt);
}
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 21ae3c74a2..e14b750aba 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -1310,7 +1310,8 @@ static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) {
}
static EDealRes haveVectorFunction(SNode* pNode, void* pContext) {
- if (isAggFunc(pNode) || isIndefiniteRowsFunc(pNode) || isWindowPseudoColumnFunc(pNode) || isInterpPseudoColumnFunc(pNode)) {
+ if (isAggFunc(pNode) || isIndefiniteRowsFunc(pNode) || isWindowPseudoColumnFunc(pNode) ||
+ isInterpPseudoColumnFunc(pNode)) {
*((bool*)pContext) = true;
return DEAL_RES_END;
}
@@ -2577,8 +2578,13 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) {
if (TSDB_SUPER_TABLE == pRealTable->pMeta->tableType) {
pCxt->stableQuery = true;
}
- if (TSDB_SYSTEM_TABLE == pRealTable->pMeta->tableType && isSelectStmt(pCxt->pCurrStmt)) {
- ((SSelectStmt*)pCxt->pCurrStmt)->isTimeLineResult = false;
+ if (TSDB_SYSTEM_TABLE == pRealTable->pMeta->tableType) {
+ if (isSelectStmt(pCxt->pCurrStmt)) {
+ ((SSelectStmt*)pCxt->pCurrStmt)->isTimeLineResult = false;
+ } else if (isDeleteStmt(pCxt->pCurrStmt)) {
+ code = TSDB_CODE_TSC_INVALID_OPERATION;
+ break;
+ }
}
code = addNamespace(pCxt, pRealTable);
}
@@ -6642,6 +6648,7 @@ static int32_t translateGrant(STranslateContext* pCxt, SGrantStmt* pStmt) {
if (TSDB_CODE_SUCCESS == code) {
code = buildCmdMsg(pCxt, TDMT_MND_ALTER_USER, (FSerializeFunc)tSerializeSAlterUserReq, &req);
}
+ tFreeSAlterUserReq(&req);
return code;
}
diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c
index c9ee83a647..39783868b3 100644
--- a/source/libs/planner/src/planLogicCreater.c
+++ b/source/libs/planner/src/planLogicCreater.c
@@ -1443,7 +1443,7 @@ static int32_t createDeleteRootLogicNode(SLogicPlanContext* pCxt, SDeleteStmt* p
static int32_t createDeleteScanLogicNode(SLogicPlanContext* pCxt, SDeleteStmt* pDelete, SLogicNode** pLogicNode) {
SScanLogicNode* pScan = NULL;
- int32_t code = makeScanLogicNode(pCxt, (SRealTableNode*)pDelete->pFromTable, false, (SLogicNode**)&pScan);
+ int32_t code = makeScanLogicNode(pCxt, (SRealTableNode*)pDelete->pFromTable, false, (SLogicNode**)&pScan);
// set columns to scan
if (TSDB_CODE_SUCCESS == code) {
diff --git a/source/libs/qcom/src/querymsg.c b/source/libs/qcom/src/querymsg.c
index b62a3e4932..01b136d5e0 100644
--- a/source/libs/qcom/src/querymsg.c
+++ b/source/libs/qcom/src/querymsg.c
@@ -407,7 +407,7 @@ int32_t queryCreateTableMetaFromMsg(STableMetaRsp *msg, bool isStb, STableMeta *
pTableMeta->tableInfo.rowSize += pTableMeta->schema[i].bytes;
}
- qDebug("table %s uid %" PRIx64 " meta returned, type %d vgId:%d db %s stb %s suid %" PRIx64 " sver %d tver %d" PRIx64
+ qDebug("table %s uid %" PRIx64 " meta returned, type %d vgId:%d db %s stb %s suid %" PRIx64 " sver %d tver %d"
" tagNum %d colNum %d precision %d rowSize %d",
msg->tbName, pTableMeta->uid, pTableMeta->tableType, pTableMeta->vgId, msg->dbFName, msg->stbName,
pTableMeta->suid, pTableMeta->sversion, pTableMeta->tversion, pTableMeta->tableInfo.numOfTags,
diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c
index 0521076d23..d9295656e8 100644
--- a/source/libs/scalar/src/scalar.c
+++ b/source/libs/scalar/src/scalar.c
@@ -53,6 +53,7 @@ int32_t sclCreateColumnInfoData(SDataType *pType, int32_t numOfRows, SScalarPara
int32_t code = colInfoDataEnsureCapacity(pColumnData, numOfRows, true);
if (code != TSDB_CODE_SUCCESS) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
+ colDataDestroy(pColumnData);
taosMemoryFree(pColumnData);
return terrno;
}
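
Note: on a capacity failure, sclCreateColumnInfoData previously freed only the column struct and leaked any buffers colInfoDataEnsureCapacity may already have allocated; the added colDataDestroy releases those first. A minimal sketch of the ordering this enforces, with the surrounding allocation abbreviated and names taken from the code above:

    SColumnInfoData *pColumnData = taosMemoryCalloc(1, sizeof(SColumnInfoData));
    if (pColumnData == NULL) {
      return TSDB_CODE_OUT_OF_MEMORY;
    }
    if (colInfoDataEnsureCapacity(pColumnData, numOfRows, true) != TSDB_CODE_SUCCESS) {
      colDataDestroy(pColumnData);  // free the buffers owned by the column first
      taosMemoryFree(pColumnData);  // then the container itself
      return TSDB_CODE_OUT_OF_MEMORY;
    }
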
@@ -1061,17 +1062,20 @@ int32_t sclConvertOpValueNodeTs(SOperatorNode *node, SScalarCtx *ctx) {
if (node->pLeft && SCL_IS_VAR_VALUE_NODE(node->pLeft)) {
if (node->pRight && (TSDB_DATA_TYPE_TIMESTAMP == ((SExprNode *)node->pRight)->resType.type)) {
- SCL_ERR_JRET(sclConvertToTsValueNode(sclGetOpValueNodeTsPrecision(node->pLeft, node->pRight), (SValueNode*)node->pLeft));
+ SCL_ERR_JRET(
+ sclConvertToTsValueNode(sclGetOpValueNodeTsPrecision(node->pLeft, node->pRight), (SValueNode *)node->pLeft));
}
} else if (node->pRight && SCL_IS_NOTNULL_CONST_NODE(node->pRight)) {
if (node->pLeft && (TSDB_DATA_TYPE_TIMESTAMP == ((SExprNode *)node->pLeft)->resType.type)) {
if (SCL_IS_VAR_VALUE_NODE(node->pRight)) {
- SCL_ERR_JRET(sclConvertToTsValueNode(sclGetOpValueNodeTsPrecision(node->pLeft, node->pRight), (SValueNode*)node->pRight));
+ SCL_ERR_JRET(sclConvertToTsValueNode(sclGetOpValueNodeTsPrecision(node->pLeft, node->pRight),
+ (SValueNode *)node->pRight));
} else if (QUERY_NODE_NODE_LIST == node->pRight->type) {
- SNode* pNode;
- FOREACH(pNode, ((SNodeListNode*)node->pRight)->pNodeList) {
+ SNode *pNode;
+ FOREACH(pNode, ((SNodeListNode *)node->pRight)->pNodeList) {
if (SCL_IS_VAR_VALUE_NODE(pNode)) {
- SCL_ERR_JRET(sclConvertToTsValueNode(sclGetOpValueNodeTsPrecision(node->pLeft, pNode), (SValueNode*)pNode));
+ SCL_ERR_JRET(
+ sclConvertToTsValueNode(sclGetOpValueNodeTsPrecision(node->pLeft, pNode), (SValueNode *)pNode));
}
}
}
@@ -1086,8 +1090,6 @@ _return:
return DEAL_RES_ERROR;
}
-
-
int32_t sclConvertCaseWhenValueNodeTs(SCaseWhenNode *node, SScalarCtx *ctx) {
int32_t code = 0;
@@ -1096,19 +1098,20 @@ int32_t sclConvertCaseWhenValueNodeTs(SCaseWhenNode *node, SScalarCtx *ctx) {
}
if (SCL_IS_VAR_VALUE_NODE(node->pCase)) {
- SNode* pNode;
+ SNode *pNode;
FOREACH(pNode, node->pWhenThenList) {
SExprNode *pExpr = (SExprNode *)((SWhenThenNode *)pNode)->pWhen;
if (TSDB_DATA_TYPE_TIMESTAMP == pExpr->resType.type) {
- SCL_ERR_JRET(sclConvertToTsValueNode(pExpr->resType.precision, (SValueNode*)node->pCase));
+ SCL_ERR_JRET(sclConvertToTsValueNode(pExpr->resType.precision, (SValueNode *)node->pCase));
break;
}
}
} else if (TSDB_DATA_TYPE_TIMESTAMP == ((SExprNode *)node->pCase)->resType.type) {
- SNode* pNode;
+ SNode *pNode;
FOREACH(pNode, node->pWhenThenList) {
if (SCL_IS_VAR_VALUE_NODE(((SWhenThenNode *)pNode)->pWhen)) {
- SCL_ERR_JRET(sclConvertToTsValueNode(((SExprNode *)node->pCase)->resType.precision, (SValueNode*)((SWhenThenNode *)pNode)->pWhen));
+ SCL_ERR_JRET(sclConvertToTsValueNode(((SExprNode *)node->pCase)->resType.precision,
+ (SValueNode *)((SWhenThenNode *)pNode)->pWhen));
}
}
}
@@ -1271,7 +1274,6 @@ EDealRes sclRewriteLogic(SNode **pNode, SScalarCtx *ctx) {
return DEAL_RES_CONTINUE;
}
-
EDealRes sclRewriteOperator(SNode **pNode, SScalarCtx *ctx) {
SOperatorNode *node = (SOperatorNode *)*pNode;
diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c
index 24b25cec80..df32707529 100644
--- a/source/libs/scalar/src/sclfunc.c
+++ b/source/libs/scalar/src/sclfunc.c
@@ -1662,73 +1662,6 @@ int32_t charLengthFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam
return doLengthFunction(pInput, inputNum, pOutput, tcharlength);
}
-#if 0
-static void reverseCopy(char* dest, const char* src, int16_t type, int32_t numOfRows) {
- switch(type) {
- case TSDB_DATA_TYPE_TINYINT:
- case TSDB_DATA_TYPE_UTINYINT:{
- int8_t* p = (int8_t*) dest;
- int8_t* pSrc = (int8_t*) src;
-
- for(int32_t i = 0; i < numOfRows; ++i) {
- p[i] = pSrc[numOfRows - i - 1];
- }
- return;
- }
-
- case TSDB_DATA_TYPE_SMALLINT:
- case TSDB_DATA_TYPE_USMALLINT:{
- int16_t* p = (int16_t*) dest;
- int16_t* pSrc = (int16_t*) src;
-
- for(int32_t i = 0; i < numOfRows; ++i) {
- p[i] = pSrc[numOfRows - i - 1];
- }
- return;
- }
- case TSDB_DATA_TYPE_INT:
- case TSDB_DATA_TYPE_UINT: {
- int32_t* p = (int32_t*) dest;
- int32_t* pSrc = (int32_t*) src;
-
- for(int32_t i = 0; i < numOfRows; ++i) {
- p[i] = pSrc[numOfRows - i - 1];
- }
- return;
- }
- case TSDB_DATA_TYPE_BIGINT:
- case TSDB_DATA_TYPE_UBIGINT: {
- int64_t* p = (int64_t*) dest;
- int64_t* pSrc = (int64_t*) src;
-
- for(int32_t i = 0; i < numOfRows; ++i) {
- p[i] = pSrc[numOfRows - i - 1];
- }
- return;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- float* p = (float*) dest;
- float* pSrc = (float*) src;
-
- for(int32_t i = 0; i < numOfRows; ++i) {
- p[i] = pSrc[numOfRows - i - 1];
- }
- return;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- double* p = (double*) dest;
- double* pSrc = (double*) src;
-
- for(int32_t i = 0; i < numOfRows; ++i) {
- p[i] = pSrc[numOfRows - i - 1];
- }
- return;
- }
- default: assert(0);
- }
-}
-#endif
-
bool getTimePseudoFuncEnv(SFunctionNode *UNUSED_PARAM(pFunc), SFuncExecEnv *pEnv) {
pEnv->calcMemSize = sizeof(int64_t);
return true;
diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c
index 86ba91f76d..046dab380e 100644
--- a/source/libs/stream/src/stream.c
+++ b/source/libs/stream/src/stream.c
@@ -52,7 +52,7 @@ void streamCleanUp() {
void streamSchedByTimer(void* param, void* tmrId) {
SStreamTask* pTask = (void*)param;
- if (atomic_load_8(&pTask->status.taskStatus) == TASK_STATUS__DROPPING) {
+ if (streamTaskShouldStop(&pTask->status)) {
streamMetaReleaseTask(NULL, pTask);
return;
}
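
Note: the timer callback now asks streamTaskShouldStop() instead of comparing against TASK_STATUS__DROPPING alone, so a task that is stopping (not only one being dropped) also releases itself. A rough sketch of what such a predicate looks like, assuming the status struct is the SStreamStatus used above; the real helper lives in the stream headers and may cover more states:

    // Sketch only; not the project's actual implementation.
    static bool streamTaskShouldStopSketch(SStreamStatus *pStatus) {
      int8_t status = atomic_load_8(&pStatus->taskStatus);
      return status == TASK_STATUS__STOP || status == TASK_STATUS__DROPPING;
    }
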
diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c
index 325d315262..e711700ef2 100644
--- a/source/libs/stream/src/streamExec.c
+++ b/source/libs/stream/src/streamExec.c
@@ -165,20 +165,24 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) {
batchCnt++;
- qDebug("task %d scan exec block num %d, block limit %d", pTask->id.taskId, batchCnt, batchSz);
+ qDebug("s-task:%s scan exec block num %d, block limit %d", pTask->id.idStr, batchCnt, batchSz);
- if (batchCnt >= batchSz) break;
+ if (batchCnt >= batchSz) {
+ break;
+ }
}
+
if (taosArrayGetSize(pRes) == 0) {
if (finished) {
taosArrayDestroy(pRes);
- qDebug("task %d finish recover exec task ", pTask->id.taskId);
+ qDebug("s-task:%s finish recover exec task ", pTask->id.idStr);
break;
} else {
- qDebug("task %d continue recover exec task ", pTask->id.taskId);
+ qDebug("s-task:%s continue recover exec task ", pTask->id.idStr);
continue;
}
}
+
SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0);
if (qRes == NULL) {
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c
index 55c745e417..0d1440fbde 100644
--- a/source/libs/stream/src/streamRecover.c
+++ b/source/libs/stream/src/streamRecover.c
@@ -20,6 +20,8 @@ int32_t streamTaskLaunchRecover(SStreamTask* pTask, int64_t version) {
if (pTask->taskLevel == TASK_LEVEL__SOURCE) {
atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__RECOVER_PREPARE);
+ qDebug("s-task:%s set task status:%d and start recover", pTask->id.idStr, pTask->status.taskStatus);
+
streamSetParamForRecover(pTask);
streamSourceRecoverPrepareStep1(pTask, version);
@@ -197,7 +199,6 @@ int32_t streamBuildSourceRecover1Req(SStreamTask* pTask, SStreamRecoverStep1Req*
}
int32_t streamSourceRecoverScanStep1(SStreamTask* pTask) {
- //
return streamScanExec(pTask, 100);
}
@@ -210,8 +211,11 @@ int32_t streamBuildSourceRecover2Req(SStreamTask* pTask, SStreamRecoverStep2Req*
int32_t streamSourceRecoverScanStep2(SStreamTask* pTask, int64_t ver) {
void* exec = pTask->exec.pExecutor;
+
+ qDebug("s-task:%s recover step2(blocking stage) started", pTask->id.idStr);
if (qStreamSourceRecoverStep2(exec, ver) < 0) {
}
+
return streamScanExec(pTask, 100);
}
diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c
index aab547223f..dd670595f0 100644
--- a/source/os/src/osFile.c
+++ b/source/os/src/osFile.c
@@ -538,10 +538,11 @@ int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime) {
return -1;
}
- struct stat fileStat;
#ifdef WINDOWS
- int32_t code = _fstat(pFile->fd, &fileStat);
+ struct __stat64 fileStat;
+ int32_t code = _fstat64(pFile->fd, &fileStat);
#else
+ struct stat fileStat;
int32_t code = fstat(pFile->fd, &fileStat);
#endif
if (code < 0) {
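
Note: on Windows, _fstat fills a struct _stat whose st_size is a 32-bit field, so the size of a file larger than 2 GB cannot be reported correctly; _fstat64 with struct __stat64 returns a 64-bit size. A standalone illustration of the same #ifdef pattern, independent of the osFile wrappers:

    #include <sys/stat.h>
    #include <stdint.h>

    /* Returns the size of an already-open descriptor, using the 64-bit stat
       variant on Windows so files larger than 2 GB are reported correctly. */
    static int64_t fd_size(int fd) {
    #ifdef _WIN32
      struct __stat64 st;
      if (_fstat64(fd, &st) != 0) return -1;
    #else
      struct stat st;
      if (fstat(fd, &st) != 0) return -1;
    #endif
      return (int64_t)st.st_size;
    }
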
diff --git a/source/os/src/osLocale.c b/source/os/src/osLocale.c
index 7008c38576..129faaacc8 100644
--- a/source/os/src/osLocale.c
+++ b/source/os/src/osLocale.c
@@ -171,7 +171,7 @@ void taosGetSystemLocale(char *outLocale, char *outCharset) {
strcpy(outLocale, "en_US.UTF-8");
} else {
tstrncpy(outLocale, locale, TD_LOCALE_LEN);
- // printf("locale not configured, set to system default:%s", outLocale);
+ printf("locale not configured, set to system default:%s\n", outLocale);
}
// if user does not specify the charset, extract it from locale
diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c
index b15bb519e7..f8ce680531 100644
--- a/source/util/src/tarray.c
+++ b/source/util/src/tarray.c
@@ -69,8 +69,6 @@ SArray* taosArrayInit_s(size_t elemSize, size_t initialSize) {
}
static int32_t taosArrayResize(SArray* pArray) {
- assert(pArray->size >= pArray->capacity);
-
size_t size = pArray->capacity;
size = (size << 1u);
@@ -252,12 +250,15 @@ void* taosArrayInsert(SArray* pArray, size_t index, void* pData) {
}
void taosArraySet(SArray* pArray, size_t index, void* pData) {
- assert(index < pArray->size);
+ ASSERT(index < pArray->size);
memcpy(TARRAY_GET_ELEM(pArray, index), pData, pArray->elemSize);
}
void taosArrayPopFrontBatch(SArray* pArray, size_t cnt) {
- assert(cnt <= pArray->size);
+ if (cnt > pArray->size) {
+ cnt = pArray->size;
+ }
+
pArray->size = pArray->size - cnt;
if (pArray->size == 0 || cnt == 0) {
return;
@@ -266,12 +267,15 @@ void taosArrayPopFrontBatch(SArray* pArray, size_t cnt) {
}
void taosArrayPopTailBatch(SArray* pArray, size_t cnt) {
- assert(cnt <= pArray->size);
+ if (cnt >= pArray->size) {
+ cnt = pArray->size;
+ }
+
pArray->size = pArray->size - cnt;
}
void taosArrayRemove(SArray* pArray, size_t index) {
- assert(index < pArray->size);
+ ASSERT(index < pArray->size);
if (index == pArray->size - 1) {
taosArrayPop(pArray);
@@ -483,7 +487,8 @@ void* taosDecodeArray(const void* buf, SArray** pArray, FDecode decode, int32_t
// todo remove it
// order array
void taosArraySortPWithExt(SArray* pArray, __ext_compar_fn_t fn, const void* param) {
- taosArrayGetSize(pArray) > 8 ? taosArrayQuickSort(pArray, fn, param) : taosArrayInsertSort(pArray, fn, param);
+ taosqsort(pArray->pData, pArray->size, pArray->elemSize, param, fn);
+// taosArrayGetSize(pArray) > 8 ? taosArrayQuickSort(pArray, fn, param) : taosArrayInsertSort(pArray, fn, param);
}
void taosArraySwap(SArray* a, SArray* b) {
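
Note: taosArrayPopFrontBatch/taosArrayPopTailBatch now clamp an oversized cnt to the array size instead of asserting, so an over-pop simply empties the array. A small usage sketch under the new contract, using the SArray API as it appears elsewhere in the tree:

    SArray *pArr = taosArrayInit(4, sizeof(int32_t));
    for (int32_t i = 0; i < 3; ++i) {
      taosArrayPush(pArr, &i);
    }
    // Previously this tripped assert(cnt <= pArray->size); now it simply drains
    // the array, leaving taosArrayGetSize(pArr) == 0.
    taosArrayPopFrontBatch(pArr, 10);
    taosArrayDestroy(pArr);
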
diff --git a/source/util/src/tcache.c b/source/util/src/tcache.c
index 1d480e7beb..28d9b412a0 100644
--- a/source/util/src/tcache.c
+++ b/source/util/src/tcache.c
@@ -243,11 +243,6 @@ static FORCE_INLINE STrashElem *doRemoveElemInTrashcan(SCacheObj *pCacheObj, STr
if (next) {
next->prev = pElem->prev;
}
-
- if (pCacheObj->numOfElemsInTrash == 0) {
- assert(pCacheObj->pTrash == NULL);
- }
-
return next;
}
@@ -261,8 +256,6 @@ static FORCE_INLINE void doDestroyTrashcanElem(SCacheObj *pCacheObj, STrashElem
}
static void pushfrontNodeInEntryList(SCacheEntry *pEntry, SCacheNode *pNode) {
- assert(pNode != NULL && pEntry != NULL);
-
pNode->pNext = pEntry->next;
pEntry->next = pNode;
pEntry->num += 1;
@@ -271,7 +264,6 @@ static void pushfrontNodeInEntryList(SCacheEntry *pEntry, SCacheNode *pNode) {
static void removeNodeInEntryList(SCacheEntry *pe, SCacheNode *prev, SCacheNode *pNode) {
if (prev == NULL) {
- ASSERT(pe->next == pNode);
pe->next = pNode->pNext;
} else {
prev->pNext = pNode->pNext;
@@ -471,7 +463,6 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen
SCacheNode *pNode = doSearchInEntryList(pe, key, keyLen, &prev);
if (pNode != NULL) {
int32_t ref = T_REF_INC(pNode);
- ASSERT(ref > 0);
}
taosRUnLockLatch(&pe->latch);
@@ -503,7 +494,7 @@ void *taosCacheAcquireByData(SCacheObj *pCacheObj, void *data) {
uDebug("cache:%s, data: %p acquired by data in cache, refcnt:%d", pCacheObj->name, ptNode->data, ref);
// the data if referenced by at least one object, so the reference count must be greater than the value of 2.
- assert(ref >= 2);
+ ASSERT(ref >= 2);
return data;
}
@@ -516,7 +507,6 @@ void *taosCacheTransferData(SCacheObj *pCacheObj, void **data) {
return NULL;
}
- assert(T_REF_VAL_GET(ptNode) >= 1);
char *d = *data;
// clear its reference to old area
@@ -575,19 +565,19 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
if (ref == 1) {
// If it is the last ref, remove it from trashcan linked-list first, and then destroy it.Otherwise, it may be
// destroyed by refresh worker if decrease ref count before removing it from linked-list.
- assert(pNode->pTNodeHeader->pData == pNode);
+ ASSERT(pNode->pTNodeHeader->pData == pNode);
__trashcan_wr_lock(pCacheObj);
doRemoveElemInTrashcan(pCacheObj, pNode->pTNodeHeader);
__trashcan_unlock(pCacheObj);
ref = T_REF_DEC(pNode);
- assert(ref == 0);
+ ASSERT(ref == 0);
doDestroyTrashcanElem(pCacheObj, pNode->pTNodeHeader);
} else {
ref = T_REF_DEC(pNode);
- assert(ref >= 0);
+ ASSERT(ref >= 0);
}
} else {
// NOTE: remove it from hash in the first place, otherwise, the pNode may have been released by other thread
@@ -609,13 +599,12 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
"others already, prev must in trashcan",
pCacheObj->name, pNode->key, p->data, T_REF_VAL_GET(p), pNode->data, T_REF_VAL_GET(pNode));
- assert(p->pTNodeHeader == NULL && pNode->pTNodeHeader != NULL);
+ ASSERT(p->pTNodeHeader == NULL && pNode->pTNodeHeader != NULL);
} else {
removeNodeInEntryList(pe, prev, p);
uDebug("cache:%s, key:%p, %p successfully removed from hash table, refcnt:%d", pCacheObj->name, pNode->key,
pNode->data, ref);
if (ref > 0) {
- assert(pNode->pTNodeHeader == NULL);
taosAddToTrashcan(pCacheObj, pNode);
} else { // ref == 0
atomic_sub_fetch_64(&pCacheObj->sizeInBytes, pNode->size);
@@ -736,7 +725,7 @@ SCacheNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *pDat
void taosAddToTrashcan(SCacheObj *pCacheObj, SCacheNode *pNode) {
if (pNode->inTrashcan) { /* node is already in trash */
- assert(pNode->pTNodeHeader != NULL && pNode->pTNodeHeader->pData == pNode);
+ ASSERT(pNode->pTNodeHeader != NULL && pNode->pTNodeHeader->pData == pNode);
return;
}
@@ -782,7 +771,7 @@ void taosTrashcanEmpty(SCacheObj *pCacheObj, bool force) {
STrashElem *pElem = pCacheObj->pTrash;
while (pElem) {
T_REF_VAL_CHECK(pElem->pData);
- assert(pElem->next != pElem && pElem->prev != pElem);
+ ASSERT(pElem->next != pElem && pElem->prev != pElem);
if (force || (T_REF_VAL_GET(pElem->pData) == 0)) {
uDebug("cache:%s, key:%p, %p removed from trashcan. numOfElem in trashcan:%d", pCacheObj->name, pElem->pData->key,
@@ -814,8 +803,6 @@ void doCleanupDataCache(SCacheObj *pCacheObj) {
}
static void doCacheRefresh(SCacheObj *pCacheObj, int64_t time, __cache_trav_fn_t fp, void *param1) {
- assert(pCacheObj != NULL);
-
SCacheObjTravSup sup = {.pCacheObj = pCacheObj, .fp = fp, .time = time, .param1 = param1};
doTraverseElems(pCacheObj, doRemoveExpiredFn, &sup);
}
@@ -827,9 +814,7 @@ void taosCacheRefreshWorkerUnexpectedStopped(void) {
}
void *taosCacheTimedRefresh(void *handle) {
- assert(pCacheArrayList != NULL);
uDebug("cache refresh thread starts");
-
setThreadName("cacheRefresh");
const int32_t SLEEP_DURATION = 500; // 500 ms
@@ -928,7 +913,6 @@ void taosStopCacheRefreshWorker(void) {
size_t taosCacheGetNumOfObj(const SCacheObj *pCacheObj) { return pCacheObj->numOfElems + pCacheObj->numOfElemsInTrash; }
SCacheIter *taosCacheCreateIter(const SCacheObj *pCacheObj) {
- ASSERT(pCacheObj != NULL);
SCacheIter *pIter = taosMemoryCalloc(1, sizeof(SCacheIter));
pIter->pCacheObj = (SCacheObj *)pCacheObj;
pIter->entryIndex = -1;
@@ -978,12 +962,8 @@ bool taosCacheIterNext(SCacheIter *pIter) {
SCacheNode *pNode = pEntry->next;
for (int32_t i = 0; i < pEntry->num; ++i) {
- ASSERT(pNode != NULL);
-
pIter->pCurrent[i] = pNode;
int32_t ref = T_REF_INC(pIter->pCurrent[i]);
- ASSERT(ref >= 1);
-
pNode = pNode->pNext;
}
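
Note: the assert()-to-ASSERT() swaps in tcache.c (and in thash.c, tlrucache.c and tskiplist.c below) route invariant checks through the project's macro rather than libc assert, so they follow the build's own logging/abort policy instead of always aborting. The actual definition lives in the util headers; the following is only an illustration of the kind of macro being assumed, not the project's real one:

    // Hypothetical shape of such a macro: log the failure, abort only in debug builds.
    #define ASSERT_SKETCH(cond)                                                     \
      do {                                                                          \
        if (!(cond)) {                                                              \
          uError("assertion failed: %s, at %s:%d", #cond, __FILE__, __LINE__);      \
          assert(cond);                                                             \
        }                                                                           \
      } while (0)
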
diff --git a/source/util/src/thash.c b/source/util/src/thash.c
index 926dc304a4..cf4f17bfbc 100644
--- a/source/util/src/thash.c
+++ b/source/util/src/thash.c
@@ -150,7 +150,6 @@ static FORCE_INLINE SHashNode *doSearchInEntryList(SHashObj *pHashObj, SHashEntr
//atomic_add_fetch_64(&pHashObj->compTimes, 1);
if ((pNode->keyLen == keyLen) && ((*(pHashObj->equalFp))(GET_HASH_NODE_KEY(pNode), key, keyLen) == 0) &&
pNode->removed == 0) {
- assert(pNode->hashVal == hashVal);
break;
}
@@ -189,8 +188,6 @@ static SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *p
*/
static FORCE_INLINE void doUpdateHashNode(SHashObj *pHashObj, SHashEntry *pe, SHashNode *prev, SHashNode *pNode,
SHashNode *pNewNode) {
- assert(pNode->keyLen == pNewNode->keyLen);
-
atomic_sub_fetch_16(&pNode->refCount, 1);
if (prev != NULL) {
prev->next = pNewNode;
@@ -236,7 +233,7 @@ static FORCE_INLINE bool taosHashTableEmpty(const SHashObj *pHashObj) { return t
SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, SHashLockTypeE type) {
if (fn == NULL) {
- assert(0);
+ terrno = TSDB_CODE_INVALID_PARA;
return NULL;
}
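
Note: taosHashInit now reports a NULL hash function by setting terrno to TSDB_CODE_INVALID_PARA and returning NULL instead of asserting, so call sites must treat a NULL return as an error. A caller-side sketch under the new contract; the hash-function getter is whatever the call site already uses:

    SHashObj *pHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
    if (pHash == NULL) {
      return terrno;  // TSDB_CODE_INVALID_PARA when fn was NULL, OOM otherwise
    }
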
@@ -262,8 +259,6 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, SHashLockTyp
pHashObj->freeFp = NULL;
pHashObj->callbackFp = NULL;
- ASSERT((pHashObj->capacity & (pHashObj->capacity - 1)) == 0);
-
pHashObj->hashList = (SHashEntry **)taosMemoryMalloc(pHashObj->capacity * sizeof(void *));
if (pHashObj->hashList == NULL) {
taosMemoryFree(pHashObj);
@@ -342,19 +337,10 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, const vo
taosHashEntryWLock(pHashObj, pe);
SHashNode *pNode = pe->next;
-#if 0
- if (pe->num > 0) {
- assert(pNode != NULL);
- } else {
- assert(pNode == NULL);
- }
-#endif
-
SHashNode *prev = NULL;
while (pNode) {
if ((pNode->keyLen == keyLen) && (*(pHashObj->equalFp))(GET_HASH_NODE_KEY(pNode), key, keyLen) == 0 &&
pNode->removed == 0) {
- assert(pNode->hashVal == hashVal);
break;
}
@@ -370,8 +356,6 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, const vo
}
pushfrontNodeInEntryList(pe, pNewNode);
- assert(pe->next != NULL);
-
taosHashEntryWUnlock(pHashObj, pe);
// enable resize
@@ -446,14 +430,6 @@ void *taosHashGetImpl(SHashObj *pHashObj, const void *key, size_t keyLen, void *
char *data = NULL;
taosHashEntryRLock(pHashObj, pe);
-#if 0
- if (pe->num > 0) {
- assert(pe->next != NULL);
- } else {
- assert(pe->next == NULL);
- }
-#endif
-
SHashNode *pNode = doSearchInEntryList(pHashObj, pe, key, keyLen, hashVal);
if (pNode != NULL) {
if (pHashObj->callbackFp != NULL) {
@@ -514,8 +490,6 @@ int32_t taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) {
// double check after locked
if (pe->num == 0) {
- assert(pe->next == NULL);
-
taosHashEntryWUnlock(pHashObj, pe);
taosHashRUnlock(pHashObj);
return -1;
@@ -568,13 +542,10 @@ void taosHashClear(SHashObj *pHashObj) {
for (int32_t i = 0; i < pHashObj->capacity; ++i) {
SHashEntry *pEntry = pHashObj->hashList[i];
if (pEntry->num == 0) {
- assert(pEntry->next == NULL);
continue;
}
pNode = pEntry->next;
- assert(pNode != NULL);
-
while (pNode) {
pNext = pNode->next;
FREE_HASH_NODE(pHashObj->freeFp, pNode);
@@ -671,14 +642,11 @@ void taosHashTableResize(SHashObj *pHashObj) {
SHashNode *pPrev = NULL;
if (pe->num == 0) {
- assert(pe->next == NULL);
continue;
}
pNode = pe->next;
- assert(pNode != NULL);
-
while (pNode != NULL) {
int32_t newIdx = HASH_INDEX(pNode->hashVal, pHashObj->capacity);
pNext = pNode->next;
@@ -728,12 +696,8 @@ SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, s
}
void pushfrontNodeInEntryList(SHashEntry *pEntry, SHashNode *pNode) {
- assert(pNode != NULL && pEntry != NULL);
-
pNode->next = pEntry->next;
pEntry->next = pNode;
-
- ASSERT(pNode->next != pNode);
pEntry->num += 1;
}
@@ -847,19 +811,7 @@ void *taosHashIterate(SHashObj *pHashObj, void *p) {
/*uint16_t prevRef = atomic_load_16(&pNode->refCount);*/
uint16_t afterRef = atomic_add_fetch_16(&pNode->refCount, 1);
-#if 0
- ASSERT(prevRef < afterRef);
- // the reference count value is overflow, which will cause the delete node operation immediately.
- if (prevRef > afterRef) {
- uError("hash entry ref count overflow, prev ref:%d, current ref:%d", prevRef, afterRef);
- // restore the value
- atomic_sub_fetch_16(&pNode->refCount, 1);
- data = NULL;
- } else {
- data = GET_HASH_NODE_DATA(pNode);
- }
-#endif
data = GET_HASH_NODE_DATA(pNode);
if (afterRef >= MAX_WARNING_REF_COUNT) {
diff --git a/source/util/src/tlist.c b/source/util/src/tlist.c
index 1b12ea0cdd..5d729d27d6 100644
--- a/source/util/src/tlist.c
+++ b/source/util/src/tlist.c
@@ -104,7 +104,6 @@ SListNode *tdListPopNode(SList *list, SListNode *node) {
// Move all node elements from src to dst, the dst is assumed as an empty list
void tdListMove(SList *src, SList *dst) {
- // assert(dst->eleSize == src->eleSize);
SListNode *node = NULL;
while ((node = tdListPopHead(src)) != NULL) {
tdListAppendNode(dst, node);
diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c
index 2a18f420cd..d415379f92 100644
--- a/source/util/src/tlog.c
+++ b/source/util/src/tlog.c
@@ -347,7 +347,6 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) {
char name[LOG_FILE_NAME_LEN + 50] = "\0";
int32_t logstat0_mtime, logstat1_mtime;
- int32_t size;
tsLogObj.maxLines = maxLines;
tsLogObj.fileNum = maxFileNum;
@@ -395,8 +394,7 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) {
printf("\nfailed to fstat log file:%s, reason:%s\n", fileName, strerror(errno));
return -1;
}
- size = (int32_t)filesize;
- tsLogObj.lines = size / 60;
+ tsLogObj.lines = (int32_t)(filesize / 60);
taosLSeekFile(tsLogObj.logHandle->pFile, 0, SEEK_END);
diff --git a/source/util/src/tlosertree.c b/source/util/src/tlosertree.c
index bf99212b78..c476baa790 100644
--- a/source/util/src/tlosertree.c
+++ b/source/util/src/tlosertree.c
@@ -115,8 +115,6 @@ void tMergeTreeAdjust(SMultiwayMergeTreeInfo* pTree, int32_t idx) {
}
void tMergeTreeRebuild(SMultiwayMergeTreeInfo* pTree) {
- ASSERT((pTree->totalSources & 0x1) == 0);
-
tMergeTreeInit(pTree);
for (int32_t i = pTree->totalSources - 1; i >= pTree->numOfSources; i--) {
tMergeTreeAdjust(pTree, i);
diff --git a/source/util/src/tlrucache.c b/source/util/src/tlrucache.c
index f4172fbb44..e182800d9c 100644
--- a/source/util/src/tlrucache.c
+++ b/source/util/src/tlrucache.c
@@ -85,13 +85,13 @@ struct SLRUEntry {
#define TAOS_LRU_ENTRY_REF(h) (++(h)->refs)
static bool taosLRUEntryUnref(SLRUEntry *entry) {
- assert(entry->refs > 0);
+ ASSERT(entry->refs > 0);
--entry->refs;
return entry->refs == 0;
}
static void taosLRUEntryFree(SLRUEntry *entry) {
- assert(entry->refs == 0);
+ ASSERT(entry->refs == 0);
if (entry->deleter) {
(*entry->deleter)(entry->keyData, entry->keyLength, entry->value);
@@ -127,7 +127,7 @@ static void taosLRUEntryTableApply(SLRUEntryTable *table, _taos_lru_table_func_t
SLRUEntry *h = table->list[i];
while (h) {
SLRUEntry *n = h->nextHash;
- assert(TAOS_LRU_ENTRY_IN_CACHE(h));
+ ASSERT(TAOS_LRU_ENTRY_IN_CACHE(h));
func(h);
h = n;
}
@@ -184,7 +184,7 @@ static void taosLRUEntryTableResize(SLRUEntryTable *table) {
++count;
}
}
- assert(table->elems == count);
+ ASSERT(table->elems == count);
taosMemoryFree(table->list);
table->list = newList;
@@ -240,17 +240,16 @@ struct SLRUCacheShard {
static void taosLRUCacheShardMaintainPoolSize(SLRUCacheShard *shard) {
while (shard->highPriPoolUsage > shard->highPriPoolCapacity) {
shard->lruLowPri = shard->lruLowPri->next;
- assert(shard->lruLowPri != &shard->lru);
+ ASSERT(shard->lruLowPri != &shard->lru);
TAOS_LRU_ENTRY_SET_IN_HIGH_POOL(shard->lruLowPri, false);
- assert(shard->highPriPoolUsage >= shard->lruLowPri->totalCharge);
+ ASSERT(shard->highPriPoolUsage >= shard->lruLowPri->totalCharge);
shard->highPriPoolUsage -= shard->lruLowPri->totalCharge;
}
}
static void taosLRUCacheShardLRUInsert(SLRUCacheShard *shard, SLRUEntry *e) {
- assert(e->next == NULL);
- assert(e->prev == NULL);
+ ASSERT(e->next == NULL && e->prev == NULL);
if (shard->highPriPoolRatio > 0 && (TAOS_LRU_ENTRY_IS_HIGH_PRI(e) || TAOS_LRU_ENTRY_HAS_HIT(e))) {
e->next = &shard->lru;
@@ -277,8 +276,7 @@ static void taosLRUCacheShardLRUInsert(SLRUCacheShard *shard, SLRUEntry *e) {
}
static void taosLRUCacheShardLRURemove(SLRUCacheShard *shard, SLRUEntry *e) {
- assert(e->next);
- assert(e->prev);
+ ASSERT(e->next && e->prev);
if (shard->lruLowPri == e) {
shard->lruLowPri = e->prev;
@@ -287,10 +285,10 @@ static void taosLRUCacheShardLRURemove(SLRUCacheShard *shard, SLRUEntry *e) {
e->prev->next = e->next;
e->prev = e->next = NULL;
- assert(shard->lruUsage >= e->totalCharge);
+ ASSERT(shard->lruUsage >= e->totalCharge);
shard->lruUsage -= e->totalCharge;
if (TAOS_LRU_ENTRY_IN_HIGH_POOL(e)) {
- assert(shard->highPriPoolUsage >= e->totalCharge);
+ ASSERT(shard->highPriPoolUsage >= e->totalCharge);
shard->highPriPoolUsage -= e->totalCharge;
}
}
@@ -298,13 +296,13 @@ static void taosLRUCacheShardLRURemove(SLRUCacheShard *shard, SLRUEntry *e) {
static void taosLRUCacheShardEvictLRU(SLRUCacheShard *shard, size_t charge, SArray *deleted) {
while (shard->usage + charge > shard->capacity && shard->lru.next != &shard->lru) {
SLRUEntry *old = shard->lru.next;
- assert(TAOS_LRU_ENTRY_IN_CACHE(old) && !TAOS_LRU_ENTRY_HAS_REFS(old));
+ ASSERT(TAOS_LRU_ENTRY_IN_CACHE(old) && !TAOS_LRU_ENTRY_HAS_REFS(old));
taosLRUCacheShardLRURemove(shard, old);
taosLRUEntryTableRemove(&shard->table, old->keyData, old->keyLength, old->hash);
TAOS_LRU_ENTRY_SET_IN_CACHE(old, false);
- assert(shard->usage >= old->totalCharge);
+ ASSERT(shard->usage >= old->totalCharge);
shard->usage -= old->totalCharge;
taosArrayPush(deleted, &old);
@@ -391,11 +389,11 @@ static LRUStatus taosLRUCacheShardInsertEntry(SLRUCacheShard *shard, SLRUEntry *
if (old != NULL) {
status = TAOS_LRU_STATUS_OK_OVERWRITTEN;
- assert(TAOS_LRU_ENTRY_IN_CACHE(old));
+ ASSERT(TAOS_LRU_ENTRY_IN_CACHE(old));
TAOS_LRU_ENTRY_SET_IN_CACHE(old, false);
if (!TAOS_LRU_ENTRY_HAS_REFS(old)) {
taosLRUCacheShardLRURemove(shard, old);
- assert(shard->usage >= old->totalCharge);
+ ASSERT(shard->usage >= old->totalCharge);
shard->usage -= old->totalCharge;
taosArrayPush(lastReferenceList, &old);
@@ -455,7 +453,7 @@ static LRUHandle *taosLRUCacheShardLookup(SLRUCacheShard *shard, const void *key
taosThreadMutexLock(&shard->mutex);
e = taosLRUEntryTableLookup(&shard->table, key, keyLen, hash);
if (e != NULL) {
- assert(TAOS_LRU_ENTRY_IN_CACHE(e));
+ ASSERT(TAOS_LRU_ENTRY_IN_CACHE(e));
if (!TAOS_LRU_ENTRY_HAS_REFS(e)) {
taosLRUCacheShardLRURemove(shard, e);
}
@@ -474,12 +472,12 @@ static void taosLRUCacheShardErase(SLRUCacheShard *shard, const void *key, size_
SLRUEntry *e = taosLRUEntryTableRemove(&shard->table, key, keyLen, hash);
if (e != NULL) {
- assert(TAOS_LRU_ENTRY_IN_CACHE(e));
+ ASSERT(TAOS_LRU_ENTRY_IN_CACHE(e));
TAOS_LRU_ENTRY_SET_IN_CACHE(e, false);
if (!TAOS_LRU_ENTRY_HAS_REFS(e)) {
taosLRUCacheShardLRURemove(shard, e);
- assert(shard->usage >= e->totalCharge);
+ ASSERT(shard->usage >= e->totalCharge);
shard->usage -= e->totalCharge;
lastReference = true;
}
@@ -499,11 +497,11 @@ static void taosLRUCacheShardEraseUnrefEntries(SLRUCacheShard *shard) {
while (shard->lru.next != &shard->lru) {
SLRUEntry *old = shard->lru.next;
- assert(TAOS_LRU_ENTRY_IN_CACHE(old) && !TAOS_LRU_ENTRY_HAS_REFS(old));
+ ASSERT(TAOS_LRU_ENTRY_IN_CACHE(old) && !TAOS_LRU_ENTRY_HAS_REFS(old));
taosLRUCacheShardLRURemove(shard, old);
taosLRUEntryTableRemove(&shard->table, old->keyData, old->keyLength, old->hash);
TAOS_LRU_ENTRY_SET_IN_CACHE(old, false);
- assert(shard->usage >= old->totalCharge);
+ ASSERT(shard->usage >= old->totalCharge);
shard->usage -= old->totalCharge;
taosArrayPush(lastReferenceList, &old);
@@ -524,7 +522,7 @@ static bool taosLRUCacheShardRef(SLRUCacheShard *shard, LRUHandle *handle) {
SLRUEntry *e = (SLRUEntry *)handle;
taosThreadMutexLock(&shard->mutex);
- assert(TAOS_LRU_ENTRY_HAS_REFS(e));
+ ASSERT(TAOS_LRU_ENTRY_HAS_REFS(e));
TAOS_LRU_ENTRY_REF(e);
taosThreadMutexUnlock(&shard->mutex);
@@ -545,7 +543,7 @@ static bool taosLRUCacheShardRelease(SLRUCacheShard *shard, LRUHandle *handle, b
lastReference = taosLRUEntryUnref(e);
if (lastReference && TAOS_LRU_ENTRY_IN_CACHE(e)) {
if (shard->usage > shard->capacity || eraseIfLastRef) {
- assert(shard->lru.next == &shard->lru || eraseIfLastRef);
+ ASSERT(shard->lru.next == &shard->lru || eraseIfLastRef);
taosLRUEntryTableRemove(&shard->table, e->keyData, e->keyLength, e->hash);
TAOS_LRU_ENTRY_SET_IN_CACHE(e, false);
@@ -557,7 +555,7 @@ static bool taosLRUCacheShardRelease(SLRUCacheShard *shard, LRUHandle *handle, b
}
if (lastReference && e->value) {
- assert(shard->usage >= e->totalCharge);
+ ASSERT(shard->usage >= e->totalCharge);
shard->usage -= e->totalCharge;
}
@@ -595,7 +593,7 @@ static size_t taosLRUCacheShardGetPinnedUsage(SLRUCacheShard *shard) {
taosThreadMutexLock(&shard->mutex);
- assert(shard->usage >= shard->lruUsage);
+ ASSERT(shard->usage >= shard->lruUsage);
usage = shard->usage - shard->lruUsage;
taosThreadMutexUnlock(&shard->mutex);
@@ -687,7 +685,7 @@ void taosLRUCacheCleanup(SLRUCache *cache) {
if (cache) {
if (cache->shards) {
int numShards = cache->numShards;
- assert(numShards > 0);
+ ASSERT(numShards > 0);
for (int i = 0; i < numShards; ++i) {
taosLRUCacheShardCleanup(&cache->shards[i]);
}
diff --git a/source/util/src/tsched.c b/source/util/src/tsched.c
index 467f26b362..5c1706e405 100644
--- a/source/util/src/tsched.c
+++ b/source/util/src/tsched.c
@@ -137,7 +137,6 @@ void *taosProcessSchedQueue(void *scheduler) {
while (1) {
if ((ret = tsem_wait(&pSched->fullSem)) != 0) {
uFatal("wait %s fullSem failed(%s)", pSched->label, strerror(errno));
- ASSERT(0);
}
if (atomic_load_8(&pSched->stop)) {
break;
@@ -145,7 +144,6 @@ void *taosProcessSchedQueue(void *scheduler) {
if ((ret = taosThreadMutexLock(&pSched->queueMutex)) != 0) {
uFatal("lock %s queueMutex failed(%s)", pSched->label, strerror(errno));
- ASSERT(0);
}
msg = pSched->queue[pSched->fullSlot];
@@ -154,12 +152,10 @@ void *taosProcessSchedQueue(void *scheduler) {
if ((ret = taosThreadMutexUnlock(&pSched->queueMutex)) != 0) {
uFatal("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno));
- ASSERT(0);
}
if ((ret = tsem_post(&pSched->emptySem)) != 0) {
uFatal("post %s emptySem failed(%s)", pSched->label, strerror(errno));
- ASSERT(0);
}
if (msg.fp)
@@ -187,12 +183,10 @@ int taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) {
if ((ret = tsem_wait(&pSched->emptySem)) != 0) {
uFatal("wait %s emptySem failed(%s)", pSched->label, strerror(errno));
- ASSERT(0);
}
if ((ret = taosThreadMutexLock(&pSched->queueMutex)) != 0) {
uFatal("lock %s queueMutex failed(%s)", pSched->label, strerror(errno));
- ASSERT(0);
}
pSched->queue[pSched->emptySlot] = *pMsg;
@@ -200,12 +194,10 @@ int taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) {
if ((ret = taosThreadMutexUnlock(&pSched->queueMutex)) != 0) {
uFatal("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno));
- ASSERT(0);
}
if ((ret = tsem_post(&pSched->fullSem)) != 0) {
uFatal("post %s fullSem failed(%s)", pSched->label, strerror(errno));
- ASSERT(0);
}
return ret;
}
diff --git a/source/util/src/tsimplehash.c b/source/util/src/tsimplehash.c
index ec1991923f..4c7983a983 100644
--- a/source/util/src/tsimplehash.c
+++ b/source/util/src/tsimplehash.c
@@ -361,10 +361,6 @@ int32_t tSimpleHashIterateRemove(SSHashObj *pHashObj, const void *key, size_t ke
return TSDB_CODE_SUCCESS;
}
-static void destroyItems(void* pItem) {
- taosMemoryFree(*(void**)pItem);
-}
-
void tSimpleHashClear(SSHashObj *pHashObj) {
if (!pHashObj || taosHashTableEmpty(pHashObj)) {
return;
diff --git a/source/util/src/tskiplist.c b/source/util/src/tskiplist.c
index c72c5c70ae..222e0e8a51 100644
--- a/source/util/src/tskiplist.c
+++ b/source/util/src/tskiplist.c
@@ -268,8 +268,9 @@ SSkipListIterator *tSkipListCreateIter(SSkipList *pSkipList) {
}
SSkipListIterator *tSkipListCreateIterFromVal(SSkipList *pSkipList, const char *val, int32_t type, int32_t order) {
- ASSERT(order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC);
- ASSERT(pSkipList != NULL);
+ if (order != TSDB_ORDER_ASC && order != TSDB_ORDER_DESC) {
+ return NULL;
+ }
SSkipListIterator *iter = doCreateSkipListIterator(pSkipList, order);
if (val == NULL) {
@@ -585,7 +586,6 @@ static FORCE_INLINE int32_t getSkipListRandLevel(SSkipList *pSkipList) {
}
}
- ASSERT(level <= pSkipList->maxLevel);
return level;
}
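
Note: tSkipListCreateIterFromVal now returns NULL for an order other than ASC/DESC rather than asserting, so callers need an explicit NULL check. A sketch of the caller pattern, where pList, pVal and order are assumed to come from the surrounding code and the iterator functions are those declared in tskiplist.h:

    SSkipListIterator *pIter = tSkipListCreateIterFromVal(pList, pVal, TSDB_DATA_TYPE_BIGINT, order);
    if (pIter == NULL) {
      return TSDB_CODE_INVALID_PARA;  // order was neither TSDB_ORDER_ASC nor TSDB_ORDER_DESC
    }
    while (tSkipListIterNext(pIter)) {
      SSkipListNode *pNode = tSkipListIterGet(pIter);
      // ... consume pNode ...
    }
    tSkipListDestroyIter(pIter);
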
diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task
index 6e662a9a15..7e58ea9a13 100644
--- a/tests/parallel_test/cases.task
+++ b/tests/parallel_test/cases.task
@@ -51,6 +51,16 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_math.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_time.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_26.py -Q 3
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/select_null.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/select_null.py -R
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/select_null.py -Q 2
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/select_null.py -Q 3
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/select_null.py -Q 4
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/slimit.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/slimit.py -R
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/slimit.py -Q 2
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/slimit.py -Q 3
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/slimit.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/create_wrong_topic.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/dropDbR3ConflictTransaction.py -N 3
@@ -127,11 +137,13 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_control.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_manage.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/fsync.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/multilevel.py
,,n,system-test,python3 ./test.py -f 0-others/compatibility.py
,,n,system-test,python3 ./test.py -f 0-others/tag_index_basic.py
,,n,system-test,python3 ./test.py -f 0-others/udfpy_main.py
,,n,system-test,python3 ./test.py -N 3 -f 0-others/walRetention.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_database.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_replica.py -N 3
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/opentsdb_telnet_line_taosc_insert.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/opentsdb_json_taosc_insert.py
@@ -337,6 +349,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/tb_100w_data_order.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/delete_childtable.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/delete_normaltable.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/delete_systable.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/keep_expired.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/drop.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/drop.py -N 3 -M 3 -i False -n 3
@@ -722,6 +735,7 @@
,,y,script,./test.sh -f tsim/user/privilege_db.sim
,,y,script,./test.sh -f tsim/user/privilege_sysinfo.sim
,,y,script,./test.sh -f tsim/user/privilege_topic.sim
+,,y,script,./test.sh -f tsim/user/privilege_table.sim
,,y,script,./test.sh -f tsim/db/alter_option.sim
,,y,script,./test.sh -f tsim/db/alter_replica_31.sim
,,y,script,./test.sh -f tsim/db/basic1.sim
diff --git a/tests/parallel_test/run.sh b/tests/parallel_test/run.sh
index de343752c6..f05e0dfc83 100755
--- a/tests/parallel_test/run.sh
+++ b/tests/parallel_test/run.sh
@@ -303,7 +303,7 @@ function run_thread() {
if [ ! -z "$corefile" ]; then
echo -e "\e[34m corefiles: $corefile \e[0m"
local build_dir=$log_dir/build_${hosts[index]}
- local remote_build_dir="${workdirs[index]}/{DEBUGPATH}/build"
+ local remote_build_dir="${workdirs[index]}/${DEBUGPATH}/build"
# if [ $ent -ne 0 ]; then
# remote_build_dir="${workdirs[index]}/{DEBUGPATH}/build"
# fi
diff --git a/tests/script/tsim/tag/bigint.sim b/tests/script/tsim/tag/bigint.sim
index 26a5addf6a..34fcc09411 100644
--- a/tests/script/tsim/tag/bigint.sim
+++ b/tests/script/tsim/tag/bigint.sim
@@ -123,6 +123,17 @@ sql select * from $mt where tgcol = 1
if $rows != 100 then
return -1
endi
+
+sql select * from $mt where tgcol = '1'
+if $rows != 100 then
+ return -1
+endi
+
+sql select * from $mt where tgcol = "1"
+if $rows != 100 then
+ return -1
+endi
+
sql select * from $mt where tgcol <> 1
if $rows != 100 then
return -1
diff --git a/tests/script/tsim/tag/double.sim b/tests/script/tsim/tag/double.sim
index fbdf973337..acc026c13d 100644
--- a/tests/script/tsim/tag/double.sim
+++ b/tests/script/tsim/tag/double.sim
@@ -123,6 +123,15 @@ sql select * from $mt where tgcol = 1
if $rows != 100 then
return -1
endi
+
+sql select * from $mt where tgcol = '1';
+if $rows != 100 then
+ return -1
+endi
+sql select * from $mt where tgcol = "1.0"
+if $rows != 100 then
+ return -1
+endi
sql select * from $mt where tgcol <> 1
if $rows != 100 then
return -1
diff --git a/tests/script/tsim/tag/float.sim b/tests/script/tsim/tag/float.sim
index 10fac93d5d..f62feba057 100644
--- a/tests/script/tsim/tag/float.sim
+++ b/tests/script/tsim/tag/float.sim
@@ -123,6 +123,16 @@ sql select * from $mt where tgcol = 1
if $rows != 100 then
return -1
endi
+
+sql select * from $mt where tgcol = "1.0"
+if $rows != 100 then
+ return -1
+endi
+
+sql select * from $mt where tgcol = "1"
+if $rows != 100 then
+ return -1
+endi
sql select * from $mt where tgcol <> 1
if $rows != 100 then
return -1
diff --git a/tests/script/tsim/tag/int.sim b/tests/script/tsim/tag/int.sim
index ac8d31db3b..084b7e74d7 100644
--- a/tests/script/tsim/tag/int.sim
+++ b/tests/script/tsim/tag/int.sim
@@ -123,6 +123,16 @@ sql select * from $mt where tgcol = 1
if $rows != 100 then
return -1
endi
+
+sql select * from $mt where tgcol = '1'
+if $rows != 100 then
+ return -1
+endi
+
+sql select * from $mt where tgcol = "1";
+if $rows != 100 then
+ return -1
+endi
sql select * from $mt where tgcol <> 1
if $rows != 100 then
return -1
diff --git a/tests/script/tsim/tag/int_float.sim b/tests/script/tsim/tag/int_float.sim
index 009629aac9..3034f8b64e 100644
--- a/tests/script/tsim/tag/int_float.sim
+++ b/tests/script/tsim/tag/int_float.sim
@@ -85,10 +85,22 @@ sql select * from $mt where tgcol <> 1
if $rows != 100 then
return -1
endi
+
sql select * from $mt where tgcol = 1
if $rows != 100 then
return -1
endi
+
+sql select * from $mt where tgcol = '1'
+if $rows != 100 then
+ return -1
+endi
+
+sql select * from $mt where tgcol = "1"
+if $rows != 100 then
+ return -1
+endi
+
sql select * from $mt where tgcol <> 1
if $rows != 100 then
return -1
diff --git a/tests/script/tsim/tag/tinyint.sim b/tests/script/tsim/tag/tinyint.sim
index 8560def34c..89b0134bb3 100644
--- a/tests/script/tsim/tag/tinyint.sim
+++ b/tests/script/tsim/tag/tinyint.sim
@@ -115,14 +115,36 @@ sql select * from $mt where tgcol = 0
if $rows != 100 then
return -1
endi
+
+sql select * from $mt where tgcol = '0'
+if $rows != 100 then
+ return -1
+endi
+
+sql select * from $mt where tgcol = "0"
+if $rows != 100 then
+ return -1
+endi
sql select * from $mt where tgcol <> 0
if $rows != 100 then
return -1
endi
+
sql select * from $mt where tgcol = 1
if $rows != 100 then
return -1
endi
+sql select * from $mt where tgcol = "1"
+if $rows != 100 then
+ return -1
+endi
+
+sql select * from $mt where tgcol = '1'
+if $rows != 100 then
+ return -1
+endi
+
+
sql select * from $mt where tgcol <> 1
if $rows != 100 then
return -1
diff --git a/tests/script/tsim/user/privilege_table.sim b/tests/script/tsim/user/privilege_table.sim
new file mode 100644
index 0000000000..05f91ff5b0
--- /dev/null
+++ b/tests/script/tsim/user/privilege_table.sim
@@ -0,0 +1,302 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print =============== init env
+sql drop database if exists test;
+sql create database test vgroups 1;
+sql use test;
+sql create stable st1(ts timestamp, i int) tags(id int, loc varchar(20));
+sql create table st1s1 using st1 tags(1, 'beijing');
+sql create table st1s2 using st1 tags(2, 'shanghai');
+sql insert into st1s1 values(now, 1) st1s2 values(now, 2);
+sql create stable st2(ts timestamp, i int) tags(id int, loc varchar(20));
+sql create table st2s1 using st2 tags(1, 'beijing');
+sql create table st2s2 using st2 tags(2, 'shanghai');
+sql insert into st2s1 values(now, 1) st2s2 values(now, 2);
+sql create user wxy pass 'taosdata';
+
+print =============== case 1: database unauthorized and table unauthorized
+sql close
+sql connect wxy
+
+sql reset query cache;
+sql_error select * from test.st1;
+sql_error insert into test.st1s1 values(now, 10) test.st1s2 values(now, 20);
+sql_error select * from test.st2;
+sql_error insert into test.st2s1 values(now, 10) test.st2s2 values(now, 20);
+
+
+print =============== case 2: database unauthorized and table read privilege
+sql close
+sql connect
+
+sql grant read on test.st1 to wxy;
+
+sql close
+sql connect wxy
+
+sql reset query cache;
+sql select * from test.st1;
+if $rows != 2 then
+ return -1
+endi
+sql_error insert into test.st1s1 values(now, 10) test.st1s2 values(now, 20);
+sql_error select * from test.st2;
+sql_error insert into test.st2s1 values(now, 10) test.st2s2 values(now, 20);
+
+print =============== case 3: database unauthorized and table read privilege with condition
+sql close
+sql connect
+
+sql revoke read on test.st1 from wxy;
+sql grant read on test.st1 with id = 1 to wxy;
+
+sql close
+sql connect wxy
+
+sql reset query cache;
+sql select * from test.st1;
+if $rows != 1 then
+ return -1
+endi
+sql_error insert into test.st1s1 values(now, 10);
+sql_error insert into test.st1s2 values(now, 20);
+sql_error select * from test.st2;
+sql_error insert into test.st2s1 values(now, 10) test.st2s2 values(now, 20);
+
+print =============== case 4: database unauthorized and table write privilege
+sql close
+sql connect
+
+sql revoke read on test.st1 with id = 1 from wxy;
+sql grant write on test.st1 to wxy;
+
+sql close
+sql connect wxy
+
+sql reset query cache;
+sql_error select tbname, * from test.st1;
+sql insert into test.st1s1 values(now, 10);
+sql insert into test.st1s2 values(now, 20);
+sql_error select * from test.st2;
+sql_error insert into test.st2s1 values(now, 10) test.st2s2 values(now, 20);
+
+print =============== case 5: database unauthorized and table write privilege with condition
+sql close
+sql connect
+
+sql revoke write on test.st1 from wxy;
+sql grant write on test.st1 with id = 1 to wxy;
+
+sql close
+sql connect wxy
+
+sql reset query cache;
+sql_error select tbname, * from test.st1;
+sql insert into test.st1s1 values(now, 10);
+sql insert into test.st1s3 using test.st1 tags(1, 'dachang') values(now, 100);
+sql_error insert into test.st1s2 values(now, 20);
+sql_error insert into test.st1s4 using test.st1 tags(3, 'dachang') values(now, 300);
+sql_error select * from test.st2;
+sql_error insert into test.st2s1 values(now, 10) test.st2s2 values(now, 20);
+
+print =============== case 6: database read privilege and table unauthorized
+sql close
+sql connect
+
+sql revoke write on test.st1 with id = 1 from wxy;
+sql grant read on test.* to wxy;
+
+sql close
+sql connect wxy
+
+sql reset query cache;
+sql select * from test.st1;
+if $rows != 6 then
+ return -1
+endi
+sql_error insert into test.st1s1 values(now, 10) test.st1s2 values(now, 20);
+sql select * from test.st2;
+if $rows != 2 then
+ return -1
+endi
+sql_error insert into test.st2s1 values(now, 10) test.st2s2 values(now, 20);
+
+print =============== case 7: database read privilege and table read privilege
+sql close
+sql connect
+
+sql grant read on test.st1 to wxy;
+
+sql close
+sql connect wxy
+
+sql reset query cache;
+sql select * from test.st1;
+if $rows != 6 then
+ return -1
+endi
+sql_error insert into test.st1s1 values(now, 10) test.st1s2 values(now, 20);
+sql select * from test.st2;
+if $rows != 2 then
+ return -1
+endi
+sql_error insert into test.st2s1 values(now, 10) test.st2s2 values(now, 20);
+
+print =============== case 8: database read privilege and table read privilege with condition
+sql close
+sql connect
+
+sql revoke read on test.st1 from wxy;
+sql grant read on test.st1 with id = 1 to wxy;
+
+sql close
+sql connect wxy
+
+sql reset query cache;
+sql select * from test.st1;
+if $rows != 4 then
+ return -1
+endi
+sql_error insert into test.st1s1 values(now, 10) test.st1s2 values(now, 20);
+sql select * from test.st2;
+if $rows != 2 then
+ return -1
+endi
+sql_error insert into test.st2s1 values(now, 10) test.st2s2 values(now, 20);
+
+print =============== case 9: database read privilege and table write privilege
+sql close
+sql connect
+
+sql revoke read on test.st1 with id = 1 from wxy;
+sql grant write on test.st1 to wxy;
+
+sql close
+sql connect wxy
+
+sql reset query cache;
+sql select * from test.st1;
+if $rows != 6 then
+ return -1
+endi
+sql insert into test.st1s1 values(now, 10) test.st1s2 values(now, 20);
+sql select * from test.st2;
+if $rows != 2 then
+ return -1
+endi
+sql_error insert into test.st2s1 values(now, 10) test.st2s2 values(now, 20);
+
+print =============== case 10: database read privilege and table write privilege with condition
+sql close
+sql connect
+
+sql revoke write on test.st1 from wxy;
+sql grant write on test.st1 with id = 1 to wxy;
+
+sql close
+sql connect wxy
+
+sql reset query cache;
+sql select * from test.st1;
+if $rows != 8 then
+ return -1
+endi
+sql insert into test.st1s1 values(now, 10);
+sql_error insert into test.st1s2 values(now, 20);
+sql select * from test.st2;
+if $rows != 2 then
+ return -1
+endi
+sql_error insert into test.st2s1 values(now, 10) test.st2s2 values(now, 20);
+
+print =============== case 11: database write privilege and table unauthorized
+sql close
+sql connect
+
+sql revoke read on test.* from wxy;
+sql revoke write on test.st1 with id = 1 from wxy;
+sql grant write on test.* to wxy;
+
+sql close
+sql connect wxy
+
+sql reset query cache;
+sql_error select * from test.st1;
+sql insert into test.st1s1 values(now, 10) test.st1s2 values(now, 20);
+sql_error select * from test.st2;
+sql insert into test.st2s1 values(now, 10) test.st2s2 values(now, 20);
+
+print =============== case 12: database write privilege and table read privilege
+sql close
+sql connect
+
+sql grant read on test.st1 to wxy;
+
+sql close
+sql connect wxy
+
+sql reset query cache;
+sql select * from test.st1;
+if $rows != 11 then
+ return -1
+endi
+sql insert into test.st1s1 values(now, 10) test.st1s2 values(now, 20);
+sql_error select * from test.st2;
+sql insert into test.st2s1 values(now, 10) test.st2s2 values(now, 20);
+
+print =============== case 13: database write privilege and table read privilege with condition
+sql close
+sql connect
+
+sql revoke read on test.st1 from wxy;
+sql grant read on test.st1 with id = 1 to wxy;
+
+sql close
+sql connect wxy
+
+sql reset query cache;
+sql select * from test.st1;
+if $rows != 8 then
+ return -1
+endi
+sql insert into test.st1s1 values(now, 10) test.st1s2 values(now, 20);
+sql_error select * from test.st2;
+sql insert into test.st2s1 values(now, 10) test.st2s2 values(now, 20);
+
+print =============== case 14: database write privilege and table write privilege
+sql close
+sql connect
+
+sql revoke read on test.st1 with id = 1 from wxy;
+sql grant write on test.st1 to wxy;
+
+sql close
+sql connect wxy
+
+sql reset query cache;
+sql_error select * from test.st1;
+sql insert into test.st1s1 values(now, 10) test.st1s2 values(now, 20);
+sql_error select * from test.st2;
+sql insert into test.st2s1 values(now, 10) test.st2s2 values(now, 20);
+
+print =============== case 15: database write privilege and table write privilege with condition
+sql close
+sql connect
+
+sql revoke write on test.st1 from wxy;
+sql grant write on test.st1 with id = 1 to wxy;
+
+sql close
+sql connect wxy
+
+sql reset query cache;
+sql_error select * from test.st1;
+sql insert into test.st1s1 values(now, 10);
+sql_error insert into test.st1s2 values(now, 20);
+sql_error select * from test.st2;
+sql insert into test.st2s1 values(now, 10) test.st2s2 values(now, 20);
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/win-test-file b/tests/script/win-test-file
index 2d5a1b3108..3b358993fa 100644
--- a/tests/script/win-test-file
+++ b/tests/script/win-test-file
@@ -3,6 +3,7 @@
./test.sh -f tsim/user/privilege_db.sim
./test.sh -f tsim/user/privilege_sysinfo.sim
./test.sh -f tsim/user/privilege_topic.sim
+./test.sh -f tsim/user/privilege_table.sim
./test.sh -f tsim/db/alter_option.sim
rem ./test.sh -f tsim/db/alter_replica_13.sim
./test.sh -f tsim/db/alter_replica_31.sim
diff --git a/tests/system-test/0-others/compatibility.py b/tests/system-test/0-others/compatibility.py
index 33d1dac4b5..22e319fdaf 100644
--- a/tests/system-test/0-others/compatibility.py
+++ b/tests/system-test/0-others/compatibility.py
@@ -17,12 +17,12 @@ from util.dnodes import TDDnode
from util.cluster import *
import subprocess
-BASEVERSION = "3.0.1.8"
+BASEVERSION = "3.0.2.3"
class TDTestCase:
def caseDescription(self):
- '''
+ f'''
3.0 data compatibility test
- case1: basedata version is 3.0.1.8
+ case1: basedata version is {BASEVERSION}
'''
return
diff --git a/tests/system-test/0-others/multilevel.py b/tests/system-test/0-others/multilevel.py
new file mode 100644
index 0000000000..7ad4eba645
--- /dev/null
+++ b/tests/system-test/0-others/multilevel.py
@@ -0,0 +1,263 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import *
+from util.sqlset import *
+
+class TDTestCase:
+ def init(self, conn, logSql, replicaVar=1):
+
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+ self.setsql = TDSetSql()
+
+ def basic(self):
+ tdLog.info("============== basic test ===============")
+ cfg={
+ '/mnt/data1' : 'dataDir',
+ '/mnt/data2 0 0' : 'dataDir'
+ }
+ tdSql.createDir('/mnt/data1')
+ tdSql.createDir('/mnt/data2')
+
+ tdLog.info("================= step1")
+ tdDnodes.stop(1)
+ tdDnodes.deploy(1,cfg)
+ tdDnodes.start(1)
+
+ tdLog.info("================= step2")
+ tdSql.haveFile('/mnt/data1/',1)
+ tdSql.haveFile('/mnt/data2/',0)
+ tdDnodes.stop(1)
+ def dir_not_exist(self):
+ tdLog.info("============== dir_not_exist test ===============")
+ cfg={
+ '/mnt/data1 0 0' : 'dataDir',
+ '/mnt/data2 0 0' : 'dataDir'
+ }
+ tdSql.createDir('/mnt/data1')
+ os.system('rm -rf /mnt/data2')
+
+
+ tdLog.info("================= step1")
+ tdDnodes.deploy(1,cfg)
+ tdDnodes.startWithoutSleep(1)
+
+ tdLog.info("================= step2")
+ tdSql.taosdStatus(0)
+
+ def dir_permission_denied(self):
+ tdDnodes.stop(1)
+ tdLog.info("============== dir_permission_denied test ===============")
+ cfg={
+ '/mnt/data1 0 0' : 'dataDir',
+ '/mnt/data2 0 0' : 'dataDir'
+ }
+ tdSql.createDir('/mnt/data1')
+ tdSql.createDir('/mnt/data2')
+ os.system('chmod 111 /mnt/data2')
+
+ tdLog.info("================= step1")
+ tdDnodes.deploy(1,cfg)
+ tdDnodes.startWithoutSleep(1)
+
+ tdLog.info("================= step2")
+ tdSql.taosdStatus(0)
+
+ def file_distribution_same_level(self):
+ tdLog.info("============== file_distribution_same_level test ===============")
+ dataDir = ['data00','data01','data02','data03','data04']
+ dataDict = {'data00':0,'data01':0,'data02':0,'data03':0,'data04':0}
+ tdDnodes.stop(1)
+ self.ntables = 1000
+ self.ts = 1520000010000
+ tdLog.info("================= step1")
+ cfg={
+ '/mnt/data00 0 1' : 'dataDir',
+ '/mnt/data01 0 0' : 'dataDir',
+ '/mnt/data02 0 0' : 'dataDir',
+ '/mnt/data03 0 0' : 'dataDir',
+ '/mnt/data04 0 0' : 'dataDir'
+ }
+ dir_list = ['/mnt/data00','/mnt/data01','/mnt/data02','/mnt/data03','/mnt/data04']
+ for i in dir_list:
+ tdSql.createDir(i)
+ tdDnodes.deploy(1,cfg)
+ tdDnodes.start(1)
+
+ tdSql.execute("create database test duration 1")
+ tdSql.execute("use test")
+
+ tdSql.execute("create table stb(ts timestamp, c int) tags(t int)")
+
+ for i in range(self.ntables):
+ tdSql.execute("create table tb%d using stb tags(%d)" %(i, i))
+ tdSql.execute("insert into tb%d values(%d, 1)" % (i,self.ts + int (i / 100) * 86400000))
+
+ tdLog.info("================= step2")
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+ tdSql.query("select * from test.stb")
+ tdSql.checkRows(1000)
+ tdLog.info("================= step3")
+ tdSql.execute('drop database test')
+ for i in range(50):
+ tdSql.execute("create database test%d duration 1" %(i))
+ tdSql.execute("use test%d" %(i))
+ tdSql.execute("create table tb (ts timestamp,i int)")
+ for j in range(10):
+ tdSql.execute("insert into tb values(%d, 1)" % (self.ts + int (i / 100) * 86400000))
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+ flag = True
+ for i in range(4):
+ if dataDict[dataDir[i]] == dataDict[dataDir[i+1]]:
+ flag = flag & True
+ else:
+ flag = flag & False
+ break
+        if not flag: tdLog.exit("%s failed, expected result did not occur" % (sys.argv[0]))
+
+ def three_level_basic(self):
+ tdLog.info("============== three_level_basic test ===============")
+ tdDnodes.stop(1)
+ # Test1 1 dataDir
+ cfg={
+ '/mnt/data000 0 1' : 'dataDir',
+ '/mnt/data001 0 0' : 'dataDir',
+ '/mnt/data002 0 0' : 'dataDir',
+ '/mnt/data010 1 0' : 'dataDir',
+ '/mnt/data011 1 0' : 'dataDir',
+ '/mnt/data012 1 0' : 'dataDir',
+ '/mnt/data020 2 0' : 'dataDir',
+ '/mnt/data021 2 0' : 'dataDir',
+ '/mnt/data022 2 0' : 'dataDir'
+ }
+        dir_list = ['/mnt/data000','/mnt/data001','/mnt/data002','/mnt/data010','/mnt/data011','/mnt/data012','/mnt/data020','/mnt/data021','/mnt/data022']
+ for i in dir_list:
+ tdSql.createDir(i)
+
+ tdDnodes.deploy(1,cfg)
+ tdDnodes.start(1)
+ for i in dir_list:
+ if i == '/mnt/data000':
+ tdSql.haveFile(i,1)
+ else:
+ tdSql.haveFile(i,0)
+
+ def more_than_16_disks(self):
+ tdLog.info("============== more_than_16_disks test ===============")
+ cfg={}
+ for i in range(17):
+ if i == 0 :
+ datadir = '/mnt/data%d 0 1' % (i+1)
+ else:
+ datadir = '/mnt/data%d 0 0' % (i+1)
+ cfg.update({ datadir : 'dataDir' })
+ tdSql.createDir('/mnt/data%d' % (i+1))
+
+ tdLog.info("================= step1")
+ tdDnodes.stop(1)
+ tdDnodes.deploy(1,cfg)
+ tdDnodes.startWithoutSleep(1)
+
+ tdLog.info("================= step2")
+ tdSql.taosdStatus(0)
+
+ def missing_middle_level(self):
+ tdLog.info("============== missing_middle_level test ===============")
+ tdDnodes.stop(1)
+ # Test1 1 dataDir
+ cfg={
+ '/mnt/data1 1 0' : 'dataDir'
+ }
+ tdSql.createDir('/mnt/data1')
+
+ tdDnodes.deploy(1,cfg)
+ tdDnodes.startWithoutSleep(1)
+
+ tdSql.taosdStatus(0)
+ tdDnodes.stop(1)
+ # Test2 2 dataDir
+ cfg = {
+ '/mnt/data1 0 1' : 'dataDir',
+ '/mnt/data1 2 0' : 'dataDir'
+ }
+ tdSql.createDir('/mnt/data1')
+
+ tdDnodes.deploy(1,cfg)
+ tdDnodes.startWithoutSleep(1)
+
+ tdSql.taosdStatus(0)
+
+ def trim_database(self):
+ tdLog.info("============== trim_database test ===============")
+ tdDnodes.stop(1)
+ cfg = {
+ '/mnt/data1 0 1' : 'dataDir'
+
+ }
+ tdSql.createDir('/mnt/data1')
+ tdDnodes.deploy(1,cfg)
+ tdDnodes.start(1)
+
+ tdSql.execute('create database dbtest')
+ tdSql.execute('use dbtest')
+ tdSql.execute('create table stb (ts timestamp,c0 int) tags(t0 int)')
+ tdSql.execute('create table tb1 using stb tags(1)')
+ for i in range(10,30):
+ tdSql.execute(f'insert into tb1 values(now-{i}d,10)')
+ tdSql.execute('flush database dbtest')
+ tdSql.haveFile('/mnt/data1/',1)
+ tdDnodes.stop(1)
+ cfg={
+ '/mnt/data1 0 1' : 'dataDir',
+ '/mnt/data2 1 0' : 'dataDir',
+ '/mnt/data3 2 0' : 'dataDir',
+ }
+ tdSql.createDir('/mnt/data2')
+ tdSql.createDir('/mnt/data3')
+ tdDnodes.deploy(1,cfg)
+ tdDnodes.start(1)
+ tdSql.haveFile('/mnt/data1/',1)
+ tdSql.haveFile('/mnt/data2/',0)
+ tdSql.haveFile('/mnt/data3/',0)
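+        # Shorten the level-0 keep and trim: rows older than 10 days should be migrated off the
+        # level-0 directory, so after the trim files are expected to appear under /mnt/data2 as well.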
+ tdSql.execute('alter database dbtest keep 10d,365d,3650d')
+ tdSql.execute('trim database dbtest')
+ time.sleep(3)
+ tdSql.haveFile('/mnt/data1/',1)
+ tdSql.haveFile('/mnt/data2/',1)
+
+ def run(self):
+ self.basic()
+ self.dir_not_exist()
+ self.dir_permission_denied()
+ self.file_distribution_same_level()
+ self.three_level_basic()
+ self.more_than_16_disks()
+ self.trim_database()
+ self.missing_middle_level()
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/alter_replica.py b/tests/system-test/1-insert/alter_replica.py
new file mode 100644
index 0000000000..900b64d943
--- /dev/null
+++ b/tests/system-test/1-insert/alter_replica.py
@@ -0,0 +1,114 @@
+import taos
+import sys
+import time
+import socket
+import os
+import threading
+import datetime
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def checkVgroups(self, dbName, vgNum):
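+        # Poll `show <db>.vgroups` until every vgroup reports exactly one leader and two
+        # followers (replica 3), retrying once per second for up to vgNum*60 seconds.
+        # Returns 0 on success and -1 on timeout.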
+ sleepNum = vgNum * 60
+ flag = 0
+ while (sleepNum > 0):
+ sql = f'show {dbName}.vgroups'
+ tdSql.query(sql)
+ flag = 0
+ for vgid in range (vgNum) :
+ v1_status = tdSql.queryResult[vgid][4]
+ v2_status = tdSql.queryResult[vgid][6]
+ v3_status = tdSql.queryResult[vgid][8]
+ if ((v1_status == 'leader') and (v2_status == 'follower') and (v3_status == 'follower')) \
+ or ((v2_status == 'leader') and (v1_status == 'follower') and (v3_status == 'follower')) \
+ or ((v3_status == 'leader') and (v2_status == 'follower') and (v1_status == 'follower')):
+ continue
+ else:
+ sleepNum = sleepNum - 1
+ time.sleep(1)
+ flag = 1
+ break
+ if (0 == flag):
+ return 0
+ tdLog.debug("vgroup[%d] status: %s, %s, %s" %(vgid,v1_status,v2_status,v3_status))
+ return -1
+
+ def alter_replica(self):
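+        # Three scenarios are covered: altering replica on an empty database, after creating a
+        # stable/child table and inserting rows, and after additionally dropping the stable.
+        # Each run waits until all vgroups reach the one-leader/two-follower state.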
+ # create db and alter replica
+        tdLog.debug("====alter db replica 1====")
+ vgNum = 3
+ dbName = 'db1'
+ sql = f'create database {dbName} vgroups {vgNum}'
+ tdSql.execute(sql)
+ sql = f'alter database {dbName} replica 3'
+ tdSql.execute(sql)
+ tdLog.debug("start check time: %s"%(str(datetime.datetime.now())))
+ res = self.checkVgroups(dbName, vgNum)
+ tdLog.debug("end check time: %s"%(str(datetime.datetime.now())))
+ if (0 != res):
+ tdLog.exit(f'fail: alter database {dbName} replica 3')
+
+ # create db, stable, child tables, and insert data, then alter replica
+        tdLog.debug("====alter db replica 2====")
+ dbName = 'db2'
+ sql = f'create database {dbName} vgroups {vgNum}'
+ tdSql.execute(sql)
+ sql = f'use {dbName}'
+ tdSql.execute(sql)
+ sql = f'create stable stb (ts timestamp, c int) tags (t int)'
+ tdSql.execute(sql)
+ sql = f'create table ctb using stb tags (1)'
+ tdSql.execute(sql)
+ sql = f'insert into ctb values (now, 1) (now+1s, 2) (now+2s, 3)'
+ tdSql.execute(sql)
+ sql = f'alter database {dbName} replica 3'
+ tdSql.execute(sql)
+ tdLog.debug("start check time: %s"%(str(datetime.datetime.now())))
+ res = self.checkVgroups(dbName, vgNum)
+ tdLog.debug("end check time: %s"%(str(datetime.datetime.now())))
+ if (0 != res):
+ tdLog.exit(f'fail: alter database {dbName} replica 3')
+
+ # firstly create db, stable, child tables, and insert data, then drop stable, and then alter replica
+        tdLog.debug("====alter db replica 3====")
+ dbName = 'db3'
+ sql = f'create database {dbName} vgroups {vgNum}'
+ tdSql.execute(sql)
+ sql = f'use {dbName}'
+ tdSql.execute(sql)
+ sql = f'create stable stb (ts timestamp, c int) tags (t int)'
+ tdSql.execute(sql)
+ sql = f'create table ctb using stb tags (1)'
+ tdSql.execute(sql)
+ sql = f'insert into ctb values (now, 1) (now+1s, 2) (now+2s, 3)'
+ tdSql.execute(sql)
+ sql = f'drop table stb'
+ tdSql.execute(sql)
+ sql = f'alter database {dbName} replica 3'
+ tdSql.execute(sql)
+ tdLog.debug("start check time: %s"%(str(datetime.datetime.now())))
+ res = self.checkVgroups(dbName, vgNum)
+ tdLog.debug("end check time: %s"%(str(datetime.datetime.now())))
+ if (0 != res):
+ tdLog.exit(f'fail: alter database {dbName} replica 3')
+
+ def run(self):
+ self.alter_replica()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/database_pre_suf.py b/tests/system-test/1-insert/database_pre_suf.py
index a6ff95ab3f..2e993b9a40 100755
--- a/tests/system-test/1-insert/database_pre_suf.py
+++ b/tests/system-test/1-insert/database_pre_suf.py
@@ -24,9 +24,7 @@ from util.dnodes import tdDnodes
from util.dnodes import *
class TDTestCase:
- updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143}
+ updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 135}
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
@@ -39,7 +37,9 @@ class TDTestCase:
self.db = "pre_suf"
- def dropandcreateDB_random(self,database,n,vgroups,table_prefix,table_suffix,check_result):
+ def dropandcreateDB_random(self,database,n,vgroups,table_prefix,table_suffix,check_result_positive,check_result_negative):
+        #check_result_positive checks the case where the table prefix/suffix values are positive, check_result_negative checks the case where they are negative (TS-3249)
+ tdLog.info(f"create start:n:{n},vgroups:{vgroups},table_prefix:{table_prefix},table_suffix:{table_suffix},check_result_positive:{check_result_positive},check_result_negative:{check_result_negative}")
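+        # Roughly: positive table_prefix/table_suffix values give the number of leading/trailing
+        # characters of a table name that are ignored when the name is hashed to a vgroup, while
+        # negative values (the TS-3249 case) mean only that part of the name is used; either way,
+        # tables sharing the relevant part should land in the same vgroup, which is what the
+        # checks further down verify.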
ts = 1630000000000
num_random = 100
fake = Faker('zh_CN')
@@ -56,6 +56,7 @@ class TDTestCase:
q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''')
+ #positive
for i in range(10*n):
tdSql.execute('''create table bj_%d (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp ) ;'''%i)
tdSql.execute('''create table sh_%d (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp ) ;'''%i)
@@ -106,11 +107,60 @@ class TDTestCase:
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
-
+ #negative
+ for i in range(10*n):
+ tdSql.execute('''create table bj_table_%d_r_negative (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp ) ;'''%i)
+ tdSql.execute('''create table sh_table_%d_r_negative (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp ) ;'''%i)
+ tdSql.execute('''create table hn_table_%d_r_negative \
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+ q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+ q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+ q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;'''%i)
+
+ tdSql.execute('''create table bj_stable_1_%d_negative using stable_1 tags('bj_stable_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(i,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+ tdSql.execute('''create table sh_table_%d_a_negative using stable_1 tags('sh_a_table_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(i,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+ tdSql.execute('''create table sh_table_%d_b_negative using stable_1 tags('sh_b_table_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(i,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+ tdSql.execute('''create table sh_table_%d_c_negative using stable_1 tags('sh_c_table_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(i,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+
+ tdSql.execute('''create table bj_table_%d_a_negative using stable_1 tags('bj_a_table_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(i,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+ tdSql.execute('''create table bj_table_%d_b_negative using stable_1 tags('bj_b_table_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(i,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+ tdSql.execute('''create table bj_table_%d_c_negative using stable_1 tags('bj_c_table_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(i,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+
+
+ tdSql.execute('''create table tj_table_%d_a_negative using stable_2 tags('tj_a_table_2_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(i,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+ tdSql.execute('''create table tj_table_%d_b_negative using stable_2 tags('tj_b_table_2_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(i,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+
# create stream
tdSql.execute('''create stream current_stream trigger at_once IGNORE EXPIRED 0 into stream_max_stable_1 as select _wstart as startts, _wend as wend, max(q_int) as max_int, min(q_bigint) as min_int from stable_1 where ts is not null interval (5s);''')
- # insert data
+ # insert data positive
for i in range(num_random*n):
tdSql.execute('''insert into bj_stable_1_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double , q_bool , q_binary , q_nchar, q_ts,\
q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
@@ -180,8 +230,60 @@ class TDTestCase:
fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i, fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
+ # insert data negative
+ for i in range(num_random*n):
+ tdSql.execute('''insert into bj_stable_1_1_negative (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double , q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1),
+ fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
+ fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
+
+ tdSql.execute('''insert into bj_stable_1_2_negative (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8)\
+ values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000, fake.random_int(min=0, max=2147483647, step=1),
+ fake.random_int(min=0, max=9223372036854775807, step=1),
+ fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i, fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
+ fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
+
+ tdSql.execute('''insert into bj_stable_1_3_negative (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000, fake.random_int(min=-0, max=2147483647, step=1),
+ fake.random_int(min=-0, max=9223372036854775807, step=1),
+ fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i, fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
+ fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
+
+ tdSql.execute('''insert into bj_stable_1_4_negative (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000 +1, fake.random_int(min=-0, max=2147483647, step=1),
+ fake.random_int(min=-0, max=9223372036854775807, step=1),
+ fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i, fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
+ fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
+
+ tdSql.execute('''insert into bj_stable_1_5_negative (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000 +10, fake.random_int(min=-0, max=2147483647, step=1),
+ fake.random_int(min=-0, max=9223372036854775807, step=1),
+ fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i, fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
+ fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
+
tdSql.query("select count(*) from stable_1;")
- tdSql.checkData(0,0,5*num_random*n)
+ tdSql.checkData(0,0,10*num_random*n)
tdSql.query("select count(*) from hn_table_1_r;")
tdSql.checkData(0,0,num_random*n)
@@ -220,39 +322,28 @@ class TDTestCase:
tdSql.query(" select * from information_schema.ins_databases where name = '%s';" %database)
- print(tdSql.queryResult)
+ tdLog.info(tdSql.queryResult)
- # tdSql.query(" select table_prefix,table_suffix from information_schema.ins_databases where name = '%s';" %database)
- # print(tdSql.queryResult)
- #TD-19082
-
- #tdSql.query(" select * from information_schema.ins_tables where db_name = '%s';" %database)
- #print(tdSql.queryResult)
-
- tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s';" %database)
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' limit 3;" %database)
queryRows = len(tdSql.queryResult)
for i in range(queryRows):
- print("row=%d, vgroup_id=%s, tbname=%s " %(i,tdSql.queryResult[i][1],tdSql.queryResult[i][0]))
+ tdLog.info("row=%d, vgroup_id=%s, tbname=%s " %(i,tdSql.queryResult[i][1],tdSql.queryResult[i][0]))
tdLog.info("\n=============flush database ====================\n")
tdSql.execute(" flush database %s;" %database)
tdSql.query(" select * from information_schema.ins_databases where name = '%s';" %database)
- print(tdSql.queryResult)
-
- # tdSql.query(" select table_prefix,table_suffix from information_schema.ins_databases where name = '%s';" %database)
- # print(tdSql.queryResult)
- #TD-19082
-
- tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s';" %database)
+ tdLog.info(tdSql.queryResult)
+
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' limit 3;" %database)
queryRows = len(tdSql.queryResult)
for i in range(queryRows):
- print("row=%d, vgroup_id=%s, tbname=%s " %(i,tdSql.queryResult[i][1],tdSql.queryResult[i][0]))
+ tdLog.info("row=%d, vgroup_id=%s, tbname=%s " %(i,tdSql.queryResult[i][1],tdSql.queryResult[i][0]))
# check in one vgroup
- if check_result == 'Y':
+ if check_result_positive == 'Y':
#base table : sh_table_0_a
tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='sh_table_0_a';" %(database))
base_value_table_name = tdSql.queryResult[0][0]
@@ -324,8 +415,100 @@ class TDTestCase:
tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='tj_table_%d_b';" %(database,i))
self.value_check(base_value_table_name,base_value_table_vgroup)
+ elif check_result_negative == 'Y':
+ #base table : sh_table_0_a
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='sh_table_0_a_negative';" %(database))
+ base_value_table_name = tdSql.queryResult[0][0]
+ base_value_table_vgroup = tdSql.queryResult[0][1]
+
+ #check table :sh_table_i_a
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'sh_table_%%_a_negative';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='sh_table_%d_a_negative';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #check table :sh_table_i_b
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'sh_table_%%_b_negative';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='sh_table_%d_b_negative';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #check table :sh_table_i_c
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'sh_table_%%_c_negative';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='sh_table_%d_c_negative';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #check table :sh_table_i_r
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'sh_table_%%_r_negative';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='sh_table_%d_r_negative';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #base table : bj_table_0_a
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='bj_table_0_a_negative';" %(database))
+ base_value_table_name = tdSql.queryResult[0][0]
+ base_value_table_vgroup = tdSql.queryResult[0][1]
+
+ #check table :bj_table_i_a
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'bj_table_%%_a_negative';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='bj_table_%d_a_negative';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #check table :bj_table_i_b
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'bj_table_%%_b_negative';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='bj_table_%d_b_negative';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #check table :bj_table_i_c
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'bj_table_%%_c_negative';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='bj_table_%d_c_negative';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #check table :bj_table_i_r
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'bj_table_%%_r_negative';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='bj_table_%d_r_negative';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #base table : hn_table_0_r
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='hn_table_0_r_negative';" %(database))
+ base_value_table_name = tdSql.queryResult[0][0]
+ base_value_table_vgroup = tdSql.queryResult[0][1]
+
+ #check table :hn_table_i_r
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'hn_table_%%_r_negative';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='hn_table_%d_r_negative';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+
+ #base table : tj_table_0_r
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='tj_table_0_a_negative';" %(database))
+ base_value_table_name = tdSql.queryResult[0][0]
+ base_value_table_vgroup = tdSql.queryResult[0][1]
+
+ #check table :tj_table_i_a
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'tj_table_%%_a_negative';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='tj_table_%d_a_negative';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #check table :tj_table_i_b
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'tj_table_%%_b_negative';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='tj_table_%d_b_negative';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+
else:
pass
+
+ tdLog.info(f"create end:n:{n},vgroups:{vgroups},table_prefix:{table_prefix},table_suffix:{table_suffix},check_result_positive:{check_result_positive},check_result_negative:{check_result_negative}")
+
def value_check(self,base_value_table_name,base_value_table_vgroup):
check_value_table_name = tdSql.queryResult[0][0]
@@ -348,17 +531,28 @@ class TDTestCase:
os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
- #(self,database,n,vgroups,table_prefix,table_suffix)
- self.dropandcreateDB_random("%s" %self.db, 1,2,0,0,'N')
- self.dropandcreateDB_random("%s" %self.db, 1,2,0,2,'N')
- self.dropandcreateDB_random("%s" %self.db, 1,2,2,0,'N')
- self.dropandcreateDB_random("%s" %self.db, 1,2,3,3,'Y')
- self.dropandcreateDB_random("%s" %self.db, 1,3,3,3,'Y')
- self.dropandcreateDB_random("%s" %self.db, 1,4,4,4,'Y')
- self.dropandcreateDB_random("%s" %self.db, 1,5,5,5,'Y')
+ #(self,database,n,vgroups,table_prefix,table_suffix,check_result_positive,check_result_negative):
+        #check_result_positive checks the case where the table prefix/suffix values are positive, check_result_negative checks the case where they are negative (TS-3249)
+ # self.dropandcreateDB_random("%s" %self.db, 1,2,0,0,'N','N')
+ # self.dropandcreateDB_random("%s" %self.db, 1,2,0,2,'N','N')
+ # self.dropandcreateDB_random("%s" %self.db, 1,2,2,0,'N','N')
+
+ self.dropandcreateDB_random("%s" %self.db, 1,random.randint(1,5),random.randint(0,3),random.randint(0,3),'N','N')
+ self.dropandcreateDB_random("%s" %self.db, 1,random.randint(1,5),random.randint(-10,0),random.randint(-10,0),'N','N')
+ self.dropandcreateDB_random("%s" %self.db, 1,random.randint(1,5),random.randint(-191,0),random.randint(-191,0),'N','N')
+ self.dropandcreateDB_random("%s" %self.db, 1,random.randint(1,5),random.randint(0,100),random.randint(0,91),'N','N')
+
+ # self.dropandcreateDB_random("%s" %self.db, 1,2,3,3,'Y','N')
+ # self.dropandcreateDB_random("%s" %self.db, 1,3,3,3,'Y','N')
+ # self.dropandcreateDB_random("%s" %self.db, 1,4,4,4,'Y','N')
+ # self.dropandcreateDB_random("%s" %self.db, 1,5,5,5,'Y','N')
+ self.dropandcreateDB_random("%s" %self.db, 1,random.randint(1,5),random.randint(3,5),random.randint(3,5),'Y','N')
+
+ self.dropandcreateDB_random("%s" %self.db, 1,random.randint(1,5),random.randint(-5,-1),0,'N','Y')
+ self.dropandcreateDB_random("%s" %self.db, 1,random.randint(1,5),random.randint(-5,-1),random.randint(-9,-0),'N','Y')
- #taos -f sql
+ # #taos -f sql
print("taos -f sql start!")
taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename)
_ = subprocess.check_output(taos_cmd1, shell=True)
diff --git a/tests/system-test/1-insert/delete_systable.py b/tests/system-test/1-insert/delete_systable.py
new file mode 100644
index 0000000000..40422a7515
--- /dev/null
+++ b/tests/system-test/1-insert/delete_systable.py
@@ -0,0 +1,111 @@
+
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import random
+import string
+
+from numpy import logspace
+from util import constant
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import *
+from util.sqlset import TDSetSql
+
+info_schema_db = "information_schema"
+perf_schema_db = "performance_schema"
+
+info_schema_tables = [
+ "ins_dnodes",
+ "ins_mnodes",
+ "ins_modules",
+ "ins_qnodes",
+ "ins_snodes",
+ "ins_cluster",
+ "ins_databases",
+ "ins_functions",
+ "ins_indexes",
+ "ins_stables",
+ "ins_tables",
+ "ins_tags",
+ "ins_columns",
+ "ins_users",
+ "ins_grants",
+ "ins_vgroups",
+ "ins_configs",
+ "ins_dnode_variables",
+ "ins_topics",
+ "ins_subscriptions",
+ "ins_streams",
+ "ins_streams_tasks",
+ "ins_vnodes",
+ "ins_user_privileges"
+]
+
+perf_schema_tables = [
+ "perf_connections",
+ "perf_queries",
+ "perf_consumers",
+ "perf_trans",
+ "perf_apps"
+]
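+
+# information_schema and performance_schema are read-only system databases, so every
+# DELETE/DROP issued below goes through tdSql.error(), i.e. the statement is expected to
+# be rejected both before and after the dnode restart performed in run().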
+
+class TDTestCase:
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ def delete_systb(self):
+ tdSql.execute(f'use {info_schema_db}')
+ for i in info_schema_tables:
+ tdSql.error(f'delete from {i}')
+ tdSql.error(f'delete from {info_schema_db}.{i}')
+ tdSql.error(f'delete from {i} where ts >= 0')
+ tdSql.error(f'delete from {info_schema_db}.{i} where ts >= 0')
+
+ tdSql.execute(f'use {perf_schema_db}')
+ for i in perf_schema_tables:
+ tdSql.error(f'delete from {i}')
+ tdSql.error(f'delete from {perf_schema_db}.{i}')
+ tdSql.error(f'delete from {i} where ts >= 0')
+ tdSql.error(f'delete from {perf_schema_db}.{i} where ts >= 0')
+
+ def drop_systb(self):
+ tdSql.execute(f'use {info_schema_db}')
+ for i in info_schema_tables:
+ tdSql.error(f'drop table {i}')
+            tdSql.error(f'drop table {info_schema_db}.{i}')
+ tdSql.error(f'drop database {info_schema_db}')
+
+ tdSql.execute(f'use {perf_schema_db}')
+ for i in perf_schema_tables:
+ tdSql.error(f'drop table {i}')
+ tdSql.error(f'drop table {perf_schema_db}.{i}')
+ tdSql.error(f'drop database {perf_schema_db}')
+
+ def delete_from_systb(self):
+ self.delete_systb()
+ self.drop_systb()
+ def run(self):
+ self.delete_from_systb()
+ tdDnodes.stoptaosd(1)
+ tdDnodes.starttaosd(1)
+ self.delete_from_systb()
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/select_null.py b/tests/system-test/2-query/select_null.py
new file mode 100755
index 0000000000..68eea8bc67
--- /dev/null
+++ b/tests/system-test/2-query/select_null.py
@@ -0,0 +1,446 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import random
+import os
+import time
+import taos
+import subprocess
+from faker import Faker
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+from util.dnodes import tdDnodes
+from util.dnodes import *
+
+class TDTestCase:
+
+ def init(self, conn, logSql, replicaVar):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.testcasePath = os.path.split(__file__)[0]
+ self.testcaseFilename = os.path.split(__file__)[-1]
+ os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
+
+ self.db = "sel_null"
+
+ def insert_data(self,database,vgroups):
+ num_random = 10
+ tdSql.execute('''drop database if exists %s ;''' %database)
+ tdSql.execute('''create database %s keep 36500 vgroups %d PRECISION 'us';'''%(database,vgroups))
+ tdSql.execute('''use %s;'''%database)
+
+ tdSql.execute('''create stable %s.stb0 (ts timestamp , c0 int , c1 double , c0null int , c1null double ) tags( t0 tinyint , t1 varchar(16) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint , t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);'''%database)
+
+ for i in range(5):
+ tdSql.execute('''create table %s.stb0_%d using %s.stb0 tags(%d,'varchar%d',%d,%d, %d, %d,%d,'binary%d','nchar%d',%d,%d,%d ) ;'''%(database,i,database,i,i,i,i,i,i,i,i,i,i,i,i))
+
+ # insert data
+ for i in range(num_random):
+ for j in range(50):
+ tdSql.execute('''insert into %s.stb0_0 (ts , c1 , c0) values(now, %d, %d) ;''' % (database,j,j))
+ tdSql.execute('''insert into %s.stb0_1 (ts , c1 , c0) values(now, %d, %d) ;''' % (database,j,j))
+ tdSql.execute('''insert into %s.stb0_2 (ts , c1 , c0) values(now, %d, %d) ;''' % (database,j,j))
+ tdSql.execute('''insert into %s.stb0_3 (ts , c1 , c0) values(now, %d, %d) ;''' % (database,j,j))
+ tdSql.execute('''insert into %s.stb0_4 (ts , c1 , c0) values(now, %d, %d) ;''' % (database,j,j))
+
+ tdSql.query("select count(*) from %s.stb0;" %database)
+ tdSql.checkData(0,0,5*num_random*50)
+ tdSql.query("select count(*) from %s.stb0_0;"%database)
+ tdSql.checkData(0,0,num_random*50)
+
+ def ts_3085(self,database):
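+        # TS-3085 regression check: count() over a subquery with "limit offset,rows" must only
+        # count the rows the subquery actually returns; the *_null columns hold no data, so their
+        # counts stay 0 while count(c0)/count(c1) equal the limit size.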
+ sql = "select count(c0null) from(select * from %s.stb0 limit 20,4) "%(database)
+ tdSql.query(sql)
+ tdSql.checkData(0,0,0)
+
+ offset = random.randint(10,100)
+ for i in range(offset):
+ sql = "select count(c0null) from(select * from %s.stb0 limit %d,%d) "%(database,offset,i)
+ tdSql.query(sql)
+ tdSql.checkData(0,0,0)
+ sql = "select count(c1null) from(select * from %s.stb0 limit %d,%d) "%(database,offset,i)
+ tdSql.query(sql)
+ tdSql.checkData(0,0,0)
+ sql = "select count(c0) from(select * from %s.stb0 limit %d,%d) "%(database,offset,i)
+ tdSql.query(sql)
+ tdSql.checkData(0,0,i)
+ sql = "select count(c1) from(select * from %s.stb0 limit %d,%d) "%(database,offset,i)
+ tdSql.query(sql)
+ tdSql.checkData(0,0,i)
+ sql = "select count(t0) from(select * from %s.stb0 limit %d,%d) "%(database,offset,i)
+ tdSql.query(sql)
+ tdSql.checkData(0,0,i)
+ sql = "select count(t1) from(select * from %s.stb0 limit %d,%d) "%(database,offset,i)
+ tdSql.query(sql)
+ tdSql.checkData(0,0,i)
+
+
+ def ts_2974_max(self,database):
+ sql = "select max(c0) from %s.stb0 where ts= 1 or rows <= 4):
+ tdLog.info("sql checkrows success")
+ else:
+ tdLog.exit(f"checkEqual error, sql_rows=={rows}")
+
+
+ self.sql_query_time_cost(nest_sql)
+ rows = tdSql.queryRows
+ if (rows >= 1 or rows <= 4):
+ tdLog.info("sql checkrows success")
+ else:
+ tdLog.exit(f"checkEqual error, sql_rows=={rows}")
+
+ sql_0 = re.sub(r'\d+',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkRows(0)
+
+        nest_sql_0 = re.sub(r'\d\d',"0",nest_sql) # \d matches a digit character [0-9]; + matches one or more
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkRows(0)
+
+ sql = sql.replace('limit','slimit')
+ tdSql.error(sql)
+ nest_sql = nest_sql.replace('limit','slimit')
+ tdSql.error(nest_sql)
+
+ sql_0 = re.sub(r'\d+',"0",sql)
+ tdSql.error(sql_0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ tdSql.error(nest_sql_0)
+
+ def sql_limit_retun_tables_slimit_return_error(self,sql,num,tables,per_table_num,base_fun,replace_fun):
+        #sql limit n = tables; sql limit 0 = 0; sql slimit n = error; sql slimit 0 = error
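+        # Common pattern for these helpers: run the SQL as-is and wrapped in a subquery, rerun
+        # both with the numeric literals zeroed out via re.sub (limit 0 should return 0 rows),
+        # and finally with 'limit' swapped for 'slimit', asserting the row counts or errors
+        # encoded in the helper's name.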
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ self.sql_query_time_cost(sql)
+ tdSql.checkRows(tables)
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkRows(tables)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkRows(0)
+
+        nest_sql_0 = re.sub(r'\d\d',"0",nest_sql) # \d matches a digit character [0-9]; + matches one or more
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkRows(0)
+
+ sql = sql.replace('limit','slimit')
+ tdSql.error(sql)
+ nest_sql = nest_sql.replace('limit','slimit')
+ tdSql.error(nest_sql)
+
+ sql_0 = re.sub(r'\d+',"0",sql)
+ tdSql.error(sql_0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ tdSql.error(nest_sql_0)
+
+ def sql_limit_retun_tables_slimit_return_n(self,sql,num,tables,per_table_num,base_fun,replace_fun):
+        #sql limit n = tables; sql limit 0 = 0; sql slimit n = n; sql slimit 0 = 0
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ self.sql_query_time_cost(sql)
+ tdSql.checkRows(tables)
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkRows(tables)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkRows(0)
+
+        nest_sql_0 = re.sub(r'\d\d',"0",nest_sql) # \d matches a digit character [0-9]; + matches one or more
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkRows(0)
+
+ sql = sql.replace('limit','slimit')
+ self.sql_query_time_cost(sql)
+ tdSql.checkRows(num)
+ nest_sql = nest_sql.replace('limit','slimit')
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkRows(num)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkRows(0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkRows(0)
+
+ def sql_data_limit_retun_tables_slimit_return_n(self,sql,num,tables,per_table_num,base_fun,replace_fun):
+        #sql limit n = tables; sql limit 0 = 0; sql slimit n = n; sql slimit 0 = 0
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ self.sql_query_time_cost(sql)
+ tdSql.checkData(0,0,tables)
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkData(0,0,tables)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkData(0,0,0)
+
+        nest_sql_0 = re.sub(r'\d\d',"0",nest_sql) # \d matches a digit character [0-9]; + matches one or more
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkData(0,0,0)
+
+ sql = sql.replace('limit','slimit')
+ self.sql_query_time_cost(sql)
+ tdSql.checkData(0,0,num)
+ nest_sql = nest_sql.replace('limit','slimit')
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkData(0,0,num)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkData(0,0,0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkData(0,0,0)
+
+ def sql_limit_retun_n_slimit_return_tables(self,sql,num,tables,per_table_num,base_fun,replace_fun):
+        #sql limit n = n; sql limit 0 = 0; sql slimit n = tables; sql slimit 0 = 0
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ self.sql_query_time_cost(sql)
+ tdSql.checkRows(num)
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkRows(num)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkRows(0)
+
+        nest_sql_0 = re.sub(r'\d\d',"0",nest_sql) # \d matches a digit character [0-9]; + matches one or more
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkRows(0)
+
+ sql = sql.replace('limit','slimit')
+ self.sql_query_time_cost(sql)
+ tdSql.checkRows(tables)
+ nest_sql = nest_sql.replace('limit','slimit')
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkRows(tables)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkRows(0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkRows(0)
+
+ def sql_data_limit_retun_n_slimit_return_tables(self,sql,num,tables,per_table_num,base_fun,replace_fun):
+        #sql limit n = n; sql limit 0 = 0; sql slimit n = tables; sql slimit 0 = 0
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ self.sql_query_time_cost(sql)
+ tdSql.checkData(0,0,num)
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkData(0,0,num)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkData(0,0,0)
+
+        nest_sql_0 = re.sub(r'\d\d',"0",nest_sql) # \d matches a digit character [0-9]; + matches one or more
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkData(0,0,0)
+
+ sql = sql.replace('limit','slimit')
+ self.sql_query_time_cost(sql)
+ tdSql.checkData(0,0,tables)
+ nest_sql = nest_sql.replace('limit','slimit')
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkData(0,0,tables)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkData(0,0,0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkData(0,0,0)
+
+ def sql_limit_retun_tables_times_n_slimit_return_error(self,sql,num,tables,per_table_num,base_fun,replace_fun):
+        #sql limit n = n*tables; sql limit 0 = 0; sql slimit n = error; sql slimit 0 = error
+ #interval
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ self.sql_query_time_cost(sql)
+ tdSql.checkRows(num*tables)
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkRows(num*tables)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkRows(0)
+
+        nest_sql_0 = re.sub(r'\d\d',"0",nest_sql) # \d matches a digit character [0-9]; + matches one or more
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkRows(0)
+
+ sql = sql.replace('limit','slimit')
+ tdSql.error(sql)
+ nest_sql = nest_sql.replace('limit','slimit')
+ tdSql.error(nest_sql)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ tdSql.error(sql_0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ tdSql.error(nest_sql_0)
+
+ def sql_limit_retun_tables_times_n_slimit_return_per_table_num_times_n(self,sql,num,tables,per_table_num,base_fun,replace_fun):
+        #sql limit n = n*tables; sql limit 0 = 0; sql slimit n = per_table_num*n; sql slimit 0 = 0
+ #interval
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ self.sql_query_time_cost(sql)
+ tdSql.checkRows(num*tables)
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkRows(num*tables)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkRows(0)
+
+        nest_sql_0 = re.sub(r'\d\d',"0",nest_sql) # \d matches a digit character [0-9]; + matches one or more
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkRows(0)
+
+ sql = sql.replace('limit','slimit')
+ self.sql_query_time_cost(sql)
+ tdSql.checkRows(num*per_table_num)
+ nest_sql = nest_sql.replace('limit','slimit')
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkRows(num*per_table_num)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkRows(0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkRows(0)
+
+ def sql_data_limit_retun_tables_times_n_slimit_return_per_table_num_times_n(self,sql,num,tables,per_table_num,base_fun,replace_fun):
+        #sql limit n = n*tables; sql limit 0 = 0; sql slimit n = per_table_num*n; sql slimit 0 = 0
+ #interval
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ self.sql_query_time_cost(sql)
+ tdSql.checkData(0,0,num*tables)
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkData(0,0,num*tables)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkData(0,0,0)
+
+        nest_sql_0 = re.sub(r'\d\d',"0",nest_sql) # \d matches a digit character [0-9]; + matches one or more
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkData(0,0,0)
+
+ sql = sql.replace('limit','slimit')
+ self.sql_query_time_cost(sql)
+ tdSql.checkData(0,0,num*per_table_num)
+ nest_sql = nest_sql.replace('limit','slimit')
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkData(0,0,num*per_table_num)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkData(0,0,0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkData(0,0,0)
+
+ def sql_limit_retun_n_slimit_return_per_table_num_times_tables(self,sql,num,tables,per_table_num,base_fun,replace_fun):
+        #sql limit n = n; sql limit 0 = 0; sql slimit n = per_table_num*tables; sql slimit 0 = 0
+ #interval
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ self.sql_query_time_cost(sql)
+ tdSql.checkRows(num)
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkRows(num)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkRows(0)
+
+        nest_sql_0 = re.sub(r'\d\d',"0",nest_sql) # \d matches a digit character [0-9]; + matches one or more
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkRows(0)
+
+ sql = sql.replace('limit','slimit')
+ self.sql_query_time_cost(sql)
+ tdSql.checkRows(tables*per_table_num)
+ nest_sql = nest_sql.replace('limit','slimit')
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkRows(tables*per_table_num)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkRows(0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkRows(0)
+
+ def sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(self,sql,num,tables,per_table_num,base_fun,replace_fun):
+        #sql limit n = n; sql limit 0 = 0; sql slimit n = per_table_num*tables; sql slimit 0 = 0
+ #interval
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ self.sql_query_time_cost(sql)
+ tdSql.checkData(0,0,num)
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkData(0,0,num)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkData(0,0,0)
+
+        nest_sql_0 = re.sub(r'\d\d',"0",nest_sql) # \d matches a digit character [0-9]; + matches one or more
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkData(0,0,0)
+
+ sql = sql.replace('limit','slimit')
+ self.sql_query_time_cost(sql)
+ tdSql.checkData(0,0,tables*per_table_num)
+ nest_sql = nest_sql.replace('limit','slimit')
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkData(0,0,tables*per_table_num)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkData(0,0,0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkData(0,0,0)
+
+ def sql_limit_not_test_slimitkeep_return_per_table_num_times_n(self,sql,num,tables,per_table_num,base_fun,replace_fun):
+        #sql slimit n = per_table_num*n; sql slimit 0 = 0
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ sql = sql.replace('limit','limit')
+ self.sql_query_time_cost(sql)
+ tdSql.checkRows(num*per_table_num)
+
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkRows(num*per_table_num)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkRows(0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkRows(0)
+
+ def sql_data_limit_not_test_slimitkeep_return_per_table_num_times_n(self,sql,num,tables,per_table_num,base_fun,replace_fun):
+        #sql slimit n = per_table_num*n; sql slimit 0 = 0
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ sql = sql.replace('limit','limit')
+ self.sql_query_time_cost(sql)
+ tdSql.checkData(0,0,num*per_table_num)
+
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkData(0,0,num*per_table_num)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkData(0,0,0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkData(0,0,0)
+
+ def sql_limit_not_test_slimitkeep_return_per_table_num_times_tables(self,sql,num,tables,per_table_num,base_fun,replace_fun):
+        #sql slimit n = per_table_num*tables; sql slimit 0 = 0
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ self.sql_query_time_cost(sql)
+ tdSql.checkRows(tables*per_table_num)
+
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkRows(tables*per_table_num)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkRows(0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkRows(0)
+
+ def sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(self,sql,num,tables,per_table_num,base_fun,replace_fun):
+        #sql slimit n = per_table_num*tables; sql slimit 0 = 0
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ self.sql_query_time_cost(sql)
+ tdSql.checkData(0,0,tables*per_table_num)
+
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkData(0,0,tables*per_table_num)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkData(0,0,0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkData(0,0,0)
+
+ def sql_limit_retun_n_times_n2_slimitkeep_return_n_times_n2(self,sql,num,num2,tables,per_table_num,base_fun,replace_fun):
+        #sql limit n = n*num2; sql limit 0 = 0; sql slimit n = num2*n; sql slimit 0 = 0
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ self.sql_query_time_cost(sql)
+ tdSql.checkRows(num*num2)
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkRows(num*num2)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkRows(0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkRows(0)
+
+ sql = sql.replace('limit','limit')
+ self.sql_query_time_cost(sql)
+ tdSql.checkRows(num*num2)
+ nest_sql = nest_sql.replace('limit','limit')
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkRows(num*num2)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkRows(0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkRows(0)
+
+ def sql_data_limit_retun_n_times_n2_slimitkeep_return_n_times_n2(self,sql,num,num2,tables,per_table_num,base_fun,replace_fun):
+        #sql limit n = n*num2; sql limit 0 = 0; sql slimit n = num2*n; sql slimit 0 = 0
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ self.sql_query_time_cost(sql)
+ tdSql.checkData(0,0,num*num2)
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkData(0,0,num*num2)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkData(0,0,0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkData(0,0,0)
+
+ sql = sql.replace('limit','limit')
+ self.sql_query_time_cost(sql)
+ tdSql.checkData(0,0,num*num2)
+ nest_sql = nest_sql.replace('limit','limit')
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkData(0,0,num*num2)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkData(0,0,0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkData(0,0,0)
+
+ def sql_limit_times_slimitkeep_return_n2(self,sql,num,num2,tables,per_table_num,base_fun,replace_fun):
+        #sql limit n = num2; sql limit 0 = 0
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ self.sql_query_time_cost(sql)
+ tdSql.checkRows(num2)
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkRows(num2)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkRows(0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkRows(0)
+
+ def sql_data_limit_times_slimitkeep_return_n2(self,sql,num,num2,tables,per_table_num,base_fun,replace_fun):
+        #sql limit n = num2; sql limit 0 = 0
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ self.sql_query_time_cost(sql)
+ tdSql.checkData(0,0,num2)
+ self.sql_query_time_cost(nest_sql)
+ tdSql.checkData(0,0,num2)
+
+ sql_0 = re.sub(r'\d\d',"0",sql)
+ self.sql_query_time_cost(sql_0)
+ tdSql.checkData(0,0,0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ self.sql_query_time_cost(nest_sql_0)
+ tdSql.checkData(0,0,0)
+
+ def sql_retun_error(self,sql,base_fun,replace_fun):
+        #sql limit n = error; sql limit 0 = error; sql slimit n = error; sql slimit 0 = error
+ sql = sql.replace('%s'%base_fun,'%s'%replace_fun)
+
+ nest_sql =" select * from (%s) " %sql
+ tdSql.error(sql)
+ tdSql.error(nest_sql)
+
+ sql_0 = re.sub(r'\d+',"0",sql)
+        tdSql.error(sql_0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ tdSql.error(nest_sql_0)
+
+ sql = sql.replace('limit','slimit')
+ tdSql.error(sql)
+ nest_sql = nest_sql.replace('limit','slimit')
+ tdSql.error(nest_sql)
+
+ sql_0 = re.sub(r'\d+',"0",sql)
+ tdSql.error(sql_0)
+ nest_sql_0 = re.sub(r'\d\d',"0",nest_sql)
+ tdSql.error(nest_sql_0)
+
+ def fun_base(self,dbname,num,num2,tables,per_table_num,dbnamejoin,base_fun,replace_fun):
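+ # drives the plain select * scenarios through the limit/slimit helpers: the bare query, a count(*) wrapper,
+ # union / union all of that wrapper, and the same shapes as a self join between dbname and dbnamejoin,
+ # with a ts filter and asc/desc ordering added in the later sub-cases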
+
+ tdLog.info("base query ---------1----------")
+ sql = "select * from %s.meters limit %d" %(dbname,num)
+ self.sql_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.* from %s.meters a,%s.meters b where a.ts = b.ts limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("base query ---------2----------")
+ sql = "select * from %s.meters where ts is not null limit %d" %(dbname,num)
+ self.sql_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.* from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("base query ---------3----------")
+ sql = "select * from %s.meters where ts is not null order by ts limit %d" %(dbname,num)
+ self.sql_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.* from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts order by b.ts limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+ tdLog.info("base query ---------4----------")
+ sql = "select * from %s.meters where ts is not null order by ts desc limit %d" %(dbname,num)
+ self.sql_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.* from %s.meters a,%s.meters b where b.ts is not null and a.ts = b.ts order by a.ts desc limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ def fun_count(self,dbname,num,num2,tables,per_table_num,dbnamejoin,base_fun,replace_fun):
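+ # same drivers as fun_base but for count(*): plain and filtered limits, group by / partition by tbname,
+ # ordering by the aggregate alias, interval(1a) windows, and the slimit / slimit+limit combinations;
+ # shapes that order a bare aggregate by ts, or that put group by / partition by after interval(1a), go through sql_retun_error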
+
+ tdLog.info("count query ---------1----------")
+ sql = "select count(*) from %s.meters limit %d" %(dbname,num)
+ self.sql_limit_retun_1_slimit_return_error(sql,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_1_slimit_return_error(sql,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_1_slimit_return_error(sql_union,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_1_slimit_return_error(sql_union_all,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select count(a.*) from %s.meters a,%s.meters b where a.ts = b.ts limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_1_slimit_return_error(sql_join,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_1_slimit_return_error(sql_join,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_1_slimit_return_error(sql_union,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_1_slimit_return_error(sql_union_all,tables,per_table_num,base_fun,replace_fun)
+
+
+
+
+ tdLog.info("count query ---------2----------")
+ sql = "select count(*) from %s.meters where ts is not null limit %d" %(dbname,num)
+ self.sql_limit_retun_1_slimit_return_error(sql,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_1_slimit_return_error(sql,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_1_slimit_return_error(sql_union,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_1_slimit_return_error(sql_union_all,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select count(a.*) from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_1_slimit_return_error(sql_join,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_1_slimit_return_error(sql_join,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_1_slimit_return_error(sql_union,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_1_slimit_return_error(sql_union_all,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("count query ---------3----------")
+ sql = "select count(*) from %s.meters where ts is not null order by ts limit %d" %(dbname,num)
+ self.sql_retun_error(sql,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_retun_error(sql,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_retun_error(sql_union,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_retun_error(sql_union_all,base_fun,replace_fun)
+
+ sql_join = "select count(a.*) from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts order by b.ts limit %d" %(dbname,dbnamejoin,num)
+ self.sql_retun_error(sql_join,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_retun_error(sql_join,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_retun_error(sql_union,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_retun_error(sql_union_all,base_fun,replace_fun)
+
+
+
+ tdLog.info("count query ---------4----------")
+ sql = "select count(*) from %s.meters where ts is not null order by ts desc limit %d" %(dbname,num)
+ self.sql_retun_error(sql,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_retun_error(sql,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_retun_error(sql_union,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_retun_error(sql_union_all,base_fun,replace_fun)
+
+ sql_join = "select count(a.*) from %s.meters a,%s.meters b where b.ts is not null and a.ts = b.ts order by a.ts desc limit %d" %(dbname,dbnamejoin,num)
+ self.sql_retun_error(sql_join,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_retun_error(sql_join,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_retun_error(sql_union,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_retun_error(sql_union_all,base_fun,replace_fun)
+
+
+
+ tdLog.info("count query ---------5----------")
+ sql = "select count(*) from %s.meters where ts is not null group by tbname limit %d" %(dbname,num)
+ self.sql_limit_retun_tables_slimit_return_n(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_tables_slimit_return_n(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select count(*) from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts group by b.tbname limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_tables_slimit_return_n(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+
+ tdLog.info("count query ---------6----------")
+ sql = "select count(*) from %s.meters where ts is not null partition by tbname limit %d" %(dbname,num)
+ self.sql_limit_retun_tables_slimit_return_n(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_tables_slimit_return_n(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select count(*) from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by b.tbname limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_tables_slimit_return_n(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("count query ---------7----------")
+ sql = "select count(*) cc from %s.meters where ts is not null group by tbname order by cc limit %d" %(dbname,num)
+ self.sql_limit_retun_n_slimit_return_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_slimit_return_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select count(*) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts group by b.tbname order by cc limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_n_slimit_return_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("count query ---------8----------")
+ sql = "select count(*) cc from %s.meters where ts is not null partition by tbname order by cc limit %d" %(dbname,num)
+ self.sql_limit_retun_n_slimit_return_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_slimit_return_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select count(*) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by b.tbname order by cc limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_n_slimit_return_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("count query ---------9----------")
+ sql = "select count(*) cc from %s.meters where ts is not null interval(1a) limit %d" %(dbname,num)
+ self.sql_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select count(*) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts interval(1a) limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("count query ---------10----------")
+ sql = "select count(*) cc from %s.meters where ts is not null interval(1a) order by cc asc limit %d" %(dbname,num)
+ self.sql_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select count(*) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts interval(1a) order by cc asc limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("count query ---------11----------")
+ sql = "select count(*) cc from %s.meters where ts is not null interval(1a) order by cc desc limit %d" %(dbname,num)
+ self.sql_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select count(*) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts interval(1a) order by cc desc limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("count query ---------12----------")
+ sql = "select tbname,count(*) cc from %s.meters where ts is not null interval(1a) group by tbname limit %d" %(dbname,num)
+ self.sql_retun_error(sql,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_retun_error(sql,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_retun_error(sql_union,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_retun_error(sql_union_all,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,count(*) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts interval(1a) group by b.tbname limit %d" %(dbname,dbnamejoin,num)
+ self.sql_retun_error(sql_join,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_retun_error(sql_join,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_retun_error(sql_union,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_retun_error(sql_union_all,base_fun,replace_fun)
+
+
+
+ tdLog.info("count query ---------13----------")
+ sql = "select tbname,count(*) cc from %s.meters where ts is not null interval(1a) partition by tbname limit %d" %(dbname,num)
+ self.sql_retun_error(sql,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_retun_error(sql,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_retun_error(sql_union,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_retun_error(sql_union_all,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,count(*) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts interval(1a) partition by b.tbname limit %d" %(dbname,dbnamejoin,num)
+ self.sql_retun_error(sql_join,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_retun_error(sql_join,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_retun_error(sql_union,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_retun_error(sql_union_all,base_fun,replace_fun)
+
+
+
+ tdLog.info("count query ---------14----------")
+ sql = "select tbname,count(*) cc from %s.meters where ts is not null partition by tbname interval(1a) limit %d" %(dbname,num)
+ self.sql_limit_retun_tables_times_n_slimit_return_per_table_num_times_n(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_tables_times_n_slimit_return_per_table_num_times_n(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_tables_times_n_slimit_return_per_table_num_times_n(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_tables_times_n_slimit_return_per_table_num_times_n(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,count(*) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by a.tbname interval(1a) limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_tables_times_n_slimit_return_per_table_num_times_n(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_tables_times_n_slimit_return_per_table_num_times_n(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_tables_times_n_slimit_return_per_table_num_times_n(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_tables_times_n_slimit_return_per_table_num_times_n(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("count query ---------15----------")
+ sql = "select tbname,count(*) cc from %s.meters where ts is not null partition by tbname interval(1a) order by cc asc limit %d" %(dbname,num)
+ self.sql_limit_retun_n_slimit_return_per_table_num_times_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,count(*) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by a.tbname interval(1a) order by cc asc limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_n_slimit_return_per_table_num_times_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("count query ---------16----------")
+ sql = "select tbname,count(*) cc from %s.meters where ts is not null partition by tbname interval(1a) order by cc desc limit %d" %(dbname,num)
+ self.sql_limit_retun_n_slimit_return_per_table_num_times_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,count(*) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by a.tbname interval(1a) order by cc desc limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_n_slimit_return_per_table_num_times_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("count query ---------17----------")
+ sql = "select tbname,count(*) cc from %s.meters where ts is not null partition by tbname interval(1a) slimit %d" %(dbname,num)
+ self.sql_limit_not_test_slimitkeep_return_per_table_num_times_n(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_n(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_n(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_n(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,count(*) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by a.tbname interval(1a) slimit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_not_test_slimitkeep_return_per_table_num_times_n(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_n(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_n(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_n(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("count query ---------18----------")
+ sql = "select tbname,count(*) cc from %s.meters where ts is not null partition by tbname interval(1a) order by cc asc slimit %d" %(dbname,num)
+ self.sql_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,count(*) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by a.tbname interval(1a) order by cc asc slimit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+ tdLog.info("count query ---------19----------")
+ sql = "select tbname,count(*) cc from %s.meters where ts is not null partition by tbname interval(1a) order by cc desc slimit %d" %(dbname,num)
+ self.sql_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,count(*) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by a.tbname interval(1a) order by cc desc slimit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("count query ---------20----------")
+ sql = "select tbname,count(*) cc from %s.meters where ts is not null partition by tbname interval(1a) slimit %d limit %d" %(dbname,num,num2)
+ self.sql_limit_retun_n_times_n2_slimitkeep_return_n_times_n2(sql,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_times_n2_slimitkeep_return_n_times_n2(sql,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_times_n2_slimitkeep_return_n_times_n2(sql_union,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_times_n2_slimitkeep_return_n_times_n2(sql_union_all,num,num2,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,count(*) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by a.tbname interval(1a) slimit %d limit %d" %(dbname,dbnamejoin,num,num2)
+ self.sql_limit_retun_n_times_n2_slimitkeep_return_n_times_n2(sql_join,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_times_n2_slimitkeep_return_n_times_n2(sql_join,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_times_n2_slimitkeep_return_n_times_n2(sql_union,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_times_n2_slimitkeep_return_n_times_n2(sql_union_all,num,num2,tables,per_table_num,base_fun,replace_fun)
+
+
+ tdLog.info("count query ---------21----------")
+ sql = "select tbname,count(*) cc from %s.meters where ts is not null partition by tbname interval(1a) order by cc asc slimit %d limit %d" %(dbname,num,num2)
+ self.sql_limit_times_slimitkeep_return_n2(sql,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_times_slimitkeep_return_n2(sql,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_union,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_union_all,num,num2,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,count(*) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by a.tbname interval(1a) order by cc asc slimit %d limit %d" %(dbname,dbnamejoin,num,num2)
+ self.sql_limit_times_slimitkeep_return_n2(sql_join,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_join,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_union,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_union_all,num,num2,tables,per_table_num,base_fun,replace_fun)
+
+
+ tdLog.info("count query ---------22----------")
+ sql = "select tbname,count(*) cc from %s.meters where ts is not null partition by tbname interval(1a) order by cc desc slimit %d limit %d" %(dbname,num,num2)
+ self.sql_limit_times_slimitkeep_return_n2(sql,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_times_slimitkeep_return_n2(sql,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_union,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_union_all,num,num2,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,count(*) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by a.tbname interval(1a) order by cc desc slimit %d limit %d" %(dbname,dbnamejoin,num,num2)
+ self.sql_limit_times_slimitkeep_return_n2(sql_join,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_join,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_union,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_union_all,num,num2,tables,per_table_num,base_fun,replace_fun)
+
+
+ def fun_last(self,dbname,num,num2,tables,per_table_num,dbnamejoin,base_fun,replace_fun):
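+ # repeats the fun_count scenarios with last(*) / last(ts): single-row limits, group by / partition by tbname,
+ # interval(1a) windows, and the same error cases for order by ts on a bare aggregate or group by after interval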
+
+ tdLog.info("last query ---------1----------")
+ sql = "select last(*) from %s.meters limit %d" %(dbname,num)
+ self.sql_limit_retun_1_slimit_return_error(sql,tables,per_table_num,base_fun,replace_fun)
+ sql = "select last(*) from (%s)" %sql
+ self.sql_limit_retun_1_slimit_return_error(sql,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_last_limit_retun_1_slimit_return_error(sql_union,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_last_limit_retun_1_slimit_return_error(sql_union_all,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select last(a.*) from %s.meters a,%s.meters b where a.ts = b.ts limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_1_slimit_return_error(sql_join,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select last(*) from (%s)" %sql_join
+ self.sql_limit_retun_1_slimit_return_error(sql_join,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_last_limit_retun_1_slimit_return_error(sql_union,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_last_limit_retun_1_slimit_return_error(sql_union_all,tables,per_table_num,base_fun,replace_fun)
+
+
+
+
+ tdLog.info("last query ---------2----------")
+ sql = "select last(*) from %s.meters where ts is not null limit %d" %(dbname,num)
+ self.sql_limit_retun_1_slimit_return_error(sql,tables,per_table_num,base_fun,replace_fun)
+ sql = "select last(*) from (%s)" %sql
+ self.sql_limit_retun_1_slimit_return_error(sql,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_last_limit_retun_1_slimit_return_error(sql_union,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_last_limit_retun_1_slimit_return_error(sql_union_all,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select last(a.*) from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_1_slimit_return_error(sql_join,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select last(*) from (%s)" %sql_join
+ self.sql_limit_retun_1_slimit_return_error(sql_join,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_last_limit_retun_1_slimit_return_error(sql_union,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_last_limit_retun_1_slimit_return_error(sql_union_all,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("last query ---------3----------")
+ sql = "select last(*) from %s.meters where ts is not null order by ts limit %d" %(dbname,num)
+ self.sql_retun_error(sql,base_fun,replace_fun)
+ sql = "select last(*) from (%s)" %sql
+ self.sql_retun_error(sql,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_retun_error(sql_union,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_retun_error(sql_union_all,base_fun,replace_fun)
+
+ sql_join = "select last(a.*) from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts order by b.ts limit %d" %(dbname,dbnamejoin,num)
+ self.sql_retun_error(sql_join,base_fun,replace_fun)
+ sql_join = "select last(*) from (%s)" %sql_join
+ self.sql_retun_error(sql_join,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_retun_error(sql_union,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_retun_error(sql_union_all,base_fun,replace_fun)
+
+
+
+ tdLog.info("last query ---------4----------")
+ sql = "select last(*) from %s.meters where ts is not null order by ts desc limit %d" %(dbname,num)
+ self.sql_retun_error(sql,base_fun,replace_fun)
+ sql = "select last(*) from (%s)" %sql
+ self.sql_retun_error(sql,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_retun_error(sql_union,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_retun_error(sql_union_all,base_fun,replace_fun)
+
+ sql_join = "select last(a.*) from %s.meters a,%s.meters b where b.ts is not null and a.ts = b.ts order by a.ts desc limit %d" %(dbname,dbnamejoin,num)
+ self.sql_retun_error(sql_join,base_fun,replace_fun)
+ sql_join = "select last(*) from (%s)" %sql_join
+ self.sql_retun_error(sql_join,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_retun_error(sql_union,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_retun_error(sql_union_all,base_fun,replace_fun)
+
+
+
+ tdLog.info("last query ---------5----------")
+ sql = "select last(*) from %s.meters where ts is not null group by tbname limit %d" %(dbname,num)
+ self.sql_limit_retun_tables_slimit_return_n(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_tables_slimit_return_n(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select last(a.*) from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts group by b.tbname limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_tables_slimit_return_n(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+
+ tdLog.info("last query ---------6----------")
+ sql = "select last(*) from %s.meters where ts is not null partition by tbname limit %d" %(dbname,num)
+ self.sql_limit_retun_tables_slimit_return_n(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_tables_slimit_return_n(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select last(a.*) from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by b.tbname limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_tables_slimit_return_n(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_tables_slimit_return_n(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("last query ---------7----------")
+ sql = "select last(ts) cc from %s.meters where ts is not null group by tbname order by cc limit %d" %(dbname,num)
+ self.sql_limit_retun_n_slimit_return_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_slimit_return_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select last(a.ts) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts group by b.tbname order by cc limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_n_slimit_return_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("last query ---------8----------")
+ sql = "select last(ts) cc from %s.meters where ts is not null partition by tbname order by cc limit %d" %(dbname,num)
+ self.sql_limit_retun_n_slimit_return_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_slimit_return_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select last(a.ts) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by b.tbname order by cc limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_n_slimit_return_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("last query ---------9----------")
+ sql = "select last(*) from %s.meters where ts is not null interval(1a) limit %d" %(dbname,num)
+ self.sql_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select last(a.ts) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts interval(1a) limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("last query ---------10----------")
+ sql = "select last(ts) cc from %s.meters where ts is not null interval(1a) order by cc asc limit %d" %(dbname,num)
+ self.sql_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select last(a.ts) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts interval(1a) order by cc asc limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("last query ---------11----------")
+ sql = "select last(ts) cc from %s.meters where ts is not null interval(1a) order by cc desc limit %d" %(dbname,num)
+ self.sql_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_slimit_return_error(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select last(a.ts) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts interval(1a) order by cc desc limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_slimit_return_error(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_error(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("last query ---------12----------")
+ sql = "select tbname,last(ts) cc from %s.meters where ts is not null interval(1a) group by tbname limit %d" %(dbname,num)
+ self.sql_retun_error(sql,base_fun,replace_fun)
+ sql = "select last(*) from (%s)" %sql
+ self.sql_retun_error(sql,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_retun_error(sql_union,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_retun_error(sql_union_all,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,last(a.ts) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts interval(1a) group by b.tbname limit %d" %(dbname,dbnamejoin,num)
+ self.sql_retun_error(sql_join,base_fun,replace_fun)
+ sql_join = "select last(*) from (%s)" %sql_join
+ self.sql_retun_error(sql_join,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_retun_error(sql_union,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_retun_error(sql_union_all,base_fun,replace_fun)
+
+
+
+ tdLog.info("last query ---------13----------")
+ sql = "select tbname,last(ts) cc from %s.meters where ts is not null interval(1a) partition by tbname limit %d" %(dbname,num)
+ self.sql_retun_error(sql,base_fun,replace_fun)
+ sql = "select last(*) from (%s)" %sql
+ self.sql_retun_error(sql,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_retun_error(sql_union,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_retun_error(sql_union_all,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,last(a.ts) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts interval(1a) partition by b.tbname limit %d" %(dbname,dbnamejoin,num)
+ self.sql_retun_error(sql_join,base_fun,replace_fun)
+ sql_join = "select last(*) from (%s)" %sql_join
+ self.sql_retun_error(sql_join,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_retun_error(sql_union,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_retun_error(sql_union_all,base_fun,replace_fun)
+
+
+
+ tdLog.info("last query ---------14----------")
+ sql = "select tbname,last(*) cc from %s.meters where ts is not null partition by tbname interval(1a) limit %d" %(dbname,num)
+ self.sql_limit_retun_tables_times_n_slimit_return_per_table_num_times_n(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_tables_times_n_slimit_return_per_table_num_times_n(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_tables_times_n_slimit_return_per_table_num_times_n(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_tables_times_n_slimit_return_per_table_num_times_n(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,last(a.ts) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by a.tbname interval(1a) limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_tables_times_n_slimit_return_per_table_num_times_n(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_tables_times_n_slimit_return_per_table_num_times_n(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_tables_times_n_slimit_return_per_table_num_times_n(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_tables_times_n_slimit_return_per_table_num_times_n(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("last query ---------15----------")
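+        # case 15: with an order by on the aggregate column, limit N is expected to return N rows
+        # while slimit should return per_table_num*tables rows (see the helper name)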
+ sql = "select tbname,last(ts) cc from %s.meters where ts is not null partition by tbname interval(1a) order by cc asc limit %d" %(dbname,num)
+ self.sql_limit_retun_n_slimit_return_per_table_num_times_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,last(a.ts) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by a.tbname interval(1a) order by cc asc limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_n_slimit_return_per_table_num_times_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("last query ---------16----------")
+ sql = "select tbname,last(ts) cc from %s.meters where ts is not null partition by tbname interval(1a) order by cc desc limit %d" %(dbname,num)
+ self.sql_limit_retun_n_slimit_return_per_table_num_times_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,last(a.ts) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by a.tbname interval(1a) order by cc desc limit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_retun_n_slimit_return_per_table_num_times_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_slimit_return_per_table_num_times_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("last query ---------17----------")
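+        # case 17: slimit without limit; the limit path is not checked here and slimit N is expected
+        # to keep per_table_num*N rows, as encoded in the helper name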
+ sql = "select tbname,last(*) cc from %s.meters where ts is not null partition by tbname interval(1a) slimit %d" %(dbname,num)
+ self.sql_limit_not_test_slimitkeep_return_per_table_num_times_n(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_n(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_n(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_n(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,last(a.ts) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by a.tbname interval(1a) slimit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_not_test_slimitkeep_return_per_table_num_times_n(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_n(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_n(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_n(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("last query ---------18----------")
+ sql = "select tbname,last(ts) cc from %s.meters where ts is not null partition by tbname interval(1a) order by cc asc slimit %d" %(dbname,num)
+ self.sql_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,last(a.ts) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by a.tbname interval(1a) order by cc asc slimit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+ tdLog.info("last query ---------19----------")
+ sql = "select tbname,last(ts) cc from %s.meters where ts is not null partition by tbname interval(1a) order by cc desc slimit %d" %(dbname,num)
+ self.sql_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,last(a.ts) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by a.tbname interval(1a) order by cc desc slimit %d" %(dbname,dbnamejoin,num)
+ self.sql_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_join,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_union,num,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_not_test_slimitkeep_return_per_table_num_times_tables(sql_union_all,num,tables,per_table_num,base_fun,replace_fun)
+
+
+
+ tdLog.info("last query ---------20----------")
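+        # case 20: slimit and limit combined; per the helper name the expected result size is num*num2 rows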
+ sql = "select tbname,last(*) cc from %s.meters where ts is not null partition by tbname interval(1a) slimit %d limit %d" %(dbname,num,num2)
+ self.sql_limit_retun_n_times_n2_slimitkeep_return_n_times_n2(sql,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_retun_n_times_n2_slimitkeep_return_n_times_n2(sql,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_retun_n_times_n2_slimitkeep_return_n_times_n2(sql_union,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_retun_n_times_n2_slimitkeep_return_n_times_n2(sql_union_all,num,num2,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,last(a.*) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by a.tbname interval(1a) slimit %d limit %d" %(dbname,dbnamejoin,num,num2)
+ self.sql_limit_retun_n_times_n2_slimitkeep_return_n_times_n2(sql_join,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_retun_n_times_n2_slimitkeep_return_n_times_n2(sql_join,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_retun_n_times_n2_slimitkeep_return_n_times_n2(sql_union,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_retun_n_times_n2_slimitkeep_return_n_times_n2(sql_union_all,num,num2,tables,per_table_num,base_fun,replace_fun)
+
+
+ tdLog.info("last query ---------21----------")
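+        # case 21: slimit/limit combined with order by; per the helper name the expected result size is num2 rows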
+ sql = "select tbname,last(ts) cc from %s.meters where ts is not null partition by tbname interval(1a) order by cc asc slimit %d limit %d" %(dbname,num,num2)
+ self.sql_limit_times_slimitkeep_return_n2(sql,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_times_slimitkeep_return_n2(sql,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_union,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_union_all,num,num2,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,last(a.ts) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by a.tbname interval(1a) order by cc asc slimit %d limit %d" %(dbname,dbnamejoin,num,num2)
+ self.sql_limit_times_slimitkeep_return_n2(sql_join,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_join,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_union,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_union_all,num,num2,tables,per_table_num,base_fun,replace_fun)
+
+
+ tdLog.info("last query ---------22----------")
+ sql = "select tbname,last(ts) cc from %s.meters where ts is not null partition by tbname interval(1a) order by cc desc slimit %d limit %d" %(dbname,num,num2)
+ self.sql_limit_times_slimitkeep_return_n2(sql,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql = "select count(*) from (%s)" %sql
+ self.sql_data_limit_times_slimitkeep_return_n2(sql,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql,sql)
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_union,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql,sql_union)
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_union_all,num,num2,tables,per_table_num,base_fun,replace_fun)
+
+ sql_join = "select a.tbname,last(a.ts) cc from %s.meters a,%s.meters b where a.ts is not null and a.ts = b.ts partition by a.tbname interval(1a) order by cc desc slimit %d limit %d" %(dbname,dbnamejoin,num,num2)
+ self.sql_limit_times_slimitkeep_return_n2(sql_join,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_join = "select count(*) from (%s)" %sql_join
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_join,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union = "(%s) union (%s)" %(sql_join,sql_join)
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_union,num,num2,tables,per_table_num,base_fun,replace_fun)
+ sql_union_all = "(%s) union all (%s)" %(sql_join,sql_union)
+ self.sql_data_limit_times_slimitkeep_return_n2(sql_union_all,num,num2,tables,per_table_num,base_fun,replace_fun)
+
+
+
+
+
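+    # helper: run sql1 and sql2, compare the first cell of each result and fail the case via tdLog.exit when they differ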
+    def sql_base_check(self,sql1,sql2):
+        tdSql.query(sql1)
+        sql1_result = tdSql.getData(0,0)
+        tdLog.info("sql:%s , result: %s" %(sql1,sql1_result))
+
+        tdSql.query(sql2)
+        sql2_result = tdSql.getData(0,0)
+        tdLog.info("sql:%s , result: %s" %(sql2,sql2_result))
+
+        if sql1_result==sql2_result:
+            tdLog.info(f"checkEqual success, sql1_result={sql1_result},sql2_result={sql2_result}")
+        else:
+            tdLog.exit(f"checkEqual error, sql1_result={sql1_result},sql2_result={sql2_result}")
+
+ def run_limit_slimit_sql(self,dbname,tables,per_table_num,dbnamejoin):
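+        # run the whole limit/slimit suite against in-memory data, flush the database, then repeat
+        # the same checks so the flushed data path is covered as well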
+
+ num,num2 = random.randint(10,100),random.randint(10,100)
+ self.sql_base(dbname,num,num2,tables,per_table_num,dbnamejoin)
+
+ tdSql.execute(" flush database %s;" %dbname)
+
+ self.sql_base(dbname,num,num2,tables,per_table_num,dbnamejoin)
+
+ def check_sub(self,dbname):
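+        # count the child tables via distinct(tbname), then log count(*) for every child table d0..d<n-1>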
+
+ sql = "select count(*) from (select distinct(tbname) from %s.meters)" %dbname
+ self.sql_query_time_cost(sql)
+ num = tdSql.getData(0,0)
+
+ for i in range(0,num):
+ sql1 = "select count(*) from %s.d%d" %(dbname,i)
+ self.sql_query_time_cost(sql1)
+ sql1_result = tdSql.getData(0,0)
+ tdLog.info("sql:%s , result: %s" %(sql1,sql1_result))
+
+
+ def sql_base(self,dbname,num,num2,tables,per_table_num,dbnamejoin):
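+        # sanity-check the total row count of both databases, then run the '*' variant;
+        # the count/last/first variants below are kept commented out for now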
+
+ sql = "select count(*) from %s.meters" %dbname
+ self.sql_query_time_cost(sql)
+ tdSql.checkData(0,0,tables*per_table_num)
+ sql = "select count(*) from %s.meters" %dbnamejoin
+ self.sql_query_time_cost(sql)
+
+ self.fun_base(dbname,num,num2,tables,per_table_num,dbnamejoin,'*','*')
+ # self.fun_count(dbname,num,num2,tables,per_table_num,dbnamejoin,'count','count')
+ # self.fun_last(dbname,num,num2,tables,per_table_num,dbnamejoin,'last','last')
+ # #self.fun_last(dbname,num,num2,tables,per_table_num,dbnamejoin,'last','last_row')
+ # self.fun_last(dbname,num,num2,tables,per_table_num,dbnamejoin,'last','first')
+
+ def test(self,dbname,tables,per_table_num,vgroups,replica,dbnamejoin):
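+        # build both test databases via run_benchmark, then execute the limit/slimit checks against them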
+ self.run_benchmark(dbname,tables,per_table_num,vgroups,replica)
+ self.run_benchmark(dbnamejoin,tables,per_table_num,vgroups,replica)
+ self.run_limit_slimit_sql(dbname,tables,per_table_num,dbnamejoin)
+
+ def run(self):
+ startTime = time.time()
+
+ dbname = 'test'
+ dbnamejoin = 'testjoin'
+ vgroups = random.randint(1,8)
+ tables = random.randint(100,300)
+ per_table_num = random.randint(100,500)
+ replica = 1
+ #self.test('test',tables,per_table_num,vgroup,1)
+ #self.test('test',10000,150,vgroup,1)
+
+        self.test('test',100,150,vgroups,1,'testjoin')  # convenient for debugging; the 3 calls below are not run while debugging
+
+ # self.run_benchmark(dbname,tables,per_table_num,vgroups,replica)
+        # self.run_benchmark(dbnamejoin,tables*vgroups,per_table_num*vgroups,vgroups*2,replica)  # convenient for testing with different data volumes
+ # self.run_limit_slimit_sql(dbname,tables,per_table_num,dbnamejoin)
+
+ endTime = time.time()
+ print("total time %ds" % (endTime - startTime))
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/clusterCommonCreate.py b/tests/system-test/6-cluster/clusterCommonCreate.py
index 6e699e2396..a06c1233d8 100644
--- a/tests/system-test/6-cluster/clusterCommonCreate.py
+++ b/tests/system-test/6-cluster/clusterCommonCreate.py
@@ -94,26 +94,26 @@ class ClusterComCreate:
tdLog.info(shellCmd)
os.system(shellCmd)
- def getStartConsumeNotifyFromTmqsim(self,cdbName='cdb'):
- while 1:
- tdSql.query("select * from %s.notifyinfo"%cdbName)
- #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
- if (tdSql.getRows() == 1) and (tdSql.getData(0, 1) == 0):
- break
- else:
- time.sleep(0.1)
- return
-
- def getStartCommitNotifyFromTmqsim(self,cdbName='cdb'):
- while 1:
- tdSql.query("select * from %s.notifyinfo"%cdbName)
- #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
- if tdSql.getRows() == 2 :
- print(tdSql.getData(0, 1), tdSql.getData(1, 1))
- if tdSql.getData(1, 1) == 1:
- break
- time.sleep(0.1)
- return
+ # def getStartConsumeNotifyFromTmqsim(self,cdbName='cdb'):
+ # while 1:
+ # tdSql.query("select * from %s.notifyinfo"%cdbName)
+ # #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
+ # if (tdSql.getRows() == 1) and (tdSql.getData(0, 1) == 0):
+ # break
+ # else:
+ # time.sleep(0.1)
+ # return
+ #
+ # def getStartCommitNotifyFromTmqsim(self,cdbName='cdb'):
+ # while 1:
+ # tdSql.query("select * from %s.notifyinfo"%cdbName)
+ # #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
+ # if tdSql.getRows() == 2 :
+ # print(tdSql.getData(0, 1), tdSql.getData(1, 1))
+ # if tdSql.getData(1, 1) == 1:
+ # break
+ # time.sleep(0.1)
+ # return
def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1):
if dropFlag == 1:
diff --git a/tests/system-test/7-tmq/subscribeDb3.py b/tests/system-test/7-tmq/subscribeDb3.py
index 1de9b62bcd..5b5326cfba 100644
--- a/tests/system-test/7-tmq/subscribeDb3.py
+++ b/tests/system-test/7-tmq/subscribeDb3.py
@@ -10,6 +10,8 @@ from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
+sys.path.append("./7-tmq")
+from tmqCommon import *
class TDTestCase:
hostname = socket.gethostname()
@@ -67,26 +69,26 @@ class TDTestCase:
tdLog.info("consume info sql: %s"%sql)
tdSql.query(sql)
- def getStartConsumeNotifyFromTmqsim(self,cdbName='cdb'):
- while 1:
- tdSql.query("select * from %s.notifyinfo"%cdbName)
- #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
- if (tdSql.getRows() == 1) and (tdSql.getData(0, 1) == 0):
- break
- else:
- time.sleep(0.1)
- return
-
- def getStartCommitNotifyFromTmqsim(self,cdbName='cdb'):
- while 1:
- tdSql.query("select * from %s.notifyinfo"%cdbName)
- #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
- if tdSql.getRows() == 2 :
- tdLog.info("row[0][1]: %d, row[1][1]: %d"%(tdSql.getData(0, 1), tdSql.getData(1, 1)))
- if tdSql.getData(1, 1) == 1:
- break
- time.sleep(0.1)
- return
+ # def getStartConsumeNotifyFromTmqsim(self,cdbName='cdb'):
+ # while 1:
+ # tdSql.query("select * from %s.notifyinfo"%cdbName)
+ # #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
+ # if (tdSql.getRows() == 1) and (tdSql.getData(0, 1) == 0):
+ # break
+ # else:
+ # time.sleep(0.1)
+ # return
+ #
+ # def getStartCommitNotifyFromTmqsim(self,cdbName='cdb'):
+ # while 1:
+ # tdSql.query("select * from %s.notifyinfo"%cdbName)
+ # #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
+ # if tdSql.getRows() == 2 :
+ # tdLog.info("row[0][1]: %d, row[1][1]: %d"%(tdSql.getData(0, 1), tdSql.getData(1, 1)))
+ # if tdSql.getData(1, 1) == 1:
+ # break
+ # time.sleep(0.1)
+ # return
def selectConsumeResult(self,expectRows,cdbName='cdb'):
resultList=[]
@@ -233,7 +235,7 @@ class TDTestCase:
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
tdLog.info("wait the notify info of start consume")
- self.getStartConsumeNotifyFromTmqsim()
+ tmqCom.getStartConsumeNotifyFromTmqsim()
tdLog.info("pkill consume processor")
if (platform.system().lower() == 'windows'):
@@ -311,7 +313,7 @@ class TDTestCase:
# time.sleep(6)
tdLog.info("start to wait commit notify")
- self.getStartCommitNotifyFromTmqsim()
+ tmqCom.getStartCommitNotifyFromTmqsim()
tdLog.info("pkill consume processor")
if (platform.system().lower() == 'windows'):
diff --git a/tests/system-test/7-tmq/tmqCommon.py b/tests/system-test/7-tmq/tmqCommon.py
index f63c70a4c6..44eef8bf24 100644
--- a/tests/system-test/7-tmq/tmqCommon.py
+++ b/tests/system-test/7-tmq/tmqCommon.py
@@ -145,31 +145,29 @@ class TMQCom:
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
tdLog.debug("%s is stopped by kill -INT" % (processorName))
- def getStartConsumeNotifyFromTmqsim(self,cdbName='cdb',rows=1):
+ def getStartConsumeNotifyFromTmqsim(self,cdbName='cdb'):
loopFlag = 1
while loopFlag:
tdSql.query("select * from %s.notifyinfo"%cdbName)
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
actRows = tdSql.getRows()
- if (actRows >= rows):
- for i in range(actRows):
- if tdSql.getData(i, 1) == 0:
- loopFlag = 0
- break
+ for i in range(actRows):
+ if tdSql.getData(i, 1) == 0:
+ loopFlag = 0
+ break
time.sleep(0.02)
return
- def getStartCommitNotifyFromTmqsim(self,cdbName='cdb',rows=2):
+ def getStartCommitNotifyFromTmqsim(self,cdbName='cdb'):
loopFlag = 1
while loopFlag:
tdSql.query("select * from %s.notifyinfo"%cdbName)
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
actRows = tdSql.getRows()
- if (actRows >= rows):
- for i in range(actRows):
- if tdSql.getData(i, 1) == 1:
- loopFlag = 0
- break
+ for i in range(actRows):
+ if tdSql.getData(i, 1) == 1:
+ loopFlag = 0
+ break
time.sleep(0.02)
return
diff --git a/tests/system-test/7-tmq/tmqConsumerGroup.py b/tests/system-test/7-tmq/tmqConsumerGroup.py
index f05f600f27..ae9671bcf4 100644
--- a/tests/system-test/7-tmq/tmqConsumerGroup.py
+++ b/tests/system-test/7-tmq/tmqConsumerGroup.py
@@ -100,7 +100,7 @@ class TDTestCase:
tdLog.info("wait consumer commit notify")
# tmqCom.getStartCommitNotifyFromTmqsim(rows=4)
- tmqCom.getStartConsumeNotifyFromTmqsim(rows=2)
+ tmqCom.getStartConsumeNotifyFromTmqsim()
tdLog.info("pkill one consume processor")
tmqCom.stopTmqSimProcess('tmq_sim_new')
diff --git a/tests/system-test/7-tmq/tmqDnodeRestart1.py b/tests/system-test/7-tmq/tmqDnodeRestart1.py
index cff55a1239..2bde32800b 100644
--- a/tests/system-test/7-tmq/tmqDnodeRestart1.py
+++ b/tests/system-test/7-tmq/tmqDnodeRestart1.py
@@ -121,7 +121,7 @@ class TDTestCase:
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
# time.sleep(3)
- tmqCom.getStartCommitNotifyFromTmqsim('cdb',1)
+ tmqCom.getStartCommitNotifyFromTmqsim()
tdLog.info("create some new child table and insert data for latest mode")
paraDict["batchNum"] = 100
@@ -205,7 +205,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tmqCom.getStartCommitNotifyFromTmqsim('cdb',1)
+ tmqCom.getStartCommitNotifyFromTmqsim()
tdLog.info("create some new child table and insert data for latest mode")
paraDict["batchNum"] = 10
diff --git a/utils/test/c/sml_test.c b/utils/test/c/sml_test.c
index 21964403ea..f1f4bbc1fd 100644
--- a/utils/test/c/sml_test.c
+++ b/utils/test/c/sml_test.c
@@ -1132,6 +1132,33 @@ int sml_td22900_Test() {
return code;
}
+int sml_td23881_Test() {
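+  // TD-23881 regression: insert one schemaless line whose string values are ~16 KB each;
+  // the call is expected to fail, and the caller asserts a non-zero error code (see ASSERT(ret) in main)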
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ TAOS_RES *pRes =
+ taos_query(taos, "CREATE DATABASE IF NOT EXISTS line_23881 PRECISION 'ns'");
+ taos_free_result(pRes);
+
+ char tmp[16375] = {0};
+ memset(tmp, 'a', 16374);
+ char sql[102400] = {0};
+ sprintf(sql,"lujixfvqor,t0=t c0=f,c1=\"%s\",c2=\"%s\",c3=\"%s\",c4=\"wthvqxcsrlps\" 1626006833639000000", tmp, tmp, tmp);
+
+ pRes = taos_query(taos, "use line_23881");
+ taos_free_result(pRes);
+
+ int totalRows = 0;
+ pRes = taos_schemaless_insert_raw(taos, sql, strlen(sql), &totalRows, TSDB_SML_LINE_PROTOCOL,
+ TSDB_SML_TIMESTAMP_NANO_SECONDS);
+
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ int code = taos_errno(pRes);
+ taos_free_result(pRes);
+ taos_close(taos);
+
+ return code;
+}
+
int sml_ttl_Test() {
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
@@ -1301,6 +1328,8 @@ int main(int argc, char *argv[]) {
}
int ret = 0;
+ ret = sml_td23881_Test();
+ ASSERT(ret);
ret = sml_escape_Test();
ASSERT(!ret);
ret = sml_ts3116_Test();