Merge branch 'feature/TD-14761' of https://github.com/taosdata/TDengine into feature/TD-14761

wangmm0220 2022-07-21 14:00:21 +08:00
commit d2430455a1
93 changed files with 5266 additions and 2442 deletions

View File

@ -54,12 +54,12 @@ enum {
enum {
STREAM_INPUT__DATA_SUBMIT = 1,
STREAM_INPUT__DATA_BLOCK,
STREAM_INPUT__MERGED_SUBMIT,
// STREAM_INPUT__TABLE_SCAN,
STREAM_INPUT__TQ_SCAN,
STREAM_INPUT__DATA_RETRIEVE,
STREAM_INPUT__GET_RES,
STREAM_INPUT__CHECKPOINT,
STREAM_INPUT__DROP,
};
typedef enum EStreamType {

View File

@ -64,7 +64,7 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers);
* @param SReadHandle
* @return
*/
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers);
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols);
/**
* Set the input data block for the stream scan.

View File

@ -77,6 +77,13 @@ typedef struct {
SSubmitReq* data;
} SStreamDataSubmit;
typedef struct {
int8_t type;
int64_t ver;
SArray* dataRefs; // SArray<int32_t*>
SArray* reqs; // SArray<SSubmitReq*>
} SStreamMergedSubmit;
typedef struct {
int8_t type;

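A hedged sketch of how the parallel arrays in the new SStreamMergedSubmit are presumably used: each merged SSubmitReq keeps sharing its original reference count, so the underlying buffer stays alive until every consumer releases it. The helper name below and the dataRef/ver fields of SStreamDataSubmit are assumptions for illustration; only the data field is visible in this hunk.

// Sketch only: fold one queued submit into a merged stream input,
// pairing reqs[i] with its shared refcount in dataRefs[i].
static void streamMergeSubmit(SStreamMergedSubmit* pMerged, SStreamDataSubmit* pSubmit) {
  taosArrayPush(pMerged->dataRefs, &pSubmit->dataRef);  // assumed int32_t* refcount, shared
  taosArrayPush(pMerged->reqs, &pSubmit->data);
  pMerged->ver = pSubmit->ver;                          // assumed: track the latest version seen
}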
View File

@ -493,10 +493,11 @@ int32_t* taosGetErrno();
//parser
#define TSDB_CODE_PAR_SYNTAX_ERROR TAOS_DEF_ERROR_CODE(0, 0x2600)
#define TSDB_CODE_PAR_INVALID_COLUMN TAOS_DEF_ERROR_CODE(0, 0x2601)
#define TSDB_CODE_PAR_TABLE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x2602)
#define TSDB_CODE_PAR_AMBIGUOUS_COLUMN TAOS_DEF_ERROR_CODE(0, 0x2603)
#define TSDB_CODE_PAR_WRONG_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x2604)
#define TSDB_CODE_PAR_INCOMPLETE_SQL TAOS_DEF_ERROR_CODE(0, 0x2601)
#define TSDB_CODE_PAR_INVALID_COLUMN TAOS_DEF_ERROR_CODE(0, 0x2602)
#define TSDB_CODE_PAR_TABLE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x2603)
#define TSDB_CODE_PAR_AMBIGUOUS_COLUMN TAOS_DEF_ERROR_CODE(0, 0x2604)
#define TSDB_CODE_PAR_WRONG_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x2605)
#define TSDB_CODE_PAR_ILLEGAL_USE_AGG_FUNCTION TAOS_DEF_ERROR_CODE(0, 0x2608)
#define TSDB_CODE_PAR_WRONG_NUMBER_OF_SELECT TAOS_DEF_ERROR_CODE(0, 0x2609)
#define TSDB_CODE_PAR_GROUPBY_LACK_EXPRESSION TAOS_DEF_ERROR_CODE(0, 0x260A)
@ -504,7 +505,6 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_NOT_SINGLE_GROUP TAOS_DEF_ERROR_CODE(0, 0x260C)
#define TSDB_CODE_PAR_TAGS_NOT_MATCHED TAOS_DEF_ERROR_CODE(0, 0x260D)
#define TSDB_CODE_PAR_INVALID_TAG_NAME TAOS_DEF_ERROR_CODE(0, 0x260E)
#define TSDB_CODE_PAR_INCOMPLETE_SQL TAOS_DEF_ERROR_CODE(0, 0x260F)
#define TSDB_CODE_PAR_NAME_OR_PASSWD_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x2610)
#define TSDB_CODE_PAR_PASSWD_EMPTY TAOS_DEF_ERROR_CODE(0, 0x2611)
#define TSDB_CODE_PAR_INVALID_PORT TAOS_DEF_ERROR_CODE(0, 0x2612)
@ -514,19 +514,9 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_DB_NOT_SPECIFIED TAOS_DEF_ERROR_CODE(0, 0x2616)
#define TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME TAOS_DEF_ERROR_CODE(0, 0x2617)
#define TSDB_CODE_PAR_CORRESPONDING_STABLE_ERR TAOS_DEF_ERROR_CODE(0, 0x2618)
#define TSDB_CODE_PAR_INVALID_RANGE_OPTION TAOS_DEF_ERROR_CODE(0, 0x2619)
#define TSDB_CODE_PAR_INVALID_STR_OPTION TAOS_DEF_ERROR_CODE(0, 0x261A)
#define TSDB_CODE_PAR_INVALID_ENUM_OPTION TAOS_DEF_ERROR_CODE(0, 0x261B)
#define TSDB_CODE_PAR_INVALID_KEEP_NUM TAOS_DEF_ERROR_CODE(0, 0x261D)
#define TSDB_CODE_PAR_INVALID_KEEP_ORDER TAOS_DEF_ERROR_CODE(0, 0x261E)
#define TSDB_CODE_PAR_INVALID_KEEP_VALUE TAOS_DEF_ERROR_CODE(0, 0x261F)
#define TSDB_CODE_PAR_INVALID_COMMENT_OPTION TAOS_DEF_ERROR_CODE(0, 0x2620)
#define TSDB_CODE_PAR_INVALID_F_RANGE_OPTION TAOS_DEF_ERROR_CODE(0, 0x2621)
#define TSDB_CODE_PAR_INVALID_ROLLUP_OPTION TAOS_DEF_ERROR_CODE(0, 0x2622)
#define TSDB_CODE_PAR_INVALID_RETENTIONS_OPTION TAOS_DEF_ERROR_CODE(0, 0x2623)
#define TSDB_CODE_PAR_INVALID_DB_OPTION TAOS_DEF_ERROR_CODE(0, 0x2619)
#define TSDB_CODE_PAR_INVALID_TABLE_OPTION TAOS_DEF_ERROR_CODE(0, 0x261A)
#define TSDB_CODE_PAR_GROUPBY_WINDOW_COEXIST TAOS_DEF_ERROR_CODE(0, 0x2624)
#define TSDB_CODE_PAR_INVALID_OPTION_UNIT TAOS_DEF_ERROR_CODE(0, 0x2625)
#define TSDB_CODE_PAR_INVALID_KEEP_UNIT TAOS_DEF_ERROR_CODE(0, 0x2626)
#define TSDB_CODE_PAR_AGG_FUNC_NESTING TAOS_DEF_ERROR_CODE(0, 0x2627)
#define TSDB_CODE_PAR_INVALID_STATE_WIN_TYPE TAOS_DEF_ERROR_CODE(0, 0x2628)
#define TSDB_CODE_PAR_INVALID_STATE_WIN_COL TAOS_DEF_ERROR_CODE(0, 0x2629)
@ -580,7 +570,6 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_WINDOW_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2659)
#define TSDB_CODE_PAR_STREAM_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x265A)
#define TSDB_CODE_PAR_GROUP_BY_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x265B)
#define TSDB_CODE_PAR_INVALID_TABLE_OPTION TAOS_DEF_ERROR_CODE(0, 0x265C)
#define TSDB_CODE_PAR_INVALID_INTERP_CLAUSE TAOS_DEF_ERROR_CODE(0, 0x265D)
#define TSDB_CODE_PAR_NO_VALID_FUNC_IN_WIN TAOS_DEF_ERROR_CODE(0, 0x265E)
#define TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE TAOS_DEF_ERROR_CODE(0, 0x265F)

View File

@ -87,6 +87,14 @@ void taosArrayRemoveBatch(SArray* pArray, const int32_t* pData, int32_t numOfEle
*/
void taosArrayRemoveDuplicate(SArray* pArray, __compar_fn_t comparFn, void (*fp)(void*));
/**
* remove duplicate pointer elements from a sorted array, releasing each removed duplicate via fp
* @param pArray sorted source array of pointers (equal elements adjacent)
* @param comparFn comparison function used to detect duplicates
* @param fp destructor invoked on every duplicate that is removed
*/
void taosArrayRemoveDuplicateP(SArray* pArray, __compar_fn_t comparFn, void (*fp)(void*));
/**
* add all element from the source array list into the destination
* @param pArray

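A hedged sketch of the semantics of the new pointer-array variant, consistent with its use in mndProcessSubscribeReq later in this commit (a sorted SArray of char* topic names, duplicates released with taosMemoryFree); this is not the shipped implementation.

// Sketch: dedupe a sorted array of pointers, keeping the first of each
// run and handing every duplicate's pointee to fp for release.
void taosArrayRemoveDuplicateP(SArray* pArray, __compar_fn_t comparFn, void (*fp)(void*)) {
  int32_t size = (int32_t)taosArrayGetSize(pArray);
  if (size < 2) return;
  int32_t pos = 0;
  for (int32_t i = 1; i < size; ++i) {
    void* p1 = taosArrayGet(pArray, pos);
    void* p2 = taosArrayGet(pArray, i);
    if (comparFn(p1, p2) == 0) {
      if (fp != NULL) fp(*(void**)p2);  // release the duplicate pointer
    } else {
      if (pos + 1 != i) taosArraySet(pArray, pos + 1, p2);
      pos += 1;
    }
  }
  pArray->size = pos + 1;
}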
View File

@ -38,4 +38,4 @@ fi
# there must be no libtaos.so* left over, otherwise ln -s errors
${csudo}rm -f ${install_main_dir}/driver/libtaos.* || :
${csudo}rm -f ${install_main_dir}/driver/libtaosws.* || :
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}rm -f ${install_main_dir}/driver/libtaosws.so || :

View File

@ -32,9 +32,9 @@ else
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
${csudo}rm -f ${inc_link_dir}/taosws.h || :
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib_link_dir}/libtaosws.* || :
[ -f ${lib_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.so || :
${csudo}rm -f ${log_link_dir} || :
${csudo}rm -f ${data_link_dir} || :

View File

@ -68,12 +68,12 @@ fi
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
cp ${compile_dir}/build/lib/${wslibfile} ${pkg_dir}${install_home_path}/driver ||:
[ -f ${compile_dir}/build/lib/${wslibfile} ] && cp ${compile_dir}/build/lib/${wslibfile} ${pkg_dir}${install_home_path}/driver ||:
cp ${compile_dir}/../include/client/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/common/taosdef.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/util/taoserror.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/libs/function/taosudf.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/build/include/taosws.h ${pkg_dir}${install_home_path}/include ||:
[ -f ${compile_dir}/build/include/taosws.h ] && cp ${compile_dir}/build/include/taosws.h ${pkg_dir}${install_home_path}/include ||:
cp -r ${top_dir}/examples/* ${pkg_dir}${install_home_path}/examples
#cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
#cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector

View File

@ -75,12 +75,12 @@ if [ -f %{_compiledir}/build/bin/taosadapter ]; then
cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||:
fi
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/build/lib/${wslibfile} %{buildroot}%{homepath}/driver ||:
[ -f %{_compiledir}/build/lib/${wslibfile} ] && cp %{_compiledir}/build/lib/${wslibfile} %{buildroot}%{homepath}/driver ||:
cp %{_compiledir}/../include/client/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../include/common/taosdef.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../include/util/taoserror.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../include/libs/function/taosudf.h %{buildroot}%{homepath}/include
cp %{_compiledir}/build/include/taosws.h %{buildroot}%{homepath}/include ||:
[ -f %{_compiledir}/build/include/taosws.h ] && cp %{_compiledir}/build/include/taosws.h %{buildroot}%{homepath}/include ||:
#cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
#cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
#cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector

View File

@ -229,13 +229,13 @@ function install_lib() {
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -s ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so || :
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so || :
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
${csudo}ln -s ${install_main_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || :
[ -f ${install_main_dir}/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || :
fi
${csudo}ldconfig
@ -320,7 +320,7 @@ function install_jemalloc() {
function install_header() {
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h ${inc_link_dir}/taosudf.h || :
${csudo}rm -f ${inc_link_dir}/taosws.h || :
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h || :
${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
@ -328,7 +328,7 @@ function install_header() {
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
${csudo}ln -s ${install_main_dir}/include/taosudf.h ${inc_link_dir}/taosudf.h
[ -f ${install_main_dir}/include/taosws.h ] && ${csudo}ln -s ${install_main_dir}/include/taosws.h ${inc_link_dir}/taosws.h || :
[ -f ${install_main_dir}/include/taosws.h ] && ${csudo}ln -sf ${install_main_dir}/include/taosws.h ${inc_link_dir}/taosws.h || :
}
function add_newHostname_to_hosts() {

View File

@ -116,7 +116,8 @@ function install_bin() {
function clean_lib() {
sudo rm -f /usr/lib/libtaos.* || :
sudo rm -f /usr/lib/libtaosws.* || :
[ -f /usr/lib/libtaosws.so ] && sudo rm -f /usr/lib/libtaosws.so || :
[ -f /usr/lib64/libtaosws.so ] && sudo rm -f /usr/lib64/libtaosws.so || :
sudo rm -rf ${lib_dir} || :
}
@ -125,8 +126,9 @@ function install_lib() {
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
${csudo}rm -f ${lib_link_dir}/libtaosws.* || :
${csudo}rm -f ${lib64_link_dir}/libtaosws.* || :
[ -f ${lib_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.so || :
[ -f ${lib64_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.so || :
#${csudo}rm -rf ${v15_java_app_dir} || :
${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/*
@ -135,19 +137,19 @@ function install_lib() {
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -s ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so ||:
if [ -d "${lib64_link_dir}" ]; then
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -s ${install_main_dir}/driver/libtaosws.so ${lib64_link_dir}/libtaosws.so || :
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib64_link_dir}/libtaosws.so || :
fi
else
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
${csudo}ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
[ -f ${install_main_dir}/driver/libtaosws.dylib ] && ${csudo}ln -s ${install_main_dir}/driver/libtaosws.dylib ${lib_link_dir}/libtaosws.dylib
[ -f ${install_main_dir}/driver/libtaosws.dylib ] && ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.dylib ${lib_link_dir}/libtaosws.dylib ||:
fi
if [ "$osType" != "Darwin" ]; then
@ -165,7 +167,7 @@ function install_header() {
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
${csudo}ln -s ${install_main_dir}/include/taosudf.h ${inc_link_dir}/taosudf.h
[ -f ${install_main_dir}/include/taosws.h ] && ${csudo}ln -s ${install_main_dir}/include/taosws.h ${inc_link_dir}/taos.h
[ -f ${install_main_dir}/include/taosws.h ] && ${csudo}ln -sf ${install_main_dir}/include/taosws.h ${inc_link_dir}/taosws.h ||:
}
function install_jemalloc() {

View File

@ -294,10 +294,10 @@ function install_avro() {
function install_lib() {
# Remove links
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib_link_dir}/libtaosws.* || :
[ -f ${lib_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.so || :
if [ "$osType" != "Darwin" ]; then
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
${csudo}rm -f ${lib64_link_dir}/libtaosws.* || :
[ -f ${lib64_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.so || :
fi
if [ "$osType" != "Darwin" ]; then
@ -362,13 +362,13 @@ function install_header() {
if [ "$osType" != "Darwin" ]; then
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h ${inc_link_dir}/taosudf.h || :
${csudo}rm -f ${inc_link_dir}/taosws.h ||:
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h ||:
${csudo}cp -f ${source_dir}/include/client/taos.h ${source_dir}/include/common/taosdef.h ${source_dir}/include/util/taoserror.h ${source_dir}/include/libs/function/taosudf.h \
${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
if [ -f ${binary_dir}/build/include/taosws.h ]; then
${csudo}cp -f ${binary_dir}/build/include/taosws.h ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/taosws.h ||:
${csudo}ln -s ${install_main_dir}/include/taosws.h ${inc_link_dir}/taosws.h ||:
${csudo}ln -sf ${install_main_dir}/include/taosws.h ${inc_link_dir}/taosws.h ||:
fi
${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h

View File

@ -93,7 +93,7 @@ else
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
wslib_files="${build_dir}/lib/libtaosws.so."
wslib_files="${build_dir}/lib/libtaosws.so"
header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h ${code_dir}/include/libs/function/taosudf.h"
wsheader_files="${build_dir}/include/taosws.h"

View File

@ -82,33 +82,33 @@ function kill_taosd() {
function install_include() {
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h ${inc_link_dir}/taosudf.h || :
${csudo}rm -f ${inc_link_dir}/taosws.h
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h ||:
${csudo}ln -s ${inc_dir}/taos.h ${inc_link_dir}/taos.h
${csudo}ln -s ${inc_dir}/taosdef.h ${inc_link_dir}/taosdef.h
${csudo}ln -s ${inc_dir}/taoserror.h ${inc_link_dir}/taoserror.h
${csudo}ln -s ${inc_dir}/taosudf.h ${inc_link_dir}/taosudf.h
[ -f ${inc_dir}/taosws.h ] && ${csudo}ln -s ${inc_dir}/taosudf.h ${inc_link_dir}/taosudf.h ||:
[ -f ${inc_dir}/taosws.h ] && ${csudo}ln -sf ${inc_dir}/taosws.h ${inc_link_dir}/taosws.h ||:
}
function install_lib() {
${csudo}rm -f ${lib_link_dir}/libtaos* || :
${csudo}rm -f ${lib64_link_dir}/libtaos* || :
${csudo}rm -f ${lib_link_dir}/libtaosws* || :
${csudo}rm -f ${lib64_link_dir}/libtaosws* || :
[ -f ${lib_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.so || :
[ -f ${lib64_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.so || :
${csudo}ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
[ -f ${lib_dir}/libtaosws.so ]${csudo}ln -s ${lib_dir}/libtaosws.so ${lib_link_dir}/libtaosws.so
[ -f ${lib_dir}/libtaosws.so ] && ${csudo}ln -sf ${lib_dir}/libtaosws.so ${lib_link_dir}/libtaosws.so ||:
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
${csudo}ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
[ -ff ${lib_dir}/libtaosws.so ] && ${csudo}ln -s ${lib_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || :
[ -f ${lib_dir}/libtaosws.so ] && ${csudo}ln -sf ${lib_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || :
fi
${csudo}ldconfig

View File

@ -102,10 +102,10 @@ function clean_local_bin() {
function clean_lib() {
# Remove link
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib_link_dir}/libtaosws.* || :
[ -f ${lib_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.so || :
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
${csudo}rm -f ${lib64_link_dir}/libtaosws.* || :
[ -f ${lib64_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.so || :
#${csudo}rm -rf ${v15_java_app_dir} || :
}
@ -116,7 +116,7 @@ function clean_header() {
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
${csudo}rm -f ${inc_link_dir}/taosws.h || :
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h || :
}
function clean_config() {

View File

@ -452,7 +452,7 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
int32_t code = -1;
SArray *newSub = subscribe.topicNames;
taosArraySortString(newSub, taosArrayCompareString);
taosArrayRemoveDuplicate(newSub, taosArrayCompareString, taosMemoryFree);
taosArrayRemoveDuplicateP(newSub, taosArrayCompareString, taosMemoryFree);
int32_t newTopicNum = taosArrayGetSize(newSub);
// check topic existence

View File

@ -708,9 +708,13 @@ int32_t mndSetRpcInfoForDbTrans(SMnode *pMnode, SRpcMsg *pMsg, EOperType oper, c
if (pTrans->oper == oper) {
if (strcasecmp(dbname, pTrans->dbname1) == 0) {
mDebug("trans:%d, db:%s oper:%d matched with input", pTrans->id, dbname, oper);
if (taosArrayPush(pTrans->pRpcArray, &pMsg->info) != NULL) {
if (pTrans->pRpcArray == NULL) {
pTrans->pRpcArray = taosArrayInit(1, sizeof(SRpcHandleInfo));
}
if (pTrans->pRpcArray != NULL && taosArrayPush(pTrans->pRpcArray, &pMsg->info) != NULL) {
code = 0;
}
sdbRelease(pMnode->pSdb, pTrans);
break;
}

View File

@ -88,7 +88,7 @@ typedef struct {
STqExecTb execTb;
STqExecDb execDb;
};
int32_t numOfCols; // number of output columns, temporarily used
} STqExecHandle;
typedef struct {

View File

@ -91,7 +91,7 @@ int metaBegin(SMeta* pMeta);
int metaCommit(SMeta* pMeta);
int metaCreateSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
int metaAlterSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
int metaDropSTable(SMeta* pMeta, int64_t verison, SVDropStbReq* pReq);
int metaDropSTable(SMeta* pMeta, int64_t verison, SVDropStbReq* pReq, SArray* tbUidList);
int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq);
int metaDropTable(SMeta* pMeta, int64_t version, SVDropTbReq* pReq, SArray* tbUids);
int metaTtlDropTable(SMeta* pMeta, int64_t ttl, SArray* tbUids);
@ -275,8 +275,8 @@ struct SVnode {
#define VND_TSDB(vnd) ((vnd)->pTsdb)
#define VND_RSMA0(vnd) ((vnd)->pTsdb)
#define VND_RSMA1(vnd) ((vnd)->pSma->pRSmaTsdb1)
#define VND_RSMA2(vnd) ((vnd)->pSma->pRSmaTsdb2)
#define VND_RSMA1(vnd) ((vnd)->pSma->pRSmaTsdb[TSDB_RETENTION_L0])
#define VND_RSMA2(vnd) ((vnd)->pSma->pRSmaTsdb[TSDB_RETENTION_L1])
#define VND_RETENTIONS(vnd) (&(vnd)->config.tsdbCfg.retentions)
#define VND_IS_RSMA(v) ((v)->config.isRsma == 1)
#define VND_IS_TSMA(v) ((v)->config.isTsma == 1)
@ -291,8 +291,7 @@ struct SSma {
bool locked;
TdThreadMutex mutex;
SVnode* pVnode;
STsdb* pRSmaTsdb1;
STsdb* pRSmaTsdb2;
STsdb* pRSmaTsdb[TSDB_RETENTION_L2];
void* pTSmaEnv;
void* pRSmaEnv;
};
@ -307,8 +306,8 @@ struct SSma {
#define SMA_TSMA_ENV(s) ((s)->pTSmaEnv)
#define SMA_RSMA_ENV(s) ((s)->pRSmaEnv)
#define SMA_RSMA_TSDB0(s) ((s)->pVnode->pTsdb)
#define SMA_RSMA_TSDB1(s) ((s)->pRSmaTsdb1)
#define SMA_RSMA_TSDB2(s) ((s)->pRSmaTsdb2)
#define SMA_RSMA_TSDB1(s) ((s)->pRSmaTsdb[TSDB_RETENTION_L0])
#define SMA_RSMA_TSDB2(s) ((s)->pRSmaTsdb[TSDB_RETENTION_L1])
// sma
void smaHandleRes(void* pVnode, int64_t smaId, const SArray* data);

View File

@ -212,7 +212,7 @@ _err:
return -1;
}
int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq) {
int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq, SArray *tbUidList) {
void *pKey = NULL;
int nKey = 0;
void *pData = NULL;
@ -228,8 +228,7 @@ int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq) {
}
// drop all child tables
TBC *pCtbIdxc = NULL;
SArray *pArray = taosArrayInit(8, sizeof(tb_uid_t));
TBC *pCtbIdxc = NULL;
tdbTbcOpen(pMeta->pCtbIdx, &pCtbIdxc, &pMeta->txn);
rc = tdbTbcMoveTo(pCtbIdxc, &(SCtbIdxKey){.suid = pReq->suid, .uid = INT64_MIN}, sizeof(SCtbIdxKey), &c);
@ -249,20 +248,18 @@ int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq) {
break;
}
taosArrayPush(pArray, &(((SCtbIdxKey *)pKey)->uid));
taosArrayPush(tbUidList, &(((SCtbIdxKey *)pKey)->uid));
}
tdbTbcClose(pCtbIdxc);
metaWLock(pMeta);
for (int32_t iChild = 0; iChild < taosArrayGetSize(pArray); iChild++) {
tb_uid_t uid = *(tb_uid_t *)taosArrayGet(pArray, iChild);
for (int32_t iChild = 0; iChild < taosArrayGetSize(tbUidList); iChild++) {
tb_uid_t uid = *(tb_uid_t *)taosArrayGet(tbUidList, iChild);
metaDropTableByUid(pMeta, uid, NULL);
}
taosArrayDestroy(pArray);
// drop super table
_drop_super_table:
tdbTbGet(pMeta->pUidIdx, &pReq->suid, sizeof(tb_uid_t), &pData, &nData);
@ -374,6 +371,13 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) {
}
metaReaderClear(&mr);
if (pReq->type == TSDB_CHILD_TABLE) {
tb_uid_t suid = metaGetTableEntryUidByName(pMeta, pReq->ctb.name);
if (suid == 0) {
terrno = TSDB_CODE_PAR_TABLE_NOT_EXIST;
return -1;
}
}
// build SMetaEntry
me.version = version;
me.type = pReq->type;

View File

@ -123,11 +123,9 @@ int32_t smaOpen(SVnode *pVnode) {
}
// restore the rsma
#if 1
if (rsmaRestore(pSma) < 0) {
goto _err;
}
#endif
}
return 0;

View File

@ -605,7 +605,7 @@ static int32_t tdRSmaFetchAndSubmitResult(SRSmaInfoItem *pItem, STSchema *pTSche
snprintf(flag, 10, "level %" PRIi8, pItem->level);
blockDebugShowDataBlocks(pResult, flag);
#endif
STsdb *sinkTsdb = (pItem->level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb1 : pSma->pRSmaTsdb2);
STsdb *sinkTsdb = (pItem->level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb[0] : pSma->pRSmaTsdb[1]);
SSubmitReq *pReq = NULL;
// TODO: the schema update should be handled
if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, SMA_VID(pSma), suid) < 0) {
@ -949,13 +949,8 @@ _err:
* @return int32_t
*/
static int32_t tdRSmaRestoreTSDataReload(SSma *pSma, int64_t committed) {
// TODO
smaDebug("vgId:%d, rsma restore from %" PRIi64 ", ts data reload success", SMA_VID(pSma), committed);
// NOTHING TODO: the data would be restored from the unified WAL replay procedure
return TSDB_CODE_SUCCESS;
_err:
smaError("vgId:%d, rsma restore from %" PRIi64 ", ts data reload failed since %s", SMA_VID(pSma), committed,
terrstr());
return TSDB_CODE_FAILED;
}
int32_t tdProcessRSmaRestoreImpl(SSma *pSma) {

View File

@ -506,7 +506,7 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
.initTqReader = true,
.version = ver,
};
pHandle->execHandle.execCol.task[i] = qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle);
pHandle->execHandle.execCol.task[i] = qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, &pHandle->execHandle.numOfCols);
ASSERT(pHandle->execHandle.execCol.task[i]);
void* scanner = NULL;
qExtractStreamScanner(pHandle->execHandle.execCol.task[i], &scanner);

View File

@ -15,7 +15,7 @@
#include "tq.h"
static int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp) {
static int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols) {
int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock);
void* buf = taosMemoryCalloc(1, dataStrLen);
if (buf == NULL) return -1;
@ -29,7 +29,7 @@ static int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp)
// TODO enable compress
int32_t actualLen = 0;
blockEncode(pBlock, pRetrieve->data, &actualLen, taosArrayGetSize(pBlock->pDataBlock), false);
blockEncode(pBlock, pRetrieve->data, &actualLen, numOfCols, false);
actualLen += sizeof(SRetrieveTableRsp);
ASSERT(actualLen <= dataStrLen);
taosArrayPush(pRsp->blockDataLen, &actualLen);
@ -49,8 +49,8 @@ static int32_t tqAddBlockSchemaToRsp(const STqExecHandle* pExec, int32_t workerI
static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, SMqDataRsp* pRsp) {
SMetaReader mr = {0};
metaReaderInit(&mr, pTq->pVnode->pMeta, 0);
// TODO add reference to guarantee success
if (metaGetTableEntryByUid(&mr, uid) < 0) {
ASSERT(0);
return -1;
}
char* tbName = strdup(mr.me.name);
@ -87,16 +87,18 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVa
tqDebug("task execute end, get %p", pDataBlock);
if (pDataBlock != NULL) {
tqAddBlockDataToRsp(pDataBlock, pRsp);
pRsp->blockNum++;
if (pRsp->withTbName) {
if (pOffset->type == TMQ_OFFSET__LOG) {
int64_t uid = pExec->pExecReader[0]->msgIter.uid;
tqAddTbNameToRsp(pTq, uid, pRsp);
if (tqAddTbNameToRsp(pTq, uid, pRsp) < 0) {
continue;
}
} else {
pRsp->withTbName = 0;
}
}
tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols);
pRsp->blockNum++;
if (pOffset->type == TMQ_OFFSET__LOG) {
continue;
} else {
@ -193,13 +195,14 @@ int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataR
SSDataBlock block = {0};
if (tqRetrieveDataBlock(&block, pReader) < 0) {
if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue;
ASSERT(0);
}
tqAddBlockDataToRsp(&block, pRsp);
if (pRsp->withTbName) {
int64_t uid = pExec->pExecReader[workerId]->msgIter.uid;
tqAddTbNameToRsp(pTq, uid, pRsp);
if (tqAddTbNameToRsp(pTq, uid, pRsp) < 0) {
continue;
}
}
tqAddBlockDataToRsp(&block, pRsp, taosArrayGetSize(block.pDataBlock));
tqAddBlockSchemaToRsp(pExec, workerId, pRsp);
pRsp->blockNum++;
}
@ -211,13 +214,14 @@ int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataR
SSDataBlock block = {0};
if (tqRetrieveDataBlock(&block, pReader) < 0) {
if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue;
ASSERT(0);
}
tqAddBlockDataToRsp(&block, pRsp);
if (pRsp->withTbName) {
int64_t uid = pExec->pExecReader[workerId]->msgIter.uid;
tqAddTbNameToRsp(pTq, uid, pRsp);
if (tqAddTbNameToRsp(pTq, uid, pRsp) < 0) {
continue;
}
}
tqAddBlockDataToRsp(&block, pRsp, taosArrayGetSize(block.pDataBlock));
tqAddBlockSchemaToRsp(pExec, workerId, pRsp);
pRsp->blockNum++;
}

View File

@ -92,7 +92,8 @@ int32_t tqMetaOpen(STQ* pTq) {
.initTqReader = true,
.version = handle.snapshotVer,
};
handle.execHandle.execCol.task[i] = qCreateQueueExecTaskInfo(handle.execHandle.execCol.qmsg, &reader);
handle.execHandle.execCol.task[i] = qCreateQueueExecTaskInfo(handle.execHandle.execCol.qmsg, &reader, &handle.execHandle.numOfCols);
ASSERT(handle.execHandle.execCol.task[i]);
void* scanner = NULL;
qExtractStreamScanner(handle.execHandle.execCol.task[i], &scanner);

View File

@ -299,6 +299,7 @@ int32_t tqRetrieveDataBlock(SSDataBlock* pBlock, STqReader* pReader) {
}
if (blockDataEnsureCapacity(pBlock, pReader->msgIter.numOfRows) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto FAIL;
}

View File

@ -49,8 +49,11 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
}
SVCreateTbReq createTbReq = {0};
SName name = {0};
tNameFromString(&name, stbFullName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
createTbReq.name = buildCtbNameByGroupId(stbFullName, pDataBlock->info.groupId);
createTbReq.ctb.name = strdup(stbFullName);
createTbReq.ctb.name = strdup((char*)tNameGetTableName(&name)); // strdup(stbFullName);
createTbReq.flags = 0;
createTbReq.type = TSDB_CHILD_TABLE;
createTbReq.ctb.suid = suid;
@ -175,6 +178,8 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
const SArray* pRes = (const SArray*)data;
SVnode* pVnode = (SVnode*)vnode;
tqDebug("task write into table, vgId %d, block num: %d", pVnode->config.vgId, (int32_t)pRes->size);
ASSERT(pTask->tbSink.pTSchema);
SSubmitReq* pReq = tdBlockToSubmit(pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid,
pTask->tbSink.stbFullName, pVnode->config.vgId);

View File

@ -1455,7 +1455,6 @@ static bool keyOverlapFileBlock(TSDBKEY key, SBlock* pBlock, SVersionRange* pVer
(pBlock->minVersion <= pVerRange->maxVer);
}
static bool doCheckforDatablockOverlap(STableBlockScanInfo* pBlockScanInfo, const SBlock* pBlock) {
size_t num = taosArrayGetSize(pBlockScanInfo->delSkyline);
@ -1507,7 +1506,7 @@ static bool overlapWithDelSkyline(STableBlockScanInfo* pBlockScanInfo, const SBl
return doCheckforDatablockOverlap(pBlockScanInfo, pBlock);
} else {
int32_t index = pBlockScanInfo->fileDelIndex;
while(1) {
while (1) {
TSDBKEY* p = taosArrayGet(pBlockScanInfo->delSkyline, index);
if (p->ts > pBlock->minKey.ts && index > 0) {
index -= 1;
@ -1961,9 +1960,11 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader*
SDelIdx idx = {.suid = pReader->suid, .uid = pBlockScanInfo->uid};
SDelIdx* pIdx = taosArraySearch(aDelIdx, &idx, tCmprDelIdx, TD_EQ);
code = tsdbReadDelData(pDelFReader, pIdx, pDelData, NULL);
if (code != TSDB_CODE_SUCCESS) {
goto _err;
if (pIdx != NULL) {
code = tsdbReadDelData(pDelFReader, pIdx, pDelData, NULL);
if (code != TSDB_CODE_SUCCESS) {
goto _err;
}
}
}

View File

@ -27,6 +27,8 @@ struct SVSnapReader {
// tsdb
int8_t tsdbDone;
STsdbSnapReader *pTsdbReader;
// rsma
int8_t rsmaDone[TSDB_RETENTION_L2];
};
int32_t vnodeSnapReaderOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapReader **ppReader) {
@ -115,6 +117,42 @@ int32_t vnodeSnapRead(SVSnapReader *pReader, uint8_t **ppData, uint32_t *nData)
}
}
// RSMA ==============
#if 0
if (VND_IS_RSMA(pReader->pVnode)) {
// RSMA1/RSMA2
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (!pReader->rsmaDone[i]) {
if (!pReader->pVnode->pSma->pRSmaTsdb[i]) {
// no valid tsdb
pReader->rsmaDone[i] = 1;
continue;
}
if (pReader->pTsdbReader == NULL) {
code = tsdbSnapReaderOpen(pReader->pVnode->pSma->pRSmaTsdb[i], pReader->sver, pReader->ever,
&pReader->pTsdbReader);
if (code) goto _err;
}
code = tsdbSnapRead(pReader->pTsdbReader, ppData);
if (code) {
goto _err;
} else {
if (*ppData) {
goto _exit;
} else {
pReader->tsdbDone = 1;
code = tsdbSnapReaderClose(&pReader->pTsdbReader);
if (code) goto _err;
}
}
}
}
// QTaskInfoFile
// TODO ...
}
#endif
*ppData = NULL;
*nData = 0;

View File

@ -557,6 +557,7 @@ static int32_t vnodeProcessDropStbReq(SVnode *pVnode, int64_t version, void *pRe
SVDropStbReq req = {0};
int32_t rcode = TSDB_CODE_SUCCESS;
SDecoder decoder = {0};
SArray *tbUidList = NULL;
pRsp->msgType = TDMT_VND_CREATE_STB_RSP;
pRsp->pCont = NULL;
@ -570,7 +571,14 @@ static int32_t vnodeProcessDropStbReq(SVnode *pVnode, int64_t version, void *pRe
}
// process request
if (metaDropSTable(pVnode->pMeta, version, &req) < 0) {
tbUidList = taosArrayInit(8, sizeof(int64_t));
if (tbUidList == NULL) goto _exit;
if (metaDropSTable(pVnode->pMeta, version, &req, tbUidList) < 0) {
rcode = terrno;
goto _exit;
}
if (tqUpdateTbUidList(pVnode->pTq, tbUidList, false) < 0) {
rcode = terrno;
goto _exit;
}
@ -582,6 +590,7 @@ static int32_t vnodeProcessDropStbReq(SVnode *pVnode, int64_t version, void *pRe
// return rsp
_exit:
if (tbUidList) taosArrayDestroy(tbUidList);
pRsp->code = rcode;
tDecoderClear(&decoder);
return 0;

View File

@ -159,27 +159,28 @@ typedef struct {
int64_t recoverEndVer;
} SStreamTaskInfo;
typedef struct {
char* tablename;
char* dbname;
int32_t tversion;
SSchemaWrapper* sw;
} SSchemaInfo;
typedef struct SExecTaskInfo {
STaskIdInfo id;
uint32_t status;
STimeWindow window;
STaskCostInfo cost;
int64_t owner; // if it is in execution
int32_t code;
STaskIdInfo id;
uint32_t status;
STimeWindow window;
STaskCostInfo cost;
int64_t owner; // if it is in execution
int32_t code;
SStreamTaskInfo streamInfo;
struct {
char* tablename;
char* dbname;
int32_t tversion;
SSchemaWrapper* sw;
} schemaVer;
STableListInfo tableqinfoList; // this is a table list
const char* sql; // query sql string
jmp_buf env; // jump to this position when error happens.
EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
SStreamTaskInfo streamInfo;
SSchemaInfo schemaInfo;
STableListInfo tableqinfoList; // this is a table list
const char* sql; // query sql string
jmp_buf env; // jump to this position when error happens.
EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
SSubplan* pSubplan;
struct SOperatorInfo* pRoot;
} SExecTaskInfo;
@ -248,13 +249,13 @@ typedef struct SLoadRemoteDataInfo {
} SLoadRemoteDataInfo;
typedef struct SLimitInfo {
SLimit limit;
SLimit slimit;
uint64_t currentGroupId;
int64_t remainGroupOffset;
int64_t numOfOutputGroups;
int64_t remainOffset;
int64_t numOfOutputRows;
SLimit limit;
SLimit slimit;
uint64_t currentGroupId;
int64_t remainGroupOffset;
int64_t numOfOutputGroups;
int64_t remainOffset;
int64_t numOfOutputRows;
} SLimitInfo;
typedef struct SExchangeInfo {

View File

@ -227,14 +227,14 @@ int32_t extractTargetSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInf
for (int32_t i = 0; i < numOfCols; ++i) {
SColMatchInfo* pColMatch = taosArrayGet(pColMatchInfo, i);
for (int32_t j = 0; j < pTaskInfo->schemaVer.sw->nCols; ++j) {
if (pColMatch->colId == pTaskInfo->schemaVer.sw->pSchema[j].colId &&
for (int32_t j = 0; j < pTaskInfo->schemaInfo.sw->nCols; ++j) {
if (pColMatch->colId == pTaskInfo->schemaInfo.sw->pSchema[j].colId &&
pColMatch->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
(*pSlotIds)[pColMatch->targetSlotId] = -1;
break;
}
if (pColMatch->colId == pTaskInfo->schemaVer.sw->pSchema[j].colId) {
if (pColMatch->colId == pTaskInfo->schemaInfo.sw->pSchema[j].colId) {
(*pSlotIds)[pColMatch->targetSlotId] = j;
break;
}

View File

@ -67,15 +67,15 @@ static bool needCompress(const SSDataBlock* pData, int32_t numOfCols) {
return false;
}
// clang-format off
// data format:
// +----------------+--------------+----------+--------------------------------------------+--------------------------------------+-------------+-----------+-------------+-----------+
// |SDataCacheEntry | total length | group id | col1_schema | col2_schema | col3_schema ...| column#1 length, column#2
// length ... | col1 bitmap | col1 data | col2 bitmap | col2 data | .... | | (4 bytes) |(8 bytes)
// |(sizeof(int16_t)+sizeof(int32_t))*numOfCols | sizeof(int32_t) * numOfCols | actual size | |
// actual size | |
// +----------------+--------------+----------+--------------------------------------------+--------------------------------------+-------------+-----------+-------------+-----------+
// +----------------+--------------+-----------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
// |SDataCacheEntry | total length | group id | col1_schema | col2_schema | col3_schema... | column#1 length, column#2 length...| col1 bitmap | col1 data | col2 bitmap | col2 data | .... | | (4 bytes) |(8 bytes)
// | |sizeof(int32) |sizeof(uint64_t) |(sizeof(int16_t)+sizeof(int32_t))*numOfCols | sizeof(int32_t) * numOfCols | actual size | |
// +----------------+--------------+-----------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+
// The length of bitmap is decided by number of rows of this data block, and the length of each column data is
// recorded in the first segment, next to the struct header
// clang-format on
static void toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* pInput, SDataDispatchBuf* pBuf) {
int32_t numOfCols = 0;
SNode* pNode;

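As a quick cross-check of the reflowed layout comment above, the fixed per-block header it implies would be sized as follows (a sketch; the function name is illustrative, and the leading SDataCacheEntry struct itself is not counted):

// Sketch: header bytes preceding the bitmap/data segments, per the layout comment.
static size_t dataCacheHeaderLen(int32_t numOfCols) {
  return sizeof(int32_t)                                  // total length (4 bytes)
       + sizeof(uint64_t)                                 // group id (8 bytes)
       + (sizeof(int16_t) + sizeof(int32_t)) * numOfCols  // per-column schemas
       + sizeof(int32_t) * numOfCols;                     // per-column data lengths
}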
View File

@ -842,30 +842,10 @@ static void getInitialStartTimeWindow(SInterval* pInterval, TSKEY ts, STimeWindo
}
static STimeWindow doCalculateTimeWindow(int64_t ts, SInterval* pInterval) {
STimeWindow w = {0};
if (pInterval->intervalUnit == 'n' || pInterval->intervalUnit == 'y') {
w.skey = taosTimeTruncate(ts, pInterval, pInterval->precision);
w.ekey = taosTimeAdd(w.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
} else {
int64_t st = w.skey;
if (pInterval->offset > 0) {
st = taosTimeAdd(st, pInterval->offset, pInterval->offsetUnit, pInterval->precision);
}
if (st > ts) {
st -= ((st - ts + pInterval->sliding - 1) / pInterval->sliding) * pInterval->sliding;
}
int64_t et = st + pInterval->interval - 1;
if (et < ts) {
st += ((ts - et + pInterval->sliding - 1) / pInterval->sliding) * pInterval->sliding;
}
w.skey = st;
w.ekey = taosTimeAdd(w.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
}
STimeWindow w = {0};
w.skey = taosTimeTruncate(ts, pInterval, pInterval->precision);
w.ekey = taosTimeAdd(w.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
return w;
}

View File

@ -42,17 +42,32 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
// TODO: if a block was set but not consumed,
// prevent setting a different type of block
pInfo->blockType = type;
pInfo->validBlockIndex = 0;
taosArrayClear(pInfo->pBlockLists);
if (type == STREAM_INPUT__DATA_SUBMIT) {
if (tqReaderSetDataMsg(pInfo->tqReader, input, 0) < 0) {
qError("submit msg messed up when initing stream block, %s" PRIx64, id);
return TSDB_CODE_QRY_APP_ERROR;
if (type == STREAM_INPUT__MERGED_SUBMIT) {
ASSERT(numOfBlocks > 1);
for (int32_t i = 0; i < numOfBlocks; i++) {
SSubmitReq* pReq = *(void**)POINTER_SHIFT(input, i * sizeof(void*));
taosArrayPush(pInfo->pBlockLists, &pReq);
}
pInfo->blockType = STREAM_INPUT__DATA_SUBMIT;
} else if (type == STREAM_INPUT__DATA_SUBMIT) {
/*if (tqReaderSetDataMsg(pInfo->tqReader, input, 0) < 0) {*/
/*qError("submit msg messed up when initing stream block, %s" PRIx64, id);*/
/*return TSDB_CODE_QRY_APP_ERROR;*/
/*}*/
ASSERT(numOfBlocks == 1);
/*if (numOfBlocks == 1) {*/
taosArrayPush(pInfo->pBlockLists, &input);
pInfo->blockType = STREAM_INPUT__DATA_SUBMIT;
/*} else {*/
/*}*/
} else if (type == STREAM_INPUT__DATA_BLOCK) {
for (int32_t i = 0; i < numOfBlocks; ++i) {
SSDataBlock* pDataBlock = &((SSDataBlock*)input)[i];
// TODO optimize
SSDataBlock* p = createOneDataBlock(pDataBlock, false);
p->info = pDataBlock->info;
@ -60,6 +75,7 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
taosArrayAddAll(p->pDataBlock, pDataBlock->pDataBlock);
taosArrayPush(pInfo->pBlockLists, &p);
}
pInfo->blockType = STREAM_INPUT__DATA_BLOCK;
} else {
ASSERT(0);
}
@ -104,27 +120,40 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO
return code;
}
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers) {
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols) {
if (msg == NULL) {
// TODO create raw scan
return NULL;
}
struct SSubplan* plan = NULL;
int32_t code = qStringToSubplan(msg, &plan);
struct SSubplan* pPlan = NULL;
int32_t code = qStringToSubplan(msg, &pPlan);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
return NULL;
}
qTaskInfo_t pTaskInfo = NULL;
code = qCreateExecTask(readers, 0, 0, plan, &pTaskInfo, NULL, NULL, OPTR_EXEC_MODEL_QUEUE);
code = qCreateExecTask(readers, 0, 0, pPlan, &pTaskInfo, NULL, NULL, OPTR_EXEC_MODEL_QUEUE);
if (code != TSDB_CODE_SUCCESS) {
// TODO: destroy SSubplan & pTaskInfo
nodesDestroyNode((SNode*)pPlan);
qDestroyTask(pTaskInfo);
terrno = code;
return NULL;
}
// extract the number of output columns
SDataBlockDescNode* pDescNode = pPlan->pNode->pOutputDataBlockDesc;
*numOfCols = 0;
SNode* pNode;
FOREACH(pNode, pDescNode->pSlots) {
SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode;
if (pSlotDesc->output) {
++(*numOfCols);
}
}
return pTaskInfo;
}
@ -135,17 +164,18 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers) {
/*qDebugL("stream task string %s", (const char*)msg);*/
struct SSubplan* plan = NULL;
int32_t code = qStringToSubplan(msg, &plan);
struct SSubplan* pPlan = NULL;
int32_t code = qStringToSubplan(msg, &pPlan);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
return NULL;
}
qTaskInfo_t pTaskInfo = NULL;
code = qCreateExecTask(readers, 0, 0, plan, &pTaskInfo, NULL, NULL, OPTR_EXEC_MODEL_STREAM);
code = qCreateExecTask(readers, 0, 0, pPlan, &pTaskInfo, NULL, NULL, OPTR_EXEC_MODEL_STREAM);
if (code != TSDB_CODE_SUCCESS) {
// TODO: destroy SSubplan & pTaskInfo
nodesDestroyNode((SNode*)pPlan);
qDestroyTask(pTaskInfo);
terrno = code;
return NULL;
}
@ -153,7 +183,8 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers) {
return pTaskInfo;
}
static SArray* filterQualifiedChildTables(const SStreamScanInfo* pScanInfo, const SArray* tableIdList, const char* idstr) {
static SArray* filterQualifiedChildTables(const SStreamScanInfo* pScanInfo, const SArray* tableIdList,
const char* idstr) {
SArray* qa = taosArrayInit(4, sizeof(tb_uid_t));
// let's discard the tables that were not created from the queried super table.
@ -227,19 +258,19 @@ int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* table
ASSERT(tinfo != NULL && dbName != NULL && tableName != NULL);
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
if (pTaskInfo->schemaVer.sw == NULL) {
if (pTaskInfo->schemaInfo.sw == NULL) {
return TSDB_CODE_SUCCESS;
}
*sversion = pTaskInfo->schemaVer.sw->version;
*tversion = pTaskInfo->schemaVer.tversion;
if (pTaskInfo->schemaVer.dbname) {
strcpy(dbName, pTaskInfo->schemaVer.dbname);
*sversion = pTaskInfo->schemaInfo.sw->version;
*tversion = pTaskInfo->schemaInfo.tversion;
if (pTaskInfo->schemaInfo.dbname) {
strcpy(dbName, pTaskInfo->schemaInfo.dbname);
} else {
dbName[0] = 0;
}
if (pTaskInfo->schemaVer.tablename) {
strcpy(tableName, pTaskInfo->schemaVer.tablename);
if (pTaskInfo->schemaInfo.tablename) {
strcpy(tableName, pTaskInfo->schemaInfo.tablename);
} else {
tableName[0] = 0;
}

View File

@ -39,6 +39,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
taosThreadOnce(&initPoolOnce, initRefPool);
atexit(cleanupRefPool);
int32_t code = createExecTaskInfoImpl(pSubplan, pTask, readHandle, taskId, sql, model);
if (code != TSDB_CODE_SUCCESS) {
goto _error;

View File

@ -4123,7 +4123,7 @@ static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPT
SExecTaskInfo* pTaskInfo = taosMemoryCalloc(1, sizeof(SExecTaskInfo));
setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
pTaskInfo->schemaVer.dbname = strdup(dbFName);
pTaskInfo->schemaInfo.dbname = strdup(dbFName);
pTaskInfo->cost.created = taosGetTimestampMs();
pTaskInfo->id.queryId = queryId;
pTaskInfo->execModel = model;
@ -4149,35 +4149,35 @@ int32_t extractTableSchemaInfo(SReadHandle* pHandle, uint64_t uid, SExecTaskInfo
return terrno;
}
pTaskInfo->schemaVer.tablename = strdup(mr.me.name);
pTaskInfo->schemaInfo.tablename = strdup(mr.me.name);
if (mr.me.type == TSDB_SUPER_TABLE) {
pTaskInfo->schemaVer.sw = tCloneSSchemaWrapper(&mr.me.stbEntry.schemaRow);
pTaskInfo->schemaVer.tversion = mr.me.stbEntry.schemaTag.version;
pTaskInfo->schemaInfo.sw = tCloneSSchemaWrapper(&mr.me.stbEntry.schemaRow);
pTaskInfo->schemaInfo.tversion = mr.me.stbEntry.schemaTag.version;
} else if (mr.me.type == TSDB_CHILD_TABLE) {
tDecoderClear(&mr.coder);
tb_uid_t suid = mr.me.ctbEntry.suid;
metaGetTableEntryByUid(&mr, suid);
pTaskInfo->schemaVer.sw = tCloneSSchemaWrapper(&mr.me.stbEntry.schemaRow);
pTaskInfo->schemaVer.tversion = mr.me.stbEntry.schemaTag.version;
pTaskInfo->schemaInfo.sw = tCloneSSchemaWrapper(&mr.me.stbEntry.schemaRow);
pTaskInfo->schemaInfo.tversion = mr.me.stbEntry.schemaTag.version;
} else {
pTaskInfo->schemaVer.sw = tCloneSSchemaWrapper(&mr.me.ntbEntry.schemaRow);
pTaskInfo->schemaInfo.sw = tCloneSSchemaWrapper(&mr.me.ntbEntry.schemaRow);
}
metaReaderClear(&mr);
return TSDB_CODE_SUCCESS;
}
static void cleanupTableSchemaInfo(SExecTaskInfo* pTaskInfo) {
taosMemoryFreeClear(pTaskInfo->schemaVer.dbname);
if (pTaskInfo->schemaVer.sw == NULL) {
static void cleanupTableSchemaInfo(SSchemaInfo* pSchemaInfo) {
taosMemoryFreeClear(pSchemaInfo->dbname);
if (pSchemaInfo->sw == NULL) {
return;
}
taosMemoryFree(pTaskInfo->schemaVer.sw->pSchema);
taosMemoryFree(pTaskInfo->schemaVer.sw);
taosMemoryFree(pTaskInfo->schemaVer.tablename);
taosMemoryFree(pSchemaInfo->tablename);
taosMemoryFree(pSchemaInfo->sw->pSchema);
taosMemoryFree(pSchemaInfo->sw);
}
static int32_t sortTableGroup(STableListInfo* pTableListInfo, int32_t groupNum) {
@ -4935,6 +4935,7 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead
}
(*pTaskInfo)->sql = sql;
(*pTaskInfo)->pSubplan = pPlan;
(*pTaskInfo)->pRoot = createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, &(*pTaskInfo)->tableqinfoList, pPlan->pTagCond, pPlan->pTagIndexCond, pPlan->user);
if (NULL == (*pTaskInfo)->pRoot) {
@ -4973,7 +4974,9 @@ void doDestroyTask(SExecTaskInfo* pTaskInfo) {
doDestroyTableList(&pTaskInfo->tableqinfoList);
destroyOperatorInfo(pTaskInfo->pRoot);
cleanupTableSchemaInfo(pTaskInfo);
cleanupTableSchemaInfo(&pTaskInfo->schemaInfo);
nodesDestroyNode((SNode*)pTaskInfo->pSubplan);
taosMemoryFreeClear(pTaskInfo->sql);
taosMemoryFreeClear(pTaskInfo->id.str);

View File

@ -1392,24 +1392,49 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
}
SDataBlockInfo* pBlockInfo = &pInfo->pRes->info;
blockDataCleanup(pInfo->pRes);
while (tqNextDataBlock(pInfo->tqReader)) {
SSDataBlock block = {0};
int32_t totBlockNum = taosArrayGetSize(pInfo->pBlockLists);
// todo refactor
int32_t code = tqRetrieveDataBlock(&block, pInfo->tqReader);
while (1) {
if (pInfo->tqReader->pMsg == NULL) {
if (pInfo->validBlockIndex >= totBlockNum) {
return NULL;
}
if (code != TSDB_CODE_SUCCESS || block.info.rows == 0) {
pTaskInfo->code = code;
return NULL;
int32_t current = pInfo->validBlockIndex++;
SSubmitReq* pSubmit = taosArrayGetP(pInfo->pBlockLists, current);
if (tqReaderSetDataMsg(pInfo->tqReader, pSubmit, 0) < 0) {
qError("submit msg messed up when initing stream submit block %p, current %d, total %d", pSubmit, current,
totBlockNum);
pInfo->tqReader->pMsg = NULL;
continue;
}
}
setBlockIntoRes(pInfo, &block);
blockDataCleanup(pInfo->pRes);
while (tqNextDataBlock(pInfo->tqReader)) {
SSDataBlock block = {0};
int32_t code = tqRetrieveDataBlock(&block, pInfo->tqReader);
if (code != TSDB_CODE_SUCCESS || block.info.rows == 0) {
continue;
}
setBlockIntoRes(pInfo, &block);
if (pBlockInfo->rows > 0) {
break;
}
}
if (pBlockInfo->rows > 0) {
break;
} else {
pInfo->tqReader->pMsg = NULL;
continue;
}
/*blockDataCleanup(pInfo->pRes);*/
}
// record the scan action.
@ -2557,30 +2582,30 @@ typedef struct STableMergeScanInfo {
SArray* pSortInfo;
SSortHandle* pSortHandle;
SSDataBlock* pSortInputBlock;
int64_t startTs; // sort start time
SArray* sortSourceParams;
SSDataBlock* pSortInputBlock;
int64_t startTs; // sort start time
SArray* sortSourceParams;
SFileBlockLoadRecorder readRecorder;
int64_t numOfRows;
SScanInfo scanInfo;
int32_t scanTimes;
SNode* pFilterNode; // filter info, which is push down by optimizer
SqlFunctionCtx* pCtx; // which belongs to the direct upstream operator's query context
SResultRowInfo* pResultRowInfo;
int32_t* rowEntryInfoOffset;
SExprInfo* pExpr;
SSDataBlock* pResBlock;
SArray* pColMatchInfo;
int32_t numOfOutput;
int64_t numOfRows;
SScanInfo scanInfo;
int32_t scanTimes;
SNode* pFilterNode; // filter info, which is push down by optimizer
SqlFunctionCtx* pCtx; // which belongs to the direct upstream operator's query context
SResultRowInfo* pResultRowInfo;
int32_t* rowEntryInfoOffset;
SExprInfo* pExpr;
SSDataBlock* pResBlock;
SArray* pColMatchInfo;
int32_t numOfOutput;
SExprInfo* pPseudoExpr;
int32_t numOfPseudoExpr;
SqlFunctionCtx* pPseudoCtx;
SQueryTableDataCond cond;
int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
int32_t dataBlockLoadFlag;
int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
int32_t dataBlockLoadFlag;
// if the upstream is an interval operator, the interval info is also kept here to get the time
// window to check if current data block needs to be loaded.
SInterval interval;
@ -2588,7 +2613,8 @@ typedef struct STableMergeScanInfo {
} STableMergeScanInfo;
int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags, bool groupSort, SReadHandle* pHandle,
STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, const char* idStr) {
STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond,
const char* idStr) {
int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;

View File

@ -544,8 +544,9 @@ bool taosFillHasMoreResults(SFillInfo* pFillInfo) {
return true;
}
if (pFillInfo->numOfTotal > 0 && (((pFillInfo->end > pFillInfo->start) && FILL_IS_ASC_FILL(pFillInfo)) ||
(pFillInfo->end < pFillInfo->start && !FILL_IS_ASC_FILL(pFillInfo)))) {
bool ascFill = FILL_IS_ASC_FILL(pFillInfo);
if (pFillInfo->numOfTotal > 0 &&
(((pFillInfo->end > pFillInfo->start) && ascFill) || (pFillInfo->end < pFillInfo->start && !ascFill))) {
return getNumOfResultsAfterFillGap(pFillInfo, pFillInfo->end, 4096) > 0;
}

View File

@ -1543,6 +1543,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) {
taosArrayDestroy(pInfo->pPullWins);
blockDataDestroy(pInfo->pPullDataRes);
taosArrayDestroy(pInfo->pRecycledPages);
blockDataDestroy(pInfo->pUpdateRes);
if (pInfo->pChildren) {
int32_t size = taosArrayGetSize(pInfo->pChildren);

View File

@ -192,9 +192,9 @@ static bool validateTimezoneFormat(const SValueNode* pVal) {
}
void static addTimezoneParam(SNodeList* pList) {
char buf[6] = {0};
time_t t = taosTime(NULL);
struct tm tmInfo;
char buf[6] = {0};
time_t t = taosTime(NULL);
struct tm tmInfo;
taosLocalTime(&t, &tmInfo);
strftime(buf, sizeof(buf), "%z", &tmInfo);
int32_t len = (int32_t)strlen(buf);
@ -976,7 +976,7 @@ static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t l
// param1 ~ param3
if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY ||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BINARY ||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) {
!IS_INTEGER_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@ -1034,7 +1034,7 @@ static int32_t translateHistogramImpl(SFunctionNode* pFunc, char* pErrBuf, int32
// param1 ~ param3
if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY ||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BINARY ||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) {
!IS_INTEGER_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@ -1133,9 +1133,10 @@ static bool validateStateOper(const SValueNode* pVal) {
if (TSDB_DATA_TYPE_BINARY != pVal->node.resType.type) {
return false;
}
return (0 == strncasecmp(varDataVal(pVal->datum.p), "GT", 2) || 0 == strncasecmp(varDataVal(pVal->datum.p), "GE", 2) ||
0 == strncasecmp(varDataVal(pVal->datum.p), "LT", 2) || 0 == strncasecmp(varDataVal(pVal->datum.p), "LE", 2) ||
0 == strncasecmp(varDataVal(pVal->datum.p), "EQ", 2) || 0 == strncasecmp(varDataVal(pVal->datum.p), "NE", 2));
return (
0 == strncasecmp(varDataVal(pVal->datum.p), "GT", 2) || 0 == strncasecmp(varDataVal(pVal->datum.p), "GE", 2) ||
0 == strncasecmp(varDataVal(pVal->datum.p), "LT", 2) || 0 == strncasecmp(varDataVal(pVal->datum.p), "LE", 2) ||
0 == strncasecmp(varDataVal(pVal->datum.p), "EQ", 2) || 0 == strncasecmp(varDataVal(pVal->datum.p), "NE", 2));
}
static int32_t translateStateCount(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
@ -1348,7 +1349,8 @@ static int32_t translateTail(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
SValueNode* pValue = (SValueNode*)pParamNode;
if (pValue->datum.i < ((i > 1) ? 0 : 1) || pValue->datum.i > 100) {
if ((IS_SIGNED_NUMERIC_TYPE(pValue->node.resType.type) ? pValue->datum.i : pValue->datum.u) < ((i > 1) ? 0 : 1) ||
(IS_SIGNED_NUMERIC_TYPE(pValue->node.resType.type) ? pValue->datum.i : pValue->datum.u) > 100) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
"TAIL function second parameter should be in range [1, 100], "
"third parameter should be in range [0, 100]");
@ -1423,6 +1425,17 @@ static int32_t translateIrate(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
}
static int32_t translateFirstLast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
// forbid null as first/last input, since first(c0, null, 1) may have a different number of inputs
int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
for (int32_t i = 0; i < numOfParams; ++i) {
uint8_t nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, i));
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type;
if (IS_NULL_TYPE(paraType) && QUERY_NODE_VALUE == nodeType) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
}
pFunc->node.resType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType;
return TSDB_CODE_SUCCESS;
}
@ -1433,6 +1446,15 @@ static int32_t translateFirstLastImpl(SFunctionNode* pFunc, char* pErrBuf, int32
uint8_t paraType = ((SExprNode*)pPara)->resType.type;
int32_t paraBytes = ((SExprNode*)pPara)->resType.bytes;
if (isPartial) {
int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
for (int32_t i = 0; i < numOfParams; ++i) {
uint8_t nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, i));
uint8_t pType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type;
if (IS_NULL_TYPE(pType) && QUERY_NODE_VALUE == nodeType) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
}
pFunc->node.resType =
(SDataType){.bytes = getFirstLastInfoSize(paraBytes) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY};
} else {
@ -1646,9 +1668,9 @@ static int32_t translateCast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (IS_VAR_DATA_TYPE(para2Type)) {
para2Bytes -= VARSTR_HEADER_SIZE;
}
if (para2Bytes <= 0 || para2Bytes > 1000) { // cast dst var type length limits to 1000
if (para2Bytes <= 0 || para2Bytes > 4096) { // cast dst var type length limits to 4096 bytes
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
"CAST function converted length should be in range [0, 1000]");
"CAST function converted length should be in range [0, 4096] bytes");
}
// add database precision as param

View File

@ -80,7 +80,7 @@ typedef struct STopBotRes {
} STopBotRes;
typedef struct SFirstLastRes {
bool hasResult;
bool hasResult;
// used for last_row function only, isNullRes in SResultRowEntry cannot be passed to downstream. So,
// this attribute is required
bool isNull;
@ -2380,7 +2380,8 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) {
int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SVariant* pVal = &pCtx->param[1].param;
double v = (pVal->nType == TSDB_DATA_TYPE_BIGINT) ? pVal->i : pVal->d;
double v =
(IS_SIGNED_NUMERIC_TYPE(pVal->nType) ? pVal->i : (IS_UNSIGNED_NUMERIC_TYPE(pVal->nType) ? pVal->u : pVal->d));
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SPercentileInfo* ppInfo = (SPercentileInfo*)GET_ROWCELL_INTERBUF(pResInfo);
@ -2439,7 +2440,8 @@ bool apercentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResult
SAPercentileInfo* pInfo = GET_ROWCELL_INTERBUF(pResultInfo);
SVariant* pVal = &pCtx->param[1].param;
pInfo->percent = (pVal->nType == TSDB_DATA_TYPE_BIGINT) ? pVal->i : pVal->d;
pInfo->percent =
(IS_SIGNED_NUMERIC_TYPE(pVal->nType) ? pVal->i : (IS_UNSIGNED_NUMERIC_TYPE(pVal->nType) ? pVal->u : pVal->d));
if (pCtx->numOfParams == 2) {
pInfo->algo = APERCT_ALGO_DEFAULT;
@ -2640,9 +2642,7 @@ int32_t apercentileCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx)
return TSDB_CODE_SUCCESS;
}
int32_t getFirstLastInfoSize(int32_t resBytes) {
return sizeof(SFirstLastRes) + resBytes;
}
int32_t getFirstLastInfoSize(int32_t resBytes) { return sizeof(SFirstLastRes) + resBytes; }
bool getFirstLastFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
SColumnNode* pNode = (SColumnNode*)nodesListGetNode(pFunc->pParameterList, 0);
@ -2968,7 +2968,7 @@ int32_t firstLastPartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
return 1;
}
//todo rewrite:
// todo rewrite:
int32_t lastCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
char* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
@ -2986,7 +2986,7 @@ int32_t lastCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
return TSDB_CODE_SUCCESS;
}
static void doSaveLastrow(SqlFunctionCtx *pCtx, char* pData, int32_t rowIndex, int64_t cts, SFirstLastRes* pInfo) {
static void doSaveLastrow(SqlFunctionCtx* pCtx, char* pData, int32_t rowIndex, int64_t cts, SFirstLastRes* pInfo) {
SInputColumnInfoData* pInput = &pCtx->input;
SColumnInfoData* pInputCol = pInput->pData[0];

View File

@ -159,6 +159,8 @@ const char* nodesNodeName(ENodeType type) {
return "ShowStreamsStmt";
case QUERY_NODE_SHOW_TABLES_STMT:
return "ShowTablesStmt";
case QUERY_NODE_SHOW_TAGS_STMT:
return "ShowTagsStmt";
case QUERY_NODE_SHOW_USERS_STMT:
return "ShowUsersStmt";
case QUERY_NODE_SHOW_LICENCE_STMT:
@ -4295,6 +4297,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
return alterDnodeStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_DATABASES_STMT:
case QUERY_NODE_SHOW_TABLES_STMT:
case QUERY_NODE_SHOW_TAGS_STMT:
break;
case QUERY_NODE_CREATE_TOPIC_STMT:
return createTopicStmtToJson(pObj, pJson);

View File

@ -216,6 +216,7 @@ SNode* nodesMakeNode(ENodeType type) {
case QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT:
case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
case QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT:
case QUERY_NODE_SHOW_TAGS_STMT:
return makeNode(type, sizeof(SShowStmt));
case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT:
return makeNode(type, sizeof(SShowDnodeVariablesStmt));
@ -678,7 +679,8 @@ void nodesDestroyNode(SNode* pNode) {
case QUERY_NODE_SHOW_VARIABLES_STMT:
case QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT:
case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
case QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT: {
case QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT:
case QUERY_NODE_SHOW_TAGS_STMT: {
SShowStmt* pStmt = (SShowStmt*)pNode;
nodesDestroyNode(pStmt->pDbName);
nodesDestroyNode(pStmt->pTbName);

View File

@ -82,7 +82,6 @@ alter_account_option ::= STATE literal.
/************************************************ create/alter/drop user **********************************************/
cmd ::= CREATE USER user_name(A) PASS NK_STRING(B) sysinfo_opt(C). { pCxt->pRootNode = createCreateUserStmt(pCxt, &A, &B, C); }
cmd ::= ALTER USER user_name(A) PASS NK_STRING(B). { pCxt->pRootNode = createAlterUserStmt(pCxt, &A, TSDB_ALTER_USER_PASSWD, &B); }
//cmd ::= ALTER USER user_name(A) PRIVILEGE NK_STRING(B). { pCxt->pRootNode = createAlterUserStmt(pCxt, &A, TSDB_ALTER_USER_PRIVILEGES, &B); }
cmd ::= ALTER USER user_name(A) ENABLE NK_INTEGER(B). { pCxt->pRootNode = createAlterUserStmt(pCxt, &A, TSDB_ALTER_USER_ENABLE, &B); }
cmd ::= ALTER USER user_name(A) SYSINFO NK_INTEGER(B). { pCxt->pRootNode = createAlterUserStmt(pCxt, &A, TSDB_ALTER_USER_SYSINFO, &B); }
cmd ::= DROP USER user_name(A). { pCxt->pRootNode = createDropUserStmt(pCxt, &A); }
@ -198,15 +197,15 @@ alter_db_options(A) ::= alter_db_options(B) alter_db_option(C).
%type alter_db_option { SAlterOption }
%destructor alter_db_option { }
alter_db_option(A) ::= BUFFER NK_INTEGER(B). { A.type = DB_OPTION_BUFFER; A.val = B; }
//alter_db_option(A) ::= BUFFER NK_INTEGER(B). { A.type = DB_OPTION_BUFFER; A.val = B; }
alter_db_option(A) ::= CACHEMODEL NK_STRING(B). { A.type = DB_OPTION_CACHEMODEL; A.val = B; }
alter_db_option(A) ::= CACHESIZE NK_INTEGER(B). { A.type = DB_OPTION_CACHESIZE; A.val = B; }
alter_db_option(A) ::= FSYNC NK_INTEGER(B). { A.type = DB_OPTION_FSYNC; A.val = B; }
alter_db_option(A) ::= KEEP integer_list(B). { A.type = DB_OPTION_KEEP; A.pList = B; }
alter_db_option(A) ::= KEEP variable_list(B). { A.type = DB_OPTION_KEEP; A.pList = B; }
alter_db_option(A) ::= PAGES NK_INTEGER(B). { A.type = DB_OPTION_PAGES; A.val = B; }
alter_db_option(A) ::= REPLICA NK_INTEGER(B). { A.type = DB_OPTION_REPLICA; A.val = B; }
alter_db_option(A) ::= STRICT NK_INTEGER(B). { A.type = DB_OPTION_STRICT; A.val = B; }
//alter_db_option(A) ::= PAGES NK_INTEGER(B). { A.type = DB_OPTION_PAGES; A.val = B; }
//alter_db_option(A) ::= REPLICA NK_INTEGER(B). { A.type = DB_OPTION_REPLICA; A.val = B; }
//alter_db_option(A) ::= STRICT NK_STRING(B). { A.type = DB_OPTION_STRICT; A.val = B; }
alter_db_option(A) ::= WAL NK_INTEGER(B). { A.type = DB_OPTION_WAL; A.val = B; }
%type integer_list { SNodeList* }
@ -394,6 +393,7 @@ cmd ::= SHOW TRANSACTIONS.
cmd ::= SHOW TABLE DISTRIBUTED full_table_name(A). { pCxt->pRootNode = createShowTableDistributedStmt(pCxt, A); }
cmd ::= SHOW CONSUMERS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CONSUMERS_STMT); }
cmd ::= SHOW SUBSCRIPTIONS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT); }
cmd ::= SHOW TAGS FROM table_name_cond(A) from_db_opt(B). { pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TAGS_STMT, B, A, OP_TYPE_EQUAL); }
db_name_cond_opt(A) ::= . { A = createDefaultDatabaseCondValue(pCxt); }
db_name_cond_opt(A) ::= db_name(B) NK_DOT. { A = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B); }
@ -525,7 +525,7 @@ cmd ::= INSERT INTO full_table_name(A)
cmd ::= INSERT INTO full_table_name(A) query_expression(B). { pCxt->pRootNode = createInsertStmt(pCxt, A, NULL, B); }
/************************************************ literal *************************************************************/
literal(A) ::= NK_INTEGER(B). { A = createRawExprNode(pCxt, &B, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &B)); }
literal(A) ::= NK_INTEGER(B). { A = createRawExprNode(pCxt, &B, createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &B)); }
literal(A) ::= NK_FLOAT(B). { A = createRawExprNode(pCxt, &B, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &B)); }
literal(A) ::= NK_STRING(B). { A = createRawExprNode(pCxt, &B, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B)); }
literal(A) ::= NK_BOOL(B). { A = createRawExprNode(pCxt, &B, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &B)); }
@ -536,8 +536,8 @@ literal(A) ::= NK_QUESTION(B).
duration_literal(A) ::= NK_VARIABLE(B). { A = createRawExprNode(pCxt, &B, createDurationValueNode(pCxt, &B)); }
signed(A) ::= NK_INTEGER(B). { A = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &B); }
signed(A) ::= NK_PLUS NK_INTEGER(B). { A = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &B); }
signed(A) ::= NK_INTEGER(B). { A = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &B); }
signed(A) ::= NK_PLUS NK_INTEGER(B). { A = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &B); }
signed(A) ::= NK_MINUS(B) NK_INTEGER(C). {
SToken t = B;
t.n = (C.z + C.n) - B.z;

View File

@ -393,9 +393,16 @@ SNode* createOperatorNode(SAstCreateContext* pCxt, EOperatorType type, SNode* pL
SValueNode* pVal = (SValueNode*)pLeft;
char* pNewLiteral = taosMemoryCalloc(1, strlen(pVal->literal) + 2);
CHECK_OUT_OF_MEM(pNewLiteral);
sprintf(pNewLiteral, "-%s", pVal->literal);
if ('+' == pVal->literal[0]) {
sprintf(pNewLiteral, "-%s", pVal->literal + 1);
} else if ('-' == pVal->literal[0]) {
sprintf(pNewLiteral, "%s", pVal->literal + 1);
} else {
sprintf(pNewLiteral, "-%s", pVal->literal);
}
taosMemoryFree(pVal->literal);
pVal->literal = pNewLiteral;
pVal->node.resType.type = TSDB_DATA_TYPE_BIGINT;
return pLeft;
}
SOperatorNode* op = (SOperatorNode*)nodesMakeNode(QUERY_NODE_OPERATOR);
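
A self-contained sketch of the sign folding added above, using plain libc allocation in place of taosMemoryCalloc; it reproduces the three cases: "+5" becomes "-5", "-5" becomes "5", and a bare "5" becomes "-5".

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char* negateLiteral(const char* lit) {
  char* out = (char*)calloc(1, strlen(lit) + 2);  /* room for '-' and the terminator */
  if (NULL == out) return NULL;
  if ('+' == lit[0]) {
    sprintf(out, "-%s", lit + 1);  /* "+5" -> "-5" */
  } else if ('-' == lit[0]) {
    sprintf(out, "%s", lit + 1);   /* "-5" -> "5"  */
  } else {
    sprintf(out, "-%s", lit);      /* "5"  -> "-5" */
  }
  return out;
}
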
@ -1343,7 +1350,11 @@ SNode* createAlterDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode, const
CHECK_PARSER_STATUS(pCxt);
SAlterDnodeStmt* pStmt = (SAlterDnodeStmt*)nodesMakeNode(QUERY_NODE_ALTER_DNODE_STMT);
CHECK_OUT_OF_MEM(pStmt);
pStmt->dnodeId = taosStr2Int32(pDnode->z, NULL, 10);
if (NULL != pDnode) {
pStmt->dnodeId = taosStr2Int32(pDnode->z, NULL, 10);
} else {
pStmt->dnodeId = -1;
}
trimString(pConfig->z, pConfig->n, pStmt->config, sizeof(pStmt->config));
if (NULL != pValue) {
trimString(pValue->z, pValue->n, pStmt->value, sizeof(pStmt->value));

View File

@ -372,8 +372,8 @@ static int32_t collectMetaKeyFromShowTables(SCollectMetaKeyCxt* pCxt, SShowStmt*
}
static int32_t collectMetaKeyFromShowTags(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB,
TSDB_INS_TABLE_USER_TAGS, pCxt->pMetaCache);
int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_TAGS,
pCxt->pMetaCache);
if (TSDB_CODE_SUCCESS == code) {
if (NULL != pStmt->pDbName) {
code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache);

View File

@ -826,7 +826,7 @@ static EDealRes translateNormalValue(STranslateContext* pCxt, SValueNode* pVal,
}
case TSDB_DATA_TYPE_UTINYINT: {
code = toUInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.u);
if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_UTINYINT(pVal->datum.i))) {
if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_UTINYINT(pVal->datum.u))) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
}
*(uint8_t*)&pVal->typeData = pVal->datum.u;
@ -834,7 +834,7 @@ static EDealRes translateNormalValue(STranslateContext* pCxt, SValueNode* pVal,
}
case TSDB_DATA_TYPE_USMALLINT: {
code = toUInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.u);
if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_USMALLINT(pVal->datum.i))) {
if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_USMALLINT(pVal->datum.u))) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
}
*(uint16_t*)&pVal->typeData = pVal->datum.u;
@ -842,7 +842,7 @@ static EDealRes translateNormalValue(STranslateContext* pCxt, SValueNode* pVal,
}
case TSDB_DATA_TYPE_UINT: {
code = toUInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.u);
if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_UINT(pVal->datum.i))) {
if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_UINT(pVal->datum.u))) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
}
*(uint32_t*)&pVal->typeData = pVal->datum.u;
@ -850,7 +850,7 @@ static EDealRes translateNormalValue(STranslateContext* pCxt, SValueNode* pVal,
}
case TSDB_DATA_TYPE_UBIGINT: {
code = toUInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.u);
if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_UBIGINT(pVal->datum.i))) {
if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_UBIGINT(pVal->datum.u))) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
}
*(uint64_t*)&pVal->typeData = pVal->datum.u;
@ -858,11 +858,18 @@ static EDealRes translateNormalValue(STranslateContext* pCxt, SValueNode* pVal,
}
case TSDB_DATA_TYPE_FLOAT: {
pVal->datum.d = taosStr2Double(pVal->literal, NULL);
if (strict && !IS_VALID_FLOAT(pVal->datum.d)) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
}
*(float*)&pVal->typeData = pVal->datum.d;
break;
}
case TSDB_DATA_TYPE_DOUBLE: {
pVal->datum.d = taosStr2Double(pVal->literal, NULL);
if (strict && (((pVal->datum.d == HUGE_VAL || pVal->datum.d == -HUGE_VAL) && errno == ERANGE) ||
isinf(pVal->datum.d) || isnan(pVal->datum.d))) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
}
*(double*)&pVal->typeData = pVal->datum.d;
break;
}
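
The new strict checks lean on the C overflow contract: strtod-style parsers return +/-HUGE_VAL and set errno to ERANGE on overflow, and inf/nan literals are rejected explicitly. A minimal sketch using plain strtod in place of taosStr2Double:

#include <errno.h>
#include <math.h>
#include <stdbool.h>
#include <stdlib.h>

static bool parseStrictDouble(const char* s, double* out) {
  errno = 0;
  double d = strtod(s, NULL);
  /* overflow, or an explicit inf/nan literal, is out of range for DOUBLE */
  if (((d == HUGE_VAL || d == -HUGE_VAL) && errno == ERANGE) || isinf(d) || isnan(d)) {
    return false;
  }
  *out = d;
  return true;
}
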
@ -936,6 +943,9 @@ static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SD
pVal->node.resType = targetDt;
pVal->node.resType.scale = pVal->unit;
pVal->translate = true;
if (!strict && TSDB_DATA_TYPE_UBIGINT == pVal->node.resType.type && pVal->datum.u <= INT64_MAX) {
pVal->node.resType.type = TSDB_DATA_TYPE_BIGINT;
}
return res;
}
@ -2955,14 +2965,25 @@ static int32_t buildCreateDbReq(STranslateContext* pCxt, SCreateDatabaseStmt* pS
return buildCreateDbRetentions(pStmt->pOptions->pRetentions, pReq);
}
static int32_t checkRangeOption(STranslateContext* pCxt, const char* pName, int32_t val, int32_t minVal,
static int32_t checkRangeOption(STranslateContext* pCxt, int32_t code, const char* pName, int32_t val, int32_t minVal,
int32_t maxVal) {
if (val >= 0 && (val < minVal || val > maxVal)) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_RANGE_OPTION, pName, val, minVal, maxVal);
return generateSyntaxErrMsgExt(&pCxt->msgBuf, code, "Invalid option %s: %" PRId64 " valid range: [%d, %d]", pName,
(int64_t)val, minVal, maxVal);
}
return TSDB_CODE_SUCCESS;
}
static int32_t checkDbRangeOption(STranslateContext* pCxt, const char* pName, int32_t val, int32_t minVal,
int32_t maxVal) {
return checkRangeOption(pCxt, TSDB_CODE_PAR_INVALID_DB_OPTION, pName, val, minVal, maxVal);
}
static int32_t checkTableRangeOption(STranslateContext* pCxt, const char* pName, int32_t val, int32_t minVal,
int32_t maxVal) {
return checkRangeOption(pCxt, TSDB_CODE_PAR_INVALID_TABLE_OPTION, pName, val, minVal, maxVal);
}
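
These wrappers replace the dedicated per-option error codes that a later hunk removes from the error table: one range checker parameterized by an error code, with thin db/table specializations. A simplified sketch of the pattern, with hypothetical numeric codes and the message plumbing elided:

#include <stdint.h>

#define ERR_DB_OPTION    1  /* hypothetical error codes */
#define ERR_TABLE_OPTION 2

/* negative values mean "option not set" and are skipped, as in the diff */
static int checkRange(int errCode, int64_t val, int64_t minVal, int64_t maxVal) {
  if (val >= 0 && (val < minVal || val > maxVal)) return errCode;
  return 0;
}

static int checkDbRange(int64_t v, int64_t lo, int64_t hi)    { return checkRange(ERR_DB_OPTION, v, lo, hi); }
static int checkTableRange(int64_t v, int64_t lo, int64_t hi) { return checkRange(ERR_TABLE_OPTION, v, lo, hi); }
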
static int32_t checkDbDaysOption(STranslateContext* pCxt, SDatabaseOptions* pOptions) {
if (NULL != pOptions->pDaysPerFile) {
if (DEAL_RES_ERROR == translateValue(pCxt, pOptions->pDaysPerFile)) {
@ -2970,12 +2991,13 @@ static int32_t checkDbDaysOption(STranslateContext* pCxt, SDatabaseOptions* pOpt
}
if (TIME_UNIT_MINUTE != pOptions->pDaysPerFile->unit && TIME_UNIT_HOUR != pOptions->pDaysPerFile->unit &&
TIME_UNIT_DAY != pOptions->pDaysPerFile->unit) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_OPTION_UNIT, "daysPerFile",
pOptions->pDaysPerFile->unit, TIME_UNIT_MINUTE, TIME_UNIT_HOUR, TIME_UNIT_DAY);
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DB_OPTION,
"Invalid option duration unit: %c, only %c, %c, %c allowed",
pOptions->pDaysPerFile->unit, TIME_UNIT_MINUTE, TIME_UNIT_HOUR, TIME_UNIT_DAY);
}
pOptions->daysPerFile = getBigintFromValueNode(pOptions->pDaysPerFile);
}
return checkRangeOption(pCxt, "daysPerFile", pOptions->daysPerFile, TSDB_MIN_DAYS_PER_FILE, TSDB_MAX_DAYS_PER_FILE);
return checkDbRangeOption(pCxt, "daysPerFile", pOptions->daysPerFile, TSDB_MIN_DAYS_PER_FILE, TSDB_MAX_DAYS_PER_FILE);
}
static int32_t checkDbKeepOption(STranslateContext* pCxt, SDatabaseOptions* pOptions) {
@ -2985,7 +3007,7 @@ static int32_t checkDbKeepOption(STranslateContext* pCxt, SDatabaseOptions* pOpt
int32_t numOfKeep = LIST_LENGTH(pOptions->pKeep);
if (numOfKeep > 3 || numOfKeep < 1) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_KEEP_NUM);
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DB_OPTION, "Invalid number of keep options");
}
SNode* pNode = NULL;
@ -2996,7 +3018,8 @@ static int32_t checkDbKeepOption(STranslateContext* pCxt, SDatabaseOptions* pOpt
}
if (pVal->isDuration && TIME_UNIT_MINUTE != pVal->unit && TIME_UNIT_HOUR != pVal->unit &&
TIME_UNIT_DAY != pVal->unit) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_KEEP_UNIT, pVal->unit);
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DB_OPTION,
"Invalid option keep unit: %c, only m, h, d allowed", pVal->unit);
}
if (!pVal->isDuration) {
pVal->datum.i = pVal->datum.i * 1440;
@ -3018,12 +3041,15 @@ static int32_t checkDbKeepOption(STranslateContext* pCxt, SDatabaseOptions* pOpt
if (pOptions->keep[0] < TSDB_MIN_KEEP || pOptions->keep[1] < TSDB_MIN_KEEP || pOptions->keep[2] < TSDB_MIN_KEEP ||
pOptions->keep[0] > TSDB_MAX_KEEP || pOptions->keep[1] > TSDB_MAX_KEEP || pOptions->keep[2] > TSDB_MAX_KEEP) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_KEEP_VALUE, pOptions->keep[0], pOptions->keep[1],
pOptions->keep[2], TSDB_MIN_KEEP, TSDB_MAX_KEEP);
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DB_OPTION,
"Invalid option keep: %" PRId64 ", %" PRId64 ", %" PRId64 " valid range: [%dm, %dm]",
pOptions->keep[0], pOptions->keep[1], pOptions->keep[2], TSDB_MIN_KEEP,
TSDB_MAX_KEEP);
}
if (!((pOptions->keep[0] <= pOptions->keep[1]) && (pOptions->keep[1] <= pOptions->keep[2]))) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_KEEP_ORDER);
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DB_OPTION,
"Invalid keep value, should be keep0 <= keep1 <= keep2");
}
return TSDB_CODE_SUCCESS;
@ -3040,8 +3066,8 @@ static int32_t checkDbCacheModelOption(STranslateContext* pCxt, SDatabaseOptions
} else if (0 == strcasecmp(pOptions->cacheModelStr, TSDB_CACHE_MODEL_BOTH_STR)) {
pOptions->cacheModel = TSDB_CACHE_MODEL_BOTH;
} else {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STR_OPTION, "cacheModel",
pOptions->cacheModelStr);
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DB_OPTION, "Invalid option cacheModel: %s",
pOptions->cacheModelStr);
}
}
return TSDB_CODE_SUCCESS;
@ -3056,7 +3082,8 @@ static int32_t checkDbPrecisionOption(STranslateContext* pCxt, SDatabaseOptions*
} else if (0 == strcasecmp(pOptions->precisionStr, TSDB_TIME_PRECISION_NANO_STR)) {
pOptions->precision = TSDB_TIME_PRECISION_NANO;
} else {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STR_OPTION, "precision", pOptions->precisionStr);
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DB_OPTION, "Invalid option precision: %s",
pOptions->precisionStr);
}
}
return TSDB_CODE_SUCCESS;
@ -3069,7 +3096,8 @@ static int32_t checkDbStrictOption(STranslateContext* pCxt, SDatabaseOptions* pO
} else if (0 == strcasecmp(pOptions->strictStr, TSDB_DB_STRICT_ON_STR)) {
pOptions->strict = TSDB_DB_STRICT_ON;
} else {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STR_OPTION, "strict", pOptions->strictStr);
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DB_OPTION, "Invalid option strict: %s",
pOptions->strictStr);
}
}
return TSDB_CODE_SUCCESS;
@ -3077,7 +3105,8 @@ static int32_t checkDbStrictOption(STranslateContext* pCxt, SDatabaseOptions* pO
static int32_t checkDbEnumOption(STranslateContext* pCxt, const char* pName, int32_t val, int32_t v1, int32_t v2) {
if (val >= 0 && val != v1 && val != v2) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ENUM_OPTION, pName, val, v1, v2);
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DB_OPTION,
"Invalid option %s: %" PRId64 ", only %d, %d allowed", pName, val, v1, v2);
}
return TSDB_CODE_SUCCESS;
}
@ -3088,7 +3117,7 @@ static int32_t checkDbRetentionsOption(STranslateContext* pCxt, SNodeList* pRete
}
if (LIST_LENGTH(pRetentions) > 3) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_RETENTIONS_OPTION);
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DB_OPTION, "Invalid option retentions");
}
SValueNode* pPrevFreq = NULL;
@ -3108,7 +3137,7 @@ static int32_t checkDbRetentionsOption(STranslateContext* pCxt, SNodeList* pRete
if (pFreq->datum.i <= 0 || 'n' == pFreq->unit || 'y' == pFreq->unit || pFreq->datum.i >= pKeep->datum.i ||
(NULL != pPrevFreq && pPrevFreq->datum.i >= pFreq->datum.i) ||
(NULL != pPrevKeep && pPrevKeep->datum.i > pKeep->datum.i)) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_RETENTIONS_OPTION);
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DB_OPTION, "Invalid option retentions");
}
pPrevFreq = pFreq;
pPrevKeep = pKeep;
@ -3139,39 +3168,41 @@ static int32_t checkOptionsDependency(STranslateContext* pCxt, const char* pDbNa
static int32_t checkDatabaseOptions(STranslateContext* pCxt, const char* pDbName, SDatabaseOptions* pOptions) {
int32_t code =
checkRangeOption(pCxt, "buffer", pOptions->buffer, TSDB_MIN_BUFFER_PER_VNODE, TSDB_MAX_BUFFER_PER_VNODE);
checkDbRangeOption(pCxt, "buffer", pOptions->buffer, TSDB_MIN_BUFFER_PER_VNODE, TSDB_MAX_BUFFER_PER_VNODE);
if (TSDB_CODE_SUCCESS == code) {
code = checkDbCacheModelOption(pCxt, pOptions);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkRangeOption(pCxt, "cacheSize", pOptions->cacheLastSize, TSDB_MIN_DB_CACHE_SIZE, TSDB_MAX_DB_CACHE_SIZE);
code =
checkDbRangeOption(pCxt, "cacheSize", pOptions->cacheLastSize, TSDB_MIN_DB_CACHE_SIZE, TSDB_MAX_DB_CACHE_SIZE);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkRangeOption(pCxt, "compression", pOptions->compressionLevel, TSDB_MIN_COMP_LEVEL, TSDB_MAX_COMP_LEVEL);
code =
checkDbRangeOption(pCxt, "compression", pOptions->compressionLevel, TSDB_MIN_COMP_LEVEL, TSDB_MAX_COMP_LEVEL);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkDbDaysOption(pCxt, pOptions);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkRangeOption(pCxt, "fsyncPeriod", pOptions->fsyncPeriod, TSDB_MIN_FSYNC_PERIOD, TSDB_MAX_FSYNC_PERIOD);
code = checkDbRangeOption(pCxt, "fsyncPeriod", pOptions->fsyncPeriod, TSDB_MIN_FSYNC_PERIOD, TSDB_MAX_FSYNC_PERIOD);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkRangeOption(pCxt, "maxRowsPerBlock", pOptions->maxRowsPerBlock, TSDB_MIN_MAXROWS_FBLOCK,
TSDB_MAX_MAXROWS_FBLOCK);
code = checkDbRangeOption(pCxt, "maxRowsPerBlock", pOptions->maxRowsPerBlock, TSDB_MIN_MAXROWS_FBLOCK,
TSDB_MAX_MAXROWS_FBLOCK);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkRangeOption(pCxt, "minRowsPerBlock", pOptions->minRowsPerBlock, TSDB_MIN_MINROWS_FBLOCK,
TSDB_MAX_MINROWS_FBLOCK);
code = checkDbRangeOption(pCxt, "minRowsPerBlock", pOptions->minRowsPerBlock, TSDB_MIN_MINROWS_FBLOCK,
TSDB_MAX_MINROWS_FBLOCK);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkDbKeepOption(pCxt, pOptions);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkRangeOption(pCxt, "pages", pOptions->pages, TSDB_MIN_PAGES_PER_VNODE, TSDB_MAX_PAGES_PER_VNODE);
code = checkDbRangeOption(pCxt, "pages", pOptions->pages, TSDB_MIN_PAGES_PER_VNODE, TSDB_MAX_PAGES_PER_VNODE);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkRangeOption(pCxt, "pagesize", pOptions->pagesize, TSDB_MIN_PAGESIZE_PER_VNODE,
TSDB_MAX_PAGESIZE_PER_VNODE);
code = checkDbRangeOption(pCxt, "pagesize", pOptions->pagesize, TSDB_MIN_PAGESIZE_PER_VNODE,
TSDB_MAX_PAGESIZE_PER_VNODE);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkDbPrecisionOption(pCxt, pOptions);
@ -3186,7 +3217,7 @@ static int32_t checkDatabaseOptions(STranslateContext* pCxt, const char* pDbName
code = checkDbEnumOption(pCxt, "walLevel", pOptions->walLevel, TSDB_MIN_WAL_LEVEL, TSDB_MAX_WAL_LEVEL);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkRangeOption(pCxt, "vgroups", pOptions->numOfVgroups, TSDB_MIN_VNODES_PER_DB, TSDB_MAX_VNODES_PER_DB);
code = checkDbRangeOption(pCxt, "vgroups", pOptions->numOfVgroups, TSDB_MIN_VNODES_PER_DB, TSDB_MAX_VNODES_PER_DB);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkDbEnumOption(pCxt, "singleStable", pOptions->singleStable, TSDB_DB_SINGLE_STABLE_ON,
@ -3257,7 +3288,6 @@ static void buildAlterDbReq(STranslateContext* pCxt, SAlterDatabaseStmt* pStmt,
pReq->buffer = pStmt->pOptions->buffer;
pReq->pageSize = -1;
pReq->pages = pStmt->pOptions->pages;
pReq->cacheLastSize = -1;
pReq->daysPerFile = -1;
pReq->daysToKeep0 = pStmt->pOptions->keep[0];
pReq->daysToKeep1 = pStmt->pOptions->keep[1];
@ -3361,12 +3391,12 @@ static int32_t checkTableRollupOption(STranslateContext* pCxt, SNodeList* pFuncs
"configured with the 'RETENTIONS' option");
}
if (1 != LIST_LENGTH(pFuncs)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROLLUP_OPTION,
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION,
"Invalid option rollup: only one function is allowed");
}
const char* pFunc = ((SFunctionNode*)nodesListGetNode(pFuncs, 0))->functionName;
if (!validRollupFunc(pFunc)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROLLUP_OPTION,
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION,
"Invalid option rollup: %s function is not supported", pFunc);
}
return TSDB_CODE_SUCCESS;
@ -3487,11 +3517,12 @@ static int32_t getTableDelayOrWatermarkOption(STranslateContext* pCxt, const cha
int32_t code = (DEAL_RES_ERROR == translateValue(pCxt, pVal) ? pCxt->errCode : TSDB_CODE_SUCCESS);
if (TSDB_CODE_SUCCESS == code && TIME_UNIT_MILLISECOND != pVal->unit && TIME_UNIT_SECOND != pVal->unit &&
TIME_UNIT_MINUTE != pVal->unit) {
code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_OPTION_UNIT, pName, pVal->unit,
TIME_UNIT_MILLISECOND, TIME_UNIT_SECOND, TIME_UNIT_MINUTE);
code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION,
"Invalid option %s unit: %c, only %c, %c, %c allowed", pName, pVal->unit,
TIME_UNIT_MILLISECOND, TIME_UNIT_SECOND, TIME_UNIT_MINUTE);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkRangeOption(pCxt, pName, pVal->datum.i, minVal, maxVal);
code = checkTableRangeOption(pCxt, pName, pVal->datum.i, minVal, maxVal);
}
if (TSDB_CODE_SUCCESS == code) {
*pMaxDelay = pVal->datum.i;
@ -3517,7 +3548,7 @@ static int32_t checkTableMaxDelayOption(STranslateContext* pCxt, STableOptions*
}
if (LIST_LENGTH(pOptions->pMaxDelay) > 2) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, "maxdelay");
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, "Invalid option maxdelay");
}
int32_t code =
@ -3547,7 +3578,7 @@ static int32_t checkTableWatermarkOption(STranslateContext* pCxt, STableOptions*
}
if (LIST_LENGTH(pOptions->pWatermark) > 2) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, "watermark");
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, "Invalid option watermark");
}
int32_t code =
@ -3604,6 +3635,7 @@ typedef struct SSampleAstInfo {
SNode* pInterval;
SNode* pOffset;
SNode* pSliding;
SNodeList* pPartitionByList;
STableMeta* pRollupTableMeta;
} SSampleAstInfo;
@ -3636,6 +3668,8 @@ static int32_t buildSampleAst(STranslateContext* pCxt, SSampleAstInfo* pInfo, ch
SNode* pProject = NULL;
FOREACH(pProject, pSelect->pProjectionList) { sprintf(((SExprNode*)pProject)->aliasName, "#%p", pProject); }
TSWAP(pSelect->pPartitionByList, pInfo->pPartitionByList);
SIntervalWindowNode* pInterval = (SIntervalWindowNode*)nodesMakeNode(QUERY_NODE_INTERVAL_WINDOW);
if (NULL == pInterval) {
nodesDestroyNode((SNode*)pSelect);
@ -3764,6 +3798,15 @@ static STableMeta* createRollupTableMeta(SCreateTableStmt* pStmt, int8_t precisi
return pMeta;
}
static SNode* createTbnameFunction() {
SFunctionNode* pFunc = (SFunctionNode*)nodesMakeNode(QUERY_NODE_FUNCTION);
if (NULL == pFunc) {
return NULL;
}
strcpy(pFunc->functionName, "tbname");
return (SNode*)pFunc;
}
static int32_t buildSampleAstInfoByTable(STranslateContext* pCxt, SCreateTableStmt* pStmt, SRetention* pRetension,
int8_t precision, SSampleAstInfo* pInfo) {
pInfo->pDbName = pStmt->dbName;
@ -3774,7 +3817,7 @@ static int32_t buildSampleAstInfoByTable(STranslateContext* pCxt, SCreateTableSt
if (NULL == pInfo->pFuncs || NULL == pInfo->pInterval || NULL == pInfo->pRollupTableMeta) {
return TSDB_CODE_OUT_OF_MEMORY;
}
return TSDB_CODE_SUCCESS;
return nodesListMakeStrictAppend(&pInfo->pPartitionByList, createTbnameFunction());
}
static int32_t getRollupAst(STranslateContext* pCxt, SCreateTableStmt* pStmt, SRetention* pRetension, int8_t precision,
@ -5043,6 +5086,7 @@ static const char* getSysDbName(ENodeType type) {
case QUERY_NODE_SHOW_CLUSTER_STMT:
case QUERY_NODE_SHOW_VARIABLES_STMT:
case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT:
case QUERY_NODE_SHOW_TAGS_STMT:
return TSDB_INFORMATION_SCHEMA_DB;
case QUERY_NODE_SHOW_CONNECTIONS_STMT:
case QUERY_NODE_SHOW_QUERIES_STMT:
@ -6346,6 +6390,7 @@ static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) {
case QUERY_NODE_SHOW_APPS_STMT:
case QUERY_NODE_SHOW_CONSUMERS_STMT:
case QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT:
case QUERY_NODE_SHOW_TAGS_STMT:
code = rewriteShow(pCxt, pQuery);
break;
case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT:

View File

@ -67,32 +67,8 @@ static char* getSyntaxErrFormat(int32_t errCode) {
return "Invalid identifier name: %s";
case TSDB_CODE_PAR_CORRESPONDING_STABLE_ERR:
return "Corresponding super table not in this db";
case TSDB_CODE_PAR_INVALID_RANGE_OPTION:
return "Invalid option %s: %" PRId64 " valid range: [%d, %d]";
case TSDB_CODE_PAR_INVALID_STR_OPTION:
return "Invalid option %s: %s";
case TSDB_CODE_PAR_INVALID_ENUM_OPTION:
return "Invalid option %s: %" PRId64 ", only %d, %d allowed";
case TSDB_CODE_PAR_INVALID_KEEP_NUM:
return "Invalid number of keep options";
case TSDB_CODE_PAR_INVALID_KEEP_ORDER:
return "Invalid keep value, should be keep0 <= keep1 <= keep2";
case TSDB_CODE_PAR_INVALID_KEEP_VALUE:
return "Invalid option keep: %" PRId64 ", %" PRId64 ", %" PRId64 " valid range: [%dm, %dm]";
case TSDB_CODE_PAR_INVALID_COMMENT_OPTION:
return "Invalid option comment, length cannot exceed %d";
case TSDB_CODE_PAR_INVALID_F_RANGE_OPTION:
return "Invalid option %s: %f valid range: [%d, %d]";
case TSDB_CODE_PAR_INVALID_ROLLUP_OPTION:
return "Invalid option rollup: only one function is allowed";
case TSDB_CODE_PAR_INVALID_RETENTIONS_OPTION:
return "Invalid option retentions";
case TSDB_CODE_PAR_GROUPBY_WINDOW_COEXIST:
return "GROUP BY and WINDOW-clause can't be used together";
case TSDB_CODE_PAR_INVALID_OPTION_UNIT:
return "Invalid option %s unit: %c, only %c, %c, %c allowed";
case TSDB_CODE_PAR_INVALID_KEEP_UNIT:
return "Invalid option keep unit: %c, only m, h, d allowed";
case TSDB_CODE_PAR_AGG_FUNC_NESTING:
return "Aggregate functions do not support nesting";
case TSDB_CODE_PAR_INVALID_STATE_WIN_TYPE:
@ -194,8 +170,6 @@ static char* getSyntaxErrFormat(int32_t errCode) {
return "%s function is not supported in stream query";
case TSDB_CODE_PAR_GROUP_BY_NOT_ALLOWED_FUNC:
return "%s function is not supported in group query";
case TSDB_CODE_PAR_INVALID_TABLE_OPTION:
return "Invalid option %s";
case TSDB_CODE_PAR_INVALID_INTERP_CLAUSE:
return "Invalid usage of RANGE clause, EVERY clause or FILL clause";
case TSDB_CODE_PAR_NO_VALID_FUNC_IN_WIN:
@ -389,7 +363,7 @@ int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag** ppTag, voi
continue;
}
STagVal val = {0};
// strcpy(val.colName, colName);
// strcpy(val.colName, colName);
val.pKey = jsonKey;
taosHashPut(keyHash, jsonKey, keyLen, &keyLen,
CHAR_BYTES); // add key to hash to deduplicate; the value is unused

File diff suppressed because it is too large

View File

@ -328,46 +328,6 @@ void initMetaDataEnv() {
stub.set(catalogGetTableIndex, __catalogGetTableIndex);
stub.set(catalogGetDnodeList, __catalogGetDnodeList);
stub.set(catalogRefreshGetTableCfg, __catalogRefreshGetTableCfg);
// {
// AddrAny any("libcatalog.so");
// std::map<std::string,void*> result;
// any.get_global_func_addr_dynsym("^catalogGetHandle$", result);
// for (const auto& f : result) {
// stub.set(f.second, __catalogGetHandle);
// }
// }
// {
// AddrAny any("libcatalog.so");
// std::map<std::string,void*> result;
// any.get_global_func_addr_dynsym("^catalogGetTableMeta$", result);
// for (const auto& f : result) {
// stub.set(f.second, __catalogGetTableMeta);
// }
// }
// {
// AddrAny any("libcatalog.so");
// std::map<std::string,void*> result;
// any.get_global_func_addr_dynsym("^catalogGetTableHashVgroup$", result);
// for (const auto& f : result) {
// stub.set(f.second, __catalogGetTableHashVgroup);
// }
// }
// {
// AddrAny any("libcatalog.so");
// std::map<std::string,void*> result;
// any.get_global_func_addr_dynsym("^catalogGetTableDistVgInfo$", result);
// for (const auto& f : result) {
// stub.set(f.second, __catalogGetTableDistVgInfo);
// }
// }
// {
// AddrAny any("libcatalog.so");
// std::map<std::string,void*> result;
// any.get_global_func_addr_dynsym("^catalogGetDBVgVersion$", result);
// for (const auto& f : result) {
// stub.set(f.second, __catalogGetDBVgVersion);
// }
// }
}
void generateMetaData() {

View File

@ -27,22 +27,217 @@ TEST_F(ParserInitialATest, alterAccount) {
run("ALTER ACCOUNT ac_wxy PASS '123456'", TSDB_CODE_PAR_EXPRIE_STATEMENT, PARSER_STAGE_PARSE);
}
/*
* ALTER DNODE dnode_id 'config' ['value']
* ALTER ALL DNODES 'config' ['value']
*/
TEST_F(ParserInitialATest, alterDnode) {
useDb("root", "test");
run("ALTER DNODE 1 'resetLog'");
SMCfgDnodeReq expect = {0};
run("ALTER DNODE 1 'debugFlag' '134'");
auto clearCfgDnodeReq = [&]() { memset(&expect, 0, sizeof(SMCfgDnodeReq)); };
auto setCfgDnodeReq = [&](int32_t dnodeId, const char* pConfig, const char* pValue = nullptr) {
expect.dnodeId = dnodeId;
strcpy(expect.config, pConfig);
if (nullptr != pValue) {
strcpy(expect.value, pValue);
}
};
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_ALTER_DNODE_STMT);
SMCfgDnodeReq req = {0};
ASSERT_EQ(tDeserializeSMCfgDnodeReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req), TSDB_CODE_SUCCESS);
ASSERT_EQ(req.dnodeId, expect.dnodeId);
ASSERT_EQ(std::string(req.config), std::string(expect.config));
ASSERT_EQ(std::string(req.value), std::string(expect.value));
});
setCfgDnodeReq(1, "resetLog");
run("ALTER DNODE 1 'resetLog'");
clearCfgDnodeReq();
setCfgDnodeReq(2, "debugFlag", "134");
run("ALTER DNODE 2 'debugFlag' '134'");
clearCfgDnodeReq();
setCfgDnodeReq(-1, "resetQueryCache");
run("ALTER ALL DNODES 'resetQueryCache'");
clearCfgDnodeReq();
setCfgDnodeReq(-1, "qDebugflag", "135");
run("ALTER ALL DNODES 'qDebugflag' '135'");
clearCfgDnodeReq();
}
/*
* ALTER DATABASE db_name [alter_database_options]
*
* alter_database_options:
* alter_database_option ...
*
* alter_database_option: {
* BUFFER int_value -- todo: range [3, 16384], default 96, unit MB
* | CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'} -- default 'none'
* | CACHESIZE int_value -- range [1, 65536], default 1, unit MB
* | FSYNC int_value -- range [0, 180000], default 3000, unit ms
* | KEEP {int_value | duration_value} -- range [1, 365000], default 3650, unit day
* | PAGES int_value -- todo: range [64, +oo), default 256, unit page
* | REPLICA int_value -- todo: enum 1, 3, default 1, unit replica
* | STRICT {'off' | 'on'} -- todo: default 'off'
* | WAL int_value -- enum 1, 2, default 1
* }
*/
TEST_F(ParserInitialATest, alterDatabase) {
useDb("root", "test");
run("ALTER DATABASE test CACHEMODEL 'last_row' FSYNC 200 WAL 1");
SAlterDbReq expect = {0};
run("ALTER DATABASE test KEEP 2400");
auto clearAlterDbReq = [&]() { memset(&expect, 0, sizeof(SAlterDbReq)); };
auto initAlterDb = [&](const char* pDb) {
snprintf(expect.db, sizeof(expect.db), "0.%s", pDb);
expect.buffer = -1;
expect.pageSize = -1;
expect.pages = -1;
expect.daysPerFile = -1;
expect.daysToKeep0 = -1;
expect.daysToKeep1 = -1;
expect.daysToKeep2 = -1;
expect.fsyncPeriod = -1;
expect.walLevel = -1;
expect.strict = -1;
expect.cacheLast = -1;
expect.cacheLastSize = -1;
expect.replications = -1;
};
auto setAlterDbBuffer = [&](int32_t buffer) { expect.buffer = buffer; };
auto setAlterDbPageSize = [&](int32_t pageSize) { expect.pageSize = pageSize; };
auto setAlterDbPages = [&](int32_t pages) { expect.pages = pages; };
auto setAlterDbCacheSize = [&](int32_t cacheSize) { expect.cacheLastSize = cacheSize; };
auto setAlterDbDuration = [&](int32_t duration) { expect.daysPerFile = duration; };
auto setAlterDbKeep = [&](int32_t daysToKeep0, int32_t daysToKeep1 = -1, int32_t daysToKeep2 = -1) {
expect.daysToKeep0 = daysToKeep0;
expect.daysToKeep1 = (-1 == daysToKeep1 ? expect.daysToKeep0 : daysToKeep1);
expect.daysToKeep2 = (-1 == daysToKeep2 ? expect.daysToKeep1 : daysToKeep2);
};
auto setAlterDbFsync = [&](int32_t fsync) { expect.fsyncPeriod = fsync; };
auto setAlterDbWal = [&](int8_t wal) { expect.walLevel = wal; };
auto setAlterDbStrict = [&](int8_t strict) { expect.strict = strict; };
auto setAlterDbCacheModel = [&](int8_t cacheModel) { expect.cacheLast = cacheModel; };
auto setAlterDbReplica = [&](int8_t replications) { expect.replications = replications; };
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_ALTER_DATABASE_STMT);
SAlterDbReq req = {0};
ASSERT_EQ(tDeserializeSAlterDbReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req), TSDB_CODE_SUCCESS);
ASSERT_EQ(std::string(req.db), std::string(expect.db));
ASSERT_EQ(req.buffer, expect.buffer);
ASSERT_EQ(req.pageSize, expect.pageSize);
ASSERT_EQ(req.pages, expect.pages);
ASSERT_EQ(req.cacheLastSize, expect.cacheLastSize);
ASSERT_EQ(req.daysToKeep0, expect.daysToKeep0);
ASSERT_EQ(req.daysToKeep1, expect.daysToKeep1);
ASSERT_EQ(req.daysToKeep2, expect.daysToKeep2);
ASSERT_EQ(req.fsyncPeriod, expect.fsyncPeriod);
ASSERT_EQ(req.walLevel, expect.walLevel);
ASSERT_EQ(req.strict, expect.strict);
ASSERT_EQ(req.cacheLast, expect.cacheLast);
ASSERT_EQ(req.replications, expect.replications);
});
const int32_t MINUTE_PER_DAY = MILLISECOND_PER_DAY / MILLISECOND_PER_MINUTE;
const int32_t MINUTE_PER_HOUR = MILLISECOND_PER_HOUR / MILLISECOND_PER_MINUTE;
initAlterDb("test");
setAlterDbCacheSize(32);
setAlterDbKeep(10 * MINUTE_PER_DAY);
setAlterDbFsync(200);
setAlterDbWal(1);
setAlterDbCacheModel(TSDB_CACHE_MODEL_LAST_ROW);
run("ALTER DATABASE test CACHEMODEL 'last_row' CACHESIZE 32 FSYNC 200 KEEP 10 WAL 1");
clearAlterDbReq();
initAlterDb("test");
setAlterDbCacheModel(TSDB_CACHE_MODEL_NONE);
run("ALTER DATABASE test CACHEMODEL 'none'");
setAlterDbCacheModel(TSDB_CACHE_MODEL_LAST_ROW);
run("ALTER DATABASE test CACHEMODEL 'last_row'");
setAlterDbCacheModel(TSDB_CACHE_MODEL_LAST_VALUE);
run("ALTER DATABASE test CACHEMODEL 'last_value'");
setAlterDbCacheModel(TSDB_CACHE_MODEL_BOTH);
run("ALTER DATABASE test CACHEMODEL 'both'");
clearAlterDbReq();
initAlterDb("test");
setAlterDbCacheSize(1);
run("ALTER DATABASE test CACHESIZE 1");
setAlterDbCacheSize(64);
run("ALTER DATABASE test CACHESIZE 64");
setAlterDbCacheSize(65536);
run("ALTER DATABASE test CACHESIZE 65536");
clearAlterDbReq();
initAlterDb("test");
setAlterDbFsync(0);
run("ALTER DATABASE test FSYNC 0");
setAlterDbFsync(1000);
run("ALTER DATABASE test FSYNC 1000");
setAlterDbFsync(180000);
run("ALTER DATABASE test FSYNC 180000");
clearAlterDbReq();
initAlterDb("test");
setAlterDbKeep(1 * MINUTE_PER_DAY);
run("ALTER DATABASE test KEEP 1");
setAlterDbKeep(30 * MINUTE_PER_DAY);
run("ALTER DATABASE test KEEP 30");
setAlterDbKeep(365000 * MINUTE_PER_DAY);
run("ALTER DATABASE test KEEP 365000");
setAlterDbKeep(1440);
run("ALTER DATABASE test KEEP 1440m");
setAlterDbKeep(14400);
run("ALTER DATABASE test KEEP 14400m");
setAlterDbKeep(525600000);
run("ALTER DATABASE test KEEP 525600000m");
setAlterDbKeep(5 * MINUTE_PER_DAY, 35 * MINUTE_PER_DAY, 500 * MINUTE_PER_DAY);
run("ALTER DATABASE test KEEP 5,35,500");
setAlterDbKeep(14400, 2400 * MINUTE_PER_HOUR, 1500 * MINUTE_PER_DAY);
run("ALTER DATABASE test KEEP 14400m,2400h,1500d");
clearAlterDbReq();
initAlterDb("test");
setAlterDbWal(1);
run("ALTER DATABASE test WAL 1");
setAlterDbWal(2);
run("ALTER DATABASE test WAL 2");
clearAlterDbReq();
}
TEST_F(ParserInitialATest, alterDatabaseSemanticCheck) {
useDb("root", "test");
run("ALTER DATABASE test CACHEMODEL 'other'", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test CACHESIZE 0", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test CACHESIZE 65537", TSDB_CODE_PAR_INVALID_DB_OPTION);
// the grammar only accepts non-negative integers here, so a negative value fails at parse time
run("ALTER DATABASE test FSYNC -1", TSDB_CODE_PAR_SYNTAX_ERROR, PARSER_STAGE_PARSE);
run("ALTER DATABASE test FSYNC 180001", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test KEEP 0", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test KEEP 365001", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test KEEP 1000000000s", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test KEEP 1w", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test WAL 0", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test WAL 3", TSDB_CODE_PAR_INVALID_DB_OPTION);
// trailing garbage after a statement is a parse-time syntax error, regardless of the statement itself
run("ALTER DATABASE db WAL 0 # td-14436", TSDB_CODE_PAR_SYNTAX_ERROR, PARSER_STAGE_PARSE);
}
/*
* ALTER LOCAL dnode_id 'config' ['value']
*/
TEST_F(ParserInitialATest, alterLocal) {
useDb("root", "test");
@ -53,7 +248,7 @@ TEST_F(ParserInitialATest, alterLocal) {
expect.second.clear();
};
auto setAlterLocalFunc = [&](const char* pConfig, const char* pValue = nullptr) {
auto setAlterLocal = [&](const char* pConfig, const char* pValue = nullptr) {
expect.first.assign(pConfig);
if (nullptr != pValue) {
expect.second.assign(pValue);
@ -68,38 +263,37 @@ TEST_F(ParserInitialATest, alterLocal) {
ASSERT_EQ(string(pStmt->value), expect.second);
});
setAlterLocalFunc("resetlog");
setAlterLocal("resetlog");
run("ALTER LOCAL 'resetlog'");
clearAlterLocal();
setAlterLocalFunc("querypolicy", "2");
setAlterLocal("querypolicy", "2");
run("ALTER LOCAL 'querypolicy' '2'");
clearAlterLocal();
}
/*
* ALTER TABLE [db_name.]tb_name alter_table_clause
* ALTER STABLE [db_name.]tb_name alter_table_clause
*
* alter_table_clause: {
* alter_table_options
* | ADD COLUMN col_name column_type
* | DROP COLUMN col_name
* | MODIFY COLUMN col_name column_type
* | RENAME COLUMN old_col_name new_col_name
* | ADD TAG tag_name tag_type
* | DROP TAG tag_name
* | MODIFY TAG tag_name tag_type
* | RENAME TAG old_tag_name new_tag_name
* | SET TAG tag_name = new_tag_value
* | ADD {FULLTEXT | SMA} INDEX index_name (col_name [, col_name] ...) [index_option]
* | RENAME COLUMN old_col_name new_col_name -- normal table
* | ADD TAG tag_name tag_type -- super table
* | DROP TAG tag_name -- super table
* | MODIFY TAG tag_name tag_type -- super table
* | RENAME TAG old_tag_name new_tag_name -- super table
* | SET TAG tag_name = new_tag_value -- child table
* }
*
* alter_table_options:
* alter_table_option ...
*
* alter_table_option: {
* TTL value
* | COMMENT 'string_value'
* TTL int_value -- child/normal table
* | COMMENT 'string_value'
* }
*/
TEST_F(ParserInitialATest, alterSTable) {
@ -112,14 +306,12 @@ TEST_F(ParserInitialATest, alterSTable) {
memset(&expect, 0, sizeof(SMAlterStbReq));
};
auto setAlterStbReqFunc = [&](const char* pTbname, int8_t alterType, int32_t numOfFields = 0,
const char* pField1Name = nullptr, int8_t field1Type = 0, int32_t field1Bytes = 0,
const char* pField2Name = nullptr, const char* pComment = nullptr,
int32_t ttl = TSDB_DEFAULT_TABLE_TTL) {
auto setAlterStbReq = [&](const char* pTbname, int8_t alterType, int32_t numOfFields = 0,
const char* pField1Name = nullptr, int8_t field1Type = 0, int32_t field1Bytes = 0,
const char* pField2Name = nullptr, const char* pComment = nullptr) {
int32_t len = snprintf(expect.name, sizeof(expect.name), "0.test.%s", pTbname);
expect.name[len] = '\0';
expect.alterType = alterType;
// expect.ttl = ttl;
if (nullptr != pComment) {
expect.comment = strdup(pComment);
expect.commentLen = strlen(pComment);
@ -178,59 +370,47 @@ TEST_F(ParserInitialATest, alterSTable) {
tFreeSMAltertbReq(&req);
});
// setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_OPTIONS, 0, nullptr, 0, 0, nullptr, nullptr, 10);
// run("ALTER STABLE st1 TTL 10");
// clearAlterStbReq();
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_OPTIONS, 0, nullptr, 0, 0, nullptr, "test");
setAlterStbReq("st1", TSDB_ALTER_TABLE_UPDATE_OPTIONS, 0, nullptr, 0, 0, nullptr, "test");
run("ALTER STABLE st1 COMMENT 'test'");
clearAlterStbReq();
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_ADD_COLUMN, 1, "cc1", TSDB_DATA_TYPE_BIGINT);
setAlterStbReq("st1", TSDB_ALTER_TABLE_ADD_COLUMN, 1, "cc1", TSDB_DATA_TYPE_BIGINT);
run("ALTER STABLE st1 ADD COLUMN cc1 BIGINT");
clearAlterStbReq();
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_DROP_COLUMN, 1, "c1");
setAlterStbReq("st1", TSDB_ALTER_TABLE_DROP_COLUMN, 1, "c1");
run("ALTER STABLE st1 DROP COLUMN c1");
clearAlterStbReq();
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, 1, "c2", TSDB_DATA_TYPE_VARCHAR,
30 + VARSTR_HEADER_SIZE);
setAlterStbReq("st1", TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, 1, "c2", TSDB_DATA_TYPE_VARCHAR, 30 + VARSTR_HEADER_SIZE);
run("ALTER STABLE st1 MODIFY COLUMN c2 VARCHAR(30)");
clearAlterStbReq();
// setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, 2, "c1", 0, 0, "cc1");
// run("ALTER STABLE st1 RENAME COLUMN c1 cc1");
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_ADD_TAG, 1, "tag11", TSDB_DATA_TYPE_BIGINT);
setAlterStbReq("st1", TSDB_ALTER_TABLE_ADD_TAG, 1, "tag11", TSDB_DATA_TYPE_BIGINT);
run("ALTER STABLE st1 ADD TAG tag11 BIGINT");
clearAlterStbReq();
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_DROP_TAG, 1, "tag1");
setAlterStbReq("st1", TSDB_ALTER_TABLE_DROP_TAG, 1, "tag1");
run("ALTER STABLE st1 DROP TAG tag1");
clearAlterStbReq();
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, 1, "tag2", TSDB_DATA_TYPE_VARCHAR,
30 + VARSTR_HEADER_SIZE);
setAlterStbReq("st1", TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, 1, "tag2", TSDB_DATA_TYPE_VARCHAR, 30 + VARSTR_HEADER_SIZE);
run("ALTER STABLE st1 MODIFY TAG tag2 VARCHAR(30)");
clearAlterStbReq();
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_TAG_NAME, 2, "tag1", 0, 0, "tag11");
setAlterStbReq("st1", TSDB_ALTER_TABLE_UPDATE_TAG_NAME, 2, "tag1", 0, 0, "tag11");
run("ALTER STABLE st1 RENAME TAG tag1 tag11");
clearAlterStbReq();
// todo
// ADD {FULLTEXT | SMA} INDEX index_name (col_name [, col_name] ...) [index_option]
}
TEST_F(ParserInitialATest, alterSTableSemanticCheck) {
useDb("root", "test");
run("ALTER STABLE st1 RENAME COLUMN c1 cc1", TSDB_CODE_PAR_INVALID_ALTER_TABLE);
run("ALTER STABLE st1 MODIFY COLUMN c2 NCHAR(10)", TSDB_CODE_PAR_INVALID_MODIFY_COL);
run("ALTER STABLE st1 MODIFY TAG tag2 NCHAR(10)", TSDB_CODE_PAR_INVALID_MODIFY_COL);
run("ALTER STABLE st1 SET TAG tag1 = 10", TSDB_CODE_PAR_INVALID_ALTER_TABLE);
run("ALTER STABLE st1 TTL 10", TSDB_CODE_PAR_INVALID_ALTER_TABLE);
}
TEST_F(ParserInitialATest, alterTable) {
@ -246,8 +426,8 @@ TEST_F(ParserInitialATest, alterTable) {
memset(&expect, 0, sizeof(SVAlterTbReq));
};
auto setAlterColFunc = [&](const char* pTbname, int8_t alterType, const char* pColName, int8_t dataType = 0,
int32_t dataBytes = 0, const char* pNewColName = nullptr) {
auto setAlterTableCol = [&](const char* pTbname, int8_t alterType, const char* pColName, int8_t dataType = 0,
int32_t dataBytes = 0, const char* pNewColName = nullptr) {
expect.tbName = strdup(pTbname);
expect.action = alterType;
expect.colName = strdup(pColName);
@ -269,7 +449,7 @@ TEST_F(ParserInitialATest, alterTable) {
}
};
auto setAlterTagFunc = [&](const char* pTbname, const char* pTagName, uint8_t* pNewVal, uint32_t bytes) {
auto setAlterTableTag = [&](const char* pTbname, const char* pTagName, uint8_t* pNewVal, uint32_t bytes) {
expect.tbName = strdup(pTbname);
expect.action = TSDB_ALTER_TABLE_UPDATE_TAG_VAL;
expect.tagName = strdup(pTagName);
@ -279,7 +459,7 @@ TEST_F(ParserInitialATest, alterTable) {
expect.pTagVal = pNewVal;
};
auto setAlterOptionsFunc = [&](const char* pTbname, int32_t ttl, char* pComment = nullptr) {
auto setAlterTableOptions = [&](const char* pTbname, int32_t ttl, char* pComment = nullptr) {
expect.tbName = strdup(pTbname);
expect.action = TSDB_ALTER_TABLE_UPDATE_OPTIONS;
if (-1 != ttl) {
@ -335,47 +515,56 @@ TEST_F(ParserInitialATest, alterTable) {
tDecoderClear(&coder);
});
setAlterOptionsFunc("t1", 10, nullptr);
setAlterTableOptions("t1", 10, nullptr);
run("ALTER TABLE t1 TTL 10");
clearAlterTbReq();
setAlterOptionsFunc("t1", -1, (char*)"test");
setAlterTableOptions("t1", -1, (char*)"test");
run("ALTER TABLE t1 COMMENT 'test'");
clearAlterTbReq();
setAlterColFunc("t1", TSDB_ALTER_TABLE_ADD_COLUMN, "cc1", TSDB_DATA_TYPE_BIGINT);
setAlterTableCol("t1", TSDB_ALTER_TABLE_ADD_COLUMN, "cc1", TSDB_DATA_TYPE_BIGINT);
run("ALTER TABLE t1 ADD COLUMN cc1 BIGINT");
clearAlterTbReq();
setAlterColFunc("t1", TSDB_ALTER_TABLE_DROP_COLUMN, "c1");
setAlterTableCol("t1", TSDB_ALTER_TABLE_DROP_COLUMN, "c1");
run("ALTER TABLE t1 DROP COLUMN c1");
clearAlterTbReq();
setAlterColFunc("t1", TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, "c2", TSDB_DATA_TYPE_VARCHAR, 30 + VARSTR_HEADER_SIZE);
setAlterTableCol("t1", TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, "c2", TSDB_DATA_TYPE_VARCHAR, 30 + VARSTR_HEADER_SIZE);
run("ALTER TABLE t1 MODIFY COLUMN c2 VARCHAR(30)");
clearAlterTbReq();
setAlterColFunc("t1", TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, "c1", 0, 0, "cc1");
setAlterTableCol("t1", TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, "c1", 0, 0, "cc1");
run("ALTER TABLE t1 RENAME COLUMN c1 cc1");
clearAlterTbReq();
int32_t val = 10;
setAlterTagFunc("st1s1", "tag1", (uint8_t*)&val, sizeof(val));
setAlterTableTag("st1s1", "tag1", (uint8_t*)&val, sizeof(val));
run("ALTER TABLE st1s1 SET TAG tag1=10");
clearAlterTbReq();
// todo
// ADD {FULLTEXT | SMA} INDEX index_name (col_name [, col_name] ...) [index_option]
}
TEST_F(ParserInitialATest, alterTableSemanticCheck) {
useDb("root", "test");
run("ALTER TABLE st1s1 RENAME COLUMN c1 cc1", TSDB_CODE_PAR_INVALID_ALTER_TABLE);
run("ALTER TABLE st1s1 ADD TAG tag11 BIGINT", TSDB_CODE_PAR_INVALID_ALTER_TABLE);
run("ALTER TABLE st1s1 DROP TAG tag1", TSDB_CODE_PAR_INVALID_ALTER_TABLE);
run("ALTER TABLE st1s1 MODIFY TAG tag2 VARCHAR(30)", TSDB_CODE_PAR_INVALID_ALTER_TABLE);
run("ALTER TABLE st1s1 RENAME TAG tag1 tag11", TSDB_CODE_PAR_INVALID_ALTER_TABLE);
run("ALTER TABLE st1s1 SET TAG tag2 = '123456789012345678901'", TSDB_CODE_PAR_WRONG_VALUE_TYPE);
}
/*
* ALTER USER user_name alter_user_clause
*
* alter_user_clause: {
* PASS str_value
* | ENABLE int_value
* | SYSINFO int_value
* }
*/
TEST_F(ParserInitialATest, alterUser) {
useDb("root", "test");
@ -423,6 +612,9 @@ TEST_F(ParserInitialATest, alterUser) {
clearAlterUserReq();
}
/*
* BALANCE VGROUP
*/
TEST_F(ParserInitialATest, balanceVgroup) {
useDb("root", "test");
@ -436,10 +628,4 @@ TEST_F(ParserInitialATest, balanceVgroup) {
run("BALANCE VGROUP");
}
TEST_F(ParserInitialATest, bug001) {
useDb("root", "test");
run("ALTER DATABASE db WAL 0 # td-14436", TSDB_CODE_PAR_SYNTAX_ERROR, PARSER_STAGE_PARSE);
}
} // namespace ParserTest

View File

@ -236,12 +236,12 @@ TEST_F(ParserInitialCTest, createDatabase) {
TEST_F(ParserInitialCTest, createDatabaseSemanticCheck) {
useDb("root", "test");
run("create database db2 retentions 0s:1d", TSDB_CODE_PAR_INVALID_RETENTIONS_OPTION);
run("create database db2 retentions 10s:0d", TSDB_CODE_PAR_INVALID_RETENTIONS_OPTION);
run("create database db2 retentions 1w:1d", TSDB_CODE_PAR_INVALID_RETENTIONS_OPTION);
run("create database db2 retentions 1w:1n", TSDB_CODE_PAR_INVALID_RETENTIONS_OPTION);
run("create database db2 retentions 15s:7d,15m:21d,10m:500d", TSDB_CODE_PAR_INVALID_RETENTIONS_OPTION);
run("create database db2 retentions 15s:7d,5m:21d,10m:10d", TSDB_CODE_PAR_INVALID_RETENTIONS_OPTION);
run("create database db2 retentions 0s:1d", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("create database db2 retentions 10s:0d", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("create database db2 retentions 1w:1d", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("create database db2 retentions 1w:1n", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("create database db2 retentions 15s:7d,15m:21d,10m:500d", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("create database db2 retentions 15s:7d,5m:21d,10m:10d", TSDB_CODE_PAR_INVALID_DB_OPTION);
}
TEST_F(ParserInitialCTest, createDnode) {
@ -514,13 +514,13 @@ TEST_F(ParserInitialCTest, createStableSemanticCheck) {
useDb("root", "test");
run("CREATE STABLE rollup_db.stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(CEIL)",
TSDB_CODE_PAR_INVALID_ROLLUP_OPTION);
TSDB_CODE_PAR_INVALID_TABLE_OPTION);
run("CREATE STABLE rollup_db.stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(MAX) MAX_DELAY 0s WATERMARK 1m",
TSDB_CODE_PAR_INVALID_RANGE_OPTION);
TSDB_CODE_PAR_INVALID_TABLE_OPTION);
run("CREATE STABLE rollup_db.stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(MAX) MAX_DELAY 10s WATERMARK 18m",
TSDB_CODE_PAR_INVALID_RANGE_OPTION);
TSDB_CODE_PAR_INVALID_TABLE_OPTION);
}
TEST_F(ParserInitialCTest, createStream) {

View File

@ -192,7 +192,7 @@ class ParserTestBaseImpl {
void setParseContext(const string& sql, SParseContext* pCxt, bool async = false) {
stmtEnv_.sql_ = sql;
transform(stmtEnv_.sql_.begin(), stmtEnv_.sql_.end(), stmtEnv_.sql_.begin(), ::tolower);
strtolower((char*)stmtEnv_.sql_.c_str(), sql.c_str());
pCxt->acctId = atoi(caseEnv_.acctId_.c_str());
pCxt->db = caseEnv_.db_.c_str();

View File

@ -1378,6 +1378,22 @@ static bool planOptNodeListHasCol(SNodeList* pKeys) {
return hasCol;
}
static EDealRes partTagsOptHasTbname(SNode* pNode, void* pContext) {
if (QUERY_NODE_COLUMN == nodeType(pNode)) {
if (COLUMN_TYPE_TBNAME == ((SColumnNode*)pNode)->colType) {
*(bool*)pContext = true;
return DEAL_RES_END;
}
}
return DEAL_RES_CONTINUE;
}
static bool planOptNodeListHasTbname(SNodeList* pKeys) {
bool hasTbname = false;
nodesWalkExprs(pKeys, partTagsOptHasTbname, &hasTbname);
return hasTbname;
}
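
The two helpers above use the expression-walker idiom: a callback flips a flag and ends the walk early once a TBNAME pseudo-column is seen. A self-contained sketch of the idiom over a plain linked list, with simplified stand-ins for the SNode/nodesWalkExprs machinery:

#include <stdbool.h>
#include <stddef.h>

typedef enum { WALK_CONTINUE, WALK_END } WalkRes;
typedef struct Node { int isTbname; struct Node* next; } Node;

/* flips the flag and ends the walk early, like partTagsOptHasTbname */
static WalkRes hasTbnameCb(const Node* n, void* ctx) {
  if (n->isTbname) {
    *(bool*)ctx = true;
    return WALK_END;
  }
  return WALK_CONTINUE;
}

static bool listHasTbname(const Node* head) {
  bool found = false;
  for (const Node* n = head; n != NULL; n = n->next) {
    if (WALK_END == hasTbnameCb(n, &found)) break;
  }
  return found;
}
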
static bool partTagsIsOptimizableNode(SLogicNode* pNode) {
return ((QUERY_NODE_LOGIC_PLAN_PARTITION == nodeType(pNode) ||
(QUERY_NODE_LOGIC_PLAN_AGG == nodeType(pNode) && NULL != ((SAggLogicNode*)pNode)->pGroupKeys &&
@ -2129,7 +2145,8 @@ static bool tagScanMayBeOptimized(SLogicNode* pNode) {
}
SAggLogicNode* pAgg = (SAggLogicNode*)(pNode->pParent);
if (NULL == pAgg->pGroupKeys || NULL != pAgg->pAggFuncs || planOptNodeListHasCol(pAgg->pGroupKeys)) {
if (NULL == pAgg->pGroupKeys || NULL != pAgg->pAggFuncs ||
planOptNodeListHasCol(pAgg->pGroupKeys) || !planOptNodeListHasTbname(pAgg->pGroupKeys)) {
return false;
}
@ -2143,6 +2160,22 @@ static int32_t tagScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubp
}
pScanNode->scanType = SCAN_TYPE_TAG;
SNode* pTarget = NULL;
FOREACH(pTarget, pScanNode->node.pTargets) {
if (PRIMARYKEY_TIMESTAMP_COL_ID == ((SColumnNode*)(pTarget))->colId) {
ERASE_NODE(pScanNode->node.pTargets);
break;
}
}
NODES_DESTORY_LIST(pScanNode->pScanCols);
SLogicNode* pAgg = pScanNode->node.pParent;
int32_t code = replaceLogicNode(pLogicSubplan, pAgg, (SLogicNode*)pScanNode);
if (TSDB_CODE_SUCCESS == code) {
NODES_CLEAR_LIST(pAgg->pChildren);
}
nodesDestroyNode((SNode*)pAgg);
pCxt->optimized = true;
return TSDB_CODE_SUCCESS;
}

View File

@ -150,7 +150,6 @@ typedef struct SQWTaskCtx {
void *taskHandle;
void *sinkHandle;
SSubplan *plan;
STbVerInfo tbInfo;
} SQWTaskCtx;

View File

@ -306,11 +306,6 @@ void qwFreeTaskCtx(SQWTaskCtx *ctx) {
dsDestroyDataSinker(ctx->sinkHandle);
ctx->sinkHandle = NULL;
}
if (ctx->plan) {
nodesDestroyNode((SNode*)ctx->plan);
ctx->plan = NULL;
}
}
int32_t qwDropTaskCtx(QW_FPARAMS_DEF) {
@ -327,7 +322,6 @@ int32_t qwDropTaskCtx(QW_FPARAMS_DEF) {
atomic_store_ptr(&ctx->taskHandle, NULL);
atomic_store_ptr(&ctx->sinkHandle, NULL);
atomic_store_ptr(&ctx->plan, NULL);
QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_DROP);

View File

@ -522,8 +522,6 @@ int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, const char* sql) {
QW_ERR_JRET(code);
}
ctx->plan = plan;
code = qCreateExecTask(qwMsg->node, mgmt->nodeId, tId, plan, &pTaskInfo, &sinkHandle, sql, OPTR_EXEC_MODEL_BATCH);
if (code) {
QW_TASK_ELOG("qCreateExecTask failed, code:%x - %s", code, tstrerror(code));
@ -928,8 +926,6 @@ int32_t qwProcessDelete(QW_FPARAMS_DEF, SQWMsg *qwMsg, SDeleteRes *pRes) {
QW_ERR_JRET(code);
}
ctx.plan = plan;
code = qCreateExecTask(qwMsg->node, mgmt->nodeId, tId, plan, &pTaskInfo, &sinkHandle, NULL, OPTR_EXEC_MODEL_BATCH);
if (code) {
QW_TASK_ELOG("qCreateExecTask failed, code:%x - %s", code, tstrerror(code));

View File

@ -3682,6 +3682,10 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) {
if (OP_TYPE_IN != node->opType) {
SColumnNode *refNode = (SColumnNode *)node->pLeft;
SValueNode *valueNode = (SValueNode *)node->pRight;
if (FILTER_GET_FLAG(stat->info->options, FLT_OPTION_TIMESTAMP)
&& TSDB_DATA_TYPE_UBIGINT == valueNode->node.resType.type && valueNode->datum.u <= INT64_MAX) {
valueNode->node.resType.type = TSDB_DATA_TYPE_BIGINT;
}
int32_t type = vectorGetConvertType(refNode->node.resType.type, valueNode->node.resType.type);
if (0 != type && type != refNode->node.resType.type) {
stat->scalarMode = true;

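The fltReviseRewriter change above keeps timestamp filters out of scalar mode when an unsigned 64-bit literal actually fits the signed range. A standalone sketch of the downcast condition, with hypothetical stand-ins for the TSDB type tags:

#include <stdbool.h>
#include <stdint.h>

typedef enum { T_BIGINT, T_UBIGINT } NumType;             /* stand-ins for TSDB_DATA_TYPE_* */
typedef struct { NumType type; uint64_t u; } Literal;

/* relabel an unsigned literal as signed when the value is representable */
static bool downcastIfFits(Literal* v) {
  if (T_UBIGINT == v->type && v->u <= (uint64_t)INT64_MAX) {
    v->type = T_BIGINT;
    return true;
  }
  return false;
}
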
View File

@ -44,7 +44,7 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock)
int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* pReq);
int32_t streamAppendQueueItem(SStreamQueueItem* dst, SStreamQueueItem* elem);
SStreamQueueItem* streamAppendQueueItem(SStreamQueueItem* dst, SStreamQueueItem* elem);
void streamFreeQitem(SStreamQueueItem* data);
#ifdef __cplusplus

View File

@ -77,6 +77,28 @@ FAIL:
return NULL;
}
SStreamMergedSubmit* streamMergedSubmitNew() {
SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)taosAllocateQitem(sizeof(SStreamMergedSubmit), DEF_QITEM);
if (pMerged == NULL) return NULL;
pMerged->reqs = taosArrayInit(0, sizeof(void*));
pMerged->dataRefs = taosArrayInit(0, sizeof(void*));
if (pMerged->dataRefs == NULL || pMerged->reqs == NULL) goto FAIL;
pMerged->type = STREAM_INPUT__MERGED_SUBMIT;
return pMerged;
FAIL:
if (pMerged->reqs) taosArrayDestroy(pMerged->reqs);
if (pMerged->dataRefs) taosArrayDestroy(pMerged->dataRefs);
taosFreeQitem(pMerged);
return NULL;
}
int32_t streamMergeSubmit(SStreamMergedSubmit* pMerged, SStreamDataSubmit* pSubmit) {
taosArrayPush(pMerged->dataRefs, &pSubmit->dataRef);
taosArrayPush(pMerged->reqs, &pSubmit->data);
pMerged->ver = pSubmit->ver;
return 0;
}
static FORCE_INLINE void streamDataSubmitRefInc(SStreamDataSubmit* pDataSubmit) {
atomic_add_fetch_32(pDataSubmit->dataRef, 1);
}
@ -100,15 +122,31 @@ void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit) {
}
}
int32_t streamAppendQueueItem(SStreamQueueItem* dst, SStreamQueueItem* elem) {
SStreamQueueItem* streamAppendQueueItem(SStreamQueueItem* dst, SStreamQueueItem* elem) {
ASSERT(elem);
if (dst->type == elem->type && dst->type == STREAM_INPUT__DATA_BLOCK) {
if (dst->type == STREAM_INPUT__DATA_BLOCK && elem->type == STREAM_INPUT__DATA_BLOCK) {
SStreamDataBlock* pBlock = (SStreamDataBlock*)dst;
SStreamDataBlock* pBlockSrc = (SStreamDataBlock*)elem;
taosArrayAddAll(pBlock->blocks, pBlockSrc->blocks);
return 0;
taosArrayDestroy(pBlockSrc->blocks);
taosFreeQitem(elem);
return dst;
} else if (dst->type == STREAM_INPUT__MERGED_SUBMIT && elem->type == STREAM_INPUT__DATA_SUBMIT) {
SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)dst;
SStreamDataSubmit* pBlockSrc = (SStreamDataSubmit*)elem;
streamMergeSubmit(pMerged, pBlockSrc);
taosFreeQitem(elem);
return dst;
} else if (dst->type == STREAM_INPUT__DATA_SUBMIT && elem->type == STREAM_INPUT__DATA_SUBMIT) {
SStreamMergedSubmit* pMerged = streamMergedSubmitNew();
ASSERT(pMerged);
streamMergeSubmit(pMerged, (SStreamDataSubmit*)dst);
streamMergeSubmit(pMerged, (SStreamDataSubmit*)elem);
taosFreeQitem(dst);
taosFreeQitem(elem);
return (SStreamQueueItem*)pMerged;
} else {
return -1;
return NULL;
}
}
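
Note the contract change: streamAppendQueueItem now returns the (possibly newly allocated) queue item rather than an int status, consuming both inputs on a successful merge. Callers must adopt the returned pointer, as the executor loop later in this commit does with data = newRet. A minimal generic sketch of that calling convention, with Item standing in for SStreamQueueItem:

#include <stdlib.h>

typedef struct Item { int n; } Item;

static Item* append(Item* dst, Item* elem) {
  Item* merged = malloc(sizeof(Item));
  if (merged == NULL) return NULL;    /* NULL means "could not merge"; inputs untouched */
  merged->n = dst->n + elem->n;
  free(dst);
  free(elem);                         /* both inputs are consumed on success */
  return merged;
}

int main(void) {
  Item* data = malloc(sizeof(Item)); data->n = 1;
  Item* next = malloc(sizeof(Item)); next->n = 2;
  Item* ret  = append(data, next);
  if (ret != NULL) data = ret;        /* adopt the merged item, as in data = newRet */
  free(data);
  return 0;
}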
@ -123,5 +161,20 @@ void streamFreeQitem(SStreamQueueItem* data) {
} else if (type == STREAM_INPUT__DATA_SUBMIT) {
streamDataSubmitRefDec((SStreamDataSubmit*)data);
taosFreeQitem(data);
} else if (type == STREAM_INPUT__MERGED_SUBMIT) {
SStreamMergedSubmit* pMerge = (SStreamMergedSubmit*)data;
int32_t sz = taosArrayGetSize(pMerge->reqs);
for (int32_t i = 0; i < sz; i++) {
int32_t* ref = taosArrayGetP(pMerge->dataRefs, i);
// drop one reference atomically, matching streamDataSubmitRefInc/Dec on the same counter
if (atomic_sub_fetch_32(ref, 1) == 0) {
void* pData = taosArrayGetP(pMerge->reqs, i);  // renamed to avoid shadowing the 'data' parameter
taosMemoryFree(pData);
taosMemoryFree(ref);
}
}
taosArrayDestroy(pMerge->reqs);
taosArrayDestroy(pMerge->dataRefs);
taosFreeQitem(pMerge);
}
}
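
Freeing a merged submit walks the paired arrays, drops one reference per request, and releases a payload only when its counter reaches zero. A runnable standalone model of that shared-payload bookkeeping (single-owner counters here, just to show the shape):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define N 3

int main(void) {
  void*    reqs[N];
  int32_t* refs[N];                   /* one heap counter per payload, like pMerge->dataRefs */

  for (int i = 0; i < N; i++) {
    reqs[i] = malloc(16);
    refs[i] = malloc(sizeof(int32_t));
    *refs[i] = 1;                     /* single owner in this sketch */
  }

  for (int i = 0; i < N; i++) {       /* release pass: one reference per entry */
    if (--*refs[i] == 0) {
      free(reqs[i]);
      free(refs[i]);
    }
  }
  puts("all payloads released exactly once");
  return 0;
}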

View File

@ -33,9 +33,13 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes)
SArray* blocks = pBlock->blocks;
qDebug("task %d %p set ssdata input", pTask->taskId, pTask);
qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_INPUT__DATA_BLOCK, false);
} else if (pItem->type == STREAM_INPUT__DROP) {
// TODO exec drop
return 0;
} else if (pItem->type == STREAM_INPUT__MERGED_SUBMIT) {
SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)data;
SArray* blocks = pMerged->reqs;
qDebug("task %d %p set submit input (merged), batch num: %d", pTask->taskId, pTask, (int32_t)blocks->size);
qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_INPUT__MERGED_SUBMIT, false);
} else {
ASSERT(0);
}
// exec
@ -144,7 +148,7 @@ int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum) {
static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) {
while (1) {
int32_t cnt = 0;
int32_t cnt = 1;
void* data = NULL;
while (1) {
SStreamQueueItem* qItem = streamQueueNextItem(pTask->inputQueue);
@ -155,24 +159,23 @@ static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) {
if (data == NULL) {
data = qItem;
streamQueueProcessSuccess(pTask->inputQueue);
if (qItem->type == STREAM_INPUT__DATA_BLOCK) {
/*streamUpdateVer(pTask, (SStreamDataBlock*)qItem);*/
} else {
break;
}
/*if (qItem->type == STREAM_INPUT__DATA_BLOCK) {*/
/*streamUpdateVer(pTask, (SStreamDataBlock*)qItem);*/
/*}*/
} else {
if (streamAppendQueueItem(data, qItem) < 0) {
void* newRet;
if ((newRet = streamAppendQueueItem(data, qItem)) == NULL) {
streamQueueProcessFail(pTask->inputQueue);
break;
} else {
cnt++;
data = newRet;
/*streamUpdateVer(pTask, (SStreamDataBlock*)qItem);*/
streamQueueProcessSuccess(pTask->inputQueue);
taosArrayDestroy(((SStreamDataBlock*)qItem)->blocks);
taosFreeQitem(qItem);
}
}
}
if (pTask->taskStatus == TASK_STATUS__DROPPING) {
if (data) streamFreeQitem(data);
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
@ -194,6 +197,7 @@ static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) {
if (taosArrayGetSize(pRes) != 0) {
SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
if (qRes == NULL) {
// TODO log failed ver
streamQueueProcessFail(pTask->inputQueue);
taosArrayDestroy(pRes);
return NULL;
@ -201,6 +205,7 @@ static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) {
qRes->type = STREAM_INPUT__DATA_BLOCK;
qRes->blocks = pRes;
if (streamTaskOutput(pTask, qRes) < 0) {
// TODO log failed ver
/*streamQueueProcessFail(pTask->inputQueue);*/
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
taosFreeQitem(qRes);

View File

@ -306,6 +306,8 @@ int32_t syncNodeReplicate(SSyncNode* pSyncNode) {
break;
}
syncNodeRestartHeartbeatTimer(pSyncNode);
return ret;
}

View File

@ -417,7 +417,7 @@ int32_t walReadVer(SWalReader *pRead, int64_t ver) {
}
if (ver > pRead->pWal->vers.lastVer || ver < pRead->pWal->vers.firstVer) {
wError("vgId:%d, invalid index:%" PRId64 ", first index:%" PRId64 ", last index:%" PRId64, pRead->pWal->cfg.vgId,
wDebug("vgId:%d, invalid index:%" PRId64 ", first index:%" PRId64 ", last index:%" PRId64, pRead->pWal->cfg.vgId,
ver, pRead->pWal->vers.firstVer, pRead->pWal->vers.lastVer);
terrno = TSDB_CODE_WAL_LOG_NOT_EXIST;
return -1;
@ -425,7 +425,7 @@ int32_t walReadVer(SWalReader *pRead, int64_t ver) {
if (pRead->curInvalid || pRead->curVersion != ver) {
if (walReadSeekVer(pRead, ver) < 0) {
wError("vgId:%d, unexpected wal log index:%" PRId64 ", since %s", pRead->pWal->cfg.vgId, ver, terrstr());
wError("vgId:%d, unexpected wal log, index:%" PRId64 ", since %s", pRead->pWal->cfg.vgId, ver, terrstr());
return -1;
}
seeked = true;
@ -452,7 +452,8 @@ int32_t walReadVer(SWalReader *pRead, int64_t ver) {
contLen = walValidHeadCksum(pRead->pHead);
if (contLen != 0) {
wError("vgId:%d, unexpected wal log index:%" PRId64 ", since head checksum not passed", pRead->pWal->cfg.vgId, ver);
wError("vgId:%d, unexpected wal log, index:%" PRId64 ", since head checksum not passed", pRead->pWal->cfg.vgId,
ver);
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
return -1;
}
@ -479,7 +480,7 @@ int32_t walReadVer(SWalReader *pRead, int64_t ver) {
}
if (pRead->pHead->head.version != ver) {
wError("vgId:%d, unexpected wal log index:%" PRId64 ", read request index:%" PRId64, pRead->pWal->cfg.vgId,
wError("vgId:%d, unexpected wal log, index:%" PRId64 ", read request index:%" PRId64, pRead->pWal->cfg.vgId,
pRead->pHead->head.version, ver);
pRead->curInvalid = 1;
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
@ -489,7 +490,8 @@ int32_t walReadVer(SWalReader *pRead, int64_t ver) {
contLen = walValidBodyCksum(pRead->pHead);
if (contLen != 0) {
wError("vgId:%d, unexpected wal log index:%" PRId64 ", since body checksum not passed", pRead->pWal->cfg.vgId, ver);
wError("vgId:%d, unexpected wal log, index:%" PRId64 ", since body checksum not passed", pRead->pWal->cfg.vgId,
ver);
pRead->curInvalid = 1;
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
ASSERT(0);

View File

@ -173,6 +173,46 @@ void taosArrayRemoveDuplicate(SArray* pArray, __compar_fn_t comparFn, void (*fp)
pArray->size = pos + 1;
}
void taosArrayRemoveDuplicateP(SArray* pArray, __compar_fn_t comparFn, void (*fp)(void*)) {
assert(pArray);
size_t size = pArray->size;
if (size <= 1) {
return;
}
int32_t pos = 0;
for (int32_t i = 1; i < size; ++i) {
char* p1 = taosArrayGet(pArray, pos);
char* p2 = taosArrayGet(pArray, i);
if (comparFn(p1, p2) == 0) {
// do nothing
} else {
if (pos + 1 != i) {
void* p = taosArrayGetP(pArray, pos + 1);  // P variant: free the stored pointer, not the slot address
if (fp != NULL) {
fp(p);
}
taosArraySet(pArray, pos + 1, p2);
pos += 1;
} else {
pos += 1;
}
}
}
if (fp != NULL) {
for (int32_t i = pos + 1; i < pArray->size; ++i) {
void* p = taosArrayGetP(pArray, i);
fp(p);
}
}
pArray->size = pos + 1;
}
void* taosArrayAddAll(SArray* pArray, const SArray* pInput) {
if (pInput) {
return taosArrayAddBatch(pArray, pInput->pData, (int32_t)taosArrayGetSize(pInput));
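
taosArrayRemoveDuplicateP compacts a sorted array of pointers in place, invoking the destructor both on overwritten slots and on the trailing duplicates past the new end. A generic, self-contained analogue over heap-allocated ints, written so every dropped pointer is freed exactly once:

#include <stdio.h>
#include <stdlib.h>

int main(void) {
  int  vals[] = {1, 1, 2, 2, 2, 3};
  int* arr[6];
  for (int i = 0; i < 6; i++) { arr[i] = malloc(sizeof(int)); *arr[i] = vals[i]; }

  int pos = 0;
  for (int i = 1; i < 6; i++) {
    if (*arr[pos] == *arr[i]) continue;    /* duplicate: leave for the tail cleanup */
    ++pos;
    if (pos != i) {
      free(arr[pos]);                      /* free the pointer being overwritten */
      arr[pos] = arr[i];
      arr[i] = NULL;                       /* slot now owned by arr[pos] */
    }
  }
  for (int i = pos + 1; i < 6; i++) free(arr[i]);  /* trailing duplicates */

  for (int i = 0; i <= pos; i++) printf("%d ", *arr[i]);
  printf("\n");
  for (int i = 0; i <= pos; i++) free(arr[i]);
  return 0;
}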

View File

@ -501,19 +501,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTER_VALUE_TOO_SMALL, "Interval too small")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_DB_NOT_SPECIFIED, "Database not specified")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME, "Invalid identifier name")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_CORRESPONDING_STABLE_ERR, "Corresponding super table not in this db")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_RANGE_OPTION, "Invalid option")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_STR_OPTION, "Invalid option")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_ENUM_OPTION, "Invalid option")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_KEEP_NUM, "Invalid number of keep options")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_KEEP_ORDER, "Invalid keep value, should be keep0 <= keep1 <= keep2")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_KEEP_VALUE, "Invalid option keep")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_COMMENT_OPTION, "Invalid option comment")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_F_RANGE_OPTION, "Invalid option")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_ROLLUP_OPTION, "Invalid option rollup: only one function is allowed")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_RETENTIONS_OPTION, "Invalid option retentions")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_DB_OPTION, "Invalid database option")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_TABLE_OPTION, "Invalid table option")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_GROUPBY_WINDOW_COEXIST, "GROUP BY and WINDOW-clause can't be used together")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_OPTION_UNIT, "Invalid option unit: only m, h, d allowed")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_KEEP_UNIT, "Invalid option keep unit: only m, h, d allowed")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_AGG_FUNC_NESTING, "Aggregate functions do not support nesting")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_STATE_WIN_TYPE, "Only support STATE_WINDOW on integer/bool/varchar column")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_STATE_WIN_COL, "Not support STATE_WINDOW on tag column")
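
The parser codes were renumbered because TSDB_CODE_PAR_INCOMPLETE_SQL moved up to 0x2601, shifting the codes after it by one; that is why the hard-coded 0x2602 in the crash-gen test below becomes 0x2603 for "Table does not exist". A tiny hedged demo of why positional renumbering ripples (illustrative macros, not TDengine's):

#include <stdio.h>

#define CODE_BASE            0x2600
#define CODE_SYNTAX_ERROR   (CODE_BASE + 0)
#define CODE_INCOMPLETE_SQL (CODE_BASE + 1)   /* inserted early in the list ... */
#define CODE_INVALID_COLUMN (CODE_BASE + 2)   /* ... so everything after it moves up */
#define CODE_TABLE_MISSING  (CODE_BASE + 3)   /* was 0x2602, now 0x2603 */

int main(void) {
  printf("table-missing code: 0x%04X\n", CODE_TABLE_MISSING);
  return 0;
}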

View File

@ -52,7 +52,7 @@ fi
docker run \
-v $REP_MOUNT_PARAM \
--rm --ulimit core=-1 taos_test:v1.0 sh -c "cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true;make -j $THREAD_COUNT"
--rm --ulimit core=-1 taos_test:v1.0 sh -c "cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DWEBSOCKET=true;make -j $THREAD_COUNT"
ret=$?
exit $ret

View File

@ -1326,7 +1326,7 @@ class Task():
0x03A1, # STable [does] not exist
0x03AA, # Tag already exists
0x0603, # Table already exists
0x2602, # Table does not exist
0x2603, # Table does not exist
0x260d, # Tags number not matched

View File

@ -489,9 +489,12 @@ class TDDnode:
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
onlyKillOnceWindows = 0
while(processID):
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
os.system(killCmd)
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
os.system(killCmd)
onlyKillOnceWindows = 1
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
@ -524,9 +527,12 @@ class TDDnode:
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
onlyKillOnceWindows = 0
while(processID):
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
os.system(killCmd)
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
os.system(killCmd)
onlyKillOnceWindows = 1
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
@ -550,9 +556,12 @@ class TDDnode:
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
onlyKillOnceWindows = 0
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
onlyKillOnceWindows = 1
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")

View File

@ -94,6 +94,7 @@ class TDSql:
except Exception as e:
i+=1
tdLog.notice("Try to query again, query times: %d "%i)
time.sleep(1)
pass
else:
try:
@ -293,6 +294,7 @@ class TDSql:
except Exception as e:
i+=1
tdLog.notice("Try to execute sql again, query times: %d "%i)
time.sleep(1)
pass
else:
try:

View File

@ -7,6 +7,7 @@ system sh/exec.sh -n dnode1 -s start -v
system sh/exec.sh -n dnode2 -s start -v
sql connect
print =============== step1: create alter drop show user
sql create user u1 pass 'taosdata'
sql show users

View File

@ -221,7 +221,7 @@ class TDTestCase:
tdSql.execute("use testdb")
# bug need fix
tdSql.query("select last_row(c1 ,NULL) from testdb.t1")
tdSql.error("select last_row(c1 ,NULL) from testdb.t1")
error_sql_lists = [
"select last_row from testdb.t1",

View File

@ -2,6 +2,7 @@ import taos
import sys
import datetime
import inspect
import random
from util.log import *
from util.sql import *
@ -32,15 +33,23 @@ class TDTestCase:
''')
for i in range(10):
tdSql.execute(f"create table rct{i} using readings (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')")
tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')")
if i == 1 or i == 2 :
tdLog.debug(f"create table rct{i} using readings (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}', NULL,'v2.3')")
tdSql.execute(f"create table rct{i} using readings (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}', NULL,'v2.3')")
else :
tdSql.execute(f"create table rct{i} using readings (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')")
if i == 1 or i == 2 :
tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}',NULL ,'v2.3')")
else:
tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')")
for j in range(10):
for i in range(10):
for i in range(100):
tdSql.execute(
f"insert into rct{j} values ( {ts+i*10000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )"
f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )"
)
status= random.randint(0,1)
tdSql.execute(
f"insert into dct{j} values ( {ts+i*10000}, {1+i*0.1},{1400+i*15}, {i},{1500+i*20}, {150+i*2},{5+i} )"
f"insert into dct{j} values ( {ts+i*60000}, {1+i*0.1},{1400+i*15}, {status},{1500+i*20}, {150+i*2},{5+i} )"
)
tdSql.execute("insert into dct9 (ts,fuel_state) values('2021-07-13 14:06:33.123Z',1.2) ;")
# def check_avg(self ,origin_query , check_query):
@ -109,12 +118,12 @@ class TDTestCase:
tdSql.query("select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;")
# 6. avg-daily-driving-session
#taosc core dumped
tdSql.execute("create table random_measure2_1 (ts timestamp,ela float, name binary(40))")
tdSql.query("SELECT ts,diff(mv) AS difka FROM (SELECT ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name,ts interval(10m) fill(value,0)) GROUP BY name,ts;")
tdSql.query("select name,diff(mv) AS difka FROM (SELECT ts,name,mv FROM (SELECT _wstart as ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0))) group BY name ;")
tdSql.query("SELECT _wstart,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0)")
# # 6. avg-daily-driving-session
# #taosc core dumped
# tdSql.execute("create table random_measure2_1 (ts timestamp,ela float, name binary(40))")
# tdSql.query("SELECT ts,diff(mv) AS difka FROM (SELECT ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name,ts interval(10m) fill(value,0)) GROUP BY name,ts;")
# tdSql.query("select name,diff(mv) AS difka FROM (SELECT ts,name,mv FROM (SELECT _wstart as ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0))) group BY name ;")
# tdSql.query("SELECT _wstart,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0)")
# 7. avg-load
tdSql.query("SELECT fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml FROM diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;")
@ -123,6 +132,13 @@ class TDTestCase:
tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
tdSql.query("SELECT _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
# 9. breakdown-frequency
# NULL tag --- count(NULL) returns 0, expected count(NULL) = 100
tdSql.query("select tbname,count(model),model from readings partition by tbname,model;")
# when model=NULL, count of the other columns is 0
tdSql.query("select tbname,count(name),model from readings where model=NULL partition by tbname,model;")
tdSql.query(" SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;")
#it's already supported:
# last-loc
tdSql.query("SELECT last_row(ts),latitude,longitude,name,driver FROM readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;")

View File

@ -124,9 +124,10 @@ class TDTestCase:
tdDnodes[0].stoptaosd()
try:
cluster.checkConnectStatus(2)
tdLog.exit(" The election still succeeds when leader of both mnodes are killed ")
tdLog.notice(" The election still succeeds when leader of both mnodes are killed ")
except Exception:
pass
tdSql.error("create user user1 pass '123';")
tdLog.info("start leader")
tdDnodes[0].starttaosd()
if clusterComCheck.checkMnodeStatus(2) :

View File

@ -160,7 +160,7 @@ class TDTestCase:
stableName= '%s_%d'%(paraDict['stbName'],i)
newTdSql=tdCom.newTdSql()
clusterComCreate.create_ctable(newTdSql, paraDict["dbName"],stableName,stableName, paraDict['ctbNum'])
#insert date
#insert data
for i in range(paraDict['stbNumbers']):
stableName= '%s_%d'%(paraDict['stbName'],i)
newTdSql=tdCom.newTdSql()
@ -170,7 +170,8 @@ class TDTestCase:
dnode6Port=int(6030+5*100)
tdSql.execute("create dnode '%s:%d'"%(hostname,dnode6Port))
clusterComCheck.checkDnodes(dnodeNumbers)
for tr in threads:
tr.join()
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
@ -202,8 +203,7 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
for tr in threads:
tr.join()
clusterComCheck.checkDnodes(dnodeNumbers)

View File

@ -68,10 +68,10 @@ class TDTestCase:
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db',
'dbNumbers': 6,
'dbNumbers': 10,
'dropFlag': 1,
'event': '',
'vgroups': 4,
'vgroups': 2,
'replica': 1,
'stbName': 'stb',
'stbNumbers': 100,
@ -170,7 +170,7 @@ class TDTestCase:
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='mnode')
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=10,stopRole='mnode')
def stop(self):
tdSql.close()

View File

@ -124,37 +124,37 @@ class TDTestCase:
for tr in threads:
tr.start()
# tdLog.info("Take turns stopping Mnodes ")
# while stopcount < restartNumbers:
# tdLog.info(" restart loop: %d"%stopcount )
# if stopRole == "mnode":
# for i in range(mnodeNums):
# tdDnodes[i].stoptaosd()
# # sleep(10)
# tdDnodes[i].starttaosd()
# # sleep(10)
# elif stopRole == "vnode":
# for i in range(vnodeNumbers):
# tdDnodes[i+mnodeNums].stoptaosd()
# # sleep(10)
# tdDnodes[i+mnodeNums].starttaosd()
# # sleep(10)
# elif stopRole == "dnode":
# for i in range(dnodeNumbers):
# tdDnodes[i].stoptaosd()
# # sleep(10)
# tdDnodes[i].starttaosd()
# # sleep(10)
tdLog.info("Take turns stopping Mnodes ")
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
for i in range(mnodeNums):
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
# sleep(10)
tdDnodes[i+mnodeNums].starttaosd()
# sleep(10)
elif stopRole == "dnode":
for i in range(dnodeNumbers):
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# # dnodeNumbers don't include database of schema
# if clusterComCheck.checkDnodes(dnodeNumbers):
# tdLog.info("check dnodes status is ready")
# else:
# tdLog.info("check dnodes status is not ready")
# self.stopThread(threads)
# tdLog.exit("one or more of dnodes failed to start ")
# # self.check3mnode()
# stopcount+=1
# dnodeNumbers doesn't include the schema database
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("check dnodes status is ready")
else:
tdLog.info("check dnodes status is not ready")
self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
for tr in threads:
tr.join()

View File

@ -0,0 +1,270 @@
import taos
import sys
import time
import socket
import os
import threading
from enum import Enum
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.snapshot = 0
self.vgroups = 4
self.ctbNum = 1
self.rowsPerTbl = 10000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 2000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 2000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 5,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
# update to half tables
# paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_UpperCase_stb1'
# queryString = "select ts, c1, c2 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 0
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 1)
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1,\
enable.auto.commit:true,\
auto.commit.interval.ms:1000,\
auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
tdLog.info("run select sql from db")
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
tdLog.info("act consume rows: %d, act query rows: %d"%(totalConsumeRows, totalRowsFromQuery))
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 2000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 5,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
# update to half tables
# paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl / 2)
# paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tmqCom.initConsumerTable()
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_UpperCase_stb1'
queryString = "select ts, c1, c2 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
# queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 1
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/2))
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1,\
enable.auto.commit:true,\
auto.commit.interval.ms:1000,\
auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl / 2)
paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
if self.snapshot == 0:
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error!")
elif self.snapshot == 1:
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
# tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
tdLog.printNoPrefix("=============================================")
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.tmqCase1()
self.tmqCase2()
self.prepareTestEnv()
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,271 @@
import taos
import sys
import time
import socket
import os
import threading
from enum import Enum
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.snapshot = 0
self.vgroups = 4
self.ctbNum = 100
self.rowsPerTbl = 1000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 200,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 200,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 5,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
# update to half tables
# paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_UpperCase_stb1'
queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
# queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'beijing' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 0
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 1)
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1,\
enable.auto.commit:true,\
auto.commit.interval.ms:1000,\
auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
tdLog.info("run select sql from db")
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
tdLog.info("act consume rows: %d, act query rows: %d"%(totalConsumeRows, totalRowsFromQuery))
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
# tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 200,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
# update to half tables
# paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl / 2)
# paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tmqCom.initConsumerTable()
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_UpperCase_stb1'
# queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'beijing' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 1
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2))
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1,\
enable.auto.commit:true,\
auto.commit.interval.ms:1000,\
auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl * 7/10)
paraDict['ctbStartIdx'] = int(paraDict['ctbNum'] * 7/10)
# paraDict["rowsPerTbl"] = 100
paraDict["ctbPrefix"] = 'newCtbx'
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
if self.snapshot == 0:
if totalConsumeRows != expectrowcnt / 2:
tdLog.exit("tmq consume rows error!")
elif self.snapshot == 1:
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error when snapshot is 1!")
# tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
tdLog.printNoPrefix("=============================================")
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.tmqCase1()
self.tmqCase2()
self.prepareTestEnv()
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -88,7 +88,7 @@ class TDTestCase:
tmqCom.startTmqSimProcess(self.pollDelay,self.paraDict["dbName"],self.showMsg, self.showRow,self.cdbName)
tdLog.info("After waiting for a period of time, drop one stable")
time.sleep(10)
time.sleep(3)
tdSql.execute("drop table %s.%s" %(self.paraDict['dbName'], self.paraDict['stbName']))
tdLog.info("wait result from consumer, then check it")

View File

@ -360,7 +360,7 @@ class TMQCom:
return pThread
def insert_data_with_autoCreateTbl(self,tsql,dbName,stbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0,ctbStartIdx=0):
tdLog.debug("start to insert data wiht auto create child table ............")
tdLog.debug("start to insert data with auto create child table ............")
tsql.execute("use %s" %dbName)
pre_insert = "insert into "
sql = pre_insert

View File

@ -0,0 +1,372 @@
from distutils.log import error
import taos
import sys
import time
import socket
import os
import threading
import subprocess
import platform
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.snapshot = 0
self.vgroups = 4
self.ctbNum = 100
self.rowsPerTbl = 1000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
def prepare_udf_so(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
print(projPath)
if platform.system().lower() == 'windows':
self.libudf1 = subprocess.Popen('(for /r %s %%i in ("udf1.d*") do @echo %%i)|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
if (not tdDnodes.dnodes[0].remoteIP == ""):
tdDnodes.dnodes[0].remote_conn.get(tdDnodes.dnodes[0].config["path"]+'/debug/build/lib/libudf1.so',projPath+"\\debug\\build\\lib\\")
self.libudf1 = self.libudf1.replace('udf1.dll','libudf1.so')
else:
self.libudf1 = subprocess.Popen('find %s -name "libudf1.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
self.libudf1 = self.libudf1.replace('\r','').replace('\n','')
return
def create_udf_function(self):
# create scalar functions
tdSql.execute("create function udf1 as '%s' outputtype int bufSize 8;"%self.libudf1)
functions = tdSql.getResult("show functions")
function_nums = len(functions)
if function_nums == 1:
tdLog.info("create one udf functions success ")
else:
tdLog.exit("create udf functions fail")
return
def checkFileContent(self, consumerId, queryString):
buildPath = tdCom.getBuildPath()
cfgPath = tdCom.getClientCfgPath()
dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId)
cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
tdLog.info(cmdStr)
os.system(cmdStr)
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
# skip first line for it is schema
queryFile.readline()
while True:
dst = queryFile.readline()
src = consumeFile.readline()
if dst:
if dst != src:
tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
else:
break
return
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: multi sub table")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1', 'topic2']
expectRowsList = []
tmqCom.initConsumerTable()
# tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
# tdLog.info("create stb")
# tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
# tdLog.info("create ctb")
# tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# tdLog.info("insert data")
# tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
tdLog.info("create topics from stb with filter")
queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("0 tmq consume rows error!")
# self.checkFileContent(consumerId, queryString)
# tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
queryString = "select ts, c1,udf1(c1),sin(udf1(c2)), log(udf1(c2)) from %s.%s where udf1(c1) == 88 or sin(udf1(c1)) > 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 1
topicList = topicNameList[1]
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0]))
tdLog.exit("1 tmq consume rows error!")
# self.checkFileContent(consumerId, queryString)
# tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: multi sub table, consume with auto create tble and insert data")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1', 'topic2']
expectRowsList = []
tmqCom.initConsumerTable()
# tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
# tdLog.info("create stb")
# tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
# tdLog.info("create ctb")
# tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# tdLog.info("insert data")
# tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
tdLog.info("create topics from stb with filter")
queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# tdSql.query(queryString)
# expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 2
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'
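# These keys map to TMQ consumer settings: the consumer joins group 'cgrp1',
# auto commit is disabled (offsets are committed manually, matching
# ifManualCommit=1), and auto.offset.reset 'earliest' starts from the oldest
# available data.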
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl)
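# Shifting startTs by rowsPerTbl makes the second pass write new timestamps
# rather than update existing rows, assuming the insert helper spaces rows
# one millisecond apart from startTs.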
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("2 tmq consume rows error!")
# self.checkFileContent(consumerId, queryString)
# tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
queryString = "select ts, c1,udf1(c1),sin(udf1(c2)), log(udf1(c2)) from %s.%s where udf1(c1) == 88 or sin(udf1(c1)) > 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 3
topicList = topicNameList[1]
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0]))
tdLog.exit("3 tmq consume rows error!")
# self.checkFileContent(consumerId, queryString)
# tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
# tdSql.prepare()
self.prepare_udf_so()
self.create_udf_function()
tdLog.printNoPrefix("=============================================")
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.prepareTestEnv()
self.tmqCase1()
self.tmqCase2()
# tdLog.printNoPrefix("====================================================================")
# tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
# self.prepareTestEnv()
# self.snapshot = 1
# self.tmqCase1()
# self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,372 @@
from distutils.log import error
import taos
import sys
import time
import socket
import os
import threading
import subprocess
import platform
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.snapshot = 0
self.vgroups = 4
self.ctbNum = 100
self.rowsPerTbl = 1000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
def prepare_udf_so(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
print(projPath)
if platform.system().lower() == 'windows':
self.libudf1 = subprocess.Popen('(for /r %s %%i in ("udf1.d*") do @echo %%i)|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
if (not tdDnodes.dnodes[0].remoteIP == ""):
tdDnodes.dnodes[0].remote_conn.get(tdDnodes.dnodes[0].config["path"]+'/debug/build/lib/libudf1.so',projPath+"\\debug\\build\\lib\\")
self.libudf1 = self.libudf1.replace('udf1.dll','libudf1.so')
else:
self.libudf1 = subprocess.Popen('find %s -name "libudf1.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
self.libudf1 = self.libudf1.replace('\r','').replace('\n','')
return
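# Editor's sketch (hypothetical, not used by the test): the shell pipeline
# above could equally be a pure-Python walk of the build tree:
#
#   for root, _, files in os.walk(projPath):
#       if 'libudf1.so' in files and 'lib' in root:
#           self.libudf1 = os.path.join(root, 'libudf1.so')
#           break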
def create_udf_function(self):
# create scalar functions
tdSql.execute("create function udf1 as '%s' outputtype int bufSize 8;"%self.libudf1)
functions = tdSql.getResult("show functions")
function_nums = len(functions)
if function_nums == 1:
tdLog.info("create one udf function successfully")
else:
tdLog.exit("create udf function failed")
return
def checkFileContent(self, consumerId, queryString):
buildPath = tdCom.getBuildPath()
cfgPath = tdCom.getClientCfgPath()
dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId)
cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
tdLog.info(cmdStr)
os.system(cmdStr)
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
# skip first line for it is schema
queryFile.readline()
while True:
dst = queryFile.readline()
src = consumeFile.readline()
if dst:
if dst != src:
tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
else:
break
return
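# Note: the line-by-line comparison above assumes the consumer dump and the
# direct query produce rows in the same order; a context-manager form, e.g.
#   with open(consumeRowsFile) as consumeFile, open(dstFile) as queryFile:
# would additionally guarantee both handles are closed (hypothetical rewrite,
# not part of the original test).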
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: multi sub table")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1', 'topic2']
expectRowsList = []
tmqCom.initConsumerTable()
# tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
# tdLog.info("create stb")
# tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
# tdLog.info("create ctb")
# tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# tdLog.info("insert data")
# tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
tdLog.info("create topics from stb with filter")
queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("0 tmq consume rows error!")
# self.checkFileContent(consumerId, queryString)
# tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
queryString = "select ts, c1,udf1(c1),sin(udf1(c2)), log(udf1(c2)) from %s.%s where udf1(c1) == 88 or sin(udf1(c1)) > 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 1
topicList = topicNameList[1]
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0]))
tdLog.exit("1 tmq consume rows error!")
# self.checkFileContent(consumerId, queryString)
# tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: multi sub table, consume with auto create tble and insert data")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1', 'topic2']
expectRowsList = []
tmqCom.initConsumerTable()
# tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
# tdLog.info("create stb")
# tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
# tdLog.info("create ctb")
# tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# tdLog.info("insert data")
# tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
tdLog.info("create topics from stb with filter")
queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# tdSql.query(queryString)
# expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 2
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl)
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("2 tmq consume rows error!")
# self.checkFileContent(consumerId, queryString)
# tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
queryString = "select ts, c1,udf1(c1),sin(udf1(c2)), log(udf1(c2)) from %s.%s where udf1(c1) == 88 or sin(udf1(c1)) > 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 3
topicList = topicNameList[1]
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0]))
tdLog.exit("3 tmq consume rows error!")
# self.checkFileContent(consumerId, queryString)
# tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
# tdSql.prepare()
self.prepare_udf_so()
self.create_udf_function()
# tdLog.printNoPrefix("=============================================")
# tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
# self.prepareTestEnv()
# self.tmqCase1()
# self.tmqCase2()
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.prepareTestEnv()
self.snapshot = 1
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,372 @@
from distutils.log import error
import taos
import sys
import time
import socket
import os
import threading
import subprocess
import platform
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.snapshot = 0
self.vgroups = 4
self.ctbNum = 100
self.rowsPerTbl = 1000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
def prepare_udf_so(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
print(projPath)
if platform.system().lower() == 'windows':
self.libudf1 = subprocess.Popen('(for /r %s %%i in ("udf1.d*") do @echo %%i)|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
if (not tdDnodes.dnodes[0].remoteIP == ""):
tdDnodes.dnodes[0].remote_conn.get(tdDnodes.dnodes[0].config["path"]+'/debug/build/lib/libudf1.so',projPath+"\\debug\\build\\lib\\")
self.libudf1 = self.libudf1.replace('udf1.dll','libudf1.so')
else:
self.libudf1 = subprocess.Popen('find %s -name "libudf1.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
self.libudf1 = self.libudf1.replace('\r','').replace('\n','')
return
def create_udf_function(self):
# create scalar functions
tdSql.execute("create function udf1 as '%s' outputtype int bufSize 8;"%self.libudf1)
functions = tdSql.getResult("show functions")
function_nums = len(functions)
if function_nums == 1:
tdLog.info("create one udf function successfully")
else:
tdLog.exit("create udf function failed")
return
def checkFileContent(self, consumerId, queryString):
buildPath = tdCom.getBuildPath()
cfgPath = tdCom.getClientCfgPath()
dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId)
cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
tdLog.info(cmdStr)
os.system(cmdStr)
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
# skip first line for it is schema
queryFile.readline()
while True:
dst = queryFile.readline()
src = consumeFile.readline()
if dst:
if dst != src:
tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
else:
break
return
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: multi sub table")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1', 'topic2']
expectRowsList = []
tmqCom.initConsumerTable()
# tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
# tdLog.info("create stb")
# tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
# tdLog.info("create ctb")
# tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# tdLog.info("insert data")
# tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
tdLog.info("create topics from stb with filter")
queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("0 tmq consume rows error!")
# self.checkFileContent(consumerId, queryString)
# tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
queryString = "select ts, c1,udf1(c1),sin(udf1(c2)), log(udf1(c2)) from %s.%s where udf1(c1) == 88 or sin(udf1(c1)) > 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 1
topicList = topicNameList[1]
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0]))
tdLog.exit("1 tmq consume rows error!")
# self.checkFileContent(consumerId, queryString)
# tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: multi sub table, consume with auto create tble and insert data")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1', 'topic2']
expectRowsList = []
tmqCom.initConsumerTable()
# tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
# tdLog.info("create stb")
# tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
# tdLog.info("create ctb")
# tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# tdLog.info("insert data")
# tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
tdLog.info("create topics from stb with filter")
queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# tdSql.query(queryString)
# expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 2
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl)
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("2 tmq consume rows error!")
# self.checkFileContent(consumerId, queryString)
# tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
queryString = "select ts, c1,udf1(c1),sin(udf1(c2)), log(udf1(c2)) from %s.%s where udf1(c1) == 88 or sin(udf1(c1)) > 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 3
topicList = topicNameList[1]
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0]))
tdLog.exit("3 tmq consume rows error!")
# self.checkFileContent(consumerId, queryString)
# tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
# tdSql.prepare()
self.prepare_udf_so()
self.create_udf_function()
tdLog.printNoPrefix("=============================================")
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.prepareTestEnv()
self.tmqCase1()
self.tmqCase2()
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.prepareTestEnv()
self.snapshot = 1
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -17,6 +17,12 @@ sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.snapshot = 0
self.vgroups = 4
self.ctbNum = 1
self.rowsPerTbl = 1000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
@ -81,36 +87,87 @@ class TDTestCase:
break
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db1',
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':2}, {'type': 'binary', 'len':20, 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 1000,
'batchNum': 10,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'pollDelay': 3,
'showMsg': 1,
'showRow': 1}
'showRow': 1,
'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: one sub table")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1', 'topic2']
expectRowsList = []
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
tdLog.info("create stb")
tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
tdLog.info("create ctb")
tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
tdLog.info("insert data")
tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
# tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
# tdLog.info("create stb")
# tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
# tdLog.info("create ctb")
# tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# tdLog.info("insert data")
# tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
tdLog.info("create topics from stb with filter")
queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
@ -131,7 +188,7 @@ class TDTestCase:
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
@ -160,7 +217,7 @@ class TDTestCase:
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
@ -178,11 +235,133 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: one sub table, consume with auto create tble and insert data")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1', 'topic2']
expectRowsList = []
tmqCom.initConsumerTable()
# tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
# tdLog.info("create stb")
# tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
# tdLog.info("create ctb")
# tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# tdLog.info("insert data")
# tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
tdLog.info("create topics from stb with filter")
queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# tdSql.query(queryString)
# expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 2
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl)
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("2 tmq consume rows error!")
self.checkFileContent(consumerId, queryString)
tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
queryString = "select ts, c1,udf1(c1),sin(udf1(c2)), log(udf1(c2)) from %s.%s where udf1(c1) == 88 or sin(udf1(c1)) > 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 3
topicList = topicNameList[1]
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0]))
tdLog.exit("3 tmq consume rows error!")
self.checkFileContent(consumerId, queryString)
tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
tdSql.prepare()
# tdSql.prepare()
self.prepare_udf_so()
self.create_udf_function()
tdLog.printNoPrefix("=============================================")
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.prepareTestEnv()
self.tmqCase1()
self.tmqCase2()
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.prepareTestEnv()
self.snapshot = 1
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()

View File

@ -0,0 +1,275 @@
import taos
import sys
import time
import socket
import os
import threading
from enum import Enum
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.snapshot = 0
self.vgroups = 4
self.ctbNum = 50
self.rowsPerTbl = 1000
self.autoCtbPrefix = 'aCtb'
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 1000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=self.autoCtbPrefix,
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 1000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 5,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
# update half of the tables (half rows per table)
paraDict['ctbNum'] = int(self.ctbNum/2)
paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=self.autoCtbPrefix,
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 0
if self.snapshot == 0:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2*2))
elif self.snapshot == 1:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2))
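# A sketch of the intended accounting: the baseline 2 covers the two full
# datasets from prepareTestEnv; the extra 1/2*1/2*2 in WAL mode counts the
# two half-table, half-row passes above. With snapshot=1 those passes only
# overwrite existing timestamps, so deduplication leaves just the factor 2.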
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1,\
enable.auto.commit:true,\
auto.commit.interval.ms:1000,\
auto.offset.reset:earliest'
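# Unlike the manual-commit consumers above, this key list enables auto commit
# every 1000 ms; ifManualCommit is still passed through to tmq_sim alongside
# the auto-commit setting.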
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
tdSql.query(queryString)
totalRowsInserted = tdSql.getRows()
tdLog.info("act consume rows: %d, expect consume rows: %d, act insert rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsInserted))
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error!")
# tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 1000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 5,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdSql.query("flush database %s"%(paraDict['dbName']))
# update half of the tables (half rows per table)
paraDict['ctbNum'] = int(self.ctbNum/2)
paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl / 2)
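# With startTs advanced by half the rows, the half-row passes below land on
# timestamps that already exist in the pre-created tables (updates), while
# the fresh aCtby* tables receive genuinely new rows (assuming 1 ms row
# spacing from startTs).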
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=self.autoCtbPrefix,
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2))
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="aCtby",
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2))
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2))
tmqCom.initConsumerTable()
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# paraDict['ctbNum'] = self.ctbNum
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 1
if self.snapshot == 0:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2*2 + 1/2*1/2))
elif self.snapshot == 1:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2))
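# The multipliers appear to mirror the insert history: a baseline of 2 from
# prepareTestEnv, 1/2*1/2*2 from tmqCase1's half passes, and another 1/2*1/2
# from the passes above; the snapshot=1 branch expects fewer rows because
# overwritten timestamps are deduplicated when reading from tsdb.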
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1,\
enable.auto.commit:true,\
auto.commit.interval.ms:1000,\
auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
tdSql.query(queryString)
totalRowsInserted = tdSql.getRows()
tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt))
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error!")
# tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
# tdSql.prepare()
self.prepareTestEnv()
tdLog.printNoPrefix("=============================================")
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.tmqCase1()
self.tmqCase2()
# self.prepareTestEnv()
# tdLog.printNoPrefix("====================================================================")
# tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
# self.snapshot = 1
# self.tmqCase1()
# self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,275 @@
import taos
import sys
import time
import socket
import os
import threading
from enum import Enum
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.snapshot = 0
self.vgroups = 4
self.ctbNum = 50
self.rowsPerTbl = 1000
self.autoCtbPrefix = 'aCtb'
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 1000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
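# a second full pass through auto-created tables (prefix aCtb); together with the interlaced pass above it forms the factor 2 in the expectrowcnt formulas of the test cases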
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=self.autoCtbPrefix,
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 1000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 5,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
# update data for half of the tables, with half of the rows each
paraDict['ctbNum'] = int(self.ctbNum/2)
paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
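# the two insert passes below reuse the original startTs, so every row overwrites an existing one; in WAL mode (snapshot=0) these updates are still delivered to the consumer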
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=self.autoCtbPrefix,
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 0
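# snapshot=0 consumes from the WAL, so the two update passes above add 2 * (1/2 * 1/2) on top of the two full passes; snapshot=1 reads the merged tsdb data, where the updates collapse into the original rows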
if self.snapshot == 0:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2*2))
elif self.snapshot == 1:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2))
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1,\
enable.auto.commit:true,\
auto.commit.interval.ms:1000,\
auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
tdSql.query(queryString)
totalRowsInserted = tdSql.getRows()
tdLog.info("act consume rows: %d, expect consume rows: %d, act insert rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsInserted))
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error!")
# tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 1000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 5,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdSql.query("flush database %s"%(paraDict['dbName']))
# update data for half of the tables, with half of the rows each
paraDict['ctbNum'] = int(self.ctbNum/2)
paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl / 2)
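# shift startTs by half a table's rows so the new data lands on the second half of the existing timestamp range: the aCtb and ctb passes below update existing tables, while the aCtby pass auto-creates brand-new ones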
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=self.autoCtbPrefix,
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2))
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="aCtby",
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2))
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2))
tmqCom.initConsumerTable()
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 1
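# snapshot=0 counts all three quarter-passes (1/2 tables * 1/2 rows each) inserted in this case; snapshot=1 keeps only the aCtby pass, since the other two merely overwrite rows that the tsdb merge deduplicates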
if self.snapshot == 0:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2*2 + 1/2*1/2))
elif self.snapshot == 1:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2))
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1,\
enable.auto.commit:true,\
auto.commit.interval.ms:1000,\
auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
tdSql.query(queryString)
totalRowsInserted = tdSql.getRows()
tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt))
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error!")
# tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
# tdSql.prepare()
# self.prepareTestEnv()
# tdLog.printNoPrefix("=============================================")
# tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
# self.tmqCase1()
# self.tmqCase2()
self.prepareTestEnv()
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -18,7 +18,7 @@ class TDTestCase:
def __init__(self):
self.snapshot = 0
self.vgroups = 4
self.ctbNum = 100
self.ctbNum = 50
self.rowsPerTbl = 1000
self.autoCtbPrefix = 'aCtb'
@ -39,9 +39,9 @@ class TDTestCase:
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1000,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 10000,
'batchNum': 1000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
@ -71,7 +71,6 @@ class TDTestCase:
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
# insert data via auto-created tables, then start the consumer
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
@ -85,9 +84,9 @@ class TDTestCase:
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1000,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 10000,
'batchNum': 1000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 5,
'showMsg': 1,
@ -167,9 +166,9 @@ class TDTestCase:
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1000,
'ctbNum': 100,
'rowsPerTbl': 1000,
'batchNum': 10000,
'batchNum': 1000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 5,
'showMsg': 1,

View File

@ -132,20 +132,22 @@ python3 ./test.py -f 2-query/max_partition.py
python3 ./test.py -f 2-query/last_row.py
python3 ./test.py -f 6-cluster/5dnode1mnode.py
#BUG python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3
#python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeStopLoop.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 5 -M 3
# python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeStopInsert.py
# python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5
# python3 test.py -f 6-cluster/5dnode3mnodeStopConnect.py -N 5 -M 3
# BUG Redirect python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 6 -M 3 -C 5
# python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 6 -M 3 -C 5
python3 ./test.py -f 7-tmq/basic5.py
python3 ./test.py -f 7-tmq/subscribeDb.py
@ -166,7 +168,6 @@ python3 ./test.py -f 7-tmq/schema.py
python3 ./test.py -f 7-tmq/stbFilter.py
python3 ./test.py -f 7-tmq/tmqCheckData.py
python3 ./test.py -f 7-tmq/tmqCheckData1.py
python3 ./test.py -f 7-tmq/tmqUdf.py
#python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 5
python3 ./test.py -f 7-tmq/tmqConsumerGroup.py
python3 ./test.py -f 7-tmq/tmqShow.py
@ -186,8 +187,15 @@ python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py
#python3 ./test.py -f 7-tmq/tmqDnodeRestart.py
python3 ./test.py -f 7-tmq/tmqUpdate-1ctb.py
python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb.py
python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot0.py
python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot1.py
#python3 ./test.py -f 7-tmq/tmqDelete-1ctb.py
python3 ./test.py -f 7-tmq/tmqUdf.py
python3 ./test.py -f 7-tmq/tmqUdf-multCtb-snapshot0.py
python3 ./test.py -f 7-tmq/tmqUdf-multCtb-snapshot1.py
python3 ./test.py -f 7-tmq/stbTagFilter-1ctb.py
python3 ./test.py -f 7-tmq/stbTagFilter-multiCtb.py
#------------querPolicy 2-----------

View File

@ -35,14 +35,14 @@ enum {
/* label stack */
typedef struct {
char top; /* number of labels */
int8_t top; /* number of labels */
int16_t pos[MAX_NUM_LABLES]; /* the position of the label */
char label[MAX_NUM_LABLES][MAX_LABEL_LEN]; /* name of the label */
} SLabel;
/* block definition */
typedef struct {
char top; /* the number of blocks stacked */
int8_t top; /* the number of blocks stacked */
char type[MAX_NUM_BLOCK]; /* the block type */
int16_t *pos[MAX_NUM_BLOCK]; /* position of the jump for if/elif/case */
int16_t back[MAX_NUM_BLOCK]; /* go back, endw and continue */

View File

@ -758,7 +758,6 @@ void shellReadHistory() {
taosCloseFile(&pFile);
int64_t file_size;
if (taosStatFile(pHistory->file, &file_size, NULL) == 0 && file_size > SHELL_MAX_COMMAND_SIZE) {
fprintf(stdout,"%s(%d) %s %08" PRId64 "\n", __FILE__, __LINE__,__func__,taosGetSelfPthreadId());fflush(stdout);
TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_TRUNC);
if (pFile == NULL) return;
int32_t endIndex = pHistory->hstart;

@ -1 +1 @@
Subproject commit 0b8a3373bb7548f8106d13e7d3b0a988d3c4d48a
Subproject commit 2b75339b8b5c239619d1f09970d03075c58140dd

@ -1 +1 @@
Subproject commit c5fded266d3b10508e38bf3285bb7ecf798bc343
Subproject commit d8cf0e7e067d193cfaf3e920b6ec6cbb9b9f4165