Merge branch '3.0' into feature/stream

Liu Jicong committed on 2022-10-28 09:18:59 +08:00 (commit 9b1ba10fbc)
77 changed files with 1776 additions and 720 deletions


@@ -2,7 +2,7 @@
 # taos-tools
 ExternalProject_Add(taos-tools
   GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
-  GIT_TAG f9c1d32
+  GIT_TAG 7321fbb
   SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
   BINARY_DIR ""
   #BUILD_IN_SOURCE TRUE


@@ -270,7 +270,7 @@ if(${JEMALLOC_ENABLED})
     PREFIX "jemalloc"
     SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
     BUILD_IN_SOURCE 1
-    CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --disable-initial-exec-tls --with-malloc-conf='background_thread:true,metadata_thp:auto'
+    CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --disable-initial-exec-tls
     BUILD_COMMAND ${MAKE}
   )
   INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include)


@@ -0,0 +1,10 @@
---
title: Releases
---
```mdx-code-block
import DocCardList from '@theme/DocCardList';
import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
<DocCardList items={useCurrentSidebarCategory().items}/>
```


@@ -91,6 +91,7 @@ extern bool tsQueryPlannerTrace;
 extern int32_t tsQueryNodeChunkSize;
 extern bool tsQueryUseNodeAllocator;
 extern bool tsKeepColumnName;
+extern bool tsEnableQueryHb;
 // client
 extern int32_t tsMinSlidingTime;


@@ -38,7 +38,7 @@ extern bool gRaftDetailLog;
 #define SYNC_DEL_WAL_MS (1000 * 60)
 #define SYNC_ADD_QUORUM_COUNT 3
 #define SYNC_MNODE_LOG_RETENTION 10000
-#define SYNC_VNODE_LOG_RETENTION 500
+#define SYNC_VNODE_LOG_RETENTION 100
 #define SYNC_APPEND_ENTRIES_TIMEOUT_MS 10000


@@ -234,9 +234,9 @@ typedef enum ELogicConditionType {
  * - Secondly, if all cols are VarDataT type except primary key, we need 4 bits to store the offset, thus
  *   the final value is 65531-(4096-1)*4 = 49151.
  */
 #define TSDB_MAX_BYTES_PER_ROW 49151
 #define TSDB_MAX_TAGS_LEN 16384
 #define TSDB_MAX_TAGS 128
 #define TSDB_MAX_COL_TAG_NUM (TSDB_MAX_COLUMNS + TSDB_MAX_TAGS)
 #define TSDB_MAX_JSON_TAG_LEN 16384
@@ -370,13 +370,13 @@ typedef enum ELogicConditionType {
 #define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1
 #define TSDB_REP_DEF_DB_WAL_RET_PERIOD 0
-#define TSDB_REPS_DEF_DB_WAL_RET_PERIOD (24 * 60 * 60 * 4)
+#define TSDB_REPS_DEF_DB_WAL_RET_PERIOD 0
 #define TSDB_DB_MIN_WAL_RETENTION_SIZE -1
 #define TSDB_REP_DEF_DB_WAL_RET_SIZE 0
-#define TSDB_REPS_DEF_DB_WAL_RET_SIZE -1
+#define TSDB_REPS_DEF_DB_WAL_RET_SIZE 0
 #define TSDB_DB_MIN_WAL_ROLL_PERIOD 0
 #define TSDB_REP_DEF_DB_WAL_ROLL_PERIOD 0
-#define TSDB_REPS_DEF_DB_WAL_ROLL_PERIOD (24 * 60 * 60 * 1)
+#define TSDB_REPS_DEF_DB_WAL_ROLL_PERIOD 0
 #define TSDB_DB_MIN_WAL_SEGMENT_SIZE 0
 #define TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE 0
@@ -409,7 +409,7 @@ typedef enum ELogicConditionType {
 #define TSDB_MAX_RPC_THREADS 10
 #endif
 #define TSDB_QUERY_TYPE_NON_TYPE 0x00u  // none type
 #define TSDB_META_COMPACT_RATIO 0  // disable tsdb meta compact by default
@@ -486,7 +486,7 @@ enum {
 #define MAX_NUM_STR_SIZE 40
 #define MAX_META_MSG_IN_BATCH 1048576
 #define MAX_META_BATCH_RSP_SIZE (1 * 1048576 * 1024)
 #ifdef __cplusplus


@@ -0,0 +1,26 @@
FROM ubuntu:18.04
WORKDIR /root
ARG pkgFile
ARG dirName
ARG cpuType
RUN echo ${pkgFile} && echo ${dirName}
COPY ${pkgFile} /root/
ENV TINI_VERSION v0.19.0
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini
ENV DEBIAN_FRONTEND=noninteractive
WORKDIR /root/
RUN tar -zxf ${pkgFile} && cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root && rm /root/${pkgFile} && rm -rf /root/${dirName} && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8 && apt-get clean && rm -rf /var/lib/apt/lists/ && chmod +x /tini
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \
LC_CTYPE=en_US.UTF-8 \
LANG=en_US.UTF-8 \
LC_ALL=en_US.UTF-8
COPY ./run.sh /usr/bin/
COPY ./bin/* /usr/bin/
ENTRYPOINT ["/tini", "--", "/usr/bin/entrypoint.sh"]
CMD ["bash", "-c", "/usr/bin/run.sh"]
VOLUME [ "/var/lib/taos", "/var/log/taos", "/corefile" ]


@@ -19,8 +19,9 @@ passWord=""
 pkgFile=""
 verType="stable"
 dockerLatest="n"
+cloudBuild="n"
-while getopts "hc:n:p:f:V:a:b:" arg
+while getopts "hc:n:p:f:V:a:b:d:" arg
 do
   case $arg in
     c)
@@ -47,6 +48,10 @@ do
       #echo "verType=$OPTARG"
      verType=$(echo $OPTARG)
      ;;
+    d)
+      #echo "cloudBuild=$OPTARG"
+      cloudBuild=$(echo $OPTARG)
+      ;;
    a)
      #echo "dockerLatest=$OPTARG"
      dockerLatest=$(echo $OPTARG)
@@ -58,6 +63,7 @@ do
      echo " -V [stable | beta] "
      echo " -f [pkg file] "
      echo " -a [y | n ] "
+      echo " -d [cloud build ] "
      exit 0
      ;;
    ?) #unknow option
@@ -83,6 +89,9 @@ else
  echo "unknow verType, nor stabel or beta"
  exit 1
 fi
+if [ "$cloudBuild" == "y" ]; then
+  dockername=cloud-${dockername}
+fi
 echo "cpuType=${cpuType} version=${version} pkgFile=${pkgFile} verType=${verType} "
@@ -90,10 +99,15 @@ echo "$(pwd)"
 echo "====NOTES: ${pkgFile} must be in the same directory as dockerbuild.sh===="
 scriptDir=$(dirname $(readlink -f $0))
-comunityArchiveDir=/nas/TDengine/v$version/community # community versionpackage directory
 communityDir=${scriptDir}/../../../community
 DockerfilePath=${communityDir}/packaging/docker/
-Dockerfile=${communityDir}/packaging/docker/Dockerfile
+if [ "$cloudBuild" == "y" ]; then
+  comunityArchiveDir=/nas/TDengine/v$version/cloud
+  Dockerfile=${communityDir}/packaging/docker/DockerfileCloud
+else
+  comunityArchiveDir=/nas/TDengine/v$version/community
+  Dockerfile=${communityDir}/packaging/docker/Dockerfile
+fi
 cd ${scriptDir}
 cp -f ${comunityArchiveDir}/${pkgFile} .
@@ -111,42 +125,15 @@ else
 fi
 docker build --rm -f "${Dockerfile}" --network=host -t tdengine/tdengine-${dockername}:${version} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName} --build-arg cpuType=${cpuTypeAlias}
-docker login -u tdengine -p ${passWord}  #replace the docker registry username and password
-docker push tdengine/tdengine-${dockername}:${version}
-if [ -n "$(docker ps -aq)" ] ;then
-  echo "delete docker process"
-  docker stop $(docker ps -aq)
-  docker rm $(docker ps -aq)
+if [ "$cloudBuild" != "y" ]; then
+  docker login -u tdengine -p ${passWord}  #replace the docker registry username and password
+  docker push tdengine/tdengine-${dockername}:${version}
 fi
-if [ -n "$(pidof taosd)" ] ;then
-  echo "kill taosd "
-  kill -9 $(pidof taosd)
-fi
-if [ -n "$(pidof power)" ] ;then
-  echo "kill power "
-  kill -9 $(pidof power)
-fi
-echo ">>>>>>>>>>>>> check whether tdengine/tdengine-${dockername}:${version} has been published"
-docker run -d --name doctest -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine-${dockername}:${version}
-sleep 2
-curl -u root:taosdata -d 'show variables;' 127.0.0.1:6041/rest/sql > temp1.data
-data_version=$( cat temp1.data |jq .data| jq '.[]' |grep "version" -A 2 -B 1 | jq ".[1]")
-echo "${data_version}"
-if [ "${data_version}" == "\"${version}\"" ] ; then
-  echo "docker version is right "
-else
-  echo "docker version is wrong "
-  exit 1
-fi
-rm -rf temp1.data
 # set this version to latest version
-if [ ${dockerLatest} == 'y' ] ;then
+if [ "$cloudBuild" != "y" ] && [ ${dockerLatest} == 'y' ] ;then
   docker tag tdengine/tdengine-${dockername}:${version} tdengine/tdengine-${dockername}:latest
   docker push tdengine/tdengine-${dockername}:latest
 fi
+
+rm -f ${pkgFile}
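A hedged usage sketch of the new cloud path (the flag-to-variable mapping is inferred from the getopts string above; the version and package file name are placeholders): with -d y the script selects DockerfileCloud, pulls the package from /nas/TDengine/v&lt;version&gt;/cloud, prefixes the image name with cloud-, and skips the docker login/push and :latest steps.

```bash
# Sketch only: build a cloud image locally without publishing it.
# -c cpuType, -n version, -f package file, -V stable|beta, -a tag-latest, -d cloud build (values illustrative).
./dockerbuild.sh -c x64 -n 3.0.1.5 -f TDengine-server-3.0.1.5-Linux-x64.tar.gz -V stable -a n -d y
```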

packaging/docker/run.sh (new file, 36 lines)

@@ -0,0 +1,36 @@
#!/bin/bash
TAOS_RUN_TAOSBENCHMARK_TEST_ONCE=0
while ((1))
do
  # echo "outer loop: $a"
  sleep 10
  output=`taos -k`
  status=${output:0:1}
  # echo $output
  # echo $status
  if [ "$status"x = "0"x ]
  then
    taosd &
  fi
  # echo "$status"x "$TAOS_RUN_TAOSBENCHMARK_TEST"x "$TAOS_RUN_TAOSBENCHMARK_TEST_ONCE"x
  if [ "$status"x = "2"x ] && [ "$TAOS_RUN_TAOSBENCHMARK_TEST"x = "1"x ] && [ "$TAOS_RUN_TAOSBENCHMARK_TEST_ONCE"x = "0"x ]
  then
    TAOS_RUN_TAOSBENCHMARK_TEST_ONCE=1
    # result=`taos -s "show databases;" | grep " test "`
    # if [ "${result:0:5}"x != " test"x ]
    # then
    #   taosBenchmark -y -t 1000 -n 1000 -S 900000
    # fi
    taos -s "select stable_name from information_schema.ins_stables where db_name = 'test';"|grep -q -w meters
    if [ $? -ne 0 ]; then
      taosBenchmark -y -t 1000 -n 1000 -S 900000
      taos -s "create user admin_user pass 'NDS65R6t' sysinfo 0;"
      taos -s "GRANT ALL on test.* to admin_user;"
    fi
  fi
  # check taosadapter
  nc -z localhost 6041
  if [ $? -ne 0 ]; then
    taosadapter &
  fi
done
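A hedged sketch of how this supervision loop is meant to be driven (the image tag is a placeholder; the port range comes from the existing docker run example in dockerbuild.sh, and TAOS_RUN_TAOSBENCHMARK_TEST is the switch the loop checks before seeding the test database once):

```bash
# Sketch only: run the image so run.sh keeps taosd/taosadapter alive and,
# because TAOS_RUN_TAOSBENCHMARK_TEST=1, seeds demo data exactly once.
docker run -d --name tdengine-cloud \
  -e TAOS_RUN_TAOSBENCHMARK_TEST=1 \
  -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \
  tdengine/tdengine-cloud-amd64:3.0.1.5   # image tag is a placeholder
```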


@@ -17,7 +17,7 @@ set -e
 # -H [ false | true]
 # set parameters by default value
-verMode=edge # [cluster, edge]
+verMode=edge # [cluster, edge, cloud]
 verType=stable # [stable, beta]
 cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
 osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
@@ -169,7 +169,7 @@ build_time=$(date +"%F %R")
 # get commint id from git
 gitinfo=$(git rev-parse --verify HEAD)
-if [[ "$verMode" == "cluster" ]]; then
+if [[ "$verMode" == "cluster" ]] || [[ "$verMode" == "cloud" ]]; then
   enterprise_dir="${top_dir}/../enterprise"
   cd ${enterprise_dir}
   gitinfoOfInternal=$(git rev-parse --verify HEAD)
@@ -205,7 +205,7 @@ else
   BUILD_HTTP=false
 fi
-if [[ "$verMode" == "cluster" ]]; then
+if [[ "$verMode" == "cluster" ]] || [[ "$verMode" == "cloud" ]]; then
   BUILD_HTTP=internal
 fi
@@ -217,10 +217,12 @@ fi
 # check support cpu type
 if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "arm64" ]] || [[ "$cpuType" == "arm32" ]] || [[ "$cpuType" == "mips64" ]]; then
-  if [ "$verMode" != "cluster" ]; then
+  if [ "$verMode" == "edge" ]; then
     # community-version compile
     cmake ../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
-  else
+  elif [ "$verMode" == "cloud" ]; then
+    cmake ../../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DBUILD_CLOUD=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
+  elif [ "$verMode" == "cluster" ]; then
     if [[ "$dbName" != "taos" ]]; then
       replace_enterprise_$dbName
     fi
@@ -244,7 +246,7 @@ cd ${curr_dir}
 # 3. Call the corresponding script for packaging
 if [ "$osType" != "Darwin" ]; then
-  if [[ "$verMode" != "cluster" ]] && [[ "$pagMode" == "full" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
+  if [[ "$verMode" != "cluster" ]] && [[ "$verMode" != "cloud" ]] && [[ "$pagMode" == "full" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
     ret='0'
     command -v dpkg >/dev/null 2>&1 || { ret='1'; }
     if [ "$ret" -eq 0 ]; then


@@ -39,6 +39,8 @@ release_dir="${top_dir}/release"
 if [ "$verMode" == "cluster" ]; then
   install_dir="${release_dir}/${productName}-enterprise-client-${version}"
+elif [ "$verMode" == "cloud" ]; then
+  install_dir="${release_dir}/${productName}-cloud-client-${version}"
 else
   install_dir="${release_dir}/${productName}-client-${version}"
 fi
@@ -138,6 +140,10 @@ if [ "$verMode" == "cluster" ]; then
   sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_client.sh >>install_client_temp.sh
   mv install_client_temp.sh ${install_dir}/install_client.sh
 fi
+if [ "$verMode" == "cloud" ]; then
+  sed 's/verMode=edge/verMode=cloud/g' ${install_dir}/install_client.sh >>install_client_temp.sh
+  mv install_client_temp.sh ${install_dir}/install_client.sh
+fi
 if [ "$pagMode" == "lite" ]; then
   sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client.sh >>install_client_temp.sh
@@ -161,7 +167,7 @@ if [[ $productName == "TDengine" ]]; then
   mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../tools/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
 fi
-if [ "$verMode" == "cluster" ]; then
+if [ "$verMode" == "cluster" ] || [ "$verMode" == "cloud" ]; then
   # Copy connector
   connector_dir="${code_dir}/connector"
   mkdir -p ${install_dir}/connector


@@ -39,6 +39,8 @@ release_dir="${top_dir}/release"
 #package_name='linux'
 if [ "$verMode" == "cluster" ]; then
   install_dir="${release_dir}/${productName}-enterprise-server-${version}"
+elif [ "$verMode" == "cloud" ]; then
+  install_dir="${release_dir}/${productName}-cloud-server-${version}"
 else
   install_dir="${release_dir}/${productName}-server-${version}"
 fi
@@ -217,7 +219,10 @@ fi
 if [ "$verMode" == "cluster" ]; then
   sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >>remove_temp.sh
   mv remove_temp.sh ${install_dir}/bin/remove.sh
+fi
+if [ "$verMode" == "cloud" ]; then
+  sed 's/verMode=edge/verMode=cloud/g' ${install_dir}/bin/remove.sh >>remove_temp.sh
+  mv remove_temp.sh ${install_dir}/bin/remove.sh
 fi
 cd ${install_dir}
@@ -234,6 +239,10 @@ if [ "$verMode" == "cluster" ]; then
   sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install.sh >>install_temp.sh
   mv install_temp.sh ${install_dir}/install.sh
 fi
+if [ "$verMode" == "cloud" ]; then
+  sed 's/verMode=edge/verMode=cloud/g' ${install_dir}/install.sh >>install_temp.sh
+  mv install_temp.sh ${install_dir}/install.sh
+fi
 if [ "$pagMode" == "lite" ]; then
   sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install.sh >>install_temp.sh
   mv install_temp.sh ${install_dir}/install.sh
@@ -288,7 +297,7 @@ mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo
 [ -f ${wslib_files} ] && cp ${wslib_files} ${install_dir}/driver || :
 # Copy connector
-if [ "$verMode" == "cluster" ]; then
+if [ "$verMode" == "cluster" ] || [ "$verMode" == "cloud" ]; then
   connector_dir="${code_dir}/connector"
   mkdir -p ${install_dir}/connector
   if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then


@@ -30,7 +30,7 @@ extern "C" {
 #define tscDebug(...)  do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0)
 #define tscTrace(...)  do { if (cDebugFlag & DEBUG_TRACE) { taosPrintLog("TSC ", DEBUG_TRACE, cDebugFlag, __VA_ARGS__); }} while(0)
 #define tscDebugL(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0)
-#define tscPerf(...)   do { taosPrintLog("TSC ", 0, cDebugFlag, __VA_ARGS__); } while(0)
+//#define tscPerf(...)   do { if (cDebugFlag & DEBUG_INFO) { taosPrintLog("TSC ", DEBUG_INFO, cDebugFlag, __VA_ARGS__); }} while(0)
 // clang-format on
 #ifdef __cplusplus


@@ -77,19 +77,19 @@ static void deregisterRequest(SRequestObj *pRequest) {
            pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000.0, num, currentInst);
   if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) {
-    tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
-            "us, exec:%" PRId64 "us",
-            duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
-            pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
-            pRequest->metric.execEnd - pRequest->metric.semanticEnd);
+    // tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
+    //         "us, exec:%" PRId64 "us",
+    //         duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
+    //         pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
+    //         pRequest->metric.execEnd - pRequest->metric.semanticEnd);
     atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
   } else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
-    tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
-            "us, planner:%" PRId64 "us, exec:%" PRId64 "us, reqId:0x%" PRIx64,
-            duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
-            pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
-            pRequest->metric.planEnd - pRequest->metric.semanticEnd,
-            pRequest->metric.resultReady - pRequest->metric.planEnd, pRequest->requestId);
+    // tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
+    //         "us, planner:%" PRId64 "us, exec:%" PRId64 "us, reqId:0x%" PRIx64,
+    //         duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
+    //         pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
+    //         pRequest->metric.planEnd - pRequest->metric.semanticEnd,
+    //         pRequest->metric.resultReady - pRequest->metric.planEnd, pRequest->requestId);
     atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration);
   }


@@ -270,7 +270,7 @@ static const SSysTableMeta infosMeta[] = {
     {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true},
     {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true},
     {TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema), true},
-    // {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema)},
+    {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema)},
     {TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema), true},
     {TSDB_INS_TABLE_DATABASES, userDBSchema, tListLen(userDBSchema), false},
     {TSDB_INS_TABLE_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema), false},


@@ -82,6 +82,7 @@ bool tsSmlDataFormat = false;
 // query
 int32_t tsQueryPolicy = 1;
 int32_t tsQueryRspPolicy = 0;
+bool tsEnableQueryHb = false;
 int32_t tsQuerySmaOptimize = 0;
 int32_t tsQueryRsmaTolerance = 1000;  // the tolerance time (ms) to judge from which level to query rsma data.
 bool tsQueryPlannerTrace = false;
@@ -284,6 +285,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
   if (cfgAddInt32(pCfg, "compressMsgSize", tsCompressMsgSize, -1, 100000000, 1) != 0) return -1;
   if (cfgAddInt32(pCfg, "compressColData", tsCompressColData, -1, 100000000, 1) != 0) return -1;
   if (cfgAddInt32(pCfg, "queryPolicy", tsQueryPolicy, 1, 4, 1) != 0) return -1;
+  if (cfgAddBool(pCfg, "enableQueryHb", tsEnableQueryHb, false) != 0) return -1;
   if (cfgAddInt32(pCfg, "querySmaOptimize", tsQuerySmaOptimize, 0, 1, 1) != 0) return -1;
   if (cfgAddBool(pCfg, "queryPlannerTrace", tsQueryPlannerTrace, true) != 0) return -1;
   if (cfgAddInt32(pCfg, "queryNodeChunkSize", tsQueryNodeChunkSize, 1024, 128 * 1024, true) != 0) return -1;
@@ -644,6 +646,7 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
   tsCompressColData = cfgGetItem(pCfg, "compressColData")->i32;
   tsNumOfTaskQueueThreads = cfgGetItem(pCfg, "numOfTaskQueueThreads")->i32;
   tsQueryPolicy = cfgGetItem(pCfg, "queryPolicy")->i32;
+  tsEnableQueryHb = cfgGetItem(pCfg, "enableQueryHb")->bval;
   tsQuerySmaOptimize = cfgGetItem(pCfg, "querySmaOptimize")->i32;
   tsQueryPlannerTrace = cfgGetItem(pCfg, "queryPlannerTrace")->bval;
   tsQueryNodeChunkSize = cfgGetItem(pCfg, "queryNodeChunkSize")->i32;
@@ -780,6 +783,8 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
       if (strcasecmp("enableCoreFile", name) == 0) {
         bool enableCore = cfgGetItem(pCfg, "enableCoreFile")->bval;
         taosSetCoreDump(enableCore);
+      } else if (strcasecmp("enableQueryHb", name) == 0) {
+        tsEnableQueryHb = cfgGetItem(pCfg, "enableQueryHb")->bval;
       }
       break;
     }
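A hedged sketch of switching the new option on for a deployment (the config file path and the `name value` line format are assumptions based on the stock taos.cfg conventions; the item defaults to off, matching tsEnableQueryHb = false above):

```bash
# Sketch only: enable the query heartbeat added by this change, then restart the service.
echo "enableQueryHb 1" >> /etc/taos/taos.cfg
systemctl restart taosd
```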


@@ -507,29 +507,29 @@ int64_t convertTimeFromPrecisionToUnit(int64_t time, int32_t fromPrecision, char
   double tmp = time;
   switch (toUnit) {
     case 's': {
-      tmp /= (NANOSECOND_PER_SEC / factors[fromPrecision]);
+      // the result of division is an integer
       time /= (NANOSECOND_PER_SEC / factors[fromPrecision]);
+      tmp = (double)time;
       break;
     }
     case 'm':
-      tmp /= (NANOSECOND_PER_MINUTE / factors[fromPrecision]);
+      // the result of division is an integer
       time /= (NANOSECOND_PER_MINUTE / factors[fromPrecision]);
+      tmp = (double)time;
       break;
     case 'h':
-      tmp /= (NANOSECOND_PER_HOUR / factors[fromPrecision]);
+      // the result of division is an integer
       time /= (NANOSECOND_PER_HOUR / factors[fromPrecision]);
+      tmp = (double)time;
       break;
     case 'd':
-      tmp /= (NANOSECOND_PER_DAY / factors[fromPrecision]);
+      // the result of division is an integer
       time /= (NANOSECOND_PER_DAY / factors[fromPrecision]);
+      tmp = (double)time;
       break;
     case 'w':
-      tmp /= (NANOSECOND_PER_WEEK / factors[fromPrecision]);
+      // the result of division is an integer
       time /= (NANOSECOND_PER_WEEK / factors[fromPrecision]);
+      tmp = (double)time;
       break;
     case 'a':
-      tmp /= (NANOSECOND_PER_MSEC / factors[fromPrecision]);
+      // the result of division is an integer
       time /= (NANOSECOND_PER_MSEC / factors[fromPrecision]);
+      tmp = (double)time;
       break;
     case 'u':
       // the result of (NANOSECOND_PER_USEC/(double)factors[fromPrecision]) maybe a double
@@ -540,13 +540,13 @@ int64_t convertTimeFromPrecisionToUnit(int64_t time, int32_t fromPrecision, char
       break;
     }
     case TSDB_TIME_PRECISION_MICRO: {
-      tmp /= 1;
       time /= 1;
+      tmp = (double)time;
       break;
     }
     case TSDB_TIME_PRECISION_NANO: {
-      tmp /= 1000;
       time /= 1000;
+      tmp = (double)time;
       break;
     }
   }


@@ -332,7 +332,6 @@ SArray *dmGetMsgHandles() {
   // Requests handled by MNODE
   if (dmSetMgmtHandle(pArray, TDMT_MND_GRANT, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
-  // if (dmSetMgmtHandle(pArray, TDMT_MND_GRANT_RSP, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_MND_AUTH_RSP, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
   code = 0;


@@ -640,12 +640,8 @@ static int32_t mndSetDbCfgFromAlterDbReq(SDbObj *pDb, SAlterDbReq *pAlter) {
   }
   if (pAlter->pageSize > 0 && pAlter->pageSize != pDb->cfg.pageSize) {
-#if 1
-    terrno = TSDB_CODE_OPS_NOT_SUPPORT;
-#else
     pDb->cfg.pageSize = pAlter->pageSize;
     terrno = 0;
-#endif
   }
   if (pAlter->daysPerFile > 0 && pAlter->daysPerFile != pDb->cfg.daysPerFile) {


@@ -776,11 +776,6 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) {
     }
   }
-  if (numOfVnodes > 0) {
-    terrno = TSDB_CODE_OPS_NOT_SUPPORT;
-    goto _OVER;
-  }
   code = mndDropDnode(pMnode, pReq, pDnode, pMObj, pQObj, pSObj, numOfVnodes);
   if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;


@@ -422,7 +422,7 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) {
     return NULL;
   }
-  mInfo("mnode open successfully ");
+  mInfo("mnode open successfully");
   return pMnode;
 }


@@ -273,9 +273,6 @@ _OVER:
 }
 static int32_t mndProcessCreateSnodeReq(SRpcMsg *pReq) {
-#if 1
-  return TSDB_CODE_OPS_NOT_SUPPORT;
-#else
   SMnode *pMnode = pReq->info.node;
   int32_t code = -1;
   SSnodeObj *pObj = NULL;
@@ -318,7 +315,6 @@ _OVER:
   mndReleaseSnode(pMnode, pObj);
   mndReleaseDnode(pMnode, pDnode);
   return code;
-#endif
 }
 static int32_t mndSetDropSnodeRedoLogs(STrans *pTrans, SSnodeObj *pObj) {
@@ -390,9 +386,6 @@ _OVER:
 }
 static int32_t mndProcessDropSnodeReq(SRpcMsg *pReq) {
-#if 1
-  return TSDB_CODE_OPS_NOT_SUPPORT;
-#else
   SMnode *pMnode = pReq->info.node;
   int32_t code = -1;
   SSnodeObj *pObj = NULL;
@@ -429,7 +422,6 @@ _OVER:
   mndReleaseSnode(pMnode, pObj);
   return code;
-#endif
 }
 static int32_t mndRetrieveSnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) {


@@ -1112,7 +1112,9 @@ int32_t mndSetMoveVgroupInfoToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb,
   mInfo("vgId:%d, will add 1 vnodes", pVgroup->vgId);
   if (mndAddVnodeToVgroup(pMnode, &newVg, pArray) != 0) return -1;
   if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, &newVg, &newVg.vnodeGid[newVg.replica - 1]) != 0) return -1;
-  if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, &newVg, -1) != 0) return -1;
+  for (int32_t i = 0; i < newVg.replica - 1; ++i) {
+    if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, &newVg, newVg.vnodeGid[i].dnodeId) != 0) return -1;
+  }
   if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg) != 0) return -1;
   mInfo("vgId:%d, will remove 1 vnodes", pVgroup->vgId);
@@ -1120,8 +1122,10 @@
   SVnodeGid del = newVg.vnodeGid[vnIndex];
   newVg.vnodeGid[vnIndex] = newVg.vnodeGid[newVg.replica];
   memset(&newVg.vnodeGid[newVg.replica], 0, sizeof(SVnodeGid));
-  if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, &newVg, -1) != 0) return -1;
   if (mndAddDropVnodeAction(pMnode, pTrans, pDb, &newVg, &del, true) != 0) return -1;
+  for (int32_t i = 0; i < newVg.replica; ++i) {
+    if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, &newVg, newVg.vnodeGid[i].dnodeId) != 0) return -1;
+  }
   if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg) != 0) return -1;
   {
@@ -1193,21 +1197,11 @@ static int32_t mndAddIncVgroupReplicaToTrans(SMnode *pMnode, STrans *pTrans, SDb
   pGid->dnodeId = newDnodeId;
   pGid->syncState = TAOS_SYNC_STATE_ERROR;
-  if (pVgroup->replica == 2) {
-    if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, pVgroup, pGid) != 0) return -1;
-    if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[0].dnodeId) != 0) return -1;
-    if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, pVgroup) != 0) return -1;
-  } else if (pVgroup->replica == 4) {
-    if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, pVgroup, pGid) != 0) return -1;
-    if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[0].dnodeId) != 0) return -1;
-    if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[1].dnodeId) != 0) return -1;
-    if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[2].dnodeId) != 0) return -1;
-    if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, pVgroup) != 0) return -1;
-  } else {
-    mError("vgId:%d, failed to add 1 vnode since invalid replica:%d", pVgroup->vgId, pVgroup->replica);
-    terrno = TSDB_CODE_MND_APP_ERROR;
-    return -1;
+  if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, pVgroup, pGid) != 0) return -1;
+  for (int32_t i = 0; i < pVgroup->replica - 1; ++i) {
+    if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[i].dnodeId) != 0) return -1;
   }
+  if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, pVgroup) != 0) return -1;
   return 0;
 }
@@ -1232,21 +1226,11 @@ static int32_t mndAddDecVgroupReplicaFromTrans(SMnode *pMnode, STrans *pTrans, S
   memcpy(pGid, &pVgroup->vnodeGid[pVgroup->replica], sizeof(SVnodeGid));
   memset(&pVgroup->vnodeGid[pVgroup->replica], 0, sizeof(SVnodeGid));
-  if (pVgroup->replica == 1) {
-    if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[0].dnodeId) != 0) return -1;
-    if (mndAddDropVnodeAction(pMnode, pTrans, pDb, pVgroup, &delGid, true) != 0) return -1;
-    if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, pVgroup) != 0) return -1;
-  } else if (pVgroup->replica == 3) {
-    if (mndAddDropVnodeAction(pMnode, pTrans, pDb, pVgroup, &delGid, true) != 0) return -1;
-    if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[0].dnodeId) != 0) return -1;
-    if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[1].dnodeId) != 0) return -1;
-    if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[2].dnodeId) != 0) return -1;
-    if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, pVgroup) != 0) return -1;
-  } else {
-    mError("vgId:%d, failed to remove 1 vnode since invalid replica:%d", pVgroup->vgId, pVgroup->replica);
-    terrno = TSDB_CODE_MND_APP_ERROR;
-    return -1;
+  if (mndAddDropVnodeAction(pMnode, pTrans, pDb, pVgroup, &delGid, true) != 0) return -1;
+  for (int32_t i = 0; i < pVgroup->replica; ++i) {
+    if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[i].dnodeId) != 0) return -1;
   }
+  if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, pVgroup) != 0) return -1;
   return 0;
 }
@@ -1890,9 +1874,6 @@ _OVER:
 }
 static int32_t mndProcessBalanceVgroupMsg(SRpcMsg *pReq) {
-#if 1
-  return TSDB_CODE_OPS_NOT_SUPPORT;
-#else
   SMnode *pMnode = pReq->info.node;
   int32_t code = -1;
   SArray *pArray = NULL;
@@ -1941,7 +1922,6 @@ _OVER:
   taosArrayDestroy(pArray);
   return code;
-#endif
 }
 bool mndVgroupInDb(SVgObj *pVgroup, int64_t dbUid) { return !pVgroup->isTsma && pVgroup->dbUid == dbUid; }
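Removing the #if 1 short-circuit re-enables mndProcessBalanceVgroupMsg end to end; a hedged smoke test (assumes the documented BALANCE VGROUP statement and a cluster with more than one dnode):

```bash
# Sketch only: request a vgroup rebalance now that the handler no longer
# returns TSDB_CODE_OPS_NOT_SUPPORT unconditionally.
taos -s "BALANCE VGROUP;"
```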


@@ -90,7 +90,7 @@ int32_t qndProcessQueryMsg(SQnode *pQnode, int64_t ts, SRpcMsg *pMsg) {
       code = qWorkerProcessFetchMsg(pQnode, pQnode->pQuery, pMsg, ts);
       break;
     case TDMT_SCH_CANCEL_TASK:
-      code = qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg, ts);
+      //code = qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg, ts);
       break;
     case TDMT_SCH_DROP_TASK:
       code = qWorkerProcessDropMsg(pQnode, pQnode->pQuery, pMsg, ts);


@@ -1093,72 +1093,71 @@ _err:
 // iterate next row non deleted backward ts, version (from high to low)
 static int32_t nextRowIterGet(CacheNextRowIter *pIter, TSDBROW **ppRow) {
   int code = 0;
-  for (int i = 0; i < 4; ++i) {
-    if (pIter->input[i].next && !pIter->input[i].stop) {
-      code = pIter->input[i].nextRowFn(pIter->input[i].iter, &pIter->input[i].pRow);
-      if (code) goto _err;
-      if (pIter->input[i].pRow == NULL) {
-        pIter->input[i].stop = true;
-        pIter->input[i].next = false;
-      }
-    }
-  }
-  if (pIter->input[0].stop && pIter->input[1].stop && pIter->input[2].stop && pIter->input[3].stop) {
-    *ppRow = NULL;
-    return code;
-  }
-  // select maxpoint(s) from mem, imem, fs and last
-  TSDBROW *max[4] = {0};
-  int iMax[4] = {-1, -1, -1, -1};
-  int nMax = 0;
-  TSKEY maxKey = TSKEY_MIN;
-  for (int i = 0; i < 4; ++i) {
-    if (!pIter->input[i].stop && pIter->input[i].pRow != NULL) {
-      TSDBKEY key = TSDBROW_KEY(pIter->input[i].pRow);
-      // merging & deduplicating on client side
-      if (maxKey <= key.ts) {
-        if (maxKey < key.ts) {
-          nMax = 0;
-          maxKey = key.ts;
-        }
-        iMax[nMax] = i;
-        max[nMax++] = pIter->input[i].pRow;
-      }
-    }
-  }
-  // delete detection
-  TSDBROW *merge[4] = {0};
-  int iMerge[4] = {-1, -1, -1, -1};
-  int nMerge = 0;
-  for (int i = 0; i < nMax; ++i) {
-    TSDBKEY maxKey1 = TSDBROW_KEY(max[i]);
-    bool deleted = tsdbKeyDeleted(&maxKey1, pIter->pSkyline, &pIter->iSkyline);
-    if (!deleted) {
-      iMerge[nMerge] = iMax[i];
-      merge[nMerge++] = max[i];
-    }
-    pIter->input[iMax[i]].next = deleted;
-  }
-  if (nMerge > 0) {
-    pIter->input[iMerge[0]].next = true;
-    *ppRow = merge[0];
-  } else {
-    *ppRow = NULL;
-  }
-  return code;
+  for (;;) {
+    for (int i = 0; i < 4; ++i) {
+      if (pIter->input[i].next && !pIter->input[i].stop) {
+        code = pIter->input[i].nextRowFn(pIter->input[i].iter, &pIter->input[i].pRow);
+        if (code) goto _err;
+        if (pIter->input[i].pRow == NULL) {
+          pIter->input[i].stop = true;
+          pIter->input[i].next = false;
+        }
+      }
+    }
+    if (pIter->input[0].stop && pIter->input[1].stop && pIter->input[2].stop && pIter->input[3].stop) {
+      *ppRow = NULL;
+      return code;
+    }
+    // select maxpoint(s) from mem, imem, fs and last
+    TSDBROW *max[4] = {0};
+    int iMax[4] = {-1, -1, -1, -1};
+    int nMax = 0;
+    TSKEY maxKey = TSKEY_MIN;
+    for (int i = 0; i < 4; ++i) {
+      if (!pIter->input[i].stop && pIter->input[i].pRow != NULL) {
+        TSDBKEY key = TSDBROW_KEY(pIter->input[i].pRow);
+        // merging & deduplicating on client side
+        if (maxKey <= key.ts) {
+          if (maxKey < key.ts) {
+            nMax = 0;
+            maxKey = key.ts;
+          }
+          iMax[nMax] = i;
+          max[nMax++] = pIter->input[i].pRow;
+        }
+      }
+    }
+    // delete detection
+    TSDBROW *merge[4] = {0};
+    int iMerge[4] = {-1, -1, -1, -1};
+    int nMerge = 0;
+    for (int i = 0; i < nMax; ++i) {
+      TSDBKEY maxKey1 = TSDBROW_KEY(max[i]);
+      bool deleted = tsdbKeyDeleted(&maxKey1, pIter->pSkyline, &pIter->iSkyline);
+      if (!deleted) {
+        iMerge[nMerge] = iMax[i];
+        merge[nMerge++] = max[i];
+      }
+      pIter->input[iMax[i]].next = deleted;
+    }
+    if (nMerge > 0) {
+      pIter->input[iMerge[0]].next = true;
+      *ppRow = merge[0];
+      return code;
+    }
+  }
 _err:
   return code;
 }


@@ -111,7 +111,7 @@ typedef struct SDataBlockIter {
   int32_t index;
   SArray* blockList;  // SArray<SFileDataBlockInfo>
   int32_t order;
   SDataBlk block;  // current SDataBlk data
   SHashObj* pTableMap;
 } SDataBlockIter;
@@ -169,14 +169,14 @@ static TSDBROW* getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbRe
 static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pScanInfo, STsdbReader* pReader,
                                        SRowMerger* pMerger);
 static int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts,
-                                      SRowMerger* pMerger);
+                                      SRowMerger* pMerger, SVersionRange* pVerRange);
 static int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
                                 STsdbReader* pReader);
 static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow, uint64_t uid);
 static int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData,
                                         int32_t rowIndex);
 static void setComposedBlockFlag(STsdbReader* pReader, bool composed);
-static bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order);
+static bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order, SVersionRange* pRange);
 static int32_t doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList,
                                         STSRow** pTSRow, STsdbReader* pReader, bool* freeTSRow);
@@ -1052,7 +1052,7 @@ static int32_t doSetCurrentBlock(SDataBlockIter* pBlockIter, const char* idStr)
   if (pBlockInfo != NULL) {
     STableBlockScanInfo* pScanInfo = taosHashGet(pBlockIter->pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid));
     if (pScanInfo == NULL) {
-      tsdbError("failed to locate the uid:%"PRIu64" in query table uid list, %s", pBlockInfo->uid, idStr);
+      tsdbError("failed to locate the uid:%" PRIu64 " in query table uid list, %s", pBlockInfo->uid, idStr);
       return TSDB_CODE_INVALID_PARA;
     }
@@ -1466,7 +1466,8 @@ static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pB
   return false;
 }
-static bool nextRowFromLastBlocks(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pBlockScanInfo) {
+static bool nextRowFromLastBlocks(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pBlockScanInfo,
+                                  SVersionRange* pVerRange) {
   while (1) {
     bool hasVal = tMergeTreeNext(&pLastBlockReader->mergeTree);
     if (!hasVal) {
@@ -1475,7 +1476,8 @@ static bool nextRowFromLastBlocks(SLastBlockReader* pLastBlockReader, STableBloc
     TSDBROW row = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
     TSDBKEY k = TSDBROW_KEY(&row);
-    if (!hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->lastBlockDelIndex, &k, pLastBlockReader->order)) {
+    if (!hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->lastBlockDelIndex, &k, pLastBlockReader->order,
+                        pVerRange)) {
       return true;
     }
   }
@@ -1483,7 +1485,7 @@ static bool nextRowFromLastBlocks(SLastBlockReader* pLastBlockReader, STableBloc
 static bool tryCopyDistinctRowFromSttBlock(TSDBROW* fRow, SLastBlockReader* pLastBlockReader,
                                            STableBlockScanInfo* pScanInfo, int64_t ts, STsdbReader* pReader) {
-  bool hasVal = nextRowFromLastBlocks(pLastBlockReader, pScanInfo);
+  bool hasVal = nextRowFromLastBlocks(pLastBlockReader, pScanInfo, &pReader->verRange);
   if (hasVal) {
     int64_t next1 = getCurrentKeyInLastBlock(pLastBlockReader);
     if (next1 != ts) {
@@ -1602,7 +1604,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
        return code;
      }
    }
-    doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge);
+    doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge, &pReader->verRange);
  }
  if (minKey == k.ts) {
@@ -1647,7 +1649,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
        return code;
      }
    }
-    doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge);
+    doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge, &pReader->verRange);
  }
  if (minKey == key) {
@@ -1699,7 +1701,7 @@ static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader,
    TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
    tRowMerge(&merge, &fRow1);
-    doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge);
+    doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge, &pReader->verRange);
    code = tRowMergerGetRow(&merge, &pTSRow);
    if (code != TSDB_CODE_SUCCESS) {
@@ -1717,7 +1719,7 @@ static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader,
      return code;
    }
-    doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge);
+    doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge, &pReader->verRange);
    ASSERT(mergeBlockData);
    // merge with block data if ts == key
@@ -1771,7 +1773,7 @@ static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader
      TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
      tRowMerge(&merge, &fRow1);
-      doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, &merge);
+      doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, &merge, &pReader->verRange);
      code = tRowMergerGetRow(&merge, &pTSRow);
      if (code != TSDB_CODE_SUCCESS) {
@@ -1882,7 +1884,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
      }
    }
-    doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge);
+    doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge, &pReader->verRange);
  }
  if (minKey == ik.ts) {
@@ -1901,8 +1903,8 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
      }
    }
-    code = doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline,
-                            &merge, pReader);
+    code = doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, &merge,
+                            pReader);
    if (code != TSDB_CODE_SUCCESS) {
      return code;
    }
@@ -1973,7 +1975,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
        return code;
      }
    }
-    doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge);
+    doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge, &pReader->verRange);
  }
  if (minKey == key) {
@@ -1993,7 +1995,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
      }
    }
    if (merge.pTSchema == NULL) {
      return code;
    }
@@ -2095,7 +2097,8 @@ static bool isValidFileBlockRow(SBlockData* pBlockData, SFileBlockDumpInfo* pDum
  }
  TSDBKEY k = {.ts = ts, .version = ver};
-  if (hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->fileDelIndex, &k, pReader->order)) {
+  if (hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->fileDelIndex, &k, pReader->order,
+                     &pReader->verRange)) {
    return false;
  }
@@ -2130,7 +2133,7 @@ static bool initLastBlockReader(SLastBlockReader* pLBlockReader, STableBlockScan
    return false;
  }
-  return nextRowFromLastBlocks(pLBlockReader, pScanInfo);
+  return nextRowFromLastBlocks(pLBlockReader, pScanInfo, &pReader->verRange);
 }
 static int64_t getCurrentKeyInLastBlock(SLastBlockReader* pLastBlockReader) {
@@ -2225,8 +2228,8 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
      pBlockScanInfo = taosHashGet(pReader->status.pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid));
      if (pBlockScanInfo == NULL) {
        code = TSDB_CODE_INVALID_PARA;
-        tsdbError("failed to locate the uid:%"PRIu64" in query table uid list, total tables:%d, %s",
-                  pBlockInfo->uid, taosHashGetSize(pReader->status.pTableMap), pReader->idStr);
+        tsdbError("failed to locate the uid:%" PRIu64 " in query table uid list, total tables:%d, %s", pBlockInfo->uid,
+                  taosHashGetSize(pReader->status.pTableMap), pReader->idStr);
        goto _end;
      }
@@ -2290,7 +2293,7 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
  }
 _end:
-  pResBlock->info.uid = (pBlockScanInfo != NULL)? pBlockScanInfo->uid:0;
+  pResBlock->info.uid = (pBlockScanInfo != NULL) ? pBlockScanInfo->uid : 0;
  blockDataUpdateTsWindow(pResBlock, 0);
  setComposedBlockFlag(pReader, true);
@@ -2859,7 +2862,7 @@ SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_
  return (SVersionRange){.minVer = startVer, .maxVer = endVer};
 }
-bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order) {
+bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order, SVersionRange* pVerRange) {
  ASSERT(pKey != NULL);
  if (pDelList == NULL) {
    return false;
@@ -2887,7 +2890,8 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32
        return false;
      }
-      if (pCurrent->ts <= pKey->ts && pNext->ts >= pKey->ts && pCurrent->version >= pKey->version) {
+      if (pCurrent->ts <= pKey->ts && pNext->ts >= pKey->ts && pCurrent->version >= pKey->version &&
+          pVerRange->maxVer >= pCurrent->version) {
        return true;
      }
@@ -2903,7 +2907,8 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32
        continue;
      }
-      if (pCurrent->ts <= pKey->ts && pNext->ts >= pKey->ts && pCurrent->version >= pKey->version) {
+      if (pCurrent->ts <= pKey->ts && pNext->ts >= pKey->ts && pCurrent->version >= pKey->version &&
+          pVerRange->maxVer >= pCurrent->version) {
        return true;
      }
    }
@@ -2973,7 +2978,7 @@ TSDBROW* getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* p
    // it is a valid data version
    if ((key.version <= pReader->verRange.maxVer && key.version >= pReader->verRange.minVer) &&
-        (!hasBeenDropped(pDelList, &pIter->index, &key, pReader->order))) {
+        (!hasBeenDropped(pDelList, &pIter->index, &key, pReader->order, &pReader->verRange))) {
      return pRow;
    }
@@ -2992,7 +2997,7 @@ TSDBROW* getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* p
    }
    if (key.version <= pReader->verRange.maxVer && key.version >= pReader->verRange.minVer &&
-        (!hasBeenDropped(pDelList, &pIter->index, &key, pReader->order))) {
+        (!hasBeenDropped(pDelList, &pIter->index, &key, pReader->order, &pReader->verRange))) {
      return pRow;
    }
  }
@@ -3130,9 +3135,9 @@ int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc
 }
 int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts,
-                               SRowMerger* pMerger) {
+                               SRowMerger* pMerger, SVersionRange* pVerRange) {
   pScanInfo->lastKey = ts;
-  while (nextRowFromLastBlocks(pLastBlockReader, pScanInfo)) {
+  while (nextRowFromLastBlocks(pLastBlockReader, pScanInfo, pVerRange)) {
     int64_t next1 = getCurrentKeyInLastBlock(pLastBlockReader);
     if (next1 == ts) {
       TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
@@ -3772,7 +3777,7 @@ bool tsdbNextDataBlock(STsdbReader* pReader) {
 bool tsdbTableNextDataBlock(STsdbReader* pReader, uint64_t uid) {
   STableBlockScanInfo* pBlockScanInfo = taosHashGet(pReader->status.pTableMap, &uid, sizeof(uid));
   if (pBlockScanInfo == NULL) {  // no data block for the table of given uid
     return false;
   }


@ -369,8 +369,8 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg, 0); return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_SCH_FETCH_RSP: case TDMT_SCH_FETCH_RSP:
return qWorkerProcessRspMsg(pVnode, pVnode->pQuery, pMsg, 0); return qWorkerProcessRspMsg(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_SCH_CANCEL_TASK: //case TDMT_SCH_CANCEL_TASK:
return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg, 0); // return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_SCH_DROP_TASK: case TDMT_SCH_DROP_TASK:
return qWorkerProcessDropMsg(pVnode, pVnode->pQuery, pMsg, 0); return qWorkerProcessDropMsg(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_SCH_QUERY_HEARTBEAT: case TDMT_SCH_QUERY_HEARTBEAT:


@ -621,14 +621,18 @@ int32_t ctgEnqueue(SCatalog *pCtg, SCtgCacheOperation *operation) {
node->op = operation; node->op = operation;
CTG_LOCK(CTG_WRITE, &gCtgMgmt.queue.qlock); CTG_LOCK(CTG_WRITE, &gCtgMgmt.queue.qlock);
if (gCtgMgmt.queue.stopQueue) { if (gCtgMgmt.queue.stopQueue) {
ctgFreeQNode(node); ctgFreeQNode(node);
CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.queue.qlock); CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.queue.qlock);
CTG_RET(TSDB_CODE_CTG_EXIT); CTG_RET(TSDB_CODE_CTG_EXIT);
} }
gCtgMgmt.queue.stopQueue = operation->stopQueue;
gCtgMgmt.queue.tail->next = node; gCtgMgmt.queue.tail->next = node;
gCtgMgmt.queue.tail = node; gCtgMgmt.queue.tail = node;
gCtgMgmt.queue.stopQueue = operation->stopQueue;
CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.queue.qlock); CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.queue.qlock);
ctgDebug("action [%s] added into queue", opName); ctgDebug("action [%s] added into queue", opName);
@ -1997,6 +2001,59 @@ _return:
CTG_RET(code); CTG_RET(code);
} }
void ctgFreeCacheOperationData(SCtgCacheOperation *op) {
if (NULL == op || NULL == op->data) {
return;
}
switch (op->opId) {
case CTG_OP_UPDATE_VGROUP: {
SCtgUpdateVgMsg *msg = op->data;
ctgFreeVgInfo(msg->dbInfo);
taosMemoryFreeClear(op->data);
break;
}
case CTG_OP_UPDATE_TB_META: {
SCtgUpdateTbMetaMsg *msg = op->data;
taosMemoryFreeClear(msg->pMeta->tbMeta);
taosMemoryFreeClear(msg->pMeta);
taosMemoryFreeClear(op->data);
break;
}
case CTG_OP_DROP_DB_CACHE:
case CTG_OP_DROP_DB_VGROUP:
case CTG_OP_DROP_STB_META:
case CTG_OP_DROP_TB_META:
case CTG_OP_UPDATE_VG_EPSET:
case CTG_OP_DROP_TB_INDEX:
case CTG_OP_CLEAR_CACHE: {
taosMemoryFreeClear(op->data);
break;
}
case CTG_OP_UPDATE_USER: {
SCtgUpdateUserMsg *msg = op->data;
taosHashCleanup(msg->userAuth.createdDbs);
taosHashCleanup(msg->userAuth.readDbs);
taosHashCleanup(msg->userAuth.writeDbs);
taosMemoryFreeClear(op->data);
break;
}
case CTG_OP_UPDATE_TB_INDEX: {
SCtgUpdateTbIndexMsg *msg = op->data;
if (msg->pIndex) {
taosArrayDestroyEx(msg->pIndex->pIndex, tFreeSTableIndexInfo);
taosMemoryFreeClear(msg->pIndex);
}
taosMemoryFreeClear(op->data);
break;
}
default: {
qError("invalid cache op id:%d", op->opId);
break;
}
}
}
void ctgCleanupCacheQueue(void) { void ctgCleanupCacheQueue(void) {
SCtgQNode *node = NULL; SCtgQNode *node = NULL;
SCtgQNode *nodeNext = NULL; SCtgQNode *nodeNext = NULL;
@ -2015,7 +2072,7 @@ void ctgCleanupCacheQueue(void) {
stopQueue = true; stopQueue = true;
CTG_RT_STAT_INC(numOfOpDequeue, 1); CTG_RT_STAT_INC(numOfOpDequeue, 1);
} else { } else {
taosMemoryFree(op->data); ctgFreeCacheOperationData(op);
CTG_RT_STAT_INC(numOfOpAbort, 1); CTG_RT_STAT_INC(numOfOpAbort, 1);
} }
@ -2053,7 +2110,7 @@ void *ctgUpdateThreadFunc(void *param) {
qError("ctg tsem_wait failed, error:%s", tstrerror(TAOS_SYSTEM_ERROR(errno))); qError("ctg tsem_wait failed, error:%s", tstrerror(TAOS_SYSTEM_ERROR(errno)));
} }
if (atomic_load_8((int8_t *)&gCtgMgmt.exit)) { if (atomic_load_8((int8_t *)&gCtgMgmt.queue.stopQueue)) {
ctgCleanupCacheQueue(); ctgCleanupCacheQueue();
break; break;
} }


@ -18,8 +18,8 @@ IF(NOT TD_DARWIN)
PRIVATE "${TD_SOURCE_DIR}/source/libs/catalog/inc" PRIVATE "${TD_SOURCE_DIR}/source/libs/catalog/inc"
) )
#add_test( add_test(
# NAME catalogTest NAME catalogTest
# COMMAND catalogTest COMMAND catalogTest
#) )
ENDIF() ENDIF()


@ -27,8 +27,8 @@
#ifdef WINDOWS #ifdef WINDOWS
#define TD_USE_WINSOCK #define TD_USE_WINSOCK
#endif #endif
#include "catalog.h"
#include "catalogInt.h" #include "catalogInt.h"
#include "catalog.h"
#include "os.h" #include "os.h"
#include "stub.h" #include "stub.h"
#include "taos.h" #include "taos.h"
@ -49,14 +49,13 @@ void ctgTestSetRspCTableMeta();
void ctgTestSetRspSTableMeta(); void ctgTestSetRspSTableMeta();
void ctgTestSetRspMultiSTableMeta(); void ctgTestSetRspMultiSTableMeta();
// extern "C" SCatalogMgmt gCtgMgmt;
enum { enum {
CTGT_RSP_VGINFO = 1, CTGT_RSP_VGINFO = 1,
CTGT_RSP_TBMETA, CTGT_RSP_TBMETA,
CTGT_RSP_CTBMETA, CTGT_RSP_CTBMETA,
CTGT_RSP_STBMETA, CTGT_RSP_STBMETA,
CTGT_RSP_MSTBMETA, CTGT_RSP_MSTBMETA,
CTGT_RSP_INDEXINFO_E,
CTGT_RSP_TBMETA_NOT_EXIST, CTGT_RSP_TBMETA_NOT_EXIST,
}; };
@ -284,6 +283,8 @@ void ctgTestBuildSTableMetaRsp(STableMetaRsp *rspMsg) {
} }
void ctgTestRspDbVgroups(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) { void ctgTestRspDbVgroups(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) {
rpcFreeCont(pMsg->pCont);
SUseDbRsp usedbRsp = {0}; SUseDbRsp usedbRsp = {0};
strcpy(usedbRsp.db, ctgTestDbname); strcpy(usedbRsp.db, ctgTestDbname);
usedbRsp.vgVersion = ctgTestVgVersion; usedbRsp.vgVersion = ctgTestVgVersion;
@ -321,9 +322,13 @@ void ctgTestRspDbVgroups(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *
pRsp->code = 0; pRsp->code = 0;
pRsp->contLen = contLen; pRsp->contLen = contLen;
pRsp->pCont = pReq; pRsp->pCont = pReq;
taosArrayDestroy(usedbRsp.pVgroupInfos);
} }
void ctgTestRspTableMeta(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) { void ctgTestRspTableMeta(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) {
rpcFreeCont(pMsg->pCont);
STableMetaRsp metaRsp = {0}; STableMetaRsp metaRsp = {0};
strcpy(metaRsp.dbFName, ctgTestDbname); strcpy(metaRsp.dbFName, ctgTestDbname);
strcpy(metaRsp.tbName, ctgTestTablename); strcpy(metaRsp.tbName, ctgTestTablename);
@ -363,10 +368,14 @@ void ctgTestRspTableMeta(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *
} }
void ctgTestRspTableMetaNotExist(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) { void ctgTestRspTableMetaNotExist(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) {
rpcFreeCont(pMsg->pCont);
pRsp->code = CTG_ERR_CODE_TABLE_NOT_EXIST; pRsp->code = CTG_ERR_CODE_TABLE_NOT_EXIST;
} }
void ctgTestRspCTableMeta(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) { void ctgTestRspCTableMeta(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) {
rpcFreeCont(pMsg->pCont);
STableMetaRsp metaRsp = {0}; STableMetaRsp metaRsp = {0};
strcpy(metaRsp.dbFName, ctgTestDbname); strcpy(metaRsp.dbFName, ctgTestDbname);
strcpy(metaRsp.tbName, ctgTestCurrentCTableName ? ctgTestCurrentCTableName : ctgTestCTablename); strcpy(metaRsp.tbName, ctgTestCurrentCTableName ? ctgTestCurrentCTableName : ctgTestCTablename);
@ -413,6 +422,8 @@ void ctgTestRspCTableMeta(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg
} }
void ctgTestRspSTableMeta(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) { void ctgTestRspSTableMeta(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) {
rpcFreeCont(pMsg->pCont);
STableMetaRsp metaRsp = {0}; STableMetaRsp metaRsp = {0};
strcpy(metaRsp.dbFName, ctgTestDbname); strcpy(metaRsp.dbFName, ctgTestDbname);
strcpy(metaRsp.tbName, ctgTestCurrentSTableName ? ctgTestCurrentSTableName : ctgTestSTablename); strcpy(metaRsp.tbName, ctgTestCurrentSTableName ? ctgTestCurrentSTableName : ctgTestSTablename);
@ -459,6 +470,8 @@ void ctgTestRspSTableMeta(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg
} }
void ctgTestRspMultiSTableMeta(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) { void ctgTestRspMultiSTableMeta(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) {
rpcFreeCont(pMsg->pCont);
static int32_t idx = 1; static int32_t idx = 1;
STableMetaRsp metaRsp = {0}; STableMetaRsp metaRsp = {0};
@ -508,6 +521,16 @@ void ctgTestRspMultiSTableMeta(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRp
tFreeSTableMetaRsp(&metaRsp); tFreeSTableMetaRsp(&metaRsp);
} }
void ctgTestRspErrIndexInfo(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) {
rpcFreeCont(pMsg->pCont);
pRsp->code = TSDB_CODE_MND_DB_INDEX_NOT_EXIST;
pRsp->contLen = 0;
pRsp->pCont = NULL;
}
void ctgTestRspByIdx(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) { void ctgTestRspByIdx(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) {
switch (ctgTestRspFunc[ctgTestRspIdx]) { switch (ctgTestRspFunc[ctgTestRspIdx]) {
case CTGT_RSP_VGINFO: case CTGT_RSP_VGINFO:
@ -525,6 +548,9 @@ void ctgTestRspByIdx(void *shandle, SEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp
case CTGT_RSP_MSTBMETA: case CTGT_RSP_MSTBMETA:
ctgTestRspMultiSTableMeta(shandle, pEpSet, pMsg, pRsp); ctgTestRspMultiSTableMeta(shandle, pEpSet, pMsg, pRsp);
break; break;
case CTGT_RSP_INDEXINFO_E:
ctgTestRspErrIndexInfo(shandle, pEpSet, pMsg, pRsp);
break;
case CTGT_RSP_TBMETA_NOT_EXIST: case CTGT_RSP_TBMETA_NOT_EXIST:
ctgTestRspTableMetaNotExist(shandle, pEpSet, pMsg, pRsp); ctgTestRspTableMetaNotExist(shandle, pEpSet, pMsg, pRsp);
break; break;
@ -969,6 +995,8 @@ TEST(tableMeta, normalTable) {
ASSERT_EQ(tableMeta->tableInfo.precision, 1); ASSERT_EQ(tableMeta->tableInfo.precision, 1);
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12); ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
taosMemoryFree(tableMeta);
while (true) { while (true) {
uint32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM); uint32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
if (0 == n) { if (0 == n) {
@ -990,6 +1018,8 @@ TEST(tableMeta, normalTable) {
ASSERT_EQ(tableMeta->tableInfo.precision, 1); ASSERT_EQ(tableMeta->tableInfo.precision, 1);
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12); ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
taosMemoryFree(tableMeta);
SDbVgVersion *dbs = NULL; SDbVgVersion *dbs = NULL;
SSTableVersion *stb = NULL; SSTableVersion *stb = NULL;
uint32_t dbNum = 0, stbNum = 0, allDbNum = 0, allStbNum = 0; uint32_t dbNum = 0, stbNum = 0, allDbNum = 0, allStbNum = 0;
@ -1026,7 +1056,6 @@ TEST(tableMeta, normalTable) {
ASSERT_EQ(allStbNum, 0); ASSERT_EQ(allStbNum, 0);
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
} }
TEST(tableMeta, childTableCase) { TEST(tableMeta, childTableCase) {
@ -1064,6 +1093,8 @@ TEST(tableMeta, childTableCase) {
ASSERT_EQ(tableMeta->tableInfo.precision, 1); ASSERT_EQ(tableMeta->tableInfo.precision, 1);
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12); ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
taosMemoryFree(tableMeta);
while (true) { while (true) {
uint32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM); uint32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
if (0 == n) { if (0 == n) {
@ -1099,6 +1130,8 @@ TEST(tableMeta, childTableCase) {
ASSERT_EQ(tableMeta->tableInfo.precision, 1); ASSERT_EQ(tableMeta->tableInfo.precision, 1);
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12); ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
taosMemoryFree(tableMeta);
SDbVgVersion *dbs = NULL; SDbVgVersion *dbs = NULL;
SSTableVersion *stb = NULL; SSTableVersion *stb = NULL;
uint32_t dbNum = 0, stbNum = 0, allDbNum = 0, allStbNum = 0; uint32_t dbNum = 0, stbNum = 0, allDbNum = 0, allStbNum = 0;
@ -1135,7 +1168,6 @@ TEST(tableMeta, childTableCase) {
ASSERT_EQ(allStbNum, 1); ASSERT_EQ(allStbNum, 1);
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
} }
TEST(tableMeta, superTableCase) { TEST(tableMeta, superTableCase) {
@ -1173,6 +1205,8 @@ TEST(tableMeta, superTableCase) {
ASSERT_EQ(tableMeta->tableInfo.precision, 1); ASSERT_EQ(tableMeta->tableInfo.precision, 1);
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12); ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
taosMemoryFree(tableMeta);
while (true) { while (true) {
uint32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM); uint32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
if (0 == n) { if (0 == n) {
@ -1199,6 +1233,8 @@ TEST(tableMeta, superTableCase) {
ASSERT_EQ(tableMeta->tableInfo.precision, 1); ASSERT_EQ(tableMeta->tableInfo.precision, 1);
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12); ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
taosMemoryFree(tableMeta);
while (true) { while (true) {
uint32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM); uint32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
if (2 != n) { if (2 != n) {
@ -1220,6 +1256,8 @@ TEST(tableMeta, superTableCase) {
ASSERT_EQ(tableMeta->tableInfo.precision, 1); ASSERT_EQ(tableMeta->tableInfo.precision, 1);
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12); ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
taosMemoryFree(tableMeta);
SDbVgVersion *dbs = NULL; SDbVgVersion *dbs = NULL;
SSTableVersion *stb = NULL; SSTableVersion *stb = NULL;
uint32_t dbNum = 0, stbNum = 0, allDbNum = 0, allStbNum = 0; uint32_t dbNum = 0, stbNum = 0, allDbNum = 0, allStbNum = 0;
@ -1257,7 +1295,6 @@ TEST(tableMeta, superTableCase) {
ASSERT_EQ(allStbNum, 1); ASSERT_EQ(allStbNum, 1);
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
} }
TEST(tableMeta, rmStbMeta) { TEST(tableMeta, rmStbMeta) {
@ -1297,6 +1334,8 @@ TEST(tableMeta, rmStbMeta) {
ASSERT_EQ(tableMeta->tableInfo.precision, 1); ASSERT_EQ(tableMeta->tableInfo.precision, 1);
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12); ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
taosMemoryFree(tableMeta);
while (true) { while (true) {
uint32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM); uint32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
if (0 == n) { if (0 == n) {
@ -1326,7 +1365,6 @@ TEST(tableMeta, rmStbMeta) {
ASSERT_EQ(ctgdGetClusterCacheNum(pCtg, CTG_DBG_STB_RENT_NUM), 0); ASSERT_EQ(ctgdGetClusterCacheNum(pCtg, CTG_DBG_STB_RENT_NUM), 0);
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
} }
TEST(tableMeta, updateStbMeta) { TEST(tableMeta, updateStbMeta) {
@ -1416,7 +1454,36 @@ TEST(tableMeta, updateStbMeta) {
taosMemoryFreeClear(tableMeta); taosMemoryFreeClear(tableMeta);
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt.stat, 0, sizeof(gCtgMgmt.stat)); }
TEST(getIndexInfo, notExists) {
struct SCatalog *pCtg = NULL;
SRequestConnInfo connInfo = {0};
SRequestConnInfo *mockPointer = (SRequestConnInfo *)&connInfo;
SVgroupInfo vgInfo = {0};
SArray *vgList = NULL;
ctgTestInitLogFile();
memset(ctgTestRspFunc, 0, sizeof(ctgTestRspFunc));
ctgTestRspIdx = 0;
ctgTestRspFunc[0] = CTGT_RSP_INDEXINFO_E;
ctgTestSetRspByIdx();
initQueryModuleMsgHandle();
int32_t code = catalogInit(NULL);
ASSERT_EQ(code, 0);
code = catalogGetHandle(ctgTestClusterId, &pCtg);
ASSERT_EQ(code, 0);
SIndexInfo info;
code = catalogGetIndexMeta(pCtg, mockPointer, "index1", &info);
ASSERT_TRUE(code != 0);
catalogDestroy();
} }
TEST(refreshGetMeta, normal2normal) { TEST(refreshGetMeta, normal2normal) {
@ -1496,7 +1563,6 @@ TEST(refreshGetMeta, normal2normal) {
taosMemoryFreeClear(tableMeta); taosMemoryFreeClear(tableMeta);
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
} }
TEST(refreshGetMeta, normal2notexist) { TEST(refreshGetMeta, normal2notexist) {
@ -1567,7 +1633,6 @@ TEST(refreshGetMeta, normal2notexist) {
ASSERT_TRUE(tableMeta == NULL); ASSERT_TRUE(tableMeta == NULL);
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
} }
TEST(refreshGetMeta, normal2child) { TEST(refreshGetMeta, normal2child) {
@ -1649,7 +1714,6 @@ TEST(refreshGetMeta, normal2child) {
taosMemoryFreeClear(tableMeta); taosMemoryFreeClear(tableMeta);
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
ctgTestCurrentCTableName = NULL; ctgTestCurrentCTableName = NULL;
ctgTestCurrentSTableName = NULL; ctgTestCurrentSTableName = NULL;
} }
@ -1737,7 +1801,6 @@ TEST(refreshGetMeta, stable2child) {
taosMemoryFreeClear(tableMeta); taosMemoryFreeClear(tableMeta);
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
ctgTestCurrentCTableName = NULL; ctgTestCurrentCTableName = NULL;
ctgTestCurrentSTableName = NULL; ctgTestCurrentSTableName = NULL;
} }
@ -1824,7 +1887,6 @@ TEST(refreshGetMeta, stable2stable) {
taosMemoryFreeClear(tableMeta); taosMemoryFreeClear(tableMeta);
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
ctgTestCurrentCTableName = NULL; ctgTestCurrentCTableName = NULL;
ctgTestCurrentSTableName = NULL; ctgTestCurrentSTableName = NULL;
} }
@ -1911,7 +1973,6 @@ TEST(refreshGetMeta, child2stable) {
taosMemoryFreeClear(tableMeta); taosMemoryFreeClear(tableMeta);
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
ctgTestCurrentCTableName = NULL; ctgTestCurrentCTableName = NULL;
ctgTestCurrentSTableName = NULL; ctgTestCurrentSTableName = NULL;
} }
@ -1951,7 +2012,6 @@ TEST(tableDistVgroup, normalTable) {
ASSERT_TRUE(code != 0); ASSERT_TRUE(code != 0);
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
} }
TEST(tableDistVgroup, childTableCase) { TEST(tableDistVgroup, childTableCase) {
@ -1990,7 +2050,6 @@ TEST(tableDistVgroup, childTableCase) {
ASSERT_TRUE(code != 0); ASSERT_TRUE(code != 0);
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
} }
TEST(tableDistVgroup, superTableCase) { TEST(tableDistVgroup, superTableCase) {
@ -2037,8 +2096,9 @@ TEST(tableDistVgroup, superTableCase) {
ASSERT_EQ(vgInfo->vgId, 3); ASSERT_EQ(vgInfo->vgId, 3);
ASSERT_EQ(vgInfo->epSet.numOfEps, 3); ASSERT_EQ(vgInfo->epSet.numOfEps, 3);
taosArrayDestroy(vgList);
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
} }
TEST(dbVgroup, getSetDbVgroupCase) { TEST(dbVgroup, getSetDbVgroupCase) {
@ -2077,6 +2137,8 @@ TEST(dbVgroup, getSetDbVgroupCase) {
ASSERT_EQ(code, 0); ASSERT_EQ(code, 0);
ASSERT_EQ(taosArrayGetSize((const SArray *)vgList), ctgTestVgNum); ASSERT_EQ(taosArrayGetSize((const SArray *)vgList), ctgTestVgNum);
taosArrayDestroy(vgList);
while (true) { while (true) {
uint64_t n = 0; uint64_t n = 0;
ctgdGetStatNum("runtime.numOfOpDequeue", (void *)&n); ctgdGetStatNum("runtime.numOfOpDequeue", (void *)&n);
@ -2117,7 +2179,6 @@ TEST(dbVgroup, getSetDbVgroupCase) {
ASSERT_TRUE(code != 0); ASSERT_TRUE(code != 0);
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
} }
TEST(multiThread, getSetRmSameDbVgroup) { TEST(multiThread, getSetRmSameDbVgroup) {
@ -2170,7 +2231,6 @@ TEST(multiThread, getSetRmSameDbVgroup) {
taosSsleep(1); taosSsleep(1);
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
} }
TEST(multiThread, getSetRmDiffDbVgroup) { TEST(multiThread, getSetRmDiffDbVgroup) {
@ -2223,7 +2283,6 @@ TEST(multiThread, getSetRmDiffDbVgroup) {
taosSsleep(1); taosSsleep(1);
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
} }
TEST(multiThread, ctableMeta) { TEST(multiThread, ctableMeta) {
@ -2275,7 +2334,6 @@ TEST(multiThread, ctableMeta) {
taosSsleep(2); taosSsleep(2);
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
} }
TEST(rentTest, allRent) { TEST(rentTest, allRent) {
@ -2323,6 +2381,8 @@ TEST(rentTest, allRent) {
ASSERT_EQ(tableMeta->tableInfo.precision, 1); ASSERT_EQ(tableMeta->tableInfo.precision, 1);
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12); ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
taosMemoryFree(tableMeta);
while (ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM) < i) { while (ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM) < i) {
taosMsleep(50); taosMsleep(50);
} }
@ -2353,7 +2413,6 @@ TEST(rentTest, allRent) {
} }
catalogDestroy(); catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
} }


@ -319,27 +319,25 @@ typedef struct {
} SAggOptrPushDownInfo; } SAggOptrPushDownInfo;
typedef struct STableScanInfo { typedef struct STableScanInfo {
STsdbReader* dataReader; STsdbReader* dataReader;
SReadHandle readHandle; SReadHandle readHandle;
SLimitInfo limitInfo;
SFileBlockLoadRecorder readRecorder; SFileBlockLoadRecorder readRecorder;
SScanInfo scanInfo; SScanInfo scanInfo;
int32_t scanTimes; int32_t scanTimes;
SNode* pFilterNode; // filter info, which is push down by optimizer SNode* pFilterNode; // filter info, which is push down by optimizer
SSDataBlock* pResBlock;
SSDataBlock* pResBlock; SColMatchInfo matchInfo;
SColMatchInfo matchInfo; SExprSupp pseudoSup;
SExprSupp pseudoSup; SQueryTableDataCond cond;
SQueryTableDataCond cond; int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan int32_t dataBlockLoadFlag;
int32_t dataBlockLoadFlag; SSampleExecInfo sample; // sample execution info
SSampleExecInfo sample; // sample execution info int32_t currentGroupId;
int32_t currentGroupId; int32_t currentTable;
int32_t currentTable; int8_t scanMode;
int8_t scanMode; SAggOptrPushDownInfo pdInfo;
int8_t noTable; int8_t assignBlockUid;
SAggOptrPushDownInfo pdInfo;
int8_t assignBlockUid;
} STableScanInfo; } STableScanInfo;
typedef struct STableMergeScanInfo { typedef struct STableMergeScanInfo {
@ -357,7 +355,7 @@ typedef struct STableMergeScanInfo {
SSDataBlock* pSortInputBlock; SSDataBlock* pSortInputBlock;
int64_t startTs; // sort start time int64_t startTs; // sort start time
SArray* sortSourceParams; SArray* sortSourceParams;
SLimitInfo limitInfo;
SFileBlockLoadRecorder readRecorder; SFileBlockLoadRecorder readRecorder;
int64_t numOfRows; int64_t numOfRows;
SScanInfo scanInfo; SScanInfo scanInfo;
@ -374,6 +372,7 @@ typedef struct STableMergeScanInfo {
SQueryTableDataCond cond; SQueryTableDataCond cond;
int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
int32_t dataBlockLoadFlag; int32_t dataBlockLoadFlag;
// if the upstream is an interval operator, the interval info is also kept here to get the time // if the upstream is an interval operator, the interval info is also kept here to get the time
// window to check if current data block needs to be loaded. // window to check if current data block needs to be loaded.
SInterval interval; SInterval interval;
@ -903,6 +902,7 @@ void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SG
int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf); int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf);
bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo); bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo);
void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimitInfo); void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimitInfo);
void applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, SOperatorInfo* pOperator);
void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfoData* pTimeWindowData, int32_t offset, void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfoData* pTimeWindowData, int32_t offset,
int32_t forwardStep, int32_t numOfTotal, int32_t numOfOutput); int32_t forwardStep, int32_t numOfTotal, int32_t numOfOutput);


@ -210,8 +210,6 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
pOperator->status = OP_OPENED; pOperator->status = OP_OPENED;
} }
qDebug("enter project");
if (pOperator->status == OP_EXEC_DONE) { if (pOperator->status == OP_EXEC_DONE) {
if (pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) { if (pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) {
pOperator->status = OP_OPENED; pOperator->status = OP_OPENED;


@ -355,6 +355,34 @@ static void doSetTagColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlo
} }
} }
// todo handle the slimit info
void applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, SOperatorInfo* pOperator) {
SLimit* pLimit = &pLimitInfo->limit;
if (pLimit->offset > 0 && pLimitInfo->remainOffset > 0) {
if (pLimitInfo->remainOffset >= pBlock->info.rows) {
pLimitInfo->remainOffset -= pBlock->info.rows;
pBlock->info.rows = 0;
qDebug("current block ignore due to offset, current:%"PRId64", %s", pLimitInfo->remainOffset, GET_TASKID(pTaskInfo));
} else {
blockDataTrimFirstNRows(pBlock, pLimitInfo->remainOffset);
pLimitInfo->remainOffset = 0;
}
}
if (pLimit->limit != -1 && pLimit->limit <= (pLimitInfo->numOfOutputRows + pBlock->info.rows)) {
// limit the output rows
int32_t overflowRows = pLimitInfo->numOfOutputRows + pBlock->info.rows - pLimit->limit;
int32_t keep = pBlock->info.rows - overflowRows;
blockDataKeepFirstNRows(pBlock, keep);
qDebug("output limit %"PRId64" has reached, %s", pLimit->limit, GET_TASKID(pTaskInfo));
// setTaskStatus(pTaskInfo, TASK_COMPLETED);
pOperator->status = OP_EXEC_DONE;
}
}
static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableScanInfo, SSDataBlock* pBlock, static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableScanInfo, SSDataBlock* pBlock,
uint32_t* status) { uint32_t* status) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@ -364,6 +392,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
pCost->totalBlocks += 1; pCost->totalBlocks += 1;
pCost->totalRows += pBlock->info.rows; pCost->totalRows += pBlock->info.rows;
bool loadSMA = false; bool loadSMA = false;
*status = pInfo->dataBlockLoadFlag; *status = pInfo->dataBlockLoadFlag;
@ -379,6 +408,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo), qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
pCost->filterOutBlocks += 1; pCost->filterOutBlocks += 1;
pCost->totalRows += pBlock->info.rows;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} else if (*status == FUNC_DATA_REQUIRED_NOT_LOAD) { } else if (*status == FUNC_DATA_REQUIRED_NOT_LOAD) {
qDebug("%s data block skipped, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo), qDebug("%s data block skipped, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
@ -446,6 +476,9 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
relocateColumnData(pBlock, pTableScanInfo->matchInfo.pList, pCols, true); relocateColumnData(pBlock, pTableScanInfo->matchInfo.pList, pCols, true);
doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo); doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo);
// restore the previous value
pCost->totalRows -= pBlock->info.rows;
if (pTableScanInfo->pFilterNode != NULL) { if (pTableScanInfo->pFilterNode != NULL) {
int64_t st = taosGetTimestampUs(); int64_t st = taosGetTimestampUs();
doFilter(pTableScanInfo->pFilterNode, pBlock, &pTableScanInfo->matchInfo, pOperator->exprSupp.pFilterInfo); doFilter(pTableScanInfo->pFilterNode, pBlock, &pTableScanInfo->matchInfo, pOperator->exprSupp.pFilterInfo);
@ -462,6 +495,10 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
} }
} }
applyLimitOffset(&pInfo->limitInfo, pBlock, pTaskInfo, pOperator);
pCost->totalRows += pBlock->info.rows;
pInfo->limitInfo.numOfOutputRows = pCost->totalRows;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -691,10 +728,6 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
// if scan table by table // if scan table by table
if (pInfo->scanMode == TABLE_SCAN__TABLE_ORDER) { if (pInfo->scanMode == TABLE_SCAN__TABLE_ORDER) {
if (pInfo->noTable) {
return NULL;
}
int32_t numOfTables = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList); int32_t numOfTables = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
while (1) { while (1) {
@ -727,7 +760,6 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
} }
SArray* tableList = taosArrayGetP(pTaskInfo->tableqinfoList.pGroupList, pInfo->currentGroupId); SArray* tableList = taosArrayGetP(pTaskInfo->tableqinfoList.pGroupList, pInfo->currentGroupId);
tsdbReaderClose(pInfo->dataReader); tsdbReaderClose(pInfo->dataReader);
int32_t code = tsdbReaderOpen(pInfo->readHandle.vnode, &pInfo->cond, tableList, (STsdbReader**)&pInfo->dataReader, int32_t code = tsdbReaderOpen(pInfo->readHandle.vnode, &pInfo->cond, tableList, (STsdbReader**)&pInfo->dataReader,
@ -749,9 +781,6 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
return NULL; return NULL;
} }
SArray* tableList = taosArrayGetP(pTaskInfo->tableqinfoList.pGroupList, pInfo->currentGroupId);
// tsdbSetTableList(pInfo->dataReader, tableList);
tsdbReaderReset(pInfo->dataReader, &pInfo->cond); tsdbReaderReset(pInfo->dataReader, &pInfo->cond);
pInfo->scanTimes = 0; pInfo->scanTimes = 0;
@ -798,9 +827,15 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
} }
SDataBlockDescNode* pDescNode = pTableScanNode->scan.node.pOutputDataBlockDesc; SDataBlockDescNode* pDescNode = pTableScanNode->scan.node.pOutputDataBlockDesc;
int32_t numOfCols = 0;
int32_t numOfCols = 0;
int32_t code = extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, int32_t code = extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID,
&pInfo->matchInfo); &pInfo->matchInfo);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
initLimitInfo(pTableScanNode->scan.node.pLimit, pTableScanNode->scan.node.pSlimit, &pInfo->limitInfo);
code = initQueryTableDataCond(&pInfo->cond, pTableScanNode); code = initQueryTableDataCond(&pInfo->cond, pTableScanNode);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
@ -825,6 +860,9 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
if (pInfo->pFilterNode != NULL) { if (pInfo->pFilterNode != NULL) {
code = filterInitFromNode((SNode*)pInfo->pFilterNode, &pOperator->exprSupp.pFilterInfo, 0); code = filterInitFromNode((SNode*)pInfo->pFilterNode, &pOperator->exprSupp.pFilterInfo, 0);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
} }
pInfo->scanFlag = MAIN_SCAN; pInfo->scanFlag = MAIN_SCAN;
@ -847,10 +885,12 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
return pOperator; return pOperator;
_error: _error:
taosMemoryFreeClear(pInfo); if (pInfo != NULL) {
taosMemoryFreeClear(pOperator); destroyTableScanOperatorInfo(pInfo);
}
pTaskInfo->code = TSDB_CODE_QRY_OUT_OF_MEMORY; taosMemoryFreeClear(pOperator);
pTaskInfo->code = code;
return NULL; return NULL;
} }
@ -3486,7 +3526,6 @@ static SSDataBlock* sysTableBuildUserTablesByUids(SOperatorInfo* pOperator) {
static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) { static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
qError("%p buildUserTable", pTaskInfo);
SSysTableScanInfo* pInfo = pOperator->info; SSysTableScanInfo* pInfo = pOperator->info;
if (pInfo->pCur == NULL) { if (pInfo->pCur == NULL) {
pInfo->pCur = metaOpenTbCursor(pInfo->readHandle.meta); pInfo->pCur = metaOpenTbCursor(pInfo->readHandle.meta);
@ -4442,6 +4481,9 @@ SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, SSDataBlock*
} }
qDebug("%s get sorted row blocks, rows:%d", GET_TASKID(pTaskInfo), pResBlock->info.rows); qDebug("%s get sorted row blocks, rows:%d", GET_TASKID(pTaskInfo), pResBlock->info.rows);
applyLimitOffset(&pInfo->limitInfo, pResBlock, pTaskInfo, pOperator);
pInfo->limitInfo.numOfOutputRows += pResBlock->info.rows;
return (pResBlock->info.rows > 0) ? pResBlock : NULL; return (pResBlock->info.rows > 0) ? pResBlock : NULL;
} }
@ -4457,6 +4499,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code); T_LONG_JMP(pTaskInfo->env, code);
} }
size_t tableListSize = taosArrayGetSize(pInfo->tableListInfo->pTableList); size_t tableListSize = taosArrayGetSize(pInfo->tableListInfo->pTableList);
if (!pInfo->hasGroupId) { if (!pInfo->hasGroupId) {
pInfo->hasGroupId = true; pInfo->hasGroupId = true;
@ -4469,6 +4512,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
pInfo->groupId = ((STableKeyInfo*)taosArrayGet(pInfo->tableListInfo->pTableList, pInfo->tableStartIndex))->groupId; pInfo->groupId = ((STableKeyInfo*)taosArrayGet(pInfo->tableListInfo->pTableList, pInfo->tableStartIndex))->groupId;
startGroupTableMergeScan(pOperator); startGroupTableMergeScan(pOperator);
} }
SSDataBlock* pBlock = NULL; SSDataBlock* pBlock = NULL;
while (pInfo->tableStartIndex < tableListSize) { while (pInfo->tableStartIndex < tableListSize) {
pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pInfo->pResBlock, pOperator->resultInfo.capacity, pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pInfo->pResBlock, pOperator->resultInfo.capacity,
@ -4556,6 +4600,7 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
if (pInfo == NULL || pOperator == NULL) { if (pInfo == NULL || pOperator == NULL) {
goto _error; goto _error;
} }
if (pTableScanNode->pGroupTags) { if (pTableScanNode->pGroupTags) {
taosArraySort(pTableListInfo->pTableList, compareTableKeyInfoByGid); taosArraySort(pTableListInfo->pTableList, compareTableKeyInfoByGid);
} }
@ -4594,6 +4639,7 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
pInfo->pSortInfo = generateSortByTsInfo(pInfo->matchInfo.pList, pInfo->cond.order); pInfo->pSortInfo = generateSortByTsInfo(pInfo->matchInfo.pList, pInfo->cond.order);
pInfo->pSortInputBlock = createOneDataBlock(pInfo->pResBlock, false); pInfo->pSortInputBlock = createOneDataBlock(pInfo->pResBlock, false);
initLimitInfo(pTableScanNode->scan.node.pLimit, pTableScanNode->scan.node.pSlimit, &pInfo->limitInfo);
int32_t rowSize = pInfo->pResBlock->info.rowSize; int32_t rowSize = pInfo->pResBlock->info.rowSize;
pInfo->bufPageSize = getProperSortPageSize(rowSize); pInfo->bufPageSize = getProperSortPageSize(rowSize);


@ -531,7 +531,7 @@ typedef struct SMultiwayMergeOperatorInfo {
SOptrBasicInfo binfo; SOptrBasicInfo binfo;
int32_t bufPageSize; int32_t bufPageSize;
uint32_t sortBufSize; // max buffer size for in-memory sort uint32_t sortBufSize; // max buffer size for in-memory sort
SLimitInfo limitInfo;
SArray* pSortInfo; SArray* pSortInfo;
SSortHandle* pSortHandle; SSortHandle* pSortHandle;
SColMatchInfo matchInfo; SColMatchInfo matchInfo;
@ -592,6 +592,7 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData
blockDataEnsureCapacity(p, capacity); blockDataEnsureCapacity(p, capacity);
_retry:
while (1) { while (1) {
STupleHandle* pTupleHandle = NULL; STupleHandle* pTupleHandle = NULL;
if (pInfo->groupSort) { if (pInfo->groupSort) {
@ -626,14 +627,22 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData
} else { } else {
appendOneRowToDataBlock(p, pTupleHandle); appendOneRowToDataBlock(p, pTupleHandle);
} }
if (p->info.rows >= capacity) { if (p->info.rows >= capacity) {
break; break;
} }
} }
if (pInfo->groupSort) { if (pInfo->groupSort) {
pInfo->hasGroupId = false; pInfo->hasGroupId = false;
} }
if (p->info.rows > 0) { // todo extract method if (p->info.rows > 0) { // todo extract method
applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo, pOperator);
if (p->info.rows == 0) {
goto _retry;
}
blockDataEnsureCapacity(pDataBlock, p->info.rows); blockDataEnsureCapacity(pDataBlock, p->info.rows);
int32_t numOfCols = taosArrayGetSize(pColMatchInfo); int32_t numOfCols = taosArrayGetSize(pColMatchInfo);
for (int32_t i = 0; i < numOfCols; ++i) { for (int32_t i = 0; i < numOfCols; ++i) {
@ -650,9 +659,9 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData
} }
blockDataDestroy(p); blockDataDestroy(p);
qDebug("%s get sorted block, groupId:0x%" PRIx64 " rows:%d", GET_TASKID(pTaskInfo), pDataBlock->info.groupId, qDebug("%s get sorted block, groupId:0x%" PRIx64 " rows:%d", GET_TASKID(pTaskInfo), pDataBlock->info.groupId,
pDataBlock->info.rows); pDataBlock->info.rows);
return (pDataBlock->info.rows > 0) ? pDataBlock : NULL; return (pDataBlock->info.rows > 0) ? pDataBlock : NULL;
} }
@ -717,6 +726,7 @@ SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** downStreams, size
goto _error; goto _error;
} }
initLimitInfo(pMergePhyNode->node.pLimit, pMergePhyNode->node.pSlimit, &pInfo->limitInfo);
pInfo->binfo.pRes = createResDataBlock(pDescNode); pInfo->binfo.pRes = createResDataBlock(pDescNode);
int32_t rowSize = pInfo->binfo.pRes->info.rowSize; int32_t rowSize = pInfo->binfo.pRes->info.rowSize;
ASSERT(rowSize < 100 * 1024 * 1024); ASSERT(rowSize < 100 * 1024 * 1024);
@ -725,6 +735,10 @@ SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** downStreams, size
int32_t numOfOutputCols = 0; int32_t numOfOutputCols = 0;
code = extractColMatchInfo(pMergePhyNode->pTargets, pDescNode, &numOfOutputCols, COL_MATCH_FROM_SLOT_ID, &pInfo->matchInfo); code = extractColMatchInfo(pMergePhyNode->pTargets, pDescNode, &numOfOutputCols, COL_MATCH_FROM_SLOT_ID, &pInfo->matchInfo);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
SPhysiNode* pChildNode = (SPhysiNode*)nodesListGetNode(pPhyNode->pChildren, 0); SPhysiNode* pChildNode = (SPhysiNode*)nodesListGetNode(pPhyNode->pChildren, 0);
SSDataBlock* pInputBlock = createResDataBlock(pChildNode->pOutputDataBlockDesc); SSDataBlock* pInputBlock = createResDataBlock(pChildNode->pOutputDataBlockDesc);
initResultSizeInfo(&pOperator->resultInfo, 1024); initResultSizeInfo(&pOperator->resultInfo, 1024);


@ -2498,7 +2498,7 @@ static const SOptimizeRule optimizeRuleSet[] = {
{.pName = "RewriteUnique", .optimizeFunc = rewriteUniqueOptimize}, {.pName = "RewriteUnique", .optimizeFunc = rewriteUniqueOptimize},
{.pName = "LastRowScan", .optimizeFunc = lastRowScanOptimize}, {.pName = "LastRowScan", .optimizeFunc = lastRowScanOptimize},
{.pName = "TagScan", .optimizeFunc = tagScanOptimize}, {.pName = "TagScan", .optimizeFunc = tagScanOptimize},
// {.pName = "PushDownLimit", .optimizeFunc = pushDownLimitOptimize} {.pName = "PushDownLimit", .optimizeFunc = pushDownLimitOptimize}
}; };
// clang-format on // clang-format on


@ -97,6 +97,7 @@ static int32_t splCreateExchangeNode(SSplitContext* pCxt, SLogicNode* pChild, SE
if (NULL == pExchange->node.pLimit) { if (NULL == pExchange->node.pLimit) {
return TSDB_CODE_OUT_OF_MEMORY; return TSDB_CODE_OUT_OF_MEMORY;
} }
((SLimitNode*)pChild->pLimit)->limit += ((SLimitNode*)pChild->pLimit)->offset;
((SLimitNode*)pChild->pLimit)->offset = 0; ((SLimitNode*)pChild->pLimit)->offset = 0;
} }
@ -470,6 +471,12 @@ static int32_t stbSplCreateMergeNode(SSplitContext* pCxt, SLogicSubplan* pSubpla
if (NULL == pMerge->node.pTargets || NULL == pMerge->pInputs) { if (NULL == pMerge->node.pTargets || NULL == pMerge->pInputs) {
code = TSDB_CODE_OUT_OF_MEMORY; code = TSDB_CODE_OUT_OF_MEMORY;
} }
if (TSDB_CODE_SUCCESS == code && NULL != pSplitNode->pLimit) {
pMerge->node.pLimit = nodesCloneNode(pSplitNode->pLimit);
if (NULL == pMerge->node.pLimit) {
code = TSDB_CODE_OUT_OF_MEMORY;
}
}
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
if (NULL == pSubplan) { if (NULL == pSubplan) {
code = nodesListMakeAppend(&pSplitNode->pChildren, (SNode*)pMerge); code = nodesListMakeAppend(&pSplitNode->pChildren, (SNode*)pMerge);
@ -934,6 +941,7 @@ static int32_t stbSplSplitScanNodeWithoutPartTags(SSplitContext* pCxt, SStableSp
if (NULL == pSplitNode->pLimit) { if (NULL == pSplitNode->pLimit) {
return TSDB_CODE_OUT_OF_MEMORY; return TSDB_CODE_OUT_OF_MEMORY;
} }
((SLimitNode*)pInfo->pSplitNode->pLimit)->limit += ((SLimitNode*)pInfo->pSplitNode->pLimit)->offset;
((SLimitNode*)pInfo->pSplitNode->pLimit)->offset = 0; ((SLimitNode*)pInfo->pSplitNode->pLimit)->offset = 0;
} }
} }
@ -1021,6 +1029,10 @@ static int32_t stbSplSplitMergeScanNode(SSplitContext* pCxt, SLogicSubplan* pSub
SNodeList* pMergeKeys = NULL; SNodeList* pMergeKeys = NULL;
int32_t code = stbSplCreateMergeScanNode(pScan, &pMergeScan, &pMergeKeys); int32_t code = stbSplCreateMergeScanNode(pScan, &pMergeScan, &pMergeKeys);
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
if (NULL != pMergeScan->pLimit) {
((SLimitNode*)pMergeScan->pLimit)->limit += ((SLimitNode*)pMergeScan->pLimit)->offset;
((SLimitNode*)pMergeScan->pLimit)->offset = 0;
}
code = stbSplCreateMergeNode(pCxt, pSubplan, (SLogicNode*)pScan, pMergeKeys, pMergeScan, groupSort); code = stbSplCreateMergeNode(pCxt, pSubplan, (SLogicNode*)pScan, pMergeKeys, pMergeScan, groupSort);
} }
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {


@ -202,6 +202,7 @@ char* jobTaskStatusStr(int32_t status) {
return "UNKNOWN"; return "UNKNOWN";
} }
#if 0
SSchema createSchema(int8_t type, int32_t bytes, col_id_t colId, const char* name) { SSchema createSchema(int8_t type, int32_t bytes, col_id_t colId, const char* name) {
SSchema s = {0}; SSchema s = {0};
s.type = type; s.type = type;
@ -211,6 +212,7 @@ SSchema createSchema(int8_t type, int32_t bytes, col_id_t colId, const char* nam
tstrncpy(s.name, name, tListLen(s.name)); tstrncpy(s.name, name, tListLen(s.name));
return s; return s;
} }
#endif
void freeSTableMetaRspPointer(void *p) { void freeSTableMetaRspPointer(void *p) {
tFreeSTableMetaRsp(*(void**)p); tFreeSTableMetaRsp(*(void**)p);


@ -253,7 +253,7 @@ int32_t qwDbgEnableDebug(char *option) {
} }
if (0 == strcasecmp(option, "dead")) { if (0 == strcasecmp(option, "dead")) {
gQWDebug.sleepSimulate = true; gQWDebug.deadSimulate = true;
qError("qw dead debug enabled"); qError("qw dead debug enabled");
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }


@ -146,6 +146,7 @@ int32_t qwBuildAndSendFetchRsp(int32_t rspType, SRpcHandleInfo *pConn, SRetrieve
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
#if 0
int32_t qwBuildAndSendCancelRsp(SRpcHandleInfo *pConn, int32_t code) { int32_t qwBuildAndSendCancelRsp(SRpcHandleInfo *pConn, int32_t code) {
STaskCancelRsp *pRsp = (STaskCancelRsp *)rpcMallocCont(sizeof(STaskCancelRsp)); STaskCancelRsp *pRsp = (STaskCancelRsp *)rpcMallocCont(sizeof(STaskCancelRsp));
pRsp->code = code; pRsp->code = code;
@ -177,6 +178,7 @@ int32_t qwBuildAndSendDropRsp(SRpcHandleInfo *pConn, int32_t code) {
tmsgSendRsp(&rpcRsp); tmsgSendRsp(&rpcRsp);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
#endif
int32_t qwBuildAndSendDropMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn) { int32_t qwBuildAndSendDropMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn) {
STaskDropReq *req = (STaskDropReq *)rpcMallocCont(sizeof(STaskDropReq)); STaskDropReq *req = (STaskDropReq *)rpcMallocCont(sizeof(STaskDropReq));
@ -490,6 +492,7 @@ int32_t qWorkerProcessRspMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
#if 0
int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
return TSDB_CODE_QRY_INVALID_INPUT; return TSDB_CODE_QRY_INVALID_INPUT;
@ -530,6 +533,7 @@ _return:
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
#endif
int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {


@ -796,7 +796,7 @@ void *fetchQueueThread(void *param) {
qWorkerProcessFetchMsg(mockPointer, mgmt, fetchRpc, 0); qWorkerProcessFetchMsg(mockPointer, mgmt, fetchRpc, 0);
break; break;
case TDMT_SCH_CANCEL_TASK: case TDMT_SCH_CANCEL_TASK:
qWorkerProcessCancelMsg(mockPointer, mgmt, fetchRpc, 0); //qWorkerProcessCancelMsg(mockPointer, mgmt, fetchRpc, 0);
break; break;
case TDMT_SCH_DROP_TASK: case TDMT_SCH_DROP_TASK:
qWorkerProcessDropMsg(mockPointer, mgmt, fetchRpc, 0); qWorkerProcessDropMsg(mockPointer, mgmt, fetchRpc, 0);


@ -248,9 +248,9 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) {
} }
} }
if (optr == OP_TYPE_JSON_CONTAINS && type == TSDB_DATA_TYPE_JSON) { // if (optr == OP_TYPE_JSON_CONTAINS && type == TSDB_DATA_TYPE_JSON) {
return 28; // return 28;
} // }
switch (type) { switch (type) {
case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_BOOL:


@ -505,7 +505,7 @@ bool convertJsonValue(__compar_fn_t *fp, int32_t optr, int8_t typeLeft, int8_t t
(*pLeftData)++; (*pLeftData)++;
} }
if (typeRight == TSDB_DATA_TYPE_JSON) { if (typeRight == TSDB_DATA_TYPE_JSON) {
if (tTagIsJson(*pLeftData)) { if (tTagIsJson(*pRightData)) {
terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR; terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR;
return false; return false;
} }


@ -915,7 +915,7 @@ int32_t schLaunchRemoteTask(SSchJob *pJob, SSchTask *pTask) {
SCH_ERR_RET(schSetTaskCandidateAddrs(pJob, pTask)); SCH_ERR_RET(schSetTaskCandidateAddrs(pJob, pTask));
if (SCH_IS_QUERY_JOB(pJob)) { if (SCH_IS_QUERY_JOB(pJob)) {
// SCH_ERR_RET(schEnsureHbConnection(pJob, pTask)); SCH_ERR_RET(schEnsureHbConnection(pJob, pTask));
} }
SCH_RET(schBuildAndSendMsg(pJob, pTask, NULL, plan->msgType)); SCH_RET(schBuildAndSendMsg(pJob, pTask, NULL, plan->msgType));


@ -17,6 +17,7 @@
#include "command.h" #include "command.h"
#include "query.h" #include "query.h"
#include "schInt.h" #include "schInt.h"
#include "tglobal.h"
#include "tmsg.h" #include "tmsg.h"
#include "tref.h" #include "tref.h"
#include "trpc.h" #include "trpc.h"
@ -184,6 +185,10 @@ void schDeregisterTaskHb(SSchJob *pJob, SSchTask *pTask) {
} }
int32_t schEnsureHbConnection(SSchJob *pJob, SSchTask *pTask) { int32_t schEnsureHbConnection(SSchJob *pJob, SSchTask *pTask) {
if (!tsEnableQueryHb) {
return TSDB_CODE_SUCCESS;
}
SQueryNodeAddr *addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx); SQueryNodeAddr *addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx);
SQueryNodeEpId epId = {0}; SQueryNodeEpId epId = {0};


@ -89,188 +89,6 @@
// /\ UNCHANGED <<candidateVars, leaderVars>> // /\ UNCHANGED <<candidateVars, leaderVars>>
// //
static int32_t syncNodeMakeLogSame(SSyncNode* ths, SyncAppendEntries* pMsg) {
int32_t code;
SyncIndex delBegin = pMsg->prevLogIndex + 1;
SyncIndex delEnd = ths->pLogStore->syncLogLastIndex(ths->pLogStore);
// invert roll back!
for (SyncIndex index = delEnd; index >= delBegin; --index) {
if (ths->pFsm->FpRollBackCb != NULL) {
SSyncRaftEntry* pRollBackEntry;
code = ths->pLogStore->syncLogGetEntry(ths->pLogStore, index, &pRollBackEntry);
ASSERT(code == 0);
ASSERT(pRollBackEntry != NULL);
if (syncUtilUserRollback(pRollBackEntry->msgType)) {
SRpcMsg rpcMsg;
syncEntry2OriginalRpc(pRollBackEntry, &rpcMsg);
SFsmCbMeta cbMeta = {0};
cbMeta.index = pRollBackEntry->index;
cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index);
cbMeta.isWeak = pRollBackEntry->isWeak;
cbMeta.code = 0;
cbMeta.state = ths->state;
cbMeta.seqNum = pRollBackEntry->seqNum;
ths->pFsm->FpRollBackCb(ths->pFsm, &rpcMsg, cbMeta);
rpcFreeCont(rpcMsg.pCont);
}
syncEntryDestory(pRollBackEntry);
}
}
// delete confict entries
code = ths->pLogStore->syncLogTruncate(ths->pLogStore, delBegin);
ASSERT(code == 0);
return code;
}
// if FromIndex > walCommitVer, return 0
// else return num of pass entries
static int32_t syncNodeDoMakeLogSame(SSyncNode* ths, SyncIndex FromIndex) {
int32_t code = 0;
int32_t pass = 0;
SyncIndex delBegin = FromIndex;
SyncIndex delEnd = ths->pLogStore->syncLogLastIndex(ths->pLogStore);
// invert roll back!
for (SyncIndex index = delEnd; index >= delBegin; --index) {
if (ths->pFsm->FpRollBackCb != NULL) {
SSyncRaftEntry* pRollBackEntry;
code = ths->pLogStore->syncLogGetEntry(ths->pLogStore, index, &pRollBackEntry);
ASSERT(code == 0);
ASSERT(pRollBackEntry != NULL);
if (syncUtilUserRollback(pRollBackEntry->msgType)) {
SRpcMsg rpcMsg;
syncEntry2OriginalRpc(pRollBackEntry, &rpcMsg);
SFsmCbMeta cbMeta = {0};
cbMeta.index = pRollBackEntry->index;
cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index);
cbMeta.isWeak = pRollBackEntry->isWeak;
cbMeta.code = 0;
cbMeta.state = ths->state;
cbMeta.seqNum = pRollBackEntry->seqNum;
ths->pFsm->FpRollBackCb(ths->pFsm, &rpcMsg, cbMeta);
rpcFreeCont(rpcMsg.pCont);
}
syncEntryDestory(pRollBackEntry);
}
}
// update delete begin
SyncIndex walCommitVer = logStoreWalCommitVer(ths->pLogStore);
if (delBegin <= walCommitVer) {
delBegin = walCommitVer + 1;
pass = walCommitVer - delBegin + 1;
do {
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "update delete begin to %" PRId64, delBegin);
syncNodeEventLog(ths, logBuf);
} while (0);
}
// delete confict entries
code = ths->pLogStore->syncLogTruncate(ths->pLogStore, delBegin);
ASSERT(code == 0);
do {
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "make log same from:%" PRId64 ", delbegin:%" PRId64 ", pass:%d", FromIndex,
delBegin, pass);
syncNodeEventLog(ths, logBuf);
} while (0);
return pass;
}
int32_t syncNodePreCommit(SSyncNode* ths, SSyncRaftEntry* pEntry, int32_t code) {
SRpcMsg rpcMsg;
syncEntry2OriginalRpc(pEntry, &rpcMsg);
// leader transfer
if (pEntry->originalRpcType == TDMT_SYNC_LEADER_TRANSFER) {
int32_t code = syncDoLeaderTransfer(ths, &rpcMsg, pEntry);
ASSERT(code == 0);
}
if (ths->pFsm != NULL) {
if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pEntry->originalRpcType)) {
SFsmCbMeta cbMeta = {0};
cbMeta.index = pEntry->index;
cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index);
cbMeta.isWeak = pEntry->isWeak;
cbMeta.code = code;
cbMeta.state = ths->state;
cbMeta.seqNum = pEntry->seqNum;
ths->pFsm->FpPreCommitCb(ths->pFsm, &rpcMsg, cbMeta);
}
}
rpcFreeCont(rpcMsg.pCont);
return 0;
}
static bool syncNodeOnAppendEntriesBatchLogOK(SSyncNode* pSyncNode, SyncAppendEntriesBatch* pMsg) {
if (pMsg->prevLogIndex == SYNC_INDEX_INVALID) {
return true;
}
SyncIndex myLastIndex = syncNodeGetLastIndex(pSyncNode);
if (pMsg->prevLogIndex > myLastIndex) {
sDebug("vgId:%d, sync log not ok, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
return false;
}
SyncTerm myPreLogTerm = syncNodeGetPreTerm(pSyncNode, pMsg->prevLogIndex + 1);
if (myPreLogTerm == SYNC_TERM_INVALID) {
sDebug("vgId:%d, sync log not ok2, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
return false;
}
if (pMsg->prevLogIndex <= myLastIndex && pMsg->prevLogTerm == myPreLogTerm) {
return true;
}
sDebug("vgId:%d, sync log not ok3, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
return false;
}
// really pre log match
// prevLogIndex == -1
static bool syncNodeOnAppendEntriesLogOK(SSyncNode* pSyncNode, SyncAppendEntries* pMsg) {
if (pMsg->prevLogIndex == SYNC_INDEX_INVALID) {
return true;
}
SyncIndex myLastIndex = syncNodeGetLastIndex(pSyncNode);
if (pMsg->prevLogIndex > myLastIndex) {
sDebug("vgId:%d, sync log not ok, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
return false;
}
SyncTerm myPreLogTerm = syncNodeGetPreTerm(pSyncNode, pMsg->prevLogIndex + 1);
if (myPreLogTerm == SYNC_TERM_INVALID) {
sDebug("vgId:%d, sync log not ok2, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
return false;
}
if (pMsg->prevLogIndex <= myLastIndex && pMsg->prevLogTerm == myPreLogTerm) {
return true;
}
sDebug("vgId:%d, sync log not ok3, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
return false;
}
int32_t syncNodeFollowerCommit(SSyncNode* ths, SyncIndex newCommitIndex) { int32_t syncNodeFollowerCommit(SSyncNode* ths, SyncIndex newCommitIndex) {
// maybe update commit index, leader notice me // maybe update commit index, leader notice me
if (newCommitIndex > ths->commitIndex) { if (newCommitIndex > ths->commitIndex) {


@ -1148,6 +1148,8 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo) {
pSyncNode->pRaftCfg = NULL; pSyncNode->pRaftCfg = NULL;
} }
// init by SSyncInfo
pSyncNode->vgId = pSyncInfo->vgId;
SSyncCfg* pCfg = &pSyncInfo->syncCfg; SSyncCfg* pCfg = &pSyncInfo->syncCfg;
sDebug("vgId:%d, replica:%d selfIndex:%d", pSyncNode->vgId, pCfg->replicaNum, pCfg->myIndex); sDebug("vgId:%d, replica:%d selfIndex:%d", pSyncNode->vgId, pCfg->replicaNum, pCfg->myIndex);
for (int32_t i = 0; i < pCfg->replicaNum; ++i) { for (int32_t i = 0; i < pCfg->replicaNum; ++i) {
@ -1155,8 +1157,6 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo) {
sDebug("vgId:%d, index:%d ep:%s:%u", pSyncNode->vgId, i, pNode->nodeFqdn, pNode->nodePort); sDebug("vgId:%d, index:%d ep:%s:%u", pSyncNode->vgId, i, pNode->nodeFqdn, pNode->nodePort);
} }
// init by SSyncInfo
pSyncNode->vgId = pSyncInfo->vgId;
memcpy(pSyncNode->path, pSyncInfo->path, sizeof(pSyncNode->path)); memcpy(pSyncNode->path, pSyncInfo->path, sizeof(pSyncNode->path));
snprintf(pSyncNode->raftStorePath, sizeof(pSyncNode->raftStorePath), "%s%sraft_store.json", pSyncInfo->path, snprintf(pSyncNode->raftStorePath, sizeof(pSyncNode->raftStorePath), "%s%sraft_store.json", pSyncInfo->path,
TD_DIRSEP); TD_DIRSEP);
@ -1645,7 +1645,9 @@ int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode) {
for (int i = 0; i < pSyncNode->peersNum; ++i) { for (int i = 0; i < pSyncNode->peersNum; ++i) {
SSyncTimer* pSyncTimer = syncNodeGetHbTimer(pSyncNode, &(pSyncNode->peersId[i])); SSyncTimer* pSyncTimer = syncNodeGetHbTimer(pSyncNode, &(pSyncNode->peersId[i]));
syncHbTimerStart(pSyncNode, pSyncTimer); if (pSyncTimer != NULL) {
syncHbTimerStart(pSyncNode, pSyncTimer);
}
} }
return ret; return ret;
@ -1662,7 +1664,9 @@ int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode) {
for (int i = 0; i < pSyncNode->peersNum; ++i) { for (int i = 0; i < pSyncNode->peersNum; ++i) {
SSyncTimer* pSyncTimer = syncNodeGetHbTimer(pSyncNode, &(pSyncNode->peersId[i])); SSyncTimer* pSyncTimer = syncNodeGetHbTimer(pSyncNode, &(pSyncNode->peersId[i]));
syncHbTimerStop(pSyncNode, pSyncTimer); if (pSyncTimer != NULL) {
syncHbTimerStop(pSyncNode, pSyncTimer);
}
} }
return ret; return ret;
@ -3424,7 +3428,7 @@ int32_t syncNodeDoCommit(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endInde
// config change finish // config change finish
if (pEntry->originalRpcType == TDMT_SYNC_CONFIG_CHANGE_FINISH) { if (pEntry->originalRpcType == TDMT_SYNC_CONFIG_CHANGE_FINISH) {
if (rpcMsg.pCont != NULL) { if (rpcMsg.pCont != NULL && rpcMsg.contLen > 0) {
code = syncNodeConfigChangeFinish(ths, &rpcMsg, pEntry); code = syncNodeConfigChangeFinish(ths, &rpcMsg, pEntry);
ASSERT(code == 0); ASSERT(code == 0);
} }


@ -3026,7 +3026,7 @@ void syncReconfigFinishFromRpcMsg(const SRpcMsg* pRpcMsg, SyncReconfigFinish* pM
} }
SyncReconfigFinish* syncReconfigFinishFromRpcMsg2(const SRpcMsg* pRpcMsg) { SyncReconfigFinish* syncReconfigFinishFromRpcMsg2(const SRpcMsg* pRpcMsg) {
SyncReconfigFinish* pMsg = syncReconfigFinishDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen); SyncReconfigFinish* pMsg = syncReconfigFinishDeserialize2(pRpcMsg->pCont, (uint32_t)(pRpcMsg->contLen));
ASSERT(pMsg != NULL); ASSERT(pMsg != NULL);
return pMsg; return pMsg;
} }


@ -1612,8 +1612,8 @@ int transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn) {
SCvtAddr cvtAddr = {0}; SCvtAddr cvtAddr = {0};
if (ip != NULL && fqdn != NULL) { if (ip != NULL && fqdn != NULL) {
if (strlen(ip) <= sizeof(cvtAddr.ip)) memcpy(cvtAddr.ip, ip, strlen(ip)); tstrncpy(cvtAddr.ip, ip, sizeof(cvtAddr.ip));
if (strlen(fqdn) <= sizeof(cvtAddr.fqdn)) memcpy(cvtAddr.fqdn, fqdn, strlen(fqdn)); tstrncpy(cvtAddr.fqdn, fqdn, sizeof(cvtAddr.fqdn));
cvtAddr.cvt = true; cvtAddr.cvt = true;
} }
for (int i = 0; i < pTransInst->numOfThreads; i++) { for (int i = 0; i < pTransInst->numOfThreads; i++) {


@ -249,8 +249,8 @@ int transAsyncSend(SAsyncPool* pool, queue* q) {
if (atomic_load_8(&pool->stop) == 1) { if (atomic_load_8(&pool->stop) == 1) {
return -1; return -1;
} }
int idx = pool->index; int idx = pool->index % pool->nAsync;
idx = idx % pool->nAsync;
// no need mutex here // no need mutex here
if (pool->index++ > pool->nAsync) { if (pool->index++ > pool->nAsync) {
pool->index = 0; pool->index = 0;
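Folding the two statements into `int idx = pool->index % pool->nAsync;` keeps the chosen slot in range no matter how far the counter has advanced. A self-contained sketch of this round-robin pick; AsyncPool here is a bare stand-in for the transport's SAsyncPool.

```c
#include <stdio.h>

typedef struct {
  int index;   /* monotonically advanced by senders */
  int nAsync;  /* number of async handles in the pool */
} AsyncPool;

static int pickHandle(AsyncPool *pool) {
  int idx = pool->index % pool->nAsync;  /* always 0..nAsync-1 */
  if (pool->index++ > pool->nAsync) {    /* periodic reset, as in the hunk */
    pool->index = 0;
  }
  return idx;
}

int main(void) {
  AsyncPool pool = {.index = 0, .nAsync = 3};
  for (int i = 0; i < 8; ++i) {
    printf("send %d -> handle %d\n", i, pickHandle(&pool));
  }
  return 0;
}
```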

View File

@ -58,6 +58,7 @@ void taosStringBuilderAppendChar(SStringBuilder* sb, char c) {
void taosStringBuilderAppendStringLen(SStringBuilder* sb, const char* str, size_t len) { void taosStringBuilderAppendStringLen(SStringBuilder* sb, const char* str, size_t len) {
taosStringBuilderEnsureCapacity(sb, len); taosStringBuilderEnsureCapacity(sb, len);
if(!sb->buf) return;
memcpy(sb->buf + sb->pos, str, len); memcpy(sb->buf + sb->pos, str, len);
sb->pos += len; sb->pos += len;
} }
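The added `if(!sb->buf) return;` skips the memcpy when growing the buffer failed and left it NULL. A minimal sketch of the same guard with a simplified builder; StrBuilder stands in for SStringBuilder and the growth policy is invented for the example.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  char  *buf;
  size_t pos;
  size_t cap;
} StrBuilder;

static void ensureCapacity(StrBuilder *sb, size_t extra) {
  if (sb->buf != NULL && sb->pos + extra <= sb->cap) return;
  size_t newCap = sb->cap ? sb->cap * 2 : 16;
  while (newCap < sb->pos + extra) newCap *= 2;
  char *p = realloc(sb->buf, newCap);  /* may fail and return NULL */
  if (p == NULL) return;               /* keep old buf (possibly still NULL) */
  sb->buf = p;
  sb->cap = newCap;
}

static void appendStringLen(StrBuilder *sb, const char *str, size_t len) {
  ensureCapacity(sb, len);
  if (!sb->buf) return;   /* the added guard: no write through NULL */
  memcpy(sb->buf + sb->pos, str, len);
  sb->pos += len;
}

int main(void) {
  StrBuilder sb = {0};
  appendStringLen(&sb, "hello ", 6);
  appendStringLen(&sb, "world", 5);
  if (sb.buf) printf("%.*s\n", (int)sb.pos, sb.buf);
  free(sb.buf);
  return 0;
}
```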

View File

@ -40,6 +40,7 @@ class TDSimClient:
"jniDebugFlag": "143", "jniDebugFlag": "143",
"qDebugFlag": "143", "qDebugFlag": "143",
"supportVnodes": "1024", "supportVnodes": "1024",
"enableQueryHb": "1",
"telemetryReporting": "0", "telemetryReporting": "0",
} }
@ -136,6 +137,7 @@ class TDDnode:
"wDebugFlag": "143", "wDebugFlag": "143",
"numOfLogLines": "100000000", "numOfLogLines": "100000000",
"statusInterval": "1", "statusInterval": "1",
"enableQueryHb": "1",
"supportVnodes": "1024", "supportVnodes": "1024",
"telemetryReporting": "0" "telemetryReporting": "0"
} }

View File

@ -17,6 +17,5 @@ exe:
clean: clean:
rm $(ROOT)batchprepare rm $(ROOT)batchprepare
rm $(ROOT)stmtBatchTest rm $(ROOT)stopquery
rm $(ROOT)stmtTest rm $(ROOT)dbTableRoute
rm $(ROOT)stmt_function

View File

@ -36,7 +36,7 @@ int64_t st, et;
char hostName[128]; char hostName[128];
char dbName[128]; char dbName[128];
char tbName[128]; char tbName[128];
int32_t runTimes = 1000; int32_t runTimes = 10;
typedef struct { typedef struct {
int id; int id;
@ -657,7 +657,6 @@ int sqConCleanupAsyncQuery(bool fetch) {
void sqRunAllCase(void) { void sqRunAllCase(void) {
#if 1
sqStopSyncQuery(false); sqStopSyncQuery(false);
sqStopSyncQuery(true); sqStopSyncQuery(true);
sqStopAsyncQuery(false); sqStopAsyncQuery(false);
@ -678,33 +677,33 @@ void sqRunAllCase(void) {
sqConCloseAsyncQuery(false); sqConCloseAsyncQuery(false);
sqConCloseAsyncQuery(true); sqConCloseAsyncQuery(true);
sqKillSyncQuery(false); sqKillSyncQuery(false);
sqKillSyncQuery(true); sqKillSyncQuery(true);
sqKillAsyncQuery(false); sqKillAsyncQuery(false);
sqKillAsyncQuery(true); sqKillAsyncQuery(true);
#if 0
/*
sqConKillSyncQuery(false); sqConKillSyncQuery(false);
sqConKillSyncQuery(true); sqConKillSyncQuery(true);
sqConKillAsyncQuery(false); sqConKillAsyncQuery(false);
sqConKillAsyncQuery(true); sqConKillAsyncQuery(true);
#endif
/*
sqConCleanupSyncQuery(false); sqConCleanupSyncQuery(false);
sqConCleanupSyncQuery(true); sqConCleanupSyncQuery(true);
sqConCleanupAsyncQuery(false); sqConCleanupAsyncQuery(false);
sqConCleanupAsyncQuery(true); sqConCleanupAsyncQuery(true);
*/ */
#endif
int32_t l = 5; int32_t l = 5;
while (l) { while (l) {
printf("%d\n", l--); printf("%d\n", l--);
sleep(1); sleep(1);
} }
printf("test done\n");
} }
int main(int argc, char *argv[]) { int main(int argc, char *argv[]) {
if (argc != 4) { if (argc != 4) {
printf("usage: %s server-ip dbname tablename\n", argv[0]); printf("usage: %s server-ip dbname tablename\n", argv[0]);

View File

@ -25,7 +25,6 @@
./test.sh -f tsim/db/delete_reusevnode2.sim ./test.sh -f tsim/db/delete_reusevnode2.sim
./test.sh -f tsim/db/delete_writing1.sim ./test.sh -f tsim/db/delete_writing1.sim
./test.sh -f tsim/db/delete_writing2.sim ./test.sh -f tsim/db/delete_writing2.sim
# unsupport ./test.sh -f tsim/db/dropdnodes.sim
./test.sh -f tsim/db/error1.sim ./test.sh -f tsim/db/error1.sim
./test.sh -f tsim/db/keep.sim ./test.sh -f tsim/db/keep.sim
./test.sh -f tsim/db/len.sim ./test.sh -f tsim/db/len.sim
@ -36,26 +35,26 @@
./test.sh -f tsim/db/taosdlog.sim ./test.sh -f tsim/db/taosdlog.sim
# ---- dnode # ---- dnode
# unsupport ./test.sh -f tsim/dnode/balance_replica1.sim ./test.sh -f tsim/dnode/balance_replica1.sim
# unsupport ./test.sh -f tsim/dnode/balance_replica3.sim ./test.sh -f tsim/dnode/balance_replica3.sim
# unsupport ./test.sh -f tsim/dnode/balance1.sim ./test.sh -f tsim/dnode/balance1.sim
# unsupport ./test.sh -f tsim/dnode/balance2.sim ./test.sh -f tsim/dnode/balance2.sim
# unsupport ./test.sh -f tsim/dnode/balance3.sim ./test.sh -f tsim/dnode/balance3.sim
# unsupport ./test.sh -f tsim/dnode/balancex.sim ./test.sh -f tsim/dnode/balancex.sim
./test.sh -f tsim/dnode/create_dnode.sim ./test.sh -f tsim/dnode/create_dnode.sim
./test.sh -f tsim/dnode/drop_dnode_has_mnode.sim ./test.sh -f tsim/dnode/drop_dnode_has_mnode.sim
# unsupport ./test.sh -f tsim/dnode/drop_dnode_has_qnode_snode.sim ./test.sh -f tsim/dnode/drop_dnode_has_qnode_snode.sim
# unsupport ./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica1.sim ./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica1.sim
# unsupport ./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica3.sim ./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica3.sim
# unsupport ./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica1.sim ./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica1.sim
# unsupport ./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica3.sim ./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica3.sim
./test.sh -f tsim/dnode/offline_reason.sim ./test.sh -f tsim/dnode/offline_reason.sim
./test.sh -f tsim/dnode/redistribute_vgroup_replica1.sim ./test.sh -f tsim/dnode/redistribute_vgroup_replica1.sim
./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim ./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim
./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim ./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim
./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v2.sim ./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v2.sim
./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v3.sim ./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v3.sim
# unsupport ./test.sh -f tsim/dnode/vnode_clean.sim ./test.sh -f tsim/dnode/vnode_clean.sim
./test.sh -f tsim/dnode/use_dropped_dnode.sim ./test.sh -f tsim/dnode/use_dropped_dnode.sim
# ---- import ---- # ---- import ----
@ -155,7 +154,7 @@
./test.sh -f tsim/parser/select_with_tags.sim ./test.sh -f tsim/parser/select_with_tags.sim
./test.sh -f tsim/parser/selectResNum.sim ./test.sh -f tsim/parser/selectResNum.sim
./test.sh -f tsim/parser/set_tag_vals.sim ./test.sh -f tsim/parser/set_tag_vals.sim
# TD-19572 ./test.sh -f tsim/parser/single_row_in_tb.sim ./test.sh -f tsim/parser/single_row_in_tb.sim
./test.sh -f tsim/parser/sliding.sim ./test.sh -f tsim/parser/sliding.sim
./test.sh -f tsim/parser/slimit_alter_tags.sim ./test.sh -f tsim/parser/slimit_alter_tags.sim
./test.sh -f tsim/parser/slimit.sim ./test.sh -f tsim/parser/slimit.sim
@ -185,7 +184,7 @@
./test.sh -f tsim/qnode/basic1.sim ./test.sh -f tsim/qnode/basic1.sim
# ---- snode ---- # ---- snode ----
# unsupport ./test.sh -f tsim/snode/basic1.sim ./test.sh -f tsim/snode/basic1.sim
# ---- mnode # ---- mnode
./test.sh -f tsim/mnode/basic1.sim ./test.sh -f tsim/mnode/basic1.sim
@ -303,7 +302,6 @@
# --- sma # --- sma
./test.sh -f tsim/sma/drop_sma.sim ./test.sh -f tsim/sma/drop_sma.sim
./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim ./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
# temp disable
./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim ./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim ./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
@ -319,13 +317,13 @@
./test.sh -f tsim/valgrind/checkUdf.sim ./test.sh -f tsim/valgrind/checkUdf.sim
# --- vnode ---- # --- vnode ----
# unsupport ./test.sh -f tsim/vnode/replica3_basic.sim ./test.sh -f tsim/vnode/replica3_basic.sim
# unsupport ./test.sh -f tsim/vnode/replica3_repeat.sim ./test.sh -f tsim/vnode/replica3_repeat.sim
# unsupport ./test.sh -f tsim/vnode/replica3_vgroup.sim ./test.sh -f tsim/vnode/replica3_vgroup.sim
# unsupport ./test.sh -f tsim/vnode/replica3_many.sim ./test.sh -f tsim/vnode/replica3_many.sim
# unsupport ./test.sh -f tsim/vnode/replica3_import.sim ./test.sh -f tsim/vnode/replica3_import.sim
# unsupport ./test.sh -f tsim/vnode/stable_balance_replica1.sim ./test.sh -f tsim/vnode/stable_balance_replica1.sim
# unsupport ./test.sh -f tsim/vnode/stable_dnode2_stop.sim ./test.sh -f tsim/vnode/stable_dnode2_stop.sim
./test.sh -f tsim/vnode/stable_dnode2.sim ./test.sh -f tsim/vnode/stable_dnode2.sim
./test.sh -f tsim/vnode/stable_dnode3.sim ./test.sh -f tsim/vnode/stable_dnode3.sim
./test.sh -f tsim/vnode/stable_replica3_dnode6.sim ./test.sh -f tsim/vnode/stable_replica3_dnode6.sim

View File

@ -111,13 +111,13 @@ endi
if $data21_db != 1000 then # wal_level fsyncperiod if $data21_db != 1000 then # wal_level fsyncperiod
return -1 return -1
endi endi
if $data22_db != 345600 then # wal_retention_period if $data22_db != 0 then # wal_retention_period
return -1 return -1
endi endi
if $data23_db != -1 then # wal_retention_size if $data23_db != 0 then # wal_retention_size
return -1 return -1
endi endi
if $data24_db != 86400 then # wal_roll_period if $data24_db != 0 then # wal_roll_period
return -1 return -1
endi endi
if $data25_db != 0 then # wal_segment_size if $data25_db != 0 then # wal_segment_size

View File

@ -1,107 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
print ========== prepare data
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 2000
sql connect
sql create dnode $hostname2
sleep 2000
sql create database db
sql use db
print ========== step1
sql create table mt (ts timestamp, tbcol int) TAGS(tgcol int)
sql create table db.t1 using db.mt tags(1)
sql create table db.t2 using db.mt tags(2)
sql create table db.t3 using db.mt tags(3)
sql create table db.t4 using db.mt tags(4)
sql create table db.t5 using db.mt tags(5)
sql create table db.t6 using db.mt tags(6)
sql create table db.t7 using db.mt tags(7)
sql create table db.t8 using db.mt tags(8)
sql create table db.t9 using db.mt tags(9)
sql create table db.t10 using db.mt tags(10)
sql create table db.t11 using db.mt tags(11)
sql create table db.t12 using db.mt tags(12)
sql create table db.t13 using db.mt tags(13)
sql create table db.t14 using db.mt tags(14)
sql create table db.t15 using db.mt tags(15)
sql create table db.t16 using db.mt tags(16)
sql insert into db.t1 values(now, 1)
sql insert into db.t2 values(now, 1)
sql insert into db.t3 values(now, 1)
sql insert into db.t4 values(now, 1)
sql insert into db.t5 values(now, 1)
sql insert into db.t6 values(now, 1)
sql insert into db.t7 values(now, 1)
sql insert into db.t8 values(now, 1)
sql insert into db.t9 values(now, 1)
sql insert into db.t10 values(now, 1)
sql insert into db.t11 values(now, 1)
sql insert into db.t12 values(now, 1)
sql insert into db.t13 values(now, 1)
sql insert into db.t14 values(now, 1)
sql insert into db.t15 values(now, 1)
sql insert into db.t16 values(now, 1)
print ========== step2
sql show tables
print $rows
if $rows != 16 then
return -1
endi
sql select * from mt
print $rows
if $rows != 16 then
return -1
endi
print ========== step3
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep 3000
sql drop dnode $hostname2
sleep 2000
print ========== step3
sql show tables
print $rows
if $rows != 8 then
return -1
endi
sql select * from mt
print $rows
if $rows != 8 then
return -1
endi
sql select * from db.t5
if $rows != 1 then
return -1
endi
sql select * from db.t13
if $rows != 1 then
return -1
endi
sql_error select * from db.t1
sql_error select * from db.t9
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT

View File

@ -81,6 +81,42 @@ if $data(2)[2] != 2 then
return -1 return -1
endi endi
sql select * from d1.t1 order by t desc
print $data01 $data11 $data21 $data31 $data41
if $data01 != 11 then
return -1
endi
if $data11 != 12 then
return -1
endi
if $data21 != 13 then
return -1
endi
if $data31 != 14 then
return -1
endi
if $data41 != 15 then
return -1
endi
sql select * from d2.t2 order by t desc
print $data01 $data11 $data21 $data31 $data41
if $data01 != 21 then
return -1
endi
if $data11 != 22 then
return -1
endi
if $data21 != 23 then
return -1
endi
if $data31 != 24 then
return -1
endi
if $data41 != 25 then
return -1
endi
print ========== step4 print ========== step4
sql drop dnode 2 sql drop dnode 2
sql select * from information_schema.ins_dnodes sql select * from information_schema.ins_dnodes
@ -93,6 +129,42 @@ if $data(2)[2] != null then
return -1 return -1
endi endi
sql select * from d1.t1 order by t desc
print $data01 $data11 $data21 $data31 $data41
if $data01 != 11 then
return -1
endi
if $data11 != 12 then
return -1
endi
if $data21 != 13 then
return -1
endi
if $data31 != 14 then
return -1
endi
if $data41 != 15 then
return -1
endi
sql select * from d2.t2 order by t desc
print $data01 $data11 $data21 $data31 $data41
if $data01 != 21 then
return -1
endi
if $data11 != 22 then
return -1
endi
if $data21 != 23 then
return -1
endi
if $data31 != 24 then
return -1
endi
if $data41 != 25 then
return -1
endi
system sh/exec.sh -n dnode2 -s stop -x SIGINT system sh/exec.sh -n dnode2 -s stop -x SIGINT
print ========== step5 print ========== step5
@ -131,6 +203,42 @@ if $data(3)[2] != 1 then
return -1 return -1
endi endi
sql select * from d1.t1 order by t desc
print $data01 $data11 $data21 $data31 $data41
if $data01 != 11 then
return -1
endi
if $data11 != 12 then
return -1
endi
if $data21 != 13 then
return -1
endi
if $data31 != 14 then
return -1
endi
if $data41 != 15 then
return -1
endi
sql select * from d2.t2 order by t desc
print $data01 $data11 $data21 $data31 $data41
if $data01 != 21 then
return -1
endi
if $data11 != 22 then
return -1
endi
if $data21 != 23 then
return -1
endi
if $data31 != 24 then
return -1
endi
if $data41 != 25 then
return -1
endi
print ========== step6 print ========== step6
sql create database d3 vgroups 1 sql create database d3 vgroups 1
sql create table d3.t3 (t timestamp, i int) sql create table d3.t3 (t timestamp, i int)

View File

@ -39,9 +39,9 @@ endi
if $data01 != 1 then if $data01 != 1 then
return -1 return -1
endi endi
if $data41 != 5 then #if $data41 != 5 then
return -1 # return -1
endi #endi
sql select * from $stb order by ts desc limit 5 sql select * from $stb order by ts desc limit 5
if $rows != 5 then if $rows != 5 then

View File

@ -59,7 +59,7 @@ step12:
print ====> db not ready! print ====> db not ready!
return -1 return -1
endi endi
sql show d1.vgroups sql show db1.vgroups
print ===> $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 print ===> $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
$leaderExist = 0 $leaderExist = 0
if $rows != 1 then if $rows != 1 then
@ -87,19 +87,19 @@ step13:
print ====> db not ready! print ====> db not ready!
return -1 return -1
endi endi
sql show d1.vgroups sql show db1.vgroups
print ===> $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 print ===> $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
$leaderExist = 0 $leaderExist = 0
if $rows != 1 then if $rows != 1 then
return -1 return -1
endi endi
if $data(3)[4] == leader then if $data(2)[4] == leader then
$leaderExist = 1 $leaderExist = 1
endi endi
if $data(3)[6] == leader then if $data(2)[6] == leader then
$leaderExist = 1 $leaderExist = 1
endi endi
if $data(3)[8] == leader then if $data(2)[8] == leader then
$leaderExist = 1 $leaderExist = 1
endi endi
if $leaderExist != 1 then if $leaderExist != 1 then
@ -115,16 +115,16 @@ step14:
print ====> db not ready! print ====> db not ready!
return -1 return -1
endi endi
sql show d1.vgroups sql show db1.vgroups
print ===> $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 print ===> $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
$leaderExist = 0 $leaderExist = 0
if $data(4)[4] == leader then if $data(2)[4] == leader then
$leaderExist = 1 $leaderExist = 1
endi endi
if $data(4)[6] == leader then if $data(2)[6] == leader then
$leaderExist = 1 $leaderExist = 1
endi endi
if $data(4)[8] == leader then if $data(2)[8] == leader then
$leaderExist = 1 $leaderExist = 1
endi endi
if $leaderExist != 1 then if $leaderExist != 1 then
@ -140,16 +140,16 @@ step15:
print ====> db not ready! print ====> db not ready!
return -1 return -1
endi endi
sql show d1.vgroups sql show db1.vgroups
print ===> $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 print ===> $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
$leaderExist = 0 $leaderExist = 0
if $data(4)[4] == leader then if $data(2)[4] == leader then
$leaderExist = 1 $leaderExist = 1
endi endi
if $data(4)[6] == leader then if $data(2)[6] == leader then
$leaderExist = 1 $leaderExist = 1
endi endi
if $data(4)[8] == leader then if $data(2)[8] == leader then
$leaderExist = 1 $leaderExist = 1
endi endi
if $leaderExist != 1 then if $leaderExist != 1 then

View File

@ -56,7 +56,7 @@ step1:
endi endi
sql show db.vgroups sql show db.vgroups
print ===> $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 print ===> $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
if $rows != 4 then if $rows != 1 then
return -1 return -1
endi endi
if $data(2)[4] == leader then if $data(2)[4] == leader then

View File

@ -426,7 +426,17 @@ class TDTestCase:
tdDnodes.stop(1) tdDnodes.stop(1)
tdDnodes.start(1) tdDnodes.start(1)
tdLog.printNoPrefix("==========step3:insert and flush in rollup database")
tdLog.printNoPrefix("==========step3: sleep 20s for catalogUpdateTableIndex")
tdSql.execute("create database db_s20")
tdSql.execute("use db_s20")
tdSql.execute(f"create stable stb1 (ts timestamp, c1 int) tags (t1 int) sma(c1);")
tdSql.execute("alter stable stb1 add column tinyint_col tinyint")
time.sleep(20)
tdSql.query("select count(*) from stb1")
tdSql.execute("drop database if exists db_s20 ")
tdLog.printNoPrefix("==========step4:insert and flush in rollup database")
tdSql.execute("create database db4 retentions 1s:4m,2s:8m,3s:12m") tdSql.execute("create database db4 retentions 1s:4m,2s:8m,3s:12m")
tdSql.execute("use db4") tdSql.execute("use db4")
self.__create_tb(rollup="first") self.__create_tb(rollup="first")
@ -435,7 +445,7 @@ class TDTestCase:
tdSql.execute(f'flush database db4') tdSql.execute(f'flush database db4')
tdLog.printNoPrefix("==========step4:after wal, all check again ") tdLog.printNoPrefix("==========step5:after wal, all check again ")
tdSql.prepare() tdSql.prepare()
self.__create_tb() self.__create_tb()
self.__insert_data() self.__insert_data()

View File

@ -29,6 +29,7 @@ class TDTestCase:
time.sleep(self.offset_time * 2) time.sleep(self.offset_time * 2)
tdSql.query(f'select * from {self.dbname}.{self.ctbname}') tdSql.query(f'select * from {self.dbname}.{self.ctbname}')
tdSql.checkEqual(tdSql.queryRows, 0) tdSql.checkEqual(tdSql.queryRows, 0)
tdSql.execute(f'TRIM DATABASE {self.dbname}')
def stop(self): def stop(self):
tdSql.close() tdSql.close()

View File

@ -12,7 +12,6 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import random import random
import string
from datetime import datetime from datetime import datetime
from util import constant from util import constant
from util.log import * from util.log import *
@ -31,6 +30,7 @@ class TDTestCase:
self.ctbname = 'ctb' self.ctbname = 'ctb'
self.ts = 1537146000000 self.ts = 1537146000000
self.str_length = 20 self.str_length = 20
self.block_update_times = 10000
self.column_dict = { self.column_dict = {
'col1': 'tinyint', 'col1': 'tinyint',
'col2': 'smallint', 'col2': 'smallint',
@ -47,6 +47,14 @@ class TDTestCase:
'col13': f'nchar({self.str_length})', 'col13': f'nchar({self.str_length})',
'col_ts' : 'timestamp' 'col_ts' : 'timestamp'
} }
self.tag_dict = {
't0':'int'
}
# The number of tag_values should be the same as tbnum
self.tag_values = [
f'10',
f'100'
]
def data_check(self,tbname,col_name,col_type,value): def data_check(self,tbname,col_name,col_type,value):
tdSql.query(f'select {col_name} from {tbname}') tdSql.query(f'select {col_name} from {tbname}')
@ -248,11 +256,26 @@ class TDTestCase:
self.error_check(self.ntbname,self.column_dict,'ntb') self.error_check(self.ntbname,self.column_dict,'ntb')
self.error_check(self.ctbname,self.column_dict,'ctb',self.stbname) self.error_check(self.ctbname,self.column_dict,'ctb',self.stbname)
def update_10000times_and_query(self):
new_column_dict = {"ts": "timestamp"}
new_column_dict.update(self.column_dict)
tdSql.execute(f'drop database if exists {self.dbname}')
tdSql.execute(f'create database {self.dbname}')
tdSql.execute(f'use {self.dbname}')
tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,new_column_dict,self.tag_dict))
tdSql.execute(f'create table {self.stbname}_1 using {self.stbname} tags({self.tag_values[0]})')
tdSql.execute(f'insert into {self.stbname}_1 values ({self.ts}, {random.randint(1, 127)}, {random.randint(1, 127)}, {random.randint(1, 127)}, {random.randint(1, 127)}, {random.randint(1, 127)}, {random.randint(1, 127)}, {random.randint(1, 127)}, {random.randint(1, 127)}, {random.randint(1, 127)}, {random.randint(1, 127)}, {random.choice(["True", "FALSE"])}, {random.randint(1, 127)}, {random.randint(1, 127)}, now)')
for i in range(self.block_update_times):
tdSql.execute(f'insert into {self.stbname}_1 values ({self.ts}, {random.randint(1, 127)}, {random.randint(1, 127)}, {random.randint(1, 127)}, {random.randint(1, 127)}, {random.randint(1, 127)}, {random.randint(1, 127)}, {random.randint(1, 127)}, {random.randint(1, 127)}, {random.randint(1, 127)}, {random.randint(1, 127)}, {random.choice(["True", "FALSE"])}, {random.randint(1, 127)}, {random.randint(1, 127)}, now)')
tdSql.query(f'select count(*) from {self.stbname}')
tdSql.query(f'select * from {self.stbname}')
def run(self): def run(self):
#!bug TD-17708 and TD-17709 #!bug TD-17708 and TD-17709
# for i in range(10): # for i in range(10):
self.update_check() self.update_check()
self.update_check_error() self.update_check_error()
self.update_10000times_and_query()
# i+=1 # i+=1
def stop(self): def stop(self):

View File

@ -653,6 +653,58 @@ class TDTestCase:
tdSql.query(f"select cast(c9 as timestamp ) as b from {self.dbname}.ct4") tdSql.query(f"select cast(c9 as timestamp ) as b from {self.dbname}.ct4")
tdSql.query(f"select cast(c9 as binary(64) ) as b from {self.dbname}.ct4") tdSql.query(f"select cast(c9 as binary(64) ) as b from {self.dbname}.ct4")
# enhance cast function coverage
tdSql.query(f"select cast(c1 as int) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c1 as bool) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c1 as tinyint) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c1 as smallint) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c1 as float) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c1 as double) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c1 as tinyint unsigned) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c1 as smallint unsigned) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c1 as int unsigned) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c2 as int) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c3 as bool) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c4 as tinyint) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c5 as smallint) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c6 as float) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c7 as double) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c8 as tinyint unsigned) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c8 as timestamp ) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c9 as timestamp ) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c9 as binary(64) ) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(abs(c2) as int) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c3 as bool) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(floor(c4) as tinyint) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c5+2 as smallint) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(2 as float) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c7 as double) as b from {self.dbname}.stb1")
tdSql.query(f"select cast('123' as tinyint unsigned) as b from {self.dbname}.stb1")
tdSql.query(f"select max(cast(abs(c2) as int)) as b from {self.dbname}.stb1")
tdSql.query(f"select log(cast(c3 as int),2) as b from {self.dbname}.stb1")
tdSql.query(f"select abs(cast(floor(c4) as tinyint)) as b from {self.dbname}.stb1")
tdSql.query(f"select last(cast(c5+2 as smallint)) as b from {self.dbname}.stb1")
tdSql.query(f"select mavg(cast(2 as float),3) as b from {self.dbname}.stb1 partition by tbname")
tdSql.query(f"select cast(c7 as double) as b from {self.dbname}.stb1 partition by tbname order by tbname")
tdSql.query(f"select cast('123' as tinyint unsigned) as b from {self.dbname}.stb1 partition by tbname")
# union with cast and common cols
tdSql.query(f"select cast(c2 as int) as b from {self.dbname}.stb1 union all select c1 from {self.dbname}.stb1 ")
tdSql.query(f"select cast(c3 as bool) as b from {self.dbname}.stb1 union all select c7 from {self.dbname}.ct1 ")
tdSql.query(f"select cast(c4 as tinyint) as b from {self.dbname}.stb1 union all select c4 from {self.dbname}.stb1")
tdSql.query(f"select cast(c5 as smallint) as b from {self.dbname}.stb1 union all select cast(c5 as smallint) as b from {self.dbname}.stb1")
tdSql.query(f"select cast(c6 as float) as b from {self.dbname}.stb1 union all select c5 from {self.dbname}.stb1")
tdSql.query(f"select cast(c7 as double) as b from {self.dbname}.stb1 union all select 123 from {self.dbname}.stb1 ")
tdSql.query(f"select cast(c8 as tinyint unsigned) as b from {self.dbname}.stb1 union all select last(cast(c8 as tinyint unsigned)) from {self.dbname}.stb1")
def run(self): def run(self):
tdSql.prepare() tdSql.prepare()

View File

@ -17,6 +17,14 @@ class TDTestCase:
self.ts = 1537146000000 self.ts = 1537146000000
self.binary_str = 'taosdata' self.binary_str = 'taosdata'
self.nchar_str = '涛思数据' self.nchar_str = '涛思数据'
self.cachemodel = None
def generateString(self, length):
chars = string.ascii_uppercase + string.ascii_lowercase
v = ""
for i in range(length):
v += random.choice(chars)
return v
def set_create_normaltable_sql(self, ntbname, column_dict): def set_create_normaltable_sql(self, ntbname, column_dict):
column_sql = '' column_sql = ''
@ -36,7 +44,8 @@ class TDTestCase:
return create_stb_sql return create_stb_sql
def last_check_stb_tb_base(self): def last_check_stb_tb_base(self):
tdSql.prepare() tdSql.execute(
f'create database if not exists db cachemodel "{self.cachemodel}"')
stbname = f'db.{tdCom.getLongName(5, "letters")}' stbname = f'db.{tdCom.getLongName(5, "letters")}'
column_dict = { column_dict = {
'col1': 'tinyint', 'col1': 'tinyint',
@ -112,7 +121,8 @@ class TDTestCase:
tdSql.execute('drop database db') tdSql.execute('drop database db')
def last_check_ntb_base(self): def last_check_ntb_base(self):
tdSql.prepare() tdSql.execute(
f'create database if not exists db cachemodel "{self.cachemodel}"')
ntbname = f'db.{tdCom.getLongName(5, "letters")}' ntbname = f'db.{tdCom.getLongName(5, "letters")}'
column_dict = { column_dict = {
'col1': 'tinyint', 'col1': 'tinyint',
@ -165,6 +175,8 @@ class TDTestCase:
# nchar # nchar
elif 'nchar' in v.lower(): elif 'nchar' in v.lower():
tdSql.checkData(0, 0, f'{self.nchar_str}{self.rowNum}') tdSql.checkData(0, 0, f'{self.nchar_str}{self.rowNum}')
tdSql.error( tdSql.error(
f"select {list(column_dict.keys())[0]} from {ntbname} where last({list(column_dict.keys())[9]})='涛思数据10'") f"select {list(column_dict.keys())[0]} from {ntbname} where last({list(column_dict.keys())[9]})='涛思数据10'")
@ -191,7 +203,7 @@ class TDTestCase:
} }
tdSql.execute( tdSql.execute(
f"create database if not exists {dbname} vgroups {vgroup_num}") f'create database if not exists {dbname} vgroups {vgroup_num} cachemodel "{self.cachemodel}"')
tdSql.execute(f'use {dbname}') tdSql.execute(f'use {dbname}')
# build 20 child tables,every table insert 10 rows # build 20 child tables,every table insert 10 rows
@ -243,10 +255,45 @@ class TDTestCase:
tdSql.checkData(0, 0, f'{self.nchar_str}{self.rowNum}') tdSql.checkData(0, 0, f'{self.nchar_str}{self.rowNum}')
tdSql.execute(f'drop database {dbname}') tdSql.execute(f'drop database {dbname}')
def last_file_check(self):
dbname = tdCom.getLongName(10, "letters")
stbname = f'{dbname}.{tdCom.getLongName(5, "letters")}'
vgroup_num = 10
buffer_size = 3
tables = 100
rows = 50
str = self.generateString(1024)
column_dict = {
'c1': 'int',
'c2': 'binary(1024)',
'c3': 'nchar(1024)'
}
tag_dict = {
't1':'int'
}
tdSql.execute(
f"create database if not exists {dbname} vgroups {vgroup_num} buffer {buffer_size}")
tdSql.execute(f'use {dbname}')
create_ntb_sql = self.set_create_stable_sql(stbname, column_dict, tag_dict)
tdSql.execute(create_ntb_sql)
for i in range(tables):
sql = f"create table {dbname}.sub_tb{i} using {stbname} tags({i})"
tdSql.execute(sql)
for j in range(rows):
tdSql.execute(f"insert into {dbname}.sub_tb{i} values(%d, %d, '%s', '%s')" % (self.ts + j, i, str, str))
tdSql.query(f"select * from {stbname}")
tdSql.checkRows(tables * rows)
def run(self): def run(self):
self.last_check_stb_tb_base() self.last_check_stb_tb_base()
self.last_check_ntb_base() self.last_check_ntb_base()
self.last_check_stb_distribute() self.last_check_stb_distribute()
self.last_file_check()
def stop(self): def stop(self):
tdSql.close() tdSql.close()

View File

@ -229,6 +229,36 @@ class TDTestCase:
tdSql.query(f"select log(c6 ,2) from {dbname}.ct3") tdSql.query(f"select log(c6 ,2) from {dbname}.ct3")
tdSql.checkRows(0) tdSql.checkRows(0)
# log used for different param types
tdSql.query(f"select log(c1,c2) from {dbname}.ct1;")
tdSql.query(f"select log(c1,c2) from {dbname}.stb1 partition by tbname order by tbname;")
tdSql.query(f"select log(c1,2) from {dbname}.ct1;")
tdSql.query(f"select log(c1,2) from {dbname}.stb1 partition by tbname order by tbname;")
tdSql.query(f"select log(2,c2) from {dbname}.ct1;")
tdSql.query(f"select log(2,c2) from {dbname}.stb1 partition by tbname order by tbname;")
tdSql.query(f"select log(2,1) from {dbname}.ct1;")
tdSql.query(f"select log(2,2) from {dbname}.stb1 partition by tbname order by tbname;")
tdSql.query(f"select log(2,floor(1)) from {dbname}.ct1;")
tdSql.query(f"select log(2,floor(2)) from {dbname}.stb1 partition by tbname order by tbname;")
tdSql.query(f"select log(abs(2),floor(1)) from {dbname}.ct1;")
tdSql.query(f"select log(abs(2),floor(2)) from {dbname}.stb1 partition by tbname order by tbname;")
tdSql.query(f"select log(abs(c2),c1) from {dbname}.ct1;")
tdSql.query(f"select log(abs(c2),c1) from {dbname}.stb1 partition by tbname order by tbname;")
tdSql.query(f"select log(c2,abs(c1)) from {dbname}.ct1;")
tdSql.query(f"select log(c2,abs(c1)) from {dbname}.stb1 partition by tbname order by tbname;")
tdSql.query(f"select log(abs(c2),2) from {dbname}.ct1;")
tdSql.query(f"select log(abs(c2),2) from {dbname}.stb1 partition by tbname order by tbname;")
# # used for regular table # # used for regular table
tdSql.query(f"select log(c1 ,2) from {dbname}.t1") tdSql.query(f"select log(c1 ,2) from {dbname}.t1")
@ -291,6 +321,7 @@ class TDTestCase:
tdSql.query(f"select log(c1, 2) from {dbname}.stb1") tdSql.query(f"select log(c1, 2) from {dbname}.stb1")
tdSql.checkRows(25) tdSql.checkRows(25)
# used for not exists table # used for not exists table
tdSql.error(f"select log(c1, 2) from {dbname}.stbbb1") tdSql.error(f"select log(c1, 2) from {dbname}.stbbb1")

View File

@ -45,6 +45,22 @@ class TDTestCase:
tdSql.query(f"select max(col1) from {dbname}.stb where col2<=5") tdSql.query(f"select max(col1) from {dbname}.stb where col2<=5")
tdSql.checkData(0,0,5) tdSql.checkData(0,0,5)
tdSql.query(f"select ts, max(col1) from {dbname}.stb")
tdSql.checkRows(1)
tdSql.checkData(0, 1, np.max(intData))
tdSql.query(f"select ts, max(col1) from {dbname}.stb_1")
tdSql.checkRows(1)
tdSql.checkData(0, 1, np.max(intData))
tdSql.query(f"select ts, min(col9) from {dbname}.stb")
tdSql.checkRows(1)
tdSql.checkData(0, 1, np.min(floatData))
tdSql.query(f"select ts, min(col9) from {dbname}.stb_1")
tdSql.checkRows(1)
tdSql.checkData(0, 1, np.min(floatData))
def max_check_ntb_base(self, dbname="db"): def max_check_ntb_base(self, dbname="db"):
tdSql.prepare() tdSql.prepare()
intData = [] intData = []

View File

@ -216,6 +216,36 @@ class TDTestCase:
tdSql.checkRows(0) tdSql.checkRows(0)
# pow used for different param types
tdSql.query(f"select pow(c1,c2) from {dbname}.ct1;")
tdSql.query(f"select pow(c1,c2) from {dbname}.stb1 partition by tbname order by tbname;")
tdSql.query(f"select pow(c1,2) from {dbname}.ct1;")
tdSql.query(f"select pow(c1,2) from {dbname}.stb1 partition by tbname order by tbname;")
tdSql.query(f"select pow(2,c2) from {dbname}.ct1;")
tdSql.query(f"select pow(2,c2) from {dbname}.stb1 partition by tbname order by tbname;")
tdSql.query(f"select pow(2,1) from {dbname}.ct1;")
tdSql.query(f"select pow(2,2) from {dbname}.stb1 partition by tbname order by tbname;")
tdSql.query(f"select pow(2,floor(1)) from {dbname}.ct1;")
tdSql.query(f"select pow(2,floor(2)) from {dbname}.stb1 partition by tbname order by tbname;")
tdSql.query(f"select pow(abs(2),floor(1)) from {dbname}.ct1;")
tdSql.query(f"select pow(abs(2),floor(2)) from {dbname}.stb1 partition by tbname order by tbname;")
tdSql.query(f"select pow(abs(c2),c1) from {dbname}.ct1;")
tdSql.query(f"select pow(abs(c2),c1) from {dbname}.stb1 partition by tbname order by tbname;")
tdSql.query(f"select pow(c2,abs(c1)) from {dbname}.ct1;")
tdSql.query(f"select pow(c2,abs(c1)) from {dbname}.stb1 partition by tbname order by tbname;")
tdSql.query(f"select pow(abs(c2),2) from {dbname}.ct1;")
tdSql.query(f"select pow(abs(c2),2) from {dbname}.stb1 partition by tbname order by tbname;")
# # used for regular table # # used for regular table
tdSql.query(f"select pow(c1 ,2) from {dbname}.t1") tdSql.query(f"select pow(c1 ,2) from {dbname}.t1")
tdSql.checkData(0, 0, None) tdSql.checkData(0, 0, None)

View File

@ -0,0 +1,721 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import random
import os
import time
import taos
import subprocess
from faker import Faker
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
from util.dnodes import tdDnodes
from util.dnodes import *
class TDTestCase:
updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"querySmaOptimize":1}
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.testcasePath = os.path.split(__file__)[0]
self.testcaseFilename = os.path.split(__file__)[-1]
os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
self.db = "ind_sel"
def dropandcreateDB_random(self,database,n,vgroups):
ts = 1630000000000
num_random = 100
fake = Faker('zh_CN')
tdSql.execute('''drop database if exists %s ;''' %database)
tdSql.execute('''create database %s keep 36500 vgroups %d ;'''%(database,vgroups))
tdSql.execute('''use %s;'''%database)
tdSql.execute('''create stable %s.stable_1 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);'''%database)
tdSql.execute('''create stable %s.stable_2 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);'''%database)
for i in range(10*n):
tdSql.execute('''create table %s.bj_%d (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp ) ;'''%(database,i))
tdSql.execute('''create table %s.sh_%d (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp ) ;'''%(database,i))
tdSql.execute('''create table %s.bj_table_%d_r (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp ) ;'''%(database,i))
tdSql.execute('''create table %s.sh_table_%d_r (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp ) ;'''%(database,i))
tdSql.execute('''create table %s.hn_table_%d_r \
(ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;'''%(database,i))
tdSql.execute('''create table %s.bj_stable_1_%d using %s.stable_1 tags('bj_stable_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
%(database,i,database,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
tdSql.execute('''create table %s.sh_table_%d_a using %s.stable_1 tags('sh_a_table_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
%(database,i,database,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
tdSql.execute('''create table %s.sh_table_%d_b using %s.stable_1 tags('sh_b_table_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
%(database,i,database,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
tdSql.execute('''create table %s.sh_table_%d_c using %s.stable_1 tags('sh_c_table_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
%(database,i,database,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
tdSql.execute('''create table %s.bj_table_%d_a using %s.stable_1 tags('bj_a_table_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
%(database,i,database,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
tdSql.execute('''create table %s.bj_table_%d_b using %s.stable_1 tags('bj_b_table_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
%(database,i,database,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
tdSql.execute('''create table %s.bj_table_%d_c using %s.stable_1 tags('bj_c_table_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
%(database,i,database,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
tdSql.execute('''create table %s.tj_table_%d_a using %s.stable_2 tags('tj_a_table_2_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
%(database,i,database,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
tdSql.execute('''create table %s.tj_table_%d_b using %s.stable_2 tags('tj_b_table_2_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
%(database,i,database,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
# insert data
for i in range(num_random*n):
tdSql.execute('''insert into %s.bj_stable_1_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double , q_bool , q_binary , q_nchar, q_ts,\
q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
% (database,ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1),
fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
tdSql.execute('''insert into %s.hn_table_1_r (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
% (database,ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1) ,
fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1) ,
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i, fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
tdSql.execute('''insert into %s.bj_stable_1_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8)\
values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
% (database,ts + i*1000, fake.random_int(min=0, max=2147483647, step=1),
fake.random_int(min=0, max=9223372036854775807, step=1),
fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) ,
fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i, fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
tdSql.execute('''insert into %s.hn_table_2_r (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
% (database,ts + i*1000, fake.random_int(min=0, max=2147483647, step=1),
fake.random_int(min=0, max=9223372036854775807, step=1),
fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) ,
fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i, fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
tdSql.execute('''insert into %s.bj_stable_1_3 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
% (database,ts + i*1000, fake.random_int(min=-0, max=2147483647, step=1),
fake.random_int(min=-0, max=9223372036854775807, step=1),
fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) ,
fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i, fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
tdSql.execute('''insert into %s.bj_stable_1_4 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
% (database,ts + i*1000 +1, fake.random_int(min=-0, max=2147483647, step=1),
fake.random_int(min=-0, max=9223372036854775807, step=1),
fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) ,
fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i, fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
tdSql.execute('''insert into %s.bj_stable_1_5 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
% (database,ts + i*1000 +10, fake.random_int(min=-0, max=2147483647, step=1),
fake.random_int(min=-0, max=9223372036854775807, step=1),
fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) ,
fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i, fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
tdSql.query("select count(*) from %s.stable_1;" %database)
tdSql.checkData(0,0,5*num_random*n)
tdSql.query("select count(*) from %s.hn_table_1_r;"%database)
tdSql.checkData(0,0,num_random*n)
def func_index_check(self,database,func,column):
fake = Faker('zh_CN')
fake_data = fake.random_int(min=1, max=20, step=1)
tdLog.info("\n=============constant(%s)_check ====================\n" %func)
tdSql.execute(" create sma index sma_index_name1 on stable_1 function(%s(%s)) interval(%dm); " %(func,column,fake_data))
sql = " select %s(%s) from %s.stable_1 interval(1m) "%(func,column,database)
tdLog.info(sql)
tdSql.query(sql)
queryRows = len(tdSql.queryResult)
for i in range(queryRows):
print("row=%d, result=%s " %(i,tdSql.queryResult[i][0]))
flush_before_value = tdSql.queryResult[i][0]
tdLog.info("\n=============flush database ====================\n")
tdSql.execute(" flush database %s;" %database)
tdSql.query(sql)
queryRows = len(tdSql.queryResult)
for i in range(queryRows):
print("row=%d, result=%s " %(i,tdSql.queryResult[i][0]))
flush_after_value = tdSql.queryResult[i][0]
self.value_check(flush_before_value,flush_after_value)
tdLog.info("\n=============drop index ====================\n")
tdSql.execute(" drop index sma_index_name1;")
tdSql.query(sql)
queryRows = len(tdSql.queryResult)
for i in range(queryRows):
print("row=%d, result=%s " %(i,tdSql.queryResult[i][0]))
drop_index_value = tdSql.queryResult[i][0]
self.value_check(flush_before_value,drop_index_value)
def constant_check(self,database,func,column):
tdLog.info("\n=============constant(%s)_check ====================\n" %column)
sql = " select %s(%s) from %s.stable_1 "%(func,column,database)
sql_no_from = " select %s(%s) "%(func,column)
tdLog.info(sql)
tdSql.query(sql)
queryRows = len(tdSql.queryResult)
for i in range(queryRows):
print("row=%d, result=%s " %(i,tdSql.queryResult[i][0]))
flush_before_value = tdSql.queryResult[i][0]
tdLog.info(sql_no_from)
tdSql.query(sql_no_from)
queryRows = len(tdSql.queryResult)
for i in range(queryRows):
print("row=%d, result=%s " %(i,tdSql.queryResult[i][0]))
flush_before_value_no_from = tdSql.queryResult[i][0]
tdLog.info("\n=============flush database ====================\n")
tdSql.execute(" flush database %s;" %database)
tdSql.query(sql)
queryRows = len(tdSql.queryResult)
for i in range(queryRows):
print("row=%d, result=%s " %(i,tdSql.queryResult[i][0]))
flush_after_value = tdSql.queryResult[i][0]
self.value_check(flush_before_value,flush_after_value)
tdSql.query(sql_no_from)
queryRows = len(tdSql.queryResult)
for i in range(queryRows):
print("row=%d, result=%s " %(i,tdSql.queryResult[i][0]))
flush_after_value_no_from = tdSql.queryResult[i][0]
self.value_check(flush_before_value_no_from,flush_after_value_no_from)
def constant_table_check(self,database,func,column):
tdLog.info("\n=============constant(%s)_check ====================\n" %column)
sql = " select %s(%s) from %s.bj_stable_1_1 "%(func,column,database)
sql_no_from = " select %s(%s) "%(func,column)
tdLog.info(sql)
tdSql.query(sql)
queryRows = len(tdSql.queryResult)
for i in range(queryRows):
print("row=%d, result=%s " %(i,tdSql.queryResult[i][0]))
flush_before_value = tdSql.queryResult[i][0]
tdLog.info(sql_no_from)
tdSql.query(sql_no_from)
queryRows = len(tdSql.queryResult)
for i in range(queryRows):
print("row=%d, result=%s " %(i,tdSql.queryResult[i][0]))
flush_before_value_no_from = tdSql.queryResult[i][0]
tdLog.info("\n=============flush database ====================\n")
tdSql.execute(" flush database %s;" %database)
tdSql.query(sql)
queryRows = len(tdSql.queryResult)
for i in range(queryRows):
print("row=%d, result=%s " %(i,tdSql.queryResult[i][0]))
flush_after_value = tdSql.queryResult[i][0]
self.value_check(flush_before_value,flush_after_value)
tdSql.query(sql_no_from)
queryRows = len(tdSql.queryResult)
for i in range(queryRows):
print("row=%d, result=%s " %(i,tdSql.queryResult[i][0]))
flush_after_value_no_from = tdSql.queryResult[i][0]
self.value_check(flush_before_value_no_from,flush_after_value_no_from)

    def constant_str_check(self,database,func,column):
        tdLog.info("\n=============constant(%s)_check ====================\n" %column)
        sql = " select %s('%s') from %s.stable_1 "%(func,column,database)
        tdLog.info(sql)
        tdSql.query(sql)
        queryRows = len(tdSql.queryResult)
        for i in range(queryRows):
            print("row=%d, result=%s " %(i,tdSql.queryResult[i][0]))
            flush_before_value = tdSql.queryResult[i][0]
        tdLog.info("\n=============flush database ====================\n")
        tdSql.execute(" flush database %s;" %database)
        tdSql.query(sql)
        queryRows = len(tdSql.queryResult)
        for i in range(queryRows):
            print("row=%d, result=%s " %(i,tdSql.queryResult[i][0]))
            flush_after_value = tdSql.queryResult[i][0]
        self.value_check(flush_before_value,flush_after_value)

    def constant_error_check(self,database,func,column):
        tdLog.info("\n=============constant(%s)_check ====================\n" %column)
        error_sql = " select %s('%s') "%(func,column)
        tdLog.info(error_sql)
        tdSql.error(error_sql)
        tdLog.info("\n=============flush database ====================\n")
        tdSql.execute(" flush database %s;" %database)
        tdSql.error(error_sql)
        error_sql1 = " select %s('%s') from %s.stable_1 "%(func,column,database)
        tdLog.info(error_sql1)
        tdSql.error(error_sql1)
        error_sql2 = " select %s('%s') from %s.bj_stable_1_1 "%(func,column,database)
        tdLog.info(error_sql2)
        tdSql.error(error_sql2)
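
    # Note: the constant-argument helpers above share one flow: run the query, force the
    # data from memory to disk with "flush database", rerun the identical query, and
    # compare the two results. A minimal sketch of that shared pattern, assuming the same
    # tdSql helpers used in this file (the helper name "flush_compare" is hypothetical
    # and not part of the original test):
    #
    # def flush_compare(self, database, sql):
    #     tdSql.query(sql)
    #     before = [row[0] for row in tdSql.queryResult]
    #     tdSql.execute(" flush database %s;" % database)
    #     tdSql.query(sql)
    #     after = [row[0] for row in tdSql.queryResult]
    #     self.value_check(before, after)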

    def derivative_sql(self,database):
        fake = Faker('zh_CN')
        fake_data = fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1)
        tdLog.info("\n=============derivative sql ====================\n" )
        sql = " select derivative(%s,%ds,0) from %s.stable_1 "%('q_smallint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,0) from %s.stable_1 "%('q_bigint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,0) from %s.stable_1 "%('q_tinyint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,0) from %s.stable_1 "%('q_int',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,0) from %s.stable_1 "%('q_float',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,0) from %s.stable_1 "%('q_double',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 "%('q_smallint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 "%('q_bigint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 "%('q_tinyint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 "%('q_int',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 "%('q_float',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 "%('q_double',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,0) from %s.stable_1 order by ts"%('q_smallint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,0) from %s.stable_1 order by ts "%('q_bigint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,0) from %s.stable_1 order by ts "%('q_tinyint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,0) from %s.stable_1 order by ts "%('q_int',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,0) from %s.stable_1 order by ts "%('q_float',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,0) from %s.stable_1 order by ts "%('q_double',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 order by ts "%('q_smallint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 order by ts "%('q_bigint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 order by ts "%('q_tinyint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 order by ts "%('q_int',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 order by ts "%('q_float',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 order by ts "%('q_double',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,0) from %s.stable_1 order by ts desc"%('q_smallint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,0) from %s.stable_1 order by ts desc "%('q_bigint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,0) from %s.stable_1 order by ts desc "%('q_tinyint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,0) from %s.stable_1 order by ts desc "%('q_int',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,0) from %s.stable_1 order by ts desc "%('q_float',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,0) from %s.stable_1 order by ts desc "%('q_double',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 order by ts desc "%('q_smallint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 order by ts desc "%('q_bigint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 order by ts desc "%('q_tinyint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 order by ts desc "%('q_int',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 order by ts desc "%('q_float',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 order by ts desc"%('q_double',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 order by ts desc limit 3000"%('q_smallint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 order by ts desc limit 3000 "%('q_bigint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 order by ts desc limit 3000 "%('q_tinyint',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 order by ts desc limit 3000 "%('q_int',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 order by ts desc limit 3000 "%('q_float',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
        sql = " select derivative(%s,%ds,1) from %s.stable_1 order by ts desc limit 3000"%('q_double',fake_data,database)
        self.derivative_data_check("%s" %self.db,"%s" %sql)
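
    # The 42 statements above vary only in the column, the ignore_negative flag and the
    # ORDER BY / LIMIT suffix, so the same coverage could be generated with nested loops;
    # a compact sketch under that assumption (illustrative only -- note the original also
    # skips the ignore_negative=0 variant for the LIMIT 3000 suffix):
    #
    # for suffix in ("", "order by ts", "order by ts desc", "order by ts desc limit 3000"):
    #     for ignore_negative in (0, 1):
    #         for col in ('q_smallint', 'q_bigint', 'q_tinyint', 'q_int', 'q_float', 'q_double'):
    #             sql = " select derivative(%s,%ds,%d) from %s.stable_1 %s" % (col, fake_data, ignore_negative, database, suffix)
    #             self.derivative_data_check("%s" % self.db, sql)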

    def derivative_data_check(self,database,sql):
        tdLog.info("\n=============derivative_data(%s)_check ====================\n" %sql)
        tdLog.info(sql)
        tdSql.query(sql)
        queryRows = len(tdSql.queryResult)
        for i in range(queryRows):
            #print("row=%d, result=%s " %(i,tdSql.queryResult[i][0]))
            flush_before_value = tdSql.queryResult[i][0]
            #flush_before_value1 = tdSql.queryResult[i][1]
        tdLog.info("\n=============flush database ====================\n")
        tdSql.execute(" flush database %s;" %database)
        tdSql.query(sql)
        queryRows = len(tdSql.queryResult)
        for i in range(queryRows):
            #print("row=%d, result=%s " %(i,tdSql.queryResult[i][0]))
            flush_after_value = tdSql.queryResult[i][0]
            #flush_after_value1 = tdSql.queryResult[i][1]
        self.value_check(flush_before_value,flush_after_value)
        #self.value_check(flush_before_value1,flush_after_value1)

    def value_check(self,flush_before_value,flush_after_value):
        # if flush_before_value==flush_after_value:
        #     tdLog.info(f"checkEqual success, flush_before_value={flush_before_value},flush_after_value={flush_after_value}")
        # else :
        #     tdLog.exit(f"checkEqual error, flush_before_value=={flush_before_value},flush_after_value={flush_after_value}")
        pass
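
    # value_check is currently a no-op: the equality assertion is commented out and the
    # body is just "pass", so flush/no-flush mismatches are only visible in the printed
    # rows. A minimal sketch of an enabled check, assuming the tdLog.info/tdLog.exit API
    # already used in this file (the method name "value_check_strict" is hypothetical):
    #
    # def value_check_strict(self, flush_before_value, flush_after_value):
    #     if flush_before_value == flush_after_value:
    #         tdLog.info(f"checkEqual success, {flush_before_value} == {flush_after_value}")
    #     else:
    #         tdLog.exit(f"checkEqual error, {flush_before_value} != {flush_after_value}")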

    def run(self):
        fake = Faker('zh_CN')
        fake_data = fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1)
        fake_float = fake.pyfloat()
        fake_str = fake.pystr()
        startTime = time.time()
        os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
        self.dropandcreateDB_random("%s" %self.db, 1,2)
        #TD-19818
        self.func_index_check("%s" %self.db,'max','q_int')
        self.func_index_check("%s" %self.db,'max','q_bigint')
        self.func_index_check("%s" %self.db,'max','q_smallint')
        self.func_index_check("%s" %self.db,'max','q_tinyint')
        self.func_index_check("%s" %self.db,'max','q_float')
        self.func_index_check("%s" %self.db,'max','q_double')
        self.func_index_check("%s" %self.db,'min','q_int')
        self.func_index_check("%s" %self.db,'min','q_bigint')
        self.func_index_check("%s" %self.db,'min','q_smallint')
        self.func_index_check("%s" %self.db,'min','q_tinyint')
        self.func_index_check("%s" %self.db,'min','q_float')
        self.func_index_check("%s" %self.db,'min','q_double')
        self.func_index_check("%s" %self.db,'SUM','q_int')
        self.func_index_check("%s" %self.db,'SUM','q_bigint')
        self.func_index_check("%s" %self.db,'SUM','q_smallint')
        self.func_index_check("%s" %self.db,'SUM','q_tinyint')
        self.func_index_check("%s" %self.db,'SUM','q_float')
        self.func_index_check("%s" %self.db,'SUM','q_double')
        #TD-19854
        self.constant_check("%s" %self.db,'count','%d' %fake_data)
        self.constant_check("%s" %self.db,'count','%f' %fake_float)
        self.constant_str_check("%s" %self.db,'count','%s' %fake_str)
        self.constant_check("%s" %self.db,'count','(cast(%d as int))' %fake_data)
        self.constant_check("%s" %self.db,'count','(cast(%f as int))' %fake_float)
        self.constant_check("%s" %self.db,'count','(cast(%d as smallint))' %fake_data)
        self.constant_check("%s" %self.db,'count','(cast(%f as smallint))' %fake_float)
        self.constant_check("%s" %self.db,'count','(cast(%d as bigint))' %fake_data)
        self.constant_check("%s" %self.db,'count','(cast(%f as bigint))' %fake_float)
        self.constant_check("%s" %self.db,'count','(cast(%d as tinyint))' %fake_data)
        self.constant_check("%s" %self.db,'count','(cast(%f as tinyint))' %fake_float)
        self.constant_check("%s" %self.db,'count','(cast(%d as float))' %fake_data)
        self.constant_check("%s" %self.db,'count','(cast(%f as float))' %fake_float)
        self.constant_check("%s" %self.db,'count','(cast(%d as double))' %fake_data)
        self.constant_check("%s" %self.db,'count','(cast(%f as double))' %fake_float)
        self.constant_check("%s" %self.db,'sum','%d' %fake_data)
        self.constant_check("%s" %self.db,'sum','%f' %fake_float)
        self.constant_error_check("%s" %self.db,'sum','%s' %fake_str)
        self.constant_check("%s" %self.db,'sum','(cast(%d as int))' %fake_data)
        self.constant_check("%s" %self.db,'sum','(cast(%f as int))' %fake_float)
        self.constant_check("%s" %self.db,'sum','(cast(%d as smallint))' %fake_data)
        self.constant_check("%s" %self.db,'sum','(cast(%f as smallint))' %fake_float)
        self.constant_check("%s" %self.db,'sum','(cast(%d as bigint))' %fake_data)
        self.constant_check("%s" %self.db,'sum','(cast(%f as bigint))' %fake_float)
        self.constant_check("%s" %self.db,'sum','(cast(%d as tinyint))' %fake_data)
        self.constant_check("%s" %self.db,'sum','(cast(%f as tinyint))' %fake_float)
        self.constant_check("%s" %self.db,'sum','(cast(%d as float))' %fake_data)
        self.constant_check("%s" %self.db,'sum','(cast(%f as float))' %fake_float)
        self.constant_check("%s" %self.db,'sum','(cast(%d as double))' %fake_data)
        self.constant_check("%s" %self.db,'sum','(cast(%f as double))' %fake_float)
        self.constant_check("%s" %self.db,'avg','%d' %fake_data)
        self.constant_check("%s" %self.db,'avg','%f' %fake_float)
        self.constant_error_check("%s" %self.db,'avg','%s' %fake_str)
        self.constant_check("%s" %self.db,'avg','(cast(%d as int))' %fake_data)
        self.constant_check("%s" %self.db,'avg','(cast(%f as int))' %fake_float)
        self.constant_check("%s" %self.db,'avg','(cast(%d as smallint))' %fake_data)
        self.constant_check("%s" %self.db,'avg','(cast(%f as smallint))' %fake_float)
        self.constant_check("%s" %self.db,'avg','(cast(%d as bigint))' %fake_data)
        self.constant_check("%s" %self.db,'avg','(cast(%f as bigint))' %fake_float)
        self.constant_check("%s" %self.db,'avg','(cast(%d as tinyint))' %fake_data)
        self.constant_check("%s" %self.db,'avg','(cast(%f as tinyint))' %fake_float)
        self.constant_check("%s" %self.db,'avg','(cast(%d as float))' %fake_data)
        self.constant_check("%s" %self.db,'avg','(cast(%f as float))' %fake_float)
        self.constant_check("%s" %self.db,'avg','(cast(%d as double))' %fake_data)
        self.constant_check("%s" %self.db,'avg','(cast(%f as double))' %fake_float)
        self.constant_check("%s" %self.db,'stddev','%d' %fake_data)
        self.constant_check("%s" %self.db,'stddev','%f' %fake_float)
        self.constant_error_check("%s" %self.db,'stddev','%s' %fake_str)
        self.constant_check("%s" %self.db,'stddev','(cast(%d as int))' %fake_data)
        self.constant_check("%s" %self.db,'stddev','(cast(%f as int))' %fake_float)
        self.constant_check("%s" %self.db,'stddev','(cast(%d as smallint))' %fake_data)
        self.constant_check("%s" %self.db,'stddev','(cast(%f as smallint))' %fake_float)
        self.constant_check("%s" %self.db,'stddev','(cast(%d as bigint))' %fake_data)
        self.constant_check("%s" %self.db,'stddev','(cast(%f as bigint))' %fake_float)
        self.constant_check("%s" %self.db,'stddev','(cast(%d as tinyint))' %fake_data)
        self.constant_check("%s" %self.db,'stddev','(cast(%f as tinyint))' %fake_float)
        self.constant_check("%s" %self.db,'stddev','(cast(%d as float))' %fake_data)
        self.constant_check("%s" %self.db,'stddev','(cast(%f as float))' %fake_float)
        self.constant_check("%s" %self.db,'stddev','(cast(%d as double))' %fake_data)
        self.constant_check("%s" %self.db,'stddev','(cast(%f as double))' %fake_float)
        self.constant_check("%s" %self.db,'spread','%d' %fake_data)
        self.constant_check("%s" %self.db,'spread','%f' %fake_float)
        self.constant_error_check("%s" %self.db,'spread','%s' %fake_str)
        self.constant_check("%s" %self.db,'spread','(cast(%d as int))' %fake_data)
        self.constant_check("%s" %self.db,'spread','(cast(%f as int))' %fake_float)
        self.constant_check("%s" %self.db,'spread','(cast(%d as smallint))' %fake_data)
        self.constant_check("%s" %self.db,'spread','(cast(%f as smallint))' %fake_float)
        self.constant_check("%s" %self.db,'spread','(cast(%d as bigint))' %fake_data)
        self.constant_check("%s" %self.db,'spread','(cast(%f as bigint))' %fake_float)
        self.constant_check("%s" %self.db,'spread','(cast(%d as tinyint))' %fake_data)
        self.constant_check("%s" %self.db,'spread','(cast(%f as tinyint))' %fake_float)
        self.constant_check("%s" %self.db,'spread','(cast(%d as float))' %fake_data)
        self.constant_check("%s" %self.db,'spread','(cast(%f as float))' %fake_float)
        self.constant_check("%s" %self.db,'spread','(cast(%d as double))' %fake_data)
        self.constant_check("%s" %self.db,'spread','(cast(%f as double))' %fake_float)
        self.constant_check("%s" %self.db,'hyperloglog','%d' %fake_data)
        self.constant_check("%s" %self.db,'hyperloglog','%f' %fake_float)
        self.constant_str_check("%s" %self.db,'hyperloglog','%s' %fake_str)
        self.constant_check("%s" %self.db,'hyperloglog','(cast(%d as int))' %fake_data)
        self.constant_check("%s" %self.db,'hyperloglog','(cast(%f as int))' %fake_float)
        self.constant_check("%s" %self.db,'hyperloglog','(cast(%d as smallint))' %fake_data)
        self.constant_check("%s" %self.db,'hyperloglog','(cast(%f as smallint))' %fake_float)
        self.constant_check("%s" %self.db,'hyperloglog','(cast(%d as bigint))' %fake_data)
        self.constant_check("%s" %self.db,'hyperloglog','(cast(%f as bigint))' %fake_float)
        self.constant_check("%s" %self.db,'hyperloglog','(cast(%d as tinyint))' %fake_data)
        self.constant_check("%s" %self.db,'hyperloglog','(cast(%f as tinyint))' %fake_float)
        self.constant_check("%s" %self.db,'hyperloglog','(cast(%d as float))' %fake_data)
        self.constant_check("%s" %self.db,'hyperloglog','(cast(%f as float))' %fake_float)
        self.constant_check("%s" %self.db,'hyperloglog','(cast(%d as double))' %fake_data)
        self.constant_check("%s" %self.db,'hyperloglog','(cast(%f as double))' %fake_float)
        self.constant_error_check("%s" %self.db,'elapsed','%d' %fake_data)
        self.constant_error_check("%s" %self.db,'elapsed','%f' %fake_float)
        self.constant_error_check("%s" %self.db,'elapsed','%s' %fake_str)
        self.constant_error_check("%s" %self.db,'elapsed','(cast(%d as int))' %fake_data)
        self.constant_error_check("%s" %self.db,'elapsed','(cast(%f as int))' %fake_float)
        self.constant_error_check("%s" %self.db,'elapsed','(cast(%d as smallint))' %fake_data)
        self.constant_error_check("%s" %self.db,'elapsed','(cast(%f as smallint))' %fake_float)
        self.constant_error_check("%s" %self.db,'elapsed','(cast(%d as bigint))' %fake_data)
        self.constant_error_check("%s" %self.db,'elapsed','(cast(%f as bigint))' %fake_float)
        self.constant_error_check("%s" %self.db,'elapsed','(cast(%d as tinyint))' %fake_data)
        self.constant_error_check("%s" %self.db,'elapsed','(cast(%f as tinyint))' %fake_float)
        self.constant_error_check("%s" %self.db,'elapsed','(cast(%d as float))' %fake_data)
        self.constant_error_check("%s" %self.db,'elapsed','(cast(%f as float))' %fake_float)
        self.constant_error_check("%s" %self.db,'elapsed','(cast(%d as double))' %fake_data)
        self.constant_error_check("%s" %self.db,'elapsed','(cast(%f as double))' %fake_float)
        percentile_data = fake.random_int(min=-0, max=100, step=1)
        self.constant_table_check("%s" %self.db,'percentile','%d,%d' %(fake_data,percentile_data))
        self.constant_table_check("%s" %self.db,'percentile','%f,%d' %(fake_float,percentile_data))
        self.constant_error_check("%s" %self.db,'percentile','%s,%d' %(fake_str,percentile_data))
        self.constant_table_check("%s" %self.db,'percentile','(cast(%d as int)),%d' %(fake_data,percentile_data))
        self.constant_table_check("%s" %self.db,'percentile','(cast(%f as int)),%d' %(fake_float,percentile_data))
        self.constant_table_check("%s" %self.db,'percentile','(cast(%d as smallint)),%d' %(fake_data,percentile_data))
        self.constant_table_check("%s" %self.db,'percentile','(cast(%f as smallint)),%d' %(fake_float,percentile_data))
        self.constant_table_check("%s" %self.db,'percentile','(cast(%d as bigint)),%d' %(fake_data,percentile_data))
        self.constant_table_check("%s" %self.db,'percentile','(cast(%f as bigint)),%d' %(fake_float,percentile_data))
        self.constant_table_check("%s" %self.db,'percentile','(cast(%d as tinyint)),%d' %(fake_data,percentile_data))
        self.constant_table_check("%s" %self.db,'percentile','(cast(%f as tinyint)),%d' %(fake_float,percentile_data))
        self.constant_table_check("%s" %self.db,'percentile','(cast(%d as float)),%d' %(fake_data,percentile_data))
        self.constant_table_check("%s" %self.db,'percentile','(cast(%f as float)),%d' %(fake_float,percentile_data))
        self.constant_table_check("%s" %self.db,'percentile','(cast(%d as double)),%d' %(fake_data,percentile_data))
        self.constant_table_check("%s" %self.db,'percentile','(cast(%f as double)),%d' %(fake_float,percentile_data))
        self.constant_table_check("%s" %self.db,'apercentile','%d,%d' %(fake_data,percentile_data))
        self.constant_check("%s" %self.db,'apercentile','%f,%d' %(fake_float,percentile_data))
        self.constant_error_check("%s" %self.db,'apercentile','%s,%d' %(fake_str,percentile_data))
        self.constant_table_check("%s" %self.db,'apercentile','(cast(%d as int)),%d' %(fake_data,percentile_data))
        self.constant_check("%s" %self.db,'apercentile','(cast(%f as int)),%d' %(fake_float,percentile_data))
        self.constant_check("%s" %self.db,'apercentile','(cast(%d as smallint)),%d' %(fake_data,percentile_data))
        self.constant_table_check("%s" %self.db,'apercentile','(cast(%f as smallint)),%d' %(fake_float,percentile_data))
        self.constant_table_check("%s" %self.db,'apercentile','(cast(%d as bigint)),%d' %(fake_data,percentile_data))
        self.constant_check("%s" %self.db,'apercentile','(cast(%f as bigint)),%d' %(fake_float,percentile_data))
        self.constant_check("%s" %self.db,'apercentile','(cast(%d as tinyint)),%d' %(fake_data,percentile_data))
        self.constant_table_check("%s" %self.db,'apercentile','(cast(%f as tinyint)),%d' %(fake_float,percentile_data))
        self.constant_table_check("%s" %self.db,'apercentile','(cast(%d as float)),%d' %(fake_data,percentile_data))
        self.constant_check("%s" %self.db,'apercentile','(cast(%f as float)),%d' %(fake_float,percentile_data))
        self.constant_check("%s" %self.db,'apercentile','(cast(%d as double)),%d' %(fake_data,percentile_data))
        self.constant_table_check("%s" %self.db,'apercentile','(cast(%f as double)),%d' %(fake_float,percentile_data))
        percentile_data = fake.random_int(min=-0, max=1, step=1)
        self.constant_table_check("%s" %self.db,'histogram',"%d,'user_input','[-10000,0,10000]',%d" %(fake_data,percentile_data))
        self.constant_check("%s" %self.db,'histogram',"%f,'user_input','[-10000,0,10000]',%d" %(fake_float,percentile_data))
        self.constant_error_check("%s" %self.db,'histogram',"%s,'user_input','[-10000,0,10000]',%d" %(fake_str,percentile_data))
        self.constant_table_check("%s" %self.db,'histogram',"(cast(%d as int)),'user_input','[-10000,0,10000]',%d" %(fake_data,percentile_data))
        self.constant_check("%s" %self.db,'histogram',"(cast(%f as int)),'user_input','[-10000,0,10000]',%d" %(fake_float,percentile_data))
        self.constant_check("%s" %self.db,'histogram',"(cast(%d as smallint)),'user_input','[-10000,0,10000]',%d" %(fake_data,percentile_data))
        self.constant_table_check("%s" %self.db,'histogram',"(cast(%f as smallint)),'user_input','[-10000,0,10000]',%d" %(fake_float,percentile_data))
        self.constant_table_check("%s" %self.db,'histogram',"(cast(%d as bigint)),'user_input','[-10000,0,10000]',%d" %(fake_data,percentile_data))
        self.constant_check("%s" %self.db,'histogram',"(cast(%f as bigint)),'user_input','[-10000,0,10000]',%d" %(fake_float,percentile_data))
        self.constant_check("%s" %self.db,'histogram',"(cast(%d as tinyint)),'user_input','[-10000,0,10000]',%d" %(fake_data,percentile_data))
        self.constant_table_check("%s" %self.db,'histogram',"(cast(%f as tinyint)),'user_input','[-10000,0,10000]',%d" %(fake_float,percentile_data))
        self.constant_table_check("%s" %self.db,'histogram',"(cast(%d as float)),'user_input','[-10000,0,10000]',%d" %(fake_data,percentile_data))
        self.constant_check("%s" %self.db,'histogram',"(cast(%f as float)),'user_input','[-10000,0,10000]',%d" %(fake_float,percentile_data))
        self.constant_check("%s" %self.db,'histogram',"(cast(%d as double)),'user_input','[-10000,0,10000]',%d" %(fake_data,percentile_data))
        self.constant_table_check("%s" %self.db,'histogram',"(cast(%f as double)),'user_input','[-10000,0,10000]',%d" %(fake_float,percentile_data))
        #TD-19843
        self.derivative_sql("%s" %self.db)
        #taos -f sql
        print("taos -f sql start!")
        taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename)
        _ = subprocess.check_output(taos_cmd1, shell=True)
        print("taos -f sql over!")
        endTime = time.time()
        print("total time %ds" % (endTime - startTime))

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@@ -87,7 +87,7 @@ class TDTestCase:
 'rowsPerTbl': 1000,
 'batchNum': 100,
 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
-'pollDelay': 10,
+'pollDelay': 30,
 'showMsg': 1,
 'showRow': 1,
 'snapshot': 0}

@@ -173,7 +173,7 @@ class TDTestCase:
 'rowsPerTbl': 1000,
 'batchNum': 100,
 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
-'pollDelay': 20,
+'pollDelay': 30,
 'showMsg': 1,
 'showRow': 1,
 'snapshot': 0}


@@ -40,9 +40,9 @@ class TDTestCase:
 'ctbStartIdx': 0,
 'ctbNum': 100,
 'rowsPerTbl': 1000,
-'batchNum': 10,
+'batchNum': 100,
 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
-'pollDelay': 3,
+'pollDelay': 30,
 'showMsg': 1,
 'showRow': 1,
 'snapshot': 0}

@@ -87,7 +87,7 @@ class TDTestCase:
 'rowsPerTbl': 1000,
 'batchNum': 100,
 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
-'pollDelay': 10,
+'pollDelay': 30,
 'showMsg': 1,
 'showRow': 1,
 'snapshot': 0}

@@ -101,6 +101,7 @@ class TDTestCase:
 topicFromStb = 'topic_stb'
 queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
 sqlString = "create topic %s as stable %s.%s" %(topicFromStb, paraDict['dbName'], paraDict['stbName'])
+#sqlString = "create topic %s as %s" %(topicFromStb, queryString)
 tdLog.info("create topic sql: %s"%sqlString)
 tdSql.execute(sqlString)

@@ -111,7 +112,7 @@ class TDTestCase:
 ifManualCommit = 0
 keyList = 'group.id:cgrp1,\
 enable.auto.commit:true,\
-auto.commit.interval.ms:1000,\
+auto.commit.interval.ms:200,\
 auto.offset.reset:latest'
 tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

@@ -119,7 +120,15 @@ class TDTestCase:
 tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
 # time.sleep(3)
-tmqCom.getStartCommitNotifyFromTmqsim()
+tmqCom.getStartCommitNotifyFromTmqsim('cdb',1)
+tdLog.info("create some new child table and insert data for latest mode")
+paraDict["batchNum"] = 100
+paraDict["ctbPrefix"] = 'newCtb'
+paraDict["ctbNum"] = 10
+paraDict["rowsPerTbl"] = 10
+tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
 tdLog.info("================= restart dnode ===========================")
 tdDnodes.stoptaosd(1)
 tdDnodes.starttaosd(1)

@@ -132,8 +141,7 @@ class TDTestCase:
 for i in range(expectRows):
 totalConsumeRows += resultList[i]
-tdSql.query(queryString)
-totalRowsFromQury = tdSql.getRows()
+totalRowsFromQury = paraDict["ctbNum"] * paraDict["rowsPerTbl"]
 tdLog.info("act consume rows: %d, act query rows: %d"%(totalConsumeRows, totalRowsFromQury))
 if (totalConsumeRows < totalRowsFromQury):

@@ -161,7 +169,7 @@ class TDTestCase:
 'rowsPerTbl': 1000,
 'batchNum': 100,
 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
-'pollDelay': 10,
+'pollDelay': 30,
 'showMsg': 1,
 'showRow': 1,
 'snapshot': 0}

@@ -185,19 +193,25 @@ class TDTestCase:
 ifManualCommit = 0
 keyList = 'group.id:cgrp1,\
 enable.auto.commit:true,\
-auto.commit.interval.ms:1000,\
+auto.commit.interval.ms:200,\
 auto.offset.reset:latest'
 tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
 tdLog.info("start consume processor")
 tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-# time.sleep(3)
-tmqCom.getStartCommitNotifyFromTmqsim()
+tmqCom.getStartCommitNotifyFromTmqsim('cdb',1)
+tdLog.info("create some new child table and insert data for latest mode")
+paraDict["batchNum"] = 100
+paraDict["ctbPrefix"] = 'newCtb'
+paraDict["ctbNum"] = 10
+paraDict["rowsPerTbl"] = 10
+tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
 tdLog.info("================= restart dnode ===========================")
 tdDnodes.stoptaosd(1)
 tdDnodes.starttaosd(1)
-# time.sleep(3)
 tdLog.info(" restart taosd end and wait to check consume result")
 expectRows = 1

@@ -206,8 +220,7 @@ class TDTestCase:
 for i in range(expectRows):
 totalConsumeRows += resultList[i]
-tdSql.query(queryString)
-totalRowsFromQury = tdSql.getRows()
+totalRowsFromQury = paraDict["ctbNum"] * paraDict["rowsPerTbl"]
 tdLog.info("act consume rows: %d, act query rows: %d"%(totalConsumeRows, totalRowsFromQury))
 if (totalConsumeRows < totalRowsFromQury):


@@ -11,6 +11,8 @@ from util.log import *
 from util.sql import *
 from util.cases import *
 from util.dnodes import *
+sys.path.append("./7-tmq")
+from tmqCommon import *
 class actionType(Enum):

@@ -193,32 +195,41 @@ class TDTestCase:
 and restart a consumption process to complete a consumption
 '''
 tdLog.printNoPrefix("======== test case 1: ")
-self.initConsumerTable()
+tmqCom.initConsumerTable()
+#self.initConsumerTable()
 # create and start thread
-parameterDict = {'cfg': '', \
-'actionType': 0, \
-'dbName': 'db3', \
-'dropFlag': 1, \
-'vgroups': 4, \
-'replica': 1, \
-'stbName': 'stb1', \
-'ctbNum': 10, \
-'rowsPerTbl': 20000, \
-'batchNum': 100, \
-'startTs': 1640966400000} # 2022-01-01 00:00:00.000
-parameterDict['cfg'] = cfgPath
+paraDict = {'dbName': 'dbt',
+'dropFlag': 1,
+'event': '',
+'vgroups': 4,
+'stbName': 'stb',
+'colPrefix': 'c',
+'tagPrefix': 't',
+'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
+'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
+'ctbPrefix': 'ctb',
+'ctbStartIdx': 0,
+'ctbNum': 10,
+'rowsPerTbl': 20000,
+'batchNum': 1000,
+'startTs': 1640966400000, # 2022-01-01 00:00:00.000
+'pollDelay': 30,
+'showMsg': 1,
+'showRow': 1,
+'snapshot': 0}
+paraDict['cfg'] = cfgPath
-self.create_database(tdSql, parameterDict["dbName"])
+self.create_database(tdSql, paraDict["dbName"])
-self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
+self.create_stable(tdSql, paraDict["dbName"], paraDict["stbName"])
-self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+self.create_ctables(tdSql, paraDict["dbName"], paraDict["stbName"], paraDict["ctbNum"])
-self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+self.insert_data(tdSql,paraDict["dbName"],paraDict["stbName"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
 tdLog.info("create topics from stb1")
 topicFromStb1 = 'topic_stb1'
-tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
+tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, paraDict['dbName'], paraDict['stbName']))
 consumerId = 0
 # expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
 expectrowcnt = 90000000000

@@ -229,41 +240,45 @@ class TDTestCase:
 enable.auto.commit:false,\
 auto.commit.interval.ms:6000,\
 auto.offset.reset:earliest'
-self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+#self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
 tdLog.info("start consume processor")
 pollDelay = 9000000 # Forever loop
 showMsg = 1
 showRow = 1
-self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
+#self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
+tdLog.info("start consume processor")
+tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-time.sleep(3)
+#time.sleep(3)
+tmqCom.getStartConsumeNotifyFromTmqsim()
 tdLog.info("================= stop dnode, and remove data file, then start dnode ===========================")
 tdDnodes.stop(1)
-# time.sleep(5)
+time.sleep(5)
 dataPath = buildPath + "/../sim/dnode1/data/*"
 shellCmd = 'rm -rf ' + dataPath
 tdLog.info(shellCmd)
 os.system(shellCmd)
 #tdDnodes.start(1)
 tdDnodes.starttaosd(1)
-time.sleep(2)
+time.sleep(5)
 ######### redo to consume
 self.initConsumerTable()
-self.create_database(tdSql, parameterDict["dbName"])
+self.create_database(tdSql, paraDict["dbName"])
-self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
+self.create_stable(tdSql, paraDict["dbName"], paraDict["stbName"])
-self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+self.create_ctables(tdSql, paraDict["dbName"], paraDict["stbName"], paraDict["ctbNum"])
-self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+self.insert_data(tdSql,paraDict["dbName"],paraDict["stbName"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
 tdLog.info("create topics from stb1")
 topicFromStb1 = 'topic_stb1'
-tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
+tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, paraDict['dbName'], paraDict['stbName']))
 consumerId = 0
-expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
+expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
 topicList = topicFromStb1
 ifcheckdata = 0
 ifManualCommit = 0

@@ -271,13 +286,17 @@ class TDTestCase:
 enable.auto.commit:false,\
 auto.commit.interval.ms:6000,\
 auto.offset.reset:earliest'
-self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+#self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
 tdLog.info("start consume processor")
 pollDelay = 20
 showMsg = 1
 showRow = 1
-self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
+paraDict['pollDelay'] = 20
+#self.startTmqSimProcess(buildPath,cfgPath,pollDelay,paraDict["dbName"],showMsg, showRow)
+tdLog.info("start consume processor")
+tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
 expectRows = 1
 resultList = self.selectConsumeResult(expectRows)


@@ -41,7 +41,7 @@ class TDTestCase:
 'rowsPerTbl': 4000,
 'batchNum': 15,
 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
-'pollDelay': 20,
+'pollDelay': 30,
 'showMsg': 1,
 'showRow': 1}

@@ -124,7 +124,7 @@ class TDTestCase:
 tdLog.info("async insert data")
 pThread = tmqCom.asyncInsertData(paraDict)
-tmqCom.getStartConsumeNotifyFromTmqsim();
+tmqCom.getStartConsumeNotifyFromTmqsim()
 #time.sleep(5)
 tdLog.info("check show consumers")
 tdSql.query("show consumers")


@@ -252,7 +252,7 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 5 -
 python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 6 -M 3 -C 5
 # BUG python3 ./test.py -f 6-cluster/5dnode3mnodeStopInsert.py
-# TD-19646 python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5
+python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5
 # TD-19646 python3 test.py -f 6-cluster/5dnode3mnodeStopConnect.py -N 5 -M 3
 python3 ./test.py -f 6-cluster/5dnode3mnodeRecreateMnode.py -N 5 -M 3

@@ -263,28 +263,25 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3
 python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1
 python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1
 python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py -N 4 -M 1
-python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py -N 4 -M 1
+# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py -N 4 -M 1
 python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py -N 4 -M 1
-python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py -N 4 -M 1
+# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py -N 4 -M 1
-python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py -N 4 -M 1
+# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py -N 4 -M 1
-python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py -N 4 -M 1
+# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py -N 4 -M 1
 python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py -N 4 -M 1
-python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py -N 4 -M 1
+# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py -N 4 -M 1
-python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py -N 4 -M 1
+# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py -N 4 -M 1
 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py -N 4 -M 1
 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py -N 4 -M 1
-python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py -N 4 -M 1
+# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py -N 4 -M 1
 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py -N 4 -M 1
 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py -N 4 -M 1
-python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py -N 4 -M 1
+# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py -N 4 -M 1
-python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py -N 4 -M 1
+# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py -N 4 -M 1
 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py -N 4 -M 1
-python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py -N 4 -M 1
+# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py -N 4 -M 1
 python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py -N 4 -M 1
-python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py -N 4 -M 1
+# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py -N 4 -M 1
 python3 ./test.py -f 7-tmq/create_wrong_topic.py
 python3 ./test.py -f 7-tmq/dropDbR3ConflictTransaction.py -N 3

@@ -551,7 +548,7 @@ python3 ./test.py -f 2-query/upper.py -Q 4
 python3 ./test.py -f 2-query/lower.py -Q 4
 python3 ./test.py -f 2-query/join.py -Q 4
 python3 ./test.py -f 2-query/join2.py -Q 4
-python3 ./test.py -f 2-query/cast.py -Q 4
+#python3 ./test.py -f 2-query/cast.py -Q 4
 python3 ./test.py -f 2-query/substr.py -Q 4
 python3 ./test.py -f 2-query/union.py -Q 4
 python3 ./test.py -f 2-query/union1.py -Q 4


@@ -12,7 +12,7 @@ IF (TD_WEBSOCKET)
 ExternalProject_Add(taosws-rs
 PREFIX "taosws-rs"
 SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs
-BUILD_ALWAYS on
+BUILD_ALWAYS off
 DEPENDS taos
 BUILD_IN_SOURCE 1
 CONFIGURE_COMMAND cmake -E echo "taosws-rs no need cmake to config"


@@ -217,16 +217,16 @@ void shellRunSingleCommandImp(char *command) {
 et = taosGetTimestampUs();
 if (error_no == 0) {
-printf("Query OK, %d rows in database (%.6fs)\r\n", numOfRows, (et - st) / 1E6);
+printf("Query OK, %d row(s) in set (%.6fs)\r\n", numOfRows, (et - st) / 1E6);
 } else {
-printf("Query interrupted (%s), %d rows affected (%.6fs)\r\n", taos_errstr(pSql), numOfRows, (et - st) / 1E6);
+printf("Query interrupted (%s), %d row(s) in set (%.6fs)\r\n", taos_errstr(pSql), numOfRows, (et - st) / 1E6);
 }
 taos_free_result(pSql);
 } else {
 int32_t num_rows_affacted = taos_affected_rows(pSql);
 taos_free_result(pSql);
 et = taosGetTimestampUs();
-printf("Query OK, %d of %d rows affected (%.6fs)\r\n", num_rows_affacted, num_rows_affacted, (et - st) / 1E6);
+printf("Query OK, %d row(s) affected in set (%.6fs)\r\n", num_rows_affacted, (et - st) / 1E6);
 // call auto tab
 callbackAutoTab(command, NULL, false);


@@ -1084,6 +1084,35 @@ int sml_19221_Test() {
 return code;
 }
+int sml_time_Test() {
+TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
+taos_free_result(pRes);
+const char *sql[] = {
+"meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase='2022-02-02 10:22:22' 1626006833639000000",
+};
+pRes = taos_query(taos, "use sml_db");
+taos_free_result(pRes);
+char* tmp = (char*)taosMemoryCalloc(1024, 1);
+memcpy(tmp, sql[0], strlen(sql[0]));
+*(char*)(tmp+44) = 0;
+int32_t totalRows = 0;
+pRes = taos_schemaless_insert_raw(taos, tmp, strlen(sql[0]), &totalRows, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
+ASSERT(totalRows == 3);
+printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+int code = taos_errno(pRes);
+taos_free_result(pRes);
+taos_close(taos);
+taosMemoryFree(tmp);
+return code;
+}
+
 int main(int argc, char *argv[]) {
 int ret = 0;
 ret = smlProcess_influx_Test();