Merge branch '3.0' of github.com:taosdata/TDengine into test/chr/TD-14699

tomchon 2022-05-20 18:25:39 +08:00
commit 2b86dd2274
155 changed files with 10296 additions and 7056 deletions

View File

@ -121,7 +121,7 @@ def pre_test_win(){
set
date /t
time /t
rd /s /Q C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug
rd /s /Q C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug || exit 0
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal

View File

@ -46,11 +46,17 @@ ENDIF ()
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
SET(COMMON_FLAGS "/W3 /D_WIN32")
SET(COMMON_FLAGS "/w /D_WIN32")
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
# IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
# ENDIF ()
IF (CMAKE_DEPFILE_FLAGS_C)
SET(CMAKE_DEPFILE_FLAGS_C "")
ENDIF ()
IF (CMAKE_DEPFILE_FLAGS_CXX)
SET(CMAKE_DEPFILE_FLAGS_CXX "")
ENDIF ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMMON_FLAGS}")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}")

View File

@ -46,6 +46,18 @@ IF(${TD_WINDOWS})
ON
)
option(
BUILD_TEST
"If build unit tests using googletest"
OFF
)
ELSE ()
option(
BUILD_TEST
"If build unit tests using googletest"
ON
)
ENDIF ()
option(
@ -54,12 +66,6 @@ option(
OFF
)
option(
BUILD_TEST
"If build unit tests using googletest"
ON
)
option(
BUILD_WITH_LEVELDB
"If build with leveldb"

View File

@ -1648,6 +1648,15 @@ typedef struct {
int32_t tSerializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
int32_t tDeserializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
typedef struct {
char topic[TSDB_TOPIC_FNAME_LEN];
char cgroup[TSDB_CGROUP_LEN];
int8_t igNotExists;
} SMDropCgroupReq;
int32_t tSerializeSMDropCgroupReq(void* buf, int32_t bufLen, SMDropCgroupReq* pReq);
int32_t tDeserializeSMDropCgroupReq(void* buf, int32_t bufLen, SMDropCgroupReq* pReq);
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
int8_t alterType;

View File

@ -236,6 +236,7 @@ typedef struct SSelectStmt {
bool isTimeOrderQuery;
bool hasAggFuncs;
bool hasRepeatScanFuncs;
bool hasNonstdSQLFunc;
} SSelectStmt;
typedef enum ESetOperatorType { SET_OP_TYPE_UNION_ALL = 1, SET_OP_TYPE_UNION } ESetOperatorType;
@ -350,9 +351,6 @@ bool nodesIsComparisonOp(const SOperatorNode* pOp);
bool nodesIsJsonOp(const SOperatorNode* pOp);
bool nodesIsRegularOp(const SOperatorNode* pOp);
bool nodesIsTimeorderQuery(const SNode* pQuery);
bool nodesIsTimelineQuery(const SNode* pQuery);
void* nodesGetValueFromNode(SValueNode* pNode);
int32_t nodesSetValueNodeValue(SValueNode* pNode, void* value);
char* nodesGetStrValueFromNode(SValueNode* pNode);

View File

@ -107,6 +107,7 @@ static FORCE_INLINE void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit)
if (ref == 0) {
taosMemoryFree(pDataSubmit->data);
taosMemoryFree(pDataSubmit->dataRef);
taosFreeQitem(pDataSubmit);
}
}
@ -279,6 +280,12 @@ typedef struct {
SArray* res; // SArray<SSDataBlock>
} SStreamSinkReq;
typedef struct {
SMsgHead head;
int64_t streamId;
int32_t taskId;
} SStreamTaskRunReq;
int32_t streamEnqueueDataSubmit(SStreamTask* pTask, SStreamDataSubmit* input);
int32_t streamEnqueueDataBlk(SStreamTask* pTask, SStreamDataBlock* input);
int32_t streamDequeueOutput(SStreamTask* pTask, void** output);

View File

@ -20,20 +20,23 @@
extern "C" {
#endif
#include <stdbool.h>
#include <stdint.h>
//#include <tdatablock.h>
#include "cJSON.h"
#include "tdef.h"
//#include "taosdef.h"
//#include "trpc.h"
//#include "wal.h"
#include "tmsgcb.h"
#define SYNC_INDEX_BEGIN 0
#define SYNC_INDEX_INVALID -1
typedef uint64_t SyncNodeId;
typedef int32_t SyncGroupId;
typedef int64_t SyncIndex;
typedef uint64_t SyncTerm;
typedef struct SSyncNode SSyncNode;
typedef struct SSyncBuffer SSyncBuffer;
typedef struct SWal SWal;
typedef struct SSyncRaftEntry SSyncRaftEntry;
typedef enum {
TAOS_SYNC_STATE_FOLLOWER = 100,
TAOS_SYNC_STATE_CANDIDATE = 101,
@ -41,6 +44,17 @@ typedef enum {
TAOS_SYNC_STATE_ERROR = 103,
} ESyncState;
typedef enum {
TAOS_SYNC_PROPOSE_SUCCESS = 0,
TAOS_SYNC_PROPOSE_NOT_LEADER = 1,
TAOS_SYNC_PROPOSE_OTHER_ERROR = 2,
} ESyncProposeCode;
typedef enum {
TAOS_SYNC_FSM_CB_SUCCESS = 0,
TAOS_SYNC_FSM_CB_OTHER_ERROR = 1,
} ESyncFsmCbCode;
typedef struct SNodeInfo {
uint16_t nodePort;
char nodeFqdn[TSDB_FQDN_LEN];
@ -58,11 +72,6 @@ typedef struct SSnapshot {
SyncTerm lastApplyTerm;
} SSnapshot;
typedef enum {
TAOS_SYNC_FSM_CB_SUCCESS = 0,
TAOS_SYNC_FSM_CB_OTHER_ERROR,
} ESyncFsmCbCode;
typedef struct SFsmCbMeta {
SyncIndex index;
bool isWeak;
@ -71,27 +80,15 @@ typedef struct SFsmCbMeta {
uint64_t seqNum;
} SFsmCbMeta;
struct SRpcMsg;
typedef struct SRpcMsg SRpcMsg;
typedef struct SSyncFSM {
void* data;
void (*FpCommitCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
void (*FpPreCommitCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
void (*FpRollBackCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
int32_t (*FpRestoreSnapshot)(struct SSyncFSM* pFsm, const SSnapshot* snapshot);
} SSyncFSM;
struct SSyncRaftEntry;
typedef struct SSyncRaftEntry SSyncRaftEntry;
#define SYNC_INDEX_BEGIN 0
#define SYNC_INDEX_INVALID -1
// abstract definition of log store in raft
// SWal implements it
typedef struct SSyncLogStore {
@ -120,11 +117,6 @@ typedef struct SSyncLogStore {
} SSyncLogStore;
struct SWal;
typedef struct SWal SWal;
struct SEpSet;
typedef struct SEpSet SEpSet;
typedef struct SSyncInfo {
SyncGroupId vgId;
@ -132,12 +124,9 @@ typedef struct SSyncInfo {
char path[TSDB_FILENAME_LEN];
SWal* pWal;
SSyncFSM* pFsm;
void* rpcClient;
int32_t (*FpSendMsg)(void* rpcClient, const SEpSet* pEpSet, SRpcMsg* pMsg);
void* queue;
int32_t (*FpEqMsg)(void* queue, SRpcMsg* pMsg);
SMsgCb* msgcb;
int32_t (*FpSendMsg)(const SEpSet* pEpSet, SRpcMsg* pMsg);
int32_t (*FpEqMsg)(const SMsgCb* msgcb, SRpcMsg* pMsg);
} SSyncInfo;
int32_t syncInit();
@ -152,27 +141,8 @@ const char* syncGetMyRoleStr(int64_t rid);
SyncTerm syncGetMyTerm(int64_t rid);
void syncGetEpSet(int64_t rid, SEpSet* pEpSet);
int32_t syncGetVgId(int64_t rid);
typedef enum {
TAOS_SYNC_PROPOSE_SUCCESS = 0,
TAOS_SYNC_PROPOSE_NOT_LEADER,
TAOS_SYNC_PROPOSE_OTHER_ERROR,
} ESyncProposeCode;
int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak);
bool syncEnvIsStart();
extern int32_t sDebugFlag;
//-----------------------------------------
struct SSyncNode;
typedef struct SSyncNode SSyncNode;
struct SSyncBuffer;
typedef struct SSyncBuffer SSyncBuffer;
//-----------------------------------------
const char* syncStr(ESyncState state);
#ifdef __cplusplus

View File

@ -20,13 +20,7 @@
extern "C" {
#endif
#include <stdbool.h>
#include <stdint.h>
//#include <tdatablock.h>
#include "cJSON.h"
//#include "taosdef.h"
#include "trpc.h"
//#include "wal.h"
// ------------------ ds -------------------
typedef struct SRaftId {
@ -35,16 +29,12 @@ typedef struct SRaftId {
} SRaftId;
// ------------------ control -------------------
struct SSyncNode;
typedef struct SSyncNode SSyncNode;
SSyncNode* syncNodeAcquire(int64_t rid);
void syncNodeRelease(SSyncNode* pNode);
int32_t syncGetRespRpc(int64_t rid, uint64_t index, SRpcMsg* msg);
int32_t syncGetAndDelRespRpc(int64_t rid, uint64_t index, SRpcMsg* msg);
void syncSetQ(int64_t rid, void* queueHandle);
void syncSetRpc(int64_t rid, void* rpcHandle);
void syncSetMsgCb(int64_t rid, const SMsgCb* msgcb);
char* sync2SimpleStr(int64_t rid);
// set timer ms

View File

@ -63,11 +63,6 @@ typedef struct SRpcMsg {
} SRpcMsg;
typedef void (*RpcCfp)(void *parent, SRpcMsg *, SEpSet *rf);
typedef int (*RpcAfp)(void *parent, char *tableId, char *spi, char *encrypt, char *secret, char *ckey);
///
// // SRpcMsg code
// REDIERE,
// NOT READY, EpSet
typedef bool (*RpcRfp)(int32_t code);
typedef struct SRpcInit {
@ -81,17 +76,10 @@ typedef struct SRpcInit {
// the following is for client app security only
char *user; // user name
char spi; // security parameter index
char encrypt; // encrypt algorithm
char *secret; // key for authentication
char *ckey; // ciphering key
// call back to process incoming msg, code shall be ignored by server app
RpcCfp cfp;
// call back to retrieve the client auth info, for server app only
RpcAfp afp;
// user defined retry func
RpcRfp rfp;

View File

@ -649,6 +649,8 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_TBNAME TAOS_DEF_ERROR_CODE(0, 0x264C)
#define TSDB_CODE_PAR_INVALID_FUNCTION_NAME TAOS_DEF_ERROR_CODE(0, 0x264D)
#define TSDB_CODE_PAR_COMMENT_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x264E)
#define TSDB_CODE_PAR_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x264F)
#define TSDB_CODE_PAR_NOT_ALLOWED_WIN_QUERY TAOS_DEF_ERROR_CODE(0, 0x2650)
//planner
#define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700)

6 packaging/deb/makedeb.sh Normal file → Executable file
View File

@ -67,9 +67,9 @@ fi
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taosdef.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/client/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/common/taosdef.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/util/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/examples/* ${pkg_dir}${install_home_path}/examples
#cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
#cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector

View File

@ -1,94 +1,315 @@
#!/bin/bash
#
# Generate the tar.gz package for linux os
# Generate the deb package for ubuntu, or rpm package for centos, or tar.gz package for other linux os
set -e
#set -x
# release.sh -v [cluster | edge]
# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...]
# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
# -V [stable | beta]
# -l [full | lite]
# -s [static | dynamic]
# -d [taos | ...]
# -n [2.0.0.3]
# -m [2.0.0.0]
# -H [ false | true]
# set parameters by default value
version="3.0.0.0"
verMode=edge # [cluster, edge]
verType=stable # [stable, beta]
cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
pagMode=full # [full | lite]
soMode=dynamic # [static | dynamic]
dbName=taos # [taos | ...]
allocator=glibc # [glibc | jemalloc]
verNumber=""
verNumberComp="2.0.0.0"
httpdBuild=false
while getopts "hv:V:c:o:l:s:d:a:n:m:H:" arg; do
case $arg in
v)
#echo "verMode=$OPTARG"
verMode=$(echo $OPTARG)
;;
V)
#echo "verType=$OPTARG"
verType=$(echo $OPTARG)
;;
c)
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
l)
#echo "pagMode=$OPTARG"
pagMode=$(echo $OPTARG)
;;
s)
#echo "soMode=$OPTARG"
soMode=$(echo $OPTARG)
;;
d)
#echo "dbName=$OPTARG"
dbName=$(echo $OPTARG)
;;
a)
#echo "allocator=$OPTARG"
allocator=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
m)
#echo "verNumberComp=$OPTARG"
verNumberComp=$(echo $OPTARG)
;;
o)
#echo "osType=$OPTARG"
osType=$(echo $OPTARG)
;;
H)
#echo "httpdBuild=$OPTARG"
httpdBuild=$(echo $OPTARG)
;;
h)
echo "Usage: $(basename $0) -v [cluster | edge] "
echo " -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] "
echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] "
echo " -V [stable | beta] "
echo " -l [full | lite] "
echo " -a [glibc | jemalloc] "
echo " -s [static | dynamic] "
echo " -d [taos | ...] "
echo " -n [version number] "
echo " -m [compatible version number] "
echo " -H [false | true] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp} httpdBuild=${httpdBuild}"
curr_dir=$(pwd)
if [ "$osType" == "Darwin" ]; then
script_dir=$(dirname $0)
cd ${script_dir}
script_dir="$(pwd)"
top_dir=${script_dir}/..
else
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/..)"
fi
echo "=======================new version number: ${verNumber}======================================"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
function is_valid_version() {
[ -z $1 ] && return 1 || :
rx='^([0-9]+\.){3}(\*|[0-9]+)$'
if [[ $1 =~ $rx ]]; then
return 0
fi
return 1
}
function vercomp() {
if [[ $1 == $2 ]]; then
echo 0
exit 0
fi
local IFS=.
local i ver1=($1) ver2=($2)
# fill empty fields in ver1 with zeros
for ((i = ${#ver1[@]}; i < ${#ver2[@]}; i++)); do
ver1[i]=0
done
for ((i = 0; i < ${#ver1[@]}; i++)); do
if [[ -z ${ver2[i]} ]]; then
# fill empty fields in ver2 with zeros
ver2[i]=0
fi
if ((10#${ver1[i]} > 10#${ver2[i]})); then
echo 1
exit 0
fi
if ((10#${ver1[i]} < 10#${ver2[i]})); then
echo 2
exit 0
fi
done
echo 0
}
# 1. check version information
if ( (! is_valid_version $verNumber) || (! is_valid_version $verNumberComp) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]]); then
echo "please enter correct version"
exit 0
fi
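# Example: with the default verNumberComp=2.0.0.0, "-n 3.0.0.1" makes vercomp print 1
# (the new version is newer), so the check passes; "-n 1.0.0.0" makes it print 2 and
# the script stops here, as does any version string that fails is_valid_version.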
echo "=======================new version number: ${verNumber}, compatible version: ${verNumberComp}======================================"
build_time=$(date +"%F %R")
echo "script_dir: ${script_dir}"
echo "top_dir: ${top_dir}"
# get commit id from git
gitinfo=$(git rev-parse --verify HEAD)
cd ${top_dir}
# git checkout -- .
# git checkout 3.0
# git pull || :
if [[ "$verMode" == "cluster" ]]; then
enterprise_dir="${top_dir}/../enterprise"
cd ${enterprise_dir}
gitinfoOfInternal=$(git rev-parse --verify HEAD)
else
gitinfoOfInternal=NULL
fi
echo "curr_dir: ${curr_dir}"
cd "${curr_dir}"
# 2. cmake executable file
compile_dir="${top_dir}/debug"
# if [ -d ${compile_dir} ]; then
# rm -rf ${compile_dir}
# fi
mkdir -p ${compile_dir}
cd ${compile_dir}
echo "compile_dir: ${compile_dir}"
cmake .. -DBUILD_TOOLS=true
make -j32
release_dir="${top_dir}/release"
if [ -d ${release_dir} ]; then
rm -rf ${release_dir}
if [ -d ${compile_dir} ]; then
${csudo}rm -rf ${compile_dir}
fi
mkdir -p ${release_dir}
cd ${release_dir}
if [ "$osType" != "Darwin" ]; then
${csudo}mkdir -p ${compile_dir}
else
mkdir -p ${compile_dir}
fi
cd ${compile_dir}
install_dir="${release_dir}/TDengine-server-${version}"
mkdir -p ${install_dir}
mkdir -p ${install_dir}/bin
mkdir -p ${install_dir}/lib
mkdir -p ${install_dir}/inc
if [[ "$allocator" == "jemalloc" ]]; then
allocator_macro="-DJEMALLOC_ENABLED=true"
else
allocator_macro=""
fi
install_files="${script_dir}/tools/install.sh"
chmod a+x ${script_dir}/tools/install.sh || :
cp ${install_files} ${install_dir}
if [[ "$dbName" != "taos" ]]; then
source ${enterprise_dir}/packaging/oem/sed_$dbName.sh
replace_community_$dbName
fi
header_files="${top_dir}/include/client/taos.h ${top_dir}/include/util/taoserror.h"
cp ${header_files} ${install_dir}/inc
if [[ "$httpdBuild" == "true" ]]; then
BUILD_HTTP=true
else
BUILD_HTTP=false
fi
bin_files="${compile_dir}/build/bin/taosd ${compile_dir}/build/bin/taos ${compile_dir}/build/bin/create_table ${compile_dir}/build/bin/tmq_sim ${script_dir}/tools/remove.sh ${compile_dir}/build/bin/taosBenchmark ${compile_dir}/build/bin/taosdump"
cp -rf ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
if [[ "$verMode" == "cluster" ]]; then
BUILD_HTTP=internal
fi
cp ${compile_dir}/build/lib/libtaos.so ${install_dir}/lib/
cp ${compile_dir}/build/lib/libavro* ${install_dir}/lib/ > /dev/null || echo -e "failed to copy avro libraries"
cp -rf ${compile_dir}/build/lib/pkgconfig ${install_dir}/lib/ > /dev/null || echo -e "failed to copy pkgconfig directory"
if [[ "$pagMode" == "full" ]]; then
BUILD_TOOLS=true
else
BUILD_TOOLS=false
fi
# check support cpu type
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]]; then
if [ "$verMode" != "cluster" ]; then
# community-version compile
cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
else
if [[ "$dbName" != "taos" ]]; then
replace_enterprise_$dbName
fi
cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
fi
else
echo "input cpuType=${cpuType} error!!!"
exit 1
fi
#cp ${compile_dir}/source/dnode/mnode/impl/libmnode.so ${install_dir}/lib/
#cp ${compile_dir}/source/dnode/qnode/libqnode.so ${install_dir}/lib/
#cp ${compile_dir}/source/dnode/snode/libsnode.so ${install_dir}/lib/
#cp ${compile_dir}/source/dnode/bnode/libbnode.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/wal/libwal.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/scheduler/libscheduler.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/planner/libplanner.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/parser/libparser.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/qcom/libqcom.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/transport/libtransport.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/function/libfunction.so ${install_dir}/lib/
#cp ${compile_dir}/source/common/libcommon.so ${install_dir}/lib/
#cp ${compile_dir}/source/os/libos.so ${install_dir}/lib/
#cp ${compile_dir}/source/dnode/mnode/sdb/libsdb.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/catalog/libcatalog.so ${install_dir}/lib/
CORES=$(grep -c ^processor /proc/cpuinfo)
pkg_name=${install_dir}-Linux-x64
if [[ "$allocator" == "jemalloc" ]]; then
# jemalloc need compile first, so disable parallel build
make -j ${CORES} && ${csudo}make install
else
make -j ${CORES} && ${csudo}make install
fi
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
cd ${curr_dir}
# 3. Call the corresponding script for packaging
if [ "$osType" != "Darwin" ]; then
if [[ "$verMode" != "cluster" ]] && [[ "$pagMode" == "full" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
ret='0'
command -v dpkg >/dev/null 2>&1 || { ret='1'; }
if [ "$ret" -eq 0 ]; then
echo "====do deb package for the ubuntu system===="
output_dir="${top_dir}/debs"
if [ -d ${output_dir} ]; then
${csudo}rm -rf ${output_dir}
fi
${csudo}mkdir -p ${output_dir}
cd ${script_dir}/deb
${csudo}./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
if [[ "$pagMode" == "full" ]]; then
if [ -d ${top_dir}/tools/taos-tools/packaging/deb ]; then
cd ${top_dir}/tools/taos-tools/packaging/deb
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
taos_tools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}')
${csudo}./make-taos-tools-deb.sh ${top_dir} \
${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
fi
fi
else
echo "==========dpkg command not exist, so not release deb package!!!"
fi
ret='0'
command -v rpmbuild >/dev/null 2>&1 || { ret='1'; }
if [ "$ret" -eq 0 ]; then
echo "====do rpm package for the centos system===="
output_dir="${top_dir}/rpms"
if [ -d ${output_dir} ]; then
${csudo}rm -rf ${output_dir}
fi
${csudo}mkdir -p ${output_dir}
cd ${script_dir}/rpm
${csudo}./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
if [[ "$pagMode" == "full" ]]; then
if [ -d ${top_dir}/tools/taos-tools/packaging/rpm ]; then
cd ${top_dir}/tools/taos-tools/packaging/rpm
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
taos_tools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}' | sed -e 's/-/_/g')
${csudo}./make-taos-tools-rpm.sh ${top_dir} \
${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
fi
fi
else
echo "==========rpmbuild command not exist, so not release rpm package!!!"
fi
fi
echo "====do tar.gz package for all systems===="
cd ${script_dir}/tools
${csudo}./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} ${dbName}
${csudo}./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
# ${csudo}./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
else
# only make client for Darwin
cd ${script_dir}/tools
./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
fi

View File

@ -74,9 +74,9 @@ if [ -f %{_compiledir}/build/bin/taosadapter ]; then
cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||:
fi
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taosdef.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../include/client/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../include/common/taosdef.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../include/util/taoserror.h %{buildroot}%{homepath}/include
#cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
#cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
#cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector

0 packaging/tools/check_os.sh Normal file → Executable file
View File

0 packaging/tools/get_client.sh Normal file → Executable file
View File

0 packaging/tools/get_os.sh Normal file → Executable file
View File

0 packaging/tools/get_version.sh Normal file → Executable file
View File

View File

@ -485,6 +485,17 @@ function install_service() {
# fi
}
function install_config() {
if [ ! -f ${cfg_install_dir}/${configFile} ]; then
${csudo}mkdir -p ${cfg_install_dir}
[ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir}
${csudo}chmod 644 ${cfg_install_dir}/*
fi
${csudo}cp -f ${script_dir}/cfg/${configFile} ${install_main_dir}/cfg/${configFile}.org
${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg
}
function install_TDengine() {
# Start to install
echo -e "${GREEN}Start to install TDengine...${NC}"
@ -500,7 +511,7 @@ function install_TDengine() {
# For installing new
install_bin
install_service
#install_config
install_config
# Ask if to start the service
#echo
@ -539,7 +550,7 @@ function install_TDengine() {
echo
else # Only install client
install_bin
#install_config
install_config
echo
echo -e "\033[44;32;1mTDengine client is installed successfully!${NC}"
fi

0 packaging/tools/install_arbi.sh Normal file → Executable file
View File

15 packaging/tools/install_client.sh Normal file → Executable file
View File

@ -17,7 +17,6 @@ serverName="taosd"
clientName="taos"
uninstallScript="rmtaos"
configFile="taos.cfg"
tarName="taos.tar.gz"
osType=Linux
pagMode=full
@ -243,12 +242,6 @@ function install_examples() {
function update_TDengine() {
# Start to update
if [ ! -e ${tarName} ]; then
echo "File ${tarName} does not exist"
exit 1
fi
tar -zxf ${tarName}
echo -e "${GREEN}Start to update ${productName} client...${NC}"
# Stop the client shell if running
if pidof ${clientName} &> /dev/null; then
@ -271,18 +264,10 @@ function update_TDengine() {
echo
echo -e "\033[44;32;1m${productName} client is updated successfully!${NC}"
rm -rf $(tar -tf ${tarName})
}
function install_TDengine() {
# Start to install
if [ ! -e ${tarName} ]; then
echo "File ${tarName} does not exist"
exit 1
fi
tar -zxf ${tarName}
echo -e "${GREEN}Start to install ${productName} client...${NC}"
install_main_path

4 packaging/tools/makearbi.sh Normal file → Executable file
View File

@ -22,7 +22,7 @@ productName="TDengine"
# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
code_dir="${top_dir}"
release_dir="${top_dir}/release"
#package_name='linux'
@ -36,7 +36,7 @@ fi
bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi.sh"
install_files="${script_dir}/install_arbi.sh"
#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
#header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h"
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord

10 packaging/tools/makeclient.sh Normal file → Executable file
View File

@ -32,7 +32,7 @@ fi
# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
code_dir="${top_dir}"
release_dir="${top_dir}/release"
#package_name='linux'
@ -62,7 +62,7 @@ else
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
fi
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h"
if [ "$dbName" != "taos" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
@ -152,7 +152,7 @@ if [[ $productName == "TDengine" ]]; then
cp -r ${examples_dir}/go ${install_dir}/examples
cp -r ${examples_dir}/nodejs ${install_dir}/examples
cp -r ${examples_dir}/C# ${install_dir}/examples
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../src/kit/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../tools/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
fi
if [ "$verMode" == "cluster" ]; then
@ -199,8 +199,8 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
cp -r ${connector_dir}/python ${install_dir}/connector
cp -r ${connector_dir}/nodejs ${install_dir}/connector
cp -r ${connector_dir}/python ${install_dir}/connector || :
cp -r ${connector_dir}/nodejs ${install_dir}/connector || :
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}

20 packaging/tools/makepkg.sh Normal file → Executable file
View File

@ -33,7 +33,7 @@ defaultPasswd="taosdata"
# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
code_dir="${top_dir}"
release_dir="${top_dir}/release"
#package_name='linux'
@ -43,8 +43,8 @@ else
install_dir="${release_dir}/${productName}-server-${version}"
fi
if [ -d ${top_dir}/src/kit/taos-tools/packaging/deb ]; then
cd ${top_dir}/src/kit/taos-tools/packaging/deb
if [ -d ${top_dir}/tools/taos-tools/packaging/deb ]; then
cd ${top_dir}/tools/taos-tools/packaging/deb
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
taostools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}')
@ -94,7 +94,7 @@ else
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h"
if [ "$dbName" != "taos" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
@ -103,7 +103,7 @@ else
fi
install_files="${script_dir}/install.sh"
nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
nginx_dir="${top_dir}/../enterprise/src/plugins/web"
init_file_deb=${script_dir}/../deb/taosd
init_file_rpm=${script_dir}/../rpm/taosd
@ -162,8 +162,8 @@ if [ -n "${taostools_bin_files}" ]; then
&& cp ${taostools_bin_files} ${taostools_install_dir}/bin \
&& chmod a+x ${taostools_install_dir}/bin/* || :
if [ -f ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh ]; then
cp ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh \
if [ -f ${top_dir}/tools/taos-tools/packaging/tools/install-taostools.sh ]; then
cp ${top_dir}/tools/taos-tools/packaging/tools/install-taostools.sh \
${taostools_install_dir}/ > /dev/null \
&& chmod a+x ${taostools_install_dir}/install-taostools.sh \
|| echo -e "failed to copy install-taostools.sh"
@ -171,8 +171,8 @@ if [ -n "${taostools_bin_files}" ]; then
echo -e "install-taostools.sh not found"
fi
if [ -f ${top_dir}/src/kit/taos-tools/packaging/tools/uninstall-taostools.sh ]; then
cp ${top_dir}/src/kit/taos-tools/packaging/tools/uninstall-taostools.sh \
if [ -f ${top_dir}/tools/taos-tools/packaging/tools/uninstall-taostools.sh ]; then
cp ${top_dir}/tools/taos-tools/packaging/tools/uninstall-taostools.sh \
${taostools_install_dir}/ > /dev/null \
&& chmod a+x ${taostools_install_dir}/uninstall-taostools.sh \
|| echo -e "failed to copy uninstall-taostools.sh"
@ -288,7 +288,7 @@ if [[ $dbName == "taos" ]]; then
cp -r ${examples_dir}/go ${install_dir}/examples
cp -r ${examples_dir}/nodejs ${install_dir}/examples
cp -r ${examples_dir}/C# ${install_dir}/examples
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../src/kit/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../tools/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
fi
fi

0 packaging/tools/post.sh Normal file → Executable file
View File

0 packaging/tools/preun.sh Normal file → Executable file
View File

0 packaging/tools/remove.sh Normal file → Executable file
View File

0 packaging/tools/remove_arbi.sh Normal file → Executable file
View File

0 packaging/tools/remove_client.sh Normal file → Executable file
View File

0 packaging/tools/repair_link.sh Normal file → Executable file
View File

0 packaging/tools/run_taosd_and_taosadapter.sh Normal file → Executable file
View File

0 packaging/tools/set_core.sh Normal file → Executable file
View File

0 packaging/tools/startPre.sh Normal file → Executable file
View File

View File

@ -91,7 +91,6 @@ static bool clientRpcRfp(int32_t code) {
}
}
// TODO refactor
void *openTransporter(const char *user, const char *auth, int32_t numOfThread) {
SRpcInit rpcInit;
@ -105,10 +104,6 @@ void *openTransporter(const char *user, const char *auth, int32_t numOfThread) {
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.user = (char *)user;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.ckey = "key";
rpcInit.spi = 1;
rpcInit.secret = (char *)auth;
void *pDnodeConn = rpcOpen(&rpcInit);
if (pDnodeConn == NULL) {
tscError("failed to init connection to server");

View File

@ -23,6 +23,8 @@
#include "tmsgtype.h"
#include "tpagedbuf.h"
#include "tref.h"
#include "cJSON.h"
#include "tdataformat.h"
static int32_t initEpSetFromCfg(const char* firstEp, const char* secondEp, SCorEpSet* pEpSet);
static SMsgSendInfo* buildConnectMsg(SRequestObj* pRequest);
@ -268,7 +270,7 @@ void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t
if (pSchema[i].type == TSDB_DATA_TYPE_VARCHAR) {
pResInfo->userFields[i].bytes -= VARSTR_HEADER_SIZE;
} else if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
} else if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR || pSchema[i].type == TSDB_DATA_TYPE_JSON) {
pResInfo->userFields[i].bytes = (pResInfo->userFields[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
}
@ -344,7 +346,6 @@ int32_t validateSversion(SRequestObj* pRequest, void* res) {
taosArrayPush(pArray, &tbSver);
}
} else if (TDMT_VND_QUERY == pRequest->type) {
}
SCatalog* pCatalog = NULL;
@ -369,7 +370,6 @@ void freeRequestRes(SRequestObj* pRequest, void* res) {
if (TDMT_VND_SUBMIT == pRequest->type) {
tFreeSSubmitRsp((SSubmitRsp*)res);
} else if (TDMT_VND_QUERY == pRequest->type) {
}
}
@ -805,6 +805,101 @@ static int32_t doPrepareResPtr(SReqResultInfo* pResInfo) {
return TSDB_CODE_SUCCESS;
}
static char* parseTagDatatoJson(void *p){
char* string = NULL;
cJSON *json = cJSON_CreateObject();
if (json == NULL)
{
goto end;
}
int16_t nCols = kvRowNCols(p);
char tagJsonKey[256] = {0};
for (int j = 0; j < nCols; ++j) {
SColIdx * pColIdx = kvRowColIdxAt(p, j);
char* val = (char*)(kvRowColVal(p, pColIdx));
if (j == 0){
if(*val == TSDB_DATA_TYPE_NULL){
string = taosMemoryCalloc(1, 8);
sprintf(varDataVal(string), "%s", TSDB_DATA_NULL_STR_L);
varDataSetLen(string, strlen(varDataVal(string)));
goto end;
}
continue;
}
// json key encode by binary
memset(tagJsonKey, 0, sizeof(tagJsonKey));
memcpy(tagJsonKey, varDataVal(val), varDataLen(val));
// json value
val += varDataTLen(val);
char* realData = POINTER_SHIFT(val, CHAR_BYTES);
char type = *val;
if(type == TSDB_DATA_TYPE_NULL) {
cJSON* value = cJSON_CreateNull();
if (value == NULL)
{
goto end;
}
cJSON_AddItemToObject(json, tagJsonKey, value);
}else if(type == TSDB_DATA_TYPE_NCHAR) {
cJSON* value = NULL;
if (varDataLen(realData) > 0){
char *tagJsonValue = taosMemoryCalloc(varDataLen(realData), 1);
int32_t length = taosUcs4ToMbs((TdUcs4 *)varDataVal(realData), varDataLen(realData), tagJsonValue);
if (length < 0) {
tscError("charset:%s to %s. val:%s convert json value failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, val);
taosMemoryFree(tagJsonValue);
goto end;
}
value = cJSON_CreateString(tagJsonValue);
taosMemoryFree(tagJsonValue);
if (value == NULL)
{
goto end;
}
}else if(varDataLen(realData) == 0){
value = cJSON_CreateString("");
}else{
ASSERT(0);
}
cJSON_AddItemToObject(json, tagJsonKey, value);
}else if(type == TSDB_DATA_TYPE_DOUBLE){
double jsonVd = *(double*)(realData);
cJSON* value = cJSON_CreateNumber(jsonVd);
if (value == NULL)
{
goto end;
}
cJSON_AddItemToObject(json, tagJsonKey, value);
// }else if(type == TSDB_DATA_TYPE_BIGINT){
// int64_t jsonVd = *(int64_t*)(realData);
// cJSON* value = cJSON_CreateNumber((double)jsonVd);
// if (value == NULL)
// {
// goto end;
// }
// cJSON_AddItemToObject(json, tagJsonKey, value);
}else if (type == TSDB_DATA_TYPE_BOOL) {
char jsonVd = *(char*)(realData);
cJSON* value = cJSON_CreateBool(jsonVd);
if (value == NULL)
{
goto end;
}
cJSON_AddItemToObject(json, tagJsonKey, value);
}else{
ASSERT(0);
}
}
string = cJSON_PrintUnformatted(json);
end:
cJSON_Delete(json);
return string;
}
static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int32_t numOfCols, int32_t* colLength) {
for (int32_t i = 0; i < numOfCols; ++i) {
int32_t type = pResultInfo->fields[i].type;
@ -835,9 +930,7 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
pResultInfo->pCol[i].pData = pResultInfo->convertBuf[i];
pResultInfo->row[i] = pResultInfo->pCol[i].pData;
}
if (type == TSDB_DATA_TYPE_JSON) {
}else if (type == TSDB_DATA_TYPE_JSON && colLength[i] > 0) {
char* p = taosMemoryRealloc(pResultInfo->convertBuf[i], colLength[i]);
if (p == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
@ -850,6 +943,7 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
if (pCol->offset[j] != -1) {
char* pStart = pCol->offset[j] + pCol->pData;
int32_t jsonInnerType = *pStart;
char* jsonInnerData = pStart + CHAR_BYTES;
char dst[TSDB_MAX_JSON_TAG_LEN] = {0};
@ -857,15 +951,9 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
sprintf(varDataVal(dst), "%s", TSDB_DATA_NULL_STR_L);
varDataSetLen(dst, strlen(varDataVal(dst)));
} else if (jsonInnerType == TSDB_DATA_TYPE_JSON) {
int32_t length =
taosUcs4ToMbs((TdUcs4*)varDataVal(jsonInnerData), varDataLen(jsonInnerData), varDataVal(dst));
if (length <= 0) {
tscError("charset:%s to %s. val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset,
varDataVal(jsonInnerData));
length = 0;
}
varDataSetLen(dst, length);
char *jsonString = parseTagDatatoJson(jsonInnerData);
STR_TO_VARSTR(dst, jsonString);
taosMemoryFree(jsonString);
} else if (jsonInnerType == TSDB_DATA_TYPE_NCHAR) { // value -> "value"
*(char*)varDataVal(dst) = '\"';
int32_t length = taosUcs4ToMbs((TdUcs4*)varDataVal(jsonInnerData), varDataLen(jsonInnerData),
@ -1022,7 +1110,6 @@ TSDB_SERVER_STATUS taos_check_server_status(const char* fqdn, int port, char* de
SRpcInit rpcInit = {0};
char pass[TSDB_PASSWORD_LEN + 1] = {0};
taosEncryptPass_c((uint8_t*)("_pwd"), strlen("_pwd"), pass);
rpcInit.label = "CHK";
rpcInit.numOfThreads = 1;
rpcInit.cfp = NULL;
@ -1030,9 +1117,6 @@ TSDB_SERVER_STATUS taos_check_server_status(const char* fqdn, int port, char* de
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.user = "_dnd";
rpcInit.ckey = "_key";
rpcInit.spi = 1;
rpcInit.secret = pass;
clientRpc = rpcOpen(&rpcInit);
if (clientRpc == NULL) {

View File

@ -67,8 +67,10 @@ for (int i = 1; i < keyLen; ++i) { \
#define TS "_ts"
#define TS_LEN 3
#define TAG "_tagNone"
#define TAG_LEN 8
#define TAG "_tag"
#define TAG_LEN 4
#define TAG_VALUE "NULL"
#define TAG_VALUE_LEN 4
#define VALUE "value"
#define VALUE_LEN 5
@ -598,25 +600,33 @@ static bool smlParseNumber(SSmlKv *kvVal, SSmlMsgBuf *msg){
kvVal->type = TSDB_DATA_TYPE_FLOAT;
kvVal->f = (float)result;
}else if ((left == 1 && *endptr == 'i') || (left == 3 && strncasecmp(endptr, "i64", left) == 0)){
if(result >= (double)INT64_MAX){
kvVal->i = INT64_MAX;
}else if(result <= (double)INT64_MIN){
kvVal->i = INT64_MIN;
}else{
kvVal->i = result;
}
kvVal->type = TSDB_DATA_TYPE_BIGINT;
}else if ((left == 3 && strncasecmp(endptr, "u64", left) == 0)){
if(result < 0){
smlBuildInvalidDataMsg(msg, "unsigned big int is too large, out of precision", pVal);
if(smlDoubleToInt64OverFlow(result)){
errno = 0;
int64_t tmp = taosStr2Int64(pVal, &endptr, 10);
if(errno == ERANGE){
smlBuildInvalidDataMsg(msg, "big int out of range[-9223372036854775808,9223372036854775807]", pVal);
return false;
}
if(result >= (double)UINT64_MAX){
kvVal->u = UINT64_MAX;
}else{
kvVal->u = result;
kvVal->type = TSDB_DATA_TYPE_BIGINT;
kvVal->i = tmp;
return true;
}
kvVal->type = TSDB_DATA_TYPE_BIGINT;
kvVal->i = (int64_t)result;
}else if ((left == 3 && strncasecmp(endptr, "u64", left) == 0)){
if(result >= (double)UINT64_MAX || result < 0){
errno = 0;
uint64_t tmp = taosStr2UInt64(pVal, &endptr, 10);
if(errno == ERANGE || result < 0){
smlBuildInvalidDataMsg(msg, "unsigned big int out of range[0,18446744073709551615]", pVal);
return false;
}
kvVal->type = TSDB_DATA_TYPE_UBIGINT;
kvVal->u = tmp;
return true;
}
kvVal->type = TSDB_DATA_TYPE_UBIGINT;
kvVal->u = result;
}else if (left == 3 && strncasecmp(endptr, "i32", left) == 0){
if(!IS_VALID_INT(result)){
smlBuildInvalidDataMsg(msg, "int out of range[-2147483648,2147483647]", pVal);
@ -1103,8 +1113,7 @@ static int32_t smlParseTelnetString(SSmlHandle *info, const char* sql, SSmlTable
kv->keyLen = VALUE_LEN;
kv->value = value;
kv->length = valueLen;
if(!smlParseValue(kv, &info->msgBuf) || kv->type == TSDB_DATA_TYPE_BINARY
|| kv->type == TSDB_DATA_TYPE_NCHAR || kv->type == TSDB_DATA_TYPE_BOOL){
if(!smlParseValue(kv, &info->msgBuf)){
return TSDB_CODE_SML_INVALID_DATA;
}
@ -1124,8 +1133,8 @@ static int32_t smlParseCols(const char* data, int32_t len, SArray *cols, char *c
if(!kv) return TSDB_CODE_OUT_OF_MEMORY;
kv->key = TAG;
kv->keyLen = TAG_LEN;
kv->value = TAG;
kv->length = TAG_LEN;
kv->value = TAG_VALUE;
kv->length = TAG_VALUE_LEN;
kv->type = TSDB_DATA_TYPE_NCHAR;
if(cols) taosArrayPush(cols, &kv);
return TSDB_CODE_SUCCESS;
@ -2264,6 +2273,7 @@ static int32_t smlParseLine(SSmlHandle *info, char* lines[], int numLines){
uError("SML:0x%" PRIx64 " smlParseJSON failed:%s", info->id, *lines);
return code;
}
return code;
}
for (int32_t i = 0; i < numLines; ++i) {
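The i64/u64 branches above fall back to an integer parser when the already-parsed double would overflow, since a double cannot represent every 64-bit integer exactly. A minimal standalone sketch of that idea (using standard strtod/strtoll and a hypothetical parseBigInt helper rather than the taosStr2Int64 wrapper):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a signed 64-bit value from text. A double cannot represent every
 * 64-bit integer, so values near INT64_MAX are re-parsed with an integer
 * parser instead of being cast from the double. Returns 0 on success. */
static int parseBigInt(const char *s, int64_t *out) {
  char  *end = NULL;
  double d = strtod(s, &end);
  if (d >= (double)INT64_MAX || d <= (double)INT64_MIN) {  /* would overflow via double */
    errno = 0;
    long long v = strtoll(s, &end, 10);
    if (errno == ERANGE) return -1;                        /* truly out of range */
    *out = (int64_t)v;
    return 0;
  }
  *out = (int64_t)d;
  return 0;
}

int main(void) {
  int64_t v = 0;
  if (parseBigInt("9223372036854775807", &v) == 0) printf("%lld\n", (long long)v);
  if (parseBigInt("9223372036854775808", &v) != 0) printf("out of range\n");
  return 0;
}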

View File

@ -567,6 +567,7 @@ TEST(testCase, insert_test) {
taos_free_result(pRes);
taos_close(pConn);
}
#endif
TEST(testCase, projection_query_tables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
@ -605,7 +606,7 @@ TEST(testCase, projection_query_tables) {
}
taos_free_result(pRes);
for(int32_t i = 0; i < 10000000; i += 20) {
for(int32_t i = 0; i < 100000; i += 20) {
char sql[1024] = {0};
sprintf(sql,
"insert into tu values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
@ -625,7 +626,7 @@ TEST(testCase, projection_query_tables) {
printf("start to insert next table\n");
for(int32_t i = 0; i < 10000000; i += 20) {
for(int32_t i = 0; i < 100000; i += 20) {
char sql[1024] = {0};
sprintf(sql,
"insert into tu2 values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
@ -692,8 +693,6 @@ TEST(testCase, projection_query_stables) {
taos_close(pConn);
}
#endif
TEST(testCase, agg_query_tables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);

View File

@ -208,6 +208,7 @@ TEST(testCase, smlParseCols_Error_Test) {
memcpy(sql, data[i], len + 1);
SArray *cols = taosArrayInit(8, POINTER_BYTES);
int32_t ret = smlParseCols(sql, len, cols, NULL, false, dumplicateKey, &msgBuf);
printf("i:%d\n",i);
ASSERT_NE(ret, TSDB_CODE_SUCCESS);
taosHashClear(dumplicateKey);
taosMemoryFree(sql);
@ -272,11 +273,11 @@ TEST(testCase, smlParseCols_tag_Test) {
// nchar
kv = (SSmlKv *)taosArrayGetP(cols, 0);
ASSERT_EQ(strncasecmp(kv->key, TAG, strlen(TAG)), 0);
ASSERT_EQ(kv->keyLen, strlen(TAG));
ASSERT_EQ(strncasecmp(kv->key, TAG, TAG_LEN), 0);
ASSERT_EQ(kv->keyLen, TAG_LEN);
ASSERT_EQ(kv->type, TSDB_DATA_TYPE_NCHAR);
ASSERT_EQ(kv->length, strlen(TAG));
ASSERT_EQ(strncasecmp(kv->value, TAG, strlen(TAG)), 0);
ASSERT_EQ(kv->length, TAG_LEN);
ASSERT_EQ(strncasecmp(kv->value, TAG_VALUE, TAG_VALUE_LEN), 0);
taosMemoryFree(kv);
taosArrayDestroy(cols);
@ -506,7 +507,7 @@ TEST(testCase, smlProcess_influx_Test) {
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,heading=221,grade=0,fuel_consumption=25 1451608403000000000",
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,heading=221,grade=0,fuel_consumption=25 1451609404000000000",
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 fuel_consumption=25,grade=0 1451619405000000000",
"readings,name=truck_1,fleet=South,driver=Albert,model=F-150,device_version=v1.5 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=72.45258,longitude=68.83761,elevation=255,velocity=0,heading=181,grade=0,fuel_consumption=25 145160640600000000",
"readings,name=truck_1,fleet=South,driver=Albert,model=F-150,device_version=v1.5 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=72.45258,longitude=68.83761,elevation=255,velocity=0,heading=181,grade=0,fuel_consumption=25 1451606406000000000",
"readings,name=truck_2,driver=Derek,model=F-150,device_version=v1.5 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation=428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451606407000000000",
"readings,name=truck_2,fleet=North,driver=Derek,model=F-150 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation=428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451609408000000000",
"readings,fleet=South,name=truck_0,driver=Trish,model=H-2,device_version=v2.3 fuel_consumption=25,grade=0 1451629409000000000",
@ -745,7 +746,7 @@ TEST(testCase, smlProcess_json1_Test) {
" }\n"
" }\n"
"]";
int ret = smlProcess(info, (char **)(&sql), -1);
int ret = smlProcess(info, (char **)(&sql), 1);
ASSERT_EQ(ret, 0);
// case 1
@ -1221,3 +1222,55 @@ TEST(testCase, sml_TD15662_Test) {
taos_free_result(res);
}
TEST(testCase, sml_TD15735_Test) {
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(taos, nullptr);
TAOS_RES* pRes = taos_query(taos, "create database if not exists sml_db");
taos_free_result(pRes);
pRes = taos_query(taos, "use sml_db");
taos_free_result(pRes);
SRequestObj *request = (SRequestObj *)createRequest((STscObj*)taos, NULL, NULL, TSDB_SQL_INSERT);
ASSERT_NE(request, nullptr);
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
ASSERT_NE(info, nullptr);
const char *sql[1] = {
"{'metric': 'pekoiw', 'timestamp': {'value': 1626006833639000000, 'type': 'ns'}, 'value': {'value': False, 'type': 'bool'}, 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {'value': 9223372036854775807, 'type': 'bigint'}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'binaryTagValue', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}}}",
};
int32_t ret = smlProcess(info, (char**)sql, sizeof(sql)/sizeof(sql[0]));
ASSERT_NE(ret, 0);
destroyRequest(request);
smlDestroyInfo(info);
}
TEST(testCase, sml_TD15742_Test) {
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(taos, nullptr);
TAOS_RES* pRes = taos_query(taos, "create database if not exists TD15742");
taos_free_result(pRes);
pRes = taos_query(taos, "use TD15742");
taos_free_result(pRes);
SRequestObj *request = (SRequestObj *)createRequest((STscObj*)taos, NULL, NULL, TSDB_SQL_INSERT);
ASSERT_NE(request, nullptr);
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
ASSERT_NE(info, nullptr);
const char *sql[] = {
"zgzbix 1626006833641 False id=zgzbix_992_38861 t0=t t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7=\"binaryTagValue\" t8=L\"ncharTagValue\"",
};
int ret = smlProcess(info, (char**)sql, sizeof(sql)/sizeof(sql[0]));
ASSERT_EQ(ret, 0);
destroyRequest(request);
smlDestroyInfo(info);
}

View File

@ -122,10 +122,14 @@ int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, con
dataLen = 0;
} else if (*pData == TSDB_DATA_TYPE_NCHAR) {
dataLen = varDataTLen(pData + CHAR_BYTES);
} else if (*pData == TSDB_DATA_TYPE_BIGINT || *pData == TSDB_DATA_TYPE_DOUBLE) {
dataLen = LONG_BYTES;
} else if (*pData == TSDB_DATA_TYPE_DOUBLE) {
dataLen = DOUBLE_BYTES;
} else if (*pData == TSDB_DATA_TYPE_BOOL) {
dataLen = CHAR_BYTES;
} else if (*pData == TSDB_DATA_TYPE_JSON) {
dataLen = kvRowLen(pData + CHAR_BYTES);
} else {
ASSERT(0);
}
dataLen += CHAR_BYTES;
}
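The JSON branch above sizes a cell from its leading type byte (nothing for NULL, CHAR_BYTES for bool, DOUBLE_BYTES for double, varDataTLen/kvRowLen for strings and nested rows) and then adds CHAR_BYTES for the type byte itself. A toy, self-contained illustration of such a type-prefixed layout (this is not TDengine's actual varData/kvRow encoding; the types and the valueLen helper are made up for the example):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy type-prefixed value: [1-byte type][payload]. The payload size depends on
 * the type byte: NULL has no payload, bool is 1 byte, double is 8, and strings
 * carry a 2-byte length header before their bytes. */
enum { T_NULL = 0, T_BOOL = 1, T_DOUBLE = 2, T_STR = 3 };

static uint32_t valueLen(const uint8_t *p) {
  uint32_t payload = 0;
  switch (p[0]) {
    case T_NULL:   payload = 0; break;
    case T_BOOL:   payload = 1; break;
    case T_DOUBLE: payload = 8; break;
    case T_STR: {
      uint16_t n;
      memcpy(&n, p + 1, sizeof(n));     /* 2-byte length header */
      payload = 2 + n;
      break;
    }
  }
  return 1 + payload;                   /* +1 for the type byte itself */
}

int main(void) {
  uint8_t  v[16] = { T_STR };
  uint16_t n = 4;
  memcpy(v + 1, &n, sizeof(n));
  memcpy(v + 3, "json", 4);
  printf("%u\n", valueLen(v));          /* prints 7: 1 type + 2 length + 4 data */
  return 0;
}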

View File

@ -40,11 +40,11 @@ bool tsPrintAuth = false;
// multi process
int32_t tsMultiProcess = 0;
int32_t tsMnodeShmSize = TSDB_MAX_WAL_SIZE * 2 + 128;
int32_t tsVnodeShmSize = TSDB_MAX_WAL_SIZE * 10 + 128;
int32_t tsQnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 128;
int32_t tsSnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 128;
int32_t tsBnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 128;
int32_t tsMnodeShmSize = TSDB_MAX_WAL_SIZE * 2 + 1024;
int32_t tsVnodeShmSize = TSDB_MAX_WAL_SIZE * 10 + 1024;
int32_t tsQnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 1024;
int32_t tsSnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 1024;
int32_t tsBnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 1024;
int32_t tsNumOfShmThreads = 1;
// queue & threads
@ -380,11 +380,11 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddBool(pCfg, "deadLockKillQuery", tsDeadLockKillQuery, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "multiProcess", tsMultiProcess, 0, 2, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "mnodeShmSize", tsMnodeShmSize, TSDB_MAX_WAL_SIZE + 128, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "vnodeShmSize", tsVnodeShmSize, TSDB_MAX_WAL_SIZE + 128, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "qnodeShmSize", tsQnodeShmSize, TSDB_MAX_WAL_SIZE + 128, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "snodeShmSize", tsSnodeShmSize, TSDB_MAX_WAL_SIZE + 128, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "bnodeShmSize", tsBnodeShmSize, TSDB_MAX_WAL_SIZE + 128, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "mnodeShmSize", tsMnodeShmSize, TSDB_MAX_WAL_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "vnodeShmSize", tsVnodeShmSize, TSDB_MAX_WAL_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "qnodeShmSize", tsQnodeShmSize, TSDB_MAX_WAL_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "snodeShmSize", tsSnodeShmSize, TSDB_MAX_WAL_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "bnodeShmSize", tsBnodeShmSize, TSDB_MAX_WAL_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "mumOfShmThreads", tsNumOfShmThreads, 1, 1024, 0) != 0) return -1;
tsNumOfRpcThreads = tsNumOfCores / 2;

View File

@ -2627,6 +2627,35 @@ int32_t tDeserializeSMDropTopicReq(void *buf, int32_t bufLen, SMDropTopicReq *pR
return 0;
}
int32_t tSerializeSMDropCgroupReq(void *buf, int32_t bufLen, SMDropCgroupReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->topic) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->cgroup) < 0) return -1;
if (tEncodeI8(&encoder, pReq->igNotExists) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSMDropCgroupReq(void *buf, int32_t bufLen, SMDropCgroupReq *pReq) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->topic) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->cgroup) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->igNotExists) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
int32_t tSerializeSCMCreateTopicReq(void *buf, int32_t bufLen, const SCMCreateTopicReq *pReq) {
int32_t sqlLen = 0;
int32_t astLen = 0;
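A hedged sketch of how a caller might round-trip the new SMDropCgroupReq through these two functions. The size-only first pass with a NULL buffer mirrors the tSerializeSMEpSet(NULL, 0, ...) call later in this commit; dropCgroupRoundTrip is a hypothetical helper, and the TDengine headers declaring SMDropCgroupReq and taosMemoryCalloc/taosMemoryFree (plus <stdio.h> for snprintf) are assumed to be included:

// Hypothetical caller; assumes the headers noted above are available.
static int32_t dropCgroupRoundTrip(void) {
  SMDropCgroupReq req = {0};
  snprintf(req.topic, sizeof(req.topic), "%s", "tp1");
  snprintf(req.cgroup, sizeof(req.cgroup), "%s", "cg1");
  req.igNotExists = 1;

  int32_t len = tSerializeSMDropCgroupReq(NULL, 0, &req);   // size-only pass
  if (len < 0) return -1;
  void *buf = taosMemoryCalloc(1, len);
  if (buf == NULL) return -1;
  tSerializeSMDropCgroupReq(buf, len, &req);                // encode into buf

  SMDropCgroupReq out = {0};
  int32_t code = tDeserializeSMDropCgroupReq(buf, len, &out);
  taosMemoryFree(buf);
  return code;                                              // 0 on success
}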

View File

@ -22,21 +22,19 @@
static inline void vmSendRsp(SRpcMsg *pMsg, int32_t code) {
SRpcMsg rsp = {
.code = code,
.info = pMsg->info,
.pCont = pMsg->info.rsp,
.contLen = pMsg->info.rspLen,
.info = pMsg->info,
};
tmsgSendRsp(&rsp);
}
static void vmProcessMgmtMonitorQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
SVnodeMgmt *pMgmt = pInfo->ahandle;
int32_t code = -1;
tmsg_t msgType = pMsg->msgType;
dTrace("msg:%p, get from vnode queue, type:%s", pMsg, TMSG_INFO(msgType));
dTrace("msg:%p, get from vnode queue, type:%s", pMsg, TMSG_INFO(pMsg->msgType));
switch (msgType) {
switch (pMsg->msgType) {
case TDMT_MON_VM_INFO:
code = vmProcessGetMonitorInfoReq(pMgmt, pMsg);
break;
@ -54,7 +52,7 @@ static void vmProcessMgmtMonitorQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
dError("msg:%p, not processed in vnode queue", pMsg);
}
if (msgType & 1u) {
if (IsReq(pMsg)) {
if (code != 0 && terrno != 0) code = terrno;
vmSendRsp(pMsg, code);
}
@ -96,7 +94,6 @@ static void vmProcessFetchQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
SVnodeObj *pVnode = pInfo->ahandle;
SArray *pArray = taosArrayInit(numOfMsgs, sizeof(SRpcMsg *));
if (pArray == NULL) {
dError("failed to process %d msgs in write-queue since %s", numOfMsgs, terrstr());
@ -116,7 +113,7 @@ static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
for (int i = 0; i < taosArrayGetSize(pArray); i++) {
SRpcMsg *pMsg = *(SRpcMsg **)taosArrayGet(pArray, i);
SRpcMsg rsp = {.info = pMsg->info, .pCont = NULL, .contLen = 0};
SRpcMsg rsp = {.info = pMsg->info};
int32_t ret = syncPropose(vnodeGetSyncHandle(pVnode->pImpl), pMsg, false);
if (ret == TAOS_SYNC_PROPOSE_NOT_LEADER) {
@ -126,12 +123,10 @@ static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
syncGetEpSet(vnodeGetSyncHandle(pVnode->pImpl), &newEpSet);
newEpSet.inUse = (newEpSet.inUse + 1) % newEpSet.numOfEps;
tmsgSendRedirectRsp(&rsp, &newEpSet);
} else if (ret == TAOS_SYNC_PROPOSE_OTHER_ERROR) {
rsp.code = TSDB_CODE_SYN_INTERNAL_ERROR;
tmsgSendRsp(&rsp);
} else if (ret == TAOS_SYNC_PROPOSE_SUCCESS) {
// ok
// send response in applyQ
} else {
assert(0);
@ -150,16 +145,13 @@ static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
static void vmProcessApplyQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
SVnodeObj *pVnode = pInfo->ahandle;
SRpcMsg *pMsg = NULL;
SRpcMsg rsp;
for (int32_t i = 0; i < numOfMsgs; ++i) {
SRpcMsg *pMsg = NULL;
taosGetQitem(qall, (void **)&pMsg);
// init response rpc msg
rsp.code = 0;
rsp.pCont = NULL;
rsp.contLen = 0;
SRpcMsg rsp = {0};
// get original rpc msg
assert(pMsg->msgType == TDMT_VND_SYNC_APPLY_MSG);
@ -178,7 +170,6 @@ static void vmProcessApplyQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
rpcFreeCont(originalRpcMsg.pCont);
// if leader, send response
// if (pMsg->rpcMsg.handle != NULL && pMsg->rpcMsg.ahandle != NULL) {
if (pMsg->info.handle != NULL) {
rsp.info = pMsg->info;
tmsgSendRsp(&rsp);
@ -191,21 +182,19 @@ static void vmProcessApplyQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
SVnodeObj *pVnode = pInfo->ahandle;
SRpcMsg *pMsg = NULL;
for (int32_t i = 0; i < numOfMsgs; ++i) {
SRpcMsg *pMsg = NULL;
taosGetQitem(qall, (void **)&pMsg);
// todo
SRpcMsg *pRsp = NULL;
int32_t ret = vnodeProcessSyncReq(pVnode->pImpl, pMsg, &pRsp);
if (ret != 0) {
// if leader, send response
int32_t code = vnodeProcessSyncReq(pVnode->pImpl, pMsg, NULL);
if (code != 0) {
if (pMsg->info.handle != NULL) {
SRpcMsg rsp = {0};
rsp.code = terrno;
rsp.info = pMsg->info;
dTrace("msg:%p, process sync queue error since code:%s", pMsg, terrstr());
SRpcMsg rsp = {
.code = (terrno < 0) ? terrno : code,
.info = pMsg->info,
};
dTrace("msg:%p, failed to process sync queue since %s", pMsg, terrstr());
tmsgSendRsp(&rsp);
}
}
@ -217,9 +206,9 @@ static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOf
static void vmProcessMergeQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
SVnodeObj *pVnode = pInfo->ahandle;
SRpcMsg *pMsg = NULL;
for (int32_t i = 0; i < numOfMsgs; ++i) {
SRpcMsg *pMsg = NULL;
taosGetQitem(qall, (void **)&pMsg);
dTrace("msg:%p, get from vnode-merge queue", pMsg);
@ -309,7 +298,6 @@ int32_t vmPutNodeMsgToMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
int32_t vmPutNodeMsgToMonitorQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
SSingleWorker *pWorker = &pMgmt->monitorWorker;
dTrace("msg:%p, put into vnode-monitor worker, type:%s", pMsg, TMSG_INFO(pMsg->msgType));
taosWriteQitem(pWorker->queue, pMsg);
return 0;
@ -317,14 +305,17 @@ int32_t vmPutNodeMsgToMonitorQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
static int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pRpc, EQueueType qtype) {
SMsgHead *pHead = pRpc->pCont;
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, pHead->vgId);
if (pVnode == NULL) return -1;
SRpcMsg *pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM);
int32_t code = 0;
if (pMsg != NULL) {
if (pMsg == NULL) {
rpcFreeCont(pRpc->pCont);
pRpc->pCont = NULL;
code = -1;
} else {
memcpy(pMsg, pRpc, sizeof(SRpcMsg));
switch (qtype) {
case WRITE_QUEUE:
@ -429,7 +420,7 @@ int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
return -1;
}
dDebug("vgId:%d, vnode queue is alloced", pVnode->vgId);
dDebug("vgId:%d, queue is alloced", pVnode->vgId);
return 0;
}
@ -446,7 +437,7 @@ void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
pVnode->pQueryQ = NULL;
pVnode->pFetchQ = NULL;
pVnode->pMergeQ = NULL;
dDebug("vgId:%d, vnode queue is freed", pVnode->vgId);
dDebug("vgId:%d, queue is freed", pVnode->vgId);
}
int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
@ -497,7 +488,7 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
.param = pMgmt,
};
if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) {
dError("failed to start mnode vnode-monitor worker since %s", terrstr());
dError("failed to start vnode-monitor worker since %s", terrstr());
return -1;
}

View File

@ -161,6 +161,7 @@ void dmGetVnodeLoads(SMonVloadInfo *pInfo) {
void dmGetMnodeLoads(SMonMloadInfo *pInfo) {
SDnode *pDnode = dmInstance();
SMgmtWrapper *pWrapper = &pDnode->wrappers[MNODE];
if (dmMarkWrapper(pWrapper) == 0) {
if (tsMultiProcess) {
dmSendLocalRecv(pDnode, TDMT_MON_MM_LOAD, tDeserializeSMonMloadInfo, pInfo);
} else if (pWrapper->pMgmt != NULL) {
@ -168,3 +169,4 @@ void dmGetMnodeLoads(SMonMloadInfo *pInfo) {
}
dmReleaseWrapper(pWrapper);
}
}

View File

@ -103,7 +103,7 @@ static inline int32_t dmPushToProcQueue(SProc *proc, SProcQueue *queue, SRpcMsg
return -1;
}
if (ftype == DND_FUNC_REQ && IsReq(pMsg) && pMsg->code == 0 && handle != 0) {
if (ftype == DND_FUNC_REQ && IsReq(pMsg) && pMsg->code == 0 && handle != 0 && pMsg->info.noResp == 0) {
if (taosHashPut(proc->hash, &handle, sizeof(int64_t), &pMsg->info, sizeof(SRpcConnInfo)) != 0) {
taosThreadMutexUnlock(&queue->mutex);
return -1;

View File

@ -200,47 +200,6 @@ int32_t dmInitMsgHandle(SDnode *pDnode) {
return 0;
}
static void dmSendRpcRedirectRsp(const SRpcMsg *pMsg) {
SDnode *pDnode = dmInstance();
SEpSet epSet = {0};
dmGetMnodeEpSet(&pDnode->data, &epSet);
dDebug("RPC %p, req is redirected, num:%d use:%d", pMsg->info.handle, epSet.numOfEps, epSet.inUse);
for (int32_t i = 0; i < epSet.numOfEps; ++i) {
dDebug("mnode index:%d %s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port);
if (strcmp(epSet.eps[i].fqdn, tsLocalFqdn) == 0 && epSet.eps[i].port == tsServerPort) {
epSet.inUse = (i + 1) % epSet.numOfEps;
}
epSet.eps[i].port = htons(epSet.eps[i].port);
}
SMEpSet msg = {.epSet = epSet};
int32_t len = tSerializeSMEpSet(NULL, 0, &msg);
SRpcMsg rsp = {
.code = TSDB_CODE_RPC_REDIRECT,
.info = pMsg->info,
.contLen = len,
};
rsp.pCont = rpcMallocCont(len);
tSerializeSMEpSet(rsp.pCont, len, &msg);
rpcSendResponse(&rsp);
rpcFreeCont(pMsg->pCont);
}
static inline void dmSendRecv(SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp) {
SDnode *pDnode = dmInstance();
if (pDnode->status != DND_STAT_RUNNING) {
pRsp->code = TSDB_CODE_NODE_OFFLINE;
rpcFreeCont(pReq->pCont);
pReq->pCont = NULL;
} else {
rpcSendRecv(pDnode->trans.clientRpc, pEpSet, pReq, pRsp);
}
}
static inline int32_t dmSendReq(const SEpSet *pEpSet, SRpcMsg *pMsg) {
SDnode *pDnode = dmInstance();
if (pDnode->status != DND_STAT_RUNNING) {
@ -257,39 +216,38 @@ static inline int32_t dmSendReq(const SEpSet *pEpSet, SRpcMsg *pMsg) {
static inline void dmSendRsp(SRpcMsg *pMsg) {
SMgmtWrapper *pWrapper = pMsg->info.wrapper;
if (pMsg->code == TSDB_CODE_NODE_REDIRECT) {
dmSendRpcRedirectRsp(pMsg);
} else {
if (InChildProc(pWrapper)) {
dmPutToProcPQueue(&pWrapper->proc, pMsg, DND_FUNC_RSP);
rpcFreeCont(pMsg->pCont);
pMsg->pCont = NULL;
} else {
rpcSendResponse(pMsg);
}
}
}
static inline void dmSendRedirectRsp(SRpcMsg *pMsg, const SEpSet *pNewEpSet) {
SMgmtWrapper *pWrapper = pMsg->info.wrapper;
if (InChildProc(pWrapper)) {
dmPutToProcPQueue(&pWrapper->proc, pMsg, DND_FUNC_RSP);
} else {
SRpcMsg rsp = {0};
SRpcMsg rsp = {.code = TSDB_CODE_RPC_REDIRECT, .info = pMsg->info};
SMEpSet msg = {.epSet = *pNewEpSet};
int32_t len = tSerializeSMEpSet(NULL, 0, &msg);
rsp.pCont = rpcMallocCont(len);
rsp.contLen = len;
tSerializeSMEpSet(rsp.pCont, len, &msg);
int32_t contLen = tSerializeSMEpSet(NULL, 0, &msg);
rsp.code = TSDB_CODE_RPC_REDIRECT;
rsp.info = pMsg->info;
rpcSendResponse(&rsp);
rsp.pCont = rpcMallocCont(contLen);
if (rsp.pCont == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
} else {
tSerializeSMEpSet(rsp.pCont, contLen, &msg);
rsp.contLen = contLen;
}
dmSendRsp(&rsp);
rpcFreeCont(pMsg->pCont);
pMsg->pCont = NULL;
}
static inline void dmRegisterBrokenLinkArg(SRpcMsg *pMsg) {
SMgmtWrapper *pWrapper = pMsg->info.wrapper;
if (InChildProc(pWrapper)) {
dmPutToProcPQueue(&pWrapper->proc, pMsg, DND_FUNC_REGIST);
rpcFreeCont(pMsg->pCont);
pMsg->pCont = NULL;
} else {
rpcRegisterBrokenLinkArg(pMsg);
}
@ -318,15 +276,9 @@ int32_t dmInitClient(SDnode *pDnode) {
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.user = INTERNAL_USER;
rpcInit.ckey = INTERNAL_CKEY;
rpcInit.spi = 1;
rpcInit.parent = pDnode;
rpcInit.rfp = rpcRfp;
char pass[TSDB_PASSWORD_LEN + 1] = {0};
taosEncryptPass_c((uint8_t *)(INTERNAL_SECRET), strlen(INTERNAL_SECRET), pass);
rpcInit.secret = pass;
pTrans->clientRpc = rpcOpen(&rpcInit);
if (pTrans->clientRpc == NULL) {
dError("failed to init dnode rpc client");
@ -391,3 +343,34 @@ SMsgCb dmGetMsgcb(SDnode *pDnode) {
};
return msgCb;
}
static void dmSendMnodeRedirectRsp(SRpcMsg *pMsg) {
SDnode *pDnode = dmInstance();
SEpSet epSet = {0};
dmGetMnodeEpSet(&pDnode->data, &epSet);
dDebug("msg:%p, is redirected, num:%d use:%d", pMsg, epSet.numOfEps, epSet.inUse);
for (int32_t i = 0; i < epSet.numOfEps; ++i) {
dDebug("mnode index:%d %s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port);
if (strcmp(epSet.eps[i].fqdn, tsLocalFqdn) == 0 && epSet.eps[i].port == tsServerPort) {
epSet.inUse = (i + 1) % epSet.numOfEps;
}
epSet.eps[i].port = htons(epSet.eps[i].port);
}
SRpcMsg rsp = {.code = TSDB_CODE_RPC_REDIRECT, .info = pMsg->info};
SMEpSet msg = {.epSet = epSet};
int32_t contLen = tSerializeSMEpSet(NULL, 0, &msg);
rsp.pCont = rpcMallocCont(contLen);
if (rsp.pCont == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
} else {
tSerializeSMEpSet(rsp.pCont, contLen, &msg);
rsp.contLen = contLen;
}
dmSendRsp(&rsp);
rpcFreeCont(pMsg->pCont);
pMsg->pCont = NULL;
}
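Both redirect paths above use the same two-pass serialization: ask tSerializeSMEpSet for the length with a NULL buffer, allocate, then fill. A hedged stand-alone reduction of that pattern, with a toy serializer in place of SMEpSet:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy serializer: copies an endpoint string, standing in for tSerializeSMEpSet. */
    static int32_t serializeEp(void *buf, int32_t bufLen, const char *ep) {
      int32_t need = (int32_t)strlen(ep) + 1;
      if (buf == NULL) return need;              /* first pass: report the required size only */
      if (bufLen < need) return -1;
      memcpy(buf, ep, need);
      return need;
    }

    /* Build a redirect payload: size pass, allocate, fill pass; NULL means out of memory. */
    static void *buildRedirectCont(const char *ep, int32_t *contLen) {
      int32_t len = serializeEp(NULL, 0, ep);
      void   *pCont = malloc(len);
      if (pCont == NULL) { *contLen = 0; return NULL; }  /* mirrors the TSDB_CODE_OUT_OF_MEMORY branch */
      serializeEp(pCont, len, ep);
      *contLen = len;
      return pCont;                              /* caller attaches it to the response and frees it later */
    }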

View File

@ -48,10 +48,10 @@ void TestClient::DoInit() {
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.idleTime = 30 * 1000;
rpcInit.user = (char*)this->user;
rpcInit.ckey = (char*)"key";
// rpcInit.ckey = (char*)"key";
rpcInit.parent = this;
rpcInit.secret = (char*)secretEncrypt;
rpcInit.spi = 1;
// rpcInit.secret = (char*)secretEncrypt;
// rpcInit.spi = 1;
clientRpc = rpcOpen(&rpcInit);
ASSERT(clientRpc);

View File

@ -45,20 +45,20 @@ typedef struct SVnodeCfg SVnodeCfg;
extern const SVnodeCfg vnodeCfgDefault;
int vnodeInit(int nthreads);
int32_t vnodeInit(int32_t nthreads);
void vnodeCleanup();
int vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs);
int32_t vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs);
void vnodeDestroy(const char *path, STfs *pTfs);
SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb);
void vnodeClose(SVnode *pVnode);
int vnodePreprocessWriteReqs(SVnode *pVnode, SArray *pMsgs, int64_t *version);
int vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg *pRsp);
int vnodeProcessCMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp);
int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp);
int vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg);
int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
int32_t vnodePreprocessWriteReqs(SVnode *pVnode, SArray *pMsgs, int64_t *version);
int32_t vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg *pRsp);
int32_t vnodeProcessCMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp);
int32_t vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp);
int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg);
int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad);
int vnodeValidateTableHash(SVnode *pVnode, char *tableFName);
int32_t vnodeValidateTableHash(SVnode *pVnode, char *tableFName);
int32_t vnodeStart(SVnode *pVnode);
void vnodeStop(SVnode *pVnode);
@ -74,8 +74,8 @@ typedef struct SMetaEntry SMetaEntry;
void metaReaderInit(SMetaReader *pReader, SMeta *pMeta, int32_t flags);
void metaReaderClear(SMetaReader *pReader);
int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid);
int metaReadNext(SMetaReader *pReader);
int32_t metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid);
int32_t metaReadNext(SMetaReader *pReader);
const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t cid);
#if 1 // refact APIs below (TODO)
@ -86,7 +86,7 @@ typedef struct SMTbCursor SMTbCursor;
SMTbCursor *metaOpenTbCursor(SMeta *pMeta);
void metaCloseTbCursor(SMTbCursor *pTbCur);
int metaTbCursorNext(SMTbCursor *pTbCur);
int32_t metaTbCursorNext(SMTbCursor *pTbCur);
#endif
// tsdb
@ -124,8 +124,10 @@ typedef struct STqReadHandle STqReadHandle;
STqReadHandle *tqInitSubmitMsgScanner(SMeta *pMeta);
void tqReadHandleSetColIdList(STqReadHandle *pReadHandle, SArray *pColIdList);
int tqReadHandleSetTbUidList(STqReadHandle *pHandle, const SArray *tbUidList);
int tqReadHandleAddTbUidList(STqReadHandle *pHandle, const SArray *tbUidList);
int32_t tqReadHandleSetTbUidList(STqReadHandle *pHandle, const SArray *tbUidList);
int32_t tqReadHandleAddTbUidList(STqReadHandle *pHandle, const SArray *tbUidList);
int32_t tqReadHandleRemoveTbUidList(STqReadHandle* pHandle, const SArray* tbUidList);
int32_t tqReadHandleSetMsg(STqReadHandle *pHandle, SSubmitReq *pMsg, int64_t ver);
bool tqNextDataBlock(STqReadHandle *pHandle);
bool tqNextDataBlockFilterOut(STqReadHandle *pHandle, SHashObj *filterOutUids);
@ -207,15 +209,15 @@ struct SMetaReader {
SDecoder coder;
SMetaEntry me;
void *pBuf;
int szBuf;
int32_t szBuf;
};
struct SMTbCursor {
TBC *pDbc;
void *pKey;
void *pVal;
int kLen;
int vLen;
int32_t kLen;
int32_t vLen;
SMetaReader mr;
};

View File

@ -24,7 +24,6 @@
extern "C" {
#endif
// vnodeDebug ====================
// clang-format off
#define vFatal(...) do { if (vDebugFlag & DEBUG_FATAL) { taosPrintLog("VND FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
#define vError(...) do { if (vDebugFlag & DEBUG_ERROR) { taosPrintLog("VND ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)
@ -34,17 +33,17 @@ extern "C" {
#define vTrace(...) do { if (vDebugFlag & DEBUG_TRACE) { taosPrintLog("VND ", DEBUG_TRACE, vDebugFlag, __VA_ARGS__); }} while(0)
// clang-format on
// vnodeCfg ====================
// vnodeCfg.c
extern const SVnodeCfg vnodeCfgDefault;
int vnodeCheckCfg(const SVnodeCfg*);
int vnodeEncodeConfig(const void* pObj, SJson* pJson);
int vnodeDecodeConfig(const SJson* pJson, void* pObj);
int32_t vnodeCheckCfg(const SVnodeCfg*);
int32_t vnodeEncodeConfig(const void* pObj, SJson* pJson);
int32_t vnodeDecodeConfig(const SJson* pJson, void* pObj);
// vnodeModule ====================
int vnodeScheduleTask(int (*execute)(void*), void* arg);
// vnodeModule.c
int32_t vnodeScheduleTask(int32_t (*execute)(void*), void* arg);
// vnodeBufPool ====================
// vnodeBufPool.c
typedef struct SVBufPoolNode SVBufPoolNode;
struct SVBufPoolNode {
SVBufPoolNode* prev;
@ -62,38 +61,29 @@ struct SVBufPool {
SVBufPoolNode node;
};
int vnodeOpenBufPool(SVnode* pVnode, int64_t size);
int vnodeCloseBufPool(SVnode* pVnode);
int32_t vnodeOpenBufPool(SVnode* pVnode, int64_t size);
int32_t vnodeCloseBufPool(SVnode* pVnode);
void vnodeBufPoolReset(SVBufPool* pPool);
// vnodeQuery ====================
int vnodeQueryOpen(SVnode* pVnode);
// vnodeQuery.c
int32_t vnodeQueryOpen(SVnode* pVnode);
void vnodeQueryClose(SVnode* pVnode);
int vnodeGetTableMeta(SVnode* pVnode, SRpcMsg* pMsg);
int32_t vnodeGetTableMeta(SVnode* pVnode, SRpcMsg* pMsg);
// vnodeCommit ====================
int vnodeBegin(SVnode* pVnode);
int vnodeShouldCommit(SVnode* pVnode);
int vnodeCommit(SVnode* pVnode);
int vnodeSaveInfo(const char* dir, const SVnodeInfo* pCfg);
int vnodeCommitInfo(const char* dir, const SVnodeInfo* pInfo);
int vnodeLoadInfo(const char* dir, SVnodeInfo* pInfo);
int vnodeSyncCommit(SVnode* pVnode);
int vnodeAsyncCommit(SVnode* pVnode);
// vnodeCommit.c
int32_t vnodeBegin(SVnode* pVnode);
int32_t vnodeShouldCommit(SVnode* pVnode);
int32_t vnodeCommit(SVnode* pVnode);
int32_t vnodeSaveInfo(const char* dir, const SVnodeInfo* pCfg);
int32_t vnodeCommitInfo(const char* dir, const SVnodeInfo* pInfo);
int32_t vnodeLoadInfo(const char* dir, SVnodeInfo* pInfo);
int32_t vnodeSyncCommit(SVnode* pVnode);
int32_t vnodeAsyncCommit(SVnode* pVnode);
// vnodeCommit ====================
// vnodeSync.c
int32_t vnodeSyncOpen(SVnode* pVnode, char* path);
int32_t vnodeSyncStart(SVnode* pVnode);
void vnodeSyncStart(SVnode* pVnode);
void vnodeSyncClose(SVnode* pVnode);
void vnodeSyncSetQ(SVnode* pVnode, void* qHandle);
void vnodeSyncSetRpc(SVnode* pVnode, void* rpcHandle);
int32_t vnodeSyncEqMsg(void* qHandle, SRpcMsg* pMsg);
int32_t vnodeSendMsg(void* rpcHandle, const SEpSet* pEpSet, SRpcMsg* pMsg);
void vnodeSyncCommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
void vnodeSyncPreCommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
void vnodeSyncRollBackCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
int32_t vnodeSyncGetSnapshotCb(struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
SSyncFSM* syncVnodeMakeFsm();
#ifdef __cplusplus
}

View File

@ -56,8 +56,8 @@ int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) {
if (tDecodeCStr(pCoder, &pME->name) < 0) return -1;
if (pME->type == TSDB_SUPER_TABLE) {
if (tDecodeSSchemaWrapperEx(pCoder, &pME->stbEntry.schema) < 0) return -1;
if (tDecodeSSchemaWrapperEx(pCoder, &pME->stbEntry.schemaTag) < 0) return -1;
if (tDecodeSSchemaWrapper(pCoder, &pME->stbEntry.schema) < 0) return -1;
if (tDecodeSSchemaWrapper(pCoder, &pME->stbEntry.schemaTag) < 0) return -1;
} else if (pME->type == TSDB_CHILD_TABLE) {
if (tDecodeI64(pCoder, &pME->ctbEntry.ctime) < 0) return -1;
if (tDecodeI32(pCoder, &pME->ctbEntry.ttlDays) < 0) return -1;
@ -67,7 +67,7 @@ int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) {
if (tDecodeI64(pCoder, &pME->ntbEntry.ctime) < 0) return -1;
if (tDecodeI32(pCoder, &pME->ntbEntry.ttlDays) < 0) return -1;
if (tDecodeI32v(pCoder, &pME->ntbEntry.ncid) < 0) return -1;
if (tDecodeSSchemaWrapperEx(pCoder, &pME->ntbEntry.schema) < 0) return -1;
if (tDecodeSSchemaWrapper(pCoder, &pME->ntbEntry.schema) < 0) return -1;
} else if (pME->type == TSDB_TSMA_TABLE) {
pME->smaEntry.tsma = tDecoderMalloc(pCoder, sizeof(STSma));
if (!pME->smaEntry.tsma) {

View File

@ -112,21 +112,20 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
if (pIter == NULL) break;
pExec = (STqExec*)pIter;
if (pExec->subType == TOPIC_SUB_TYPE__DB) {
if (isAdd) {
continue;
} else {
if (!isAdd) {
int32_t sz = taosArrayGetSize(tbUidList);
for (int32_t i = 0; i < sz; i++) {
int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
taosHashPut(pExec->pDropTbUid, &tbUid, sizeof(int64_t), NULL, 0);
}
}
}
} else {
for (int32_t i = 0; i < 5; i++) {
int32_t code = qUpdateQualifiedTableId(pExec->task[i], tbUidList, isAdd);
ASSERT(code == 0);
}
}
}
return 0;
}
@ -1059,6 +1058,57 @@ int32_t tqProcessStreamTrigger(STQ* pTq, void* data, int32_t dataLen, int32_t wo
return 0;
}
int32_t tqProcessStreamTriggerNew(STQ* pTq, SSubmitReq* data) {
SStreamDataSubmit* pSubmit = NULL;
// build data
pSubmit = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM);
if (pSubmit == NULL) return -1;
pSubmit->dataRef = taosMemoryMalloc(sizeof(int32_t));
if (pSubmit->dataRef == NULL) goto FAIL;
*pSubmit->dataRef = 1;
pSubmit->data = data;
pSubmit->type = STREAM_INPUT__DATA_BLOCK;
void* pIter = NULL;
while (1) {
pIter = taosHashIterate(pTq->pStreamTasks, pIter);
if (pIter == NULL) break;
SStreamTask* pTask = (SStreamTask*)pIter;
if (pTask->inputType == TASK_INPUT_TYPE__SUMBIT_BLOCK) {
streamEnqueueDataSubmit(pTask, pSubmit);
// TODO cal back pressure
}
// check whether the task needs to be launched
int8_t execStatus = atomic_load_8(&pTask->status);
if (execStatus == TASK_STATUS__IDLE || execStatus == TASK_STATUS__CLOSING) {
SStreamTaskRunReq* pReq = taosMemoryMalloc(sizeof(SStreamTaskRunReq));
if (pReq == NULL) continue;
// TODO: do we need htonl?
pReq->head.vgId = pTq->pVnode->config.vgId;
pReq->streamId = pTask->streamId;
pReq->taskId = pTask->taskId;
SRpcMsg msg = {
.msgType = 0,
.pCont = pReq,
.contLen = sizeof(SStreamTaskRunReq),
};
tmsgPutToQueue(&pTq->pVnode->msgCb, FETCH_QUEUE, &msg);
}
}
streamDataSubmitRefDec(pSubmit);
return 0;
FAIL:
if (pSubmit) {
if (pSubmit->dataRef) {
taosMemoryFree(pSubmit->dataRef);
}
taosFreeQitem(pSubmit);
}
return -1;
}
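The dataRef counter above follows the usual shared-payload rule: the producer starts at one, each consumer holds its own reference, and whoever drops the last reference frees the buffer. A stand-alone sketch with C11 atomics; the Submit type is a simplified stand-in for SStreamDataSubmit and the payload is assumed to be heap-allocated:

    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct {             /* simplified stand-in for SStreamDataSubmit */
      atomic_int *dataRef;
      void       *data;
    } Submit;

    static void submitRefInc(Submit *s) { atomic_fetch_add(s->dataRef, 1); }

    static void submitRefDec(Submit *s) {
      if (atomic_fetch_sub(s->dataRef, 1) == 1) {  /* this was the last reference */
        free(s->data);
        free(s->dataRef);
        free(s);
      }
    }

    /* Producer side: start at 1, take one extra reference per consumer, then drop the initial one. */
    static void dispatch(void *payload, int nTasks) {
      Submit *s = calloc(1, sizeof(Submit));
      if (s == NULL) return;                       /* payload ownership stays with the caller */
      s->dataRef = malloc(sizeof(atomic_int));
      if (s->dataRef == NULL) { free(s); return; }
      atomic_init(s->dataRef, 1);
      s->data = payload;
      for (int i = 0; i < nTasks; i++) submitRefInc(s);  /* each consumer later calls submitRefDec */
      submitRefDec(s);                             /* balance the producer's initial reference */
    }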
int32_t tqProcessTaskExec(STQ* pTq, char* msg, int32_t msgLen, int32_t workerId) {
SStreamTaskExecReq req;
tDecodeSStreamTaskExecReq(msg, &req);

View File

@ -34,21 +34,11 @@ STqReadHandle* tqInitSubmitMsgScanner(SMeta* pMeta) {
int32_t tqReadHandleSetMsg(STqReadHandle* pReadHandle, SSubmitReq* pMsg, int64_t ver) {
pReadHandle->pMsg = pMsg;
// pMsg->length = htonl(pMsg->length);
// pMsg->numOfBlocks = htonl(pMsg->numOfBlocks);
// iterate and convert
if (tInitSubmitMsgIter(pMsg, &pReadHandle->msgIter) < 0) return -1;
while (true) {
if (tGetSubmitMsgNext(&pReadHandle->msgIter, &pReadHandle->pBlock) < 0) return -1;
if (pReadHandle->pBlock == NULL) break;
// pReadHandle->pBlock->uid = htobe64(pReadHandle->pBlock->uid);
// pReadHandle->pBlock->suid = htobe64(pReadHandle->pBlock->suid);
// pReadHandle->pBlock->sversion = htonl(pReadHandle->pBlock->sversion);
// pReadHandle->pBlock->dataLen = htonl(pReadHandle->pBlock->dataLen);
// pReadHandle->pBlock->schemaLen = htonl(pReadHandle->pBlock->schemaLen);
// pReadHandle->pBlock->numOfRows = htons(pReadHandle->pBlock->numOfRows);
}
if (tInitSubmitMsgIter(pMsg, &pReadHandle->msgIter) < 0) return -1;
@ -241,3 +231,14 @@ int tqReadHandleAddTbUidList(STqReadHandle* pHandle, const SArray* tbUidList) {
return 0;
}
int tqReadHandleRemoveTbUidList(STqReadHandle* pHandle, const SArray* tbUidList) {
ASSERT(pHandle->tbIdHash != NULL);
for(int32_t i = 0; i < taosArrayGetSize(tbUidList); i++) {
int64_t* pKey = (int64_t*) taosArrayGet(tbUidList, i);
taosHashRemove(pHandle->tbIdHash, pKey, sizeof(int64_t));
}
return 0;
}

View File

@ -425,6 +425,12 @@ static STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, SQueryTableDataCond*
rowLen += pCond->colList[i].bytes;
}
// make sure the output SSDataBlock size is less than 2MB.
int32_t TWOMB = 2 * 1024 * 1024;
if (pReadHandle->outputCapacity * rowLen > TWOMB) {
pReadHandle->outputCapacity = TWOMB / rowLen;
}
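/* For example, assuming outputCapacity = 4096 rows and rowLen = 260 bytes, 4096 * 260 = 1,064,960
 * bytes stays under 2MB and the capacity is unchanged; with rowLen = 600 bytes it would be capped
 * at 2097152 / 600 = 3495 rows. The numbers here are illustrative only. */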
// allocate buffer in order to load data blocks from file
pReadHandle->suppInfo.pstatis = taosMemoryCalloc(pCond->numOfCols, sizeof(SColumnDataAgg));
if (pReadHandle->suppInfo.pstatis == NULL) {
@ -1302,20 +1308,22 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*
if ((ascScan && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) ||
(!ascScan && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) {
if ((ascScan && (key != TSKEY_INITIAL_VAL && key < binfo.window.skey)) ||
(!ascScan && (key != TSKEY_INITIAL_VAL && key > binfo.window.ekey))) {
bool cacheDataInFileBlockHole = (ascScan && (key != TSKEY_INITIAL_VAL && key < binfo.window.skey)) ||
(!ascScan && (key != TSKEY_INITIAL_VAL && key > binfo.window.ekey));
if (cacheDataInFileBlockHole) {
// do not load file block into buffer
int32_t step = ascScan ? 1 : -1;
TSKEY maxKey =
ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? (binfo.window.skey - step) : (binfo.window.ekey - step);
TSKEY maxKey = ascScan ? (binfo.window.skey - step) : (binfo.window.ekey - step);
cur->rows =
tsdbReadRowsFromCache(pCheckInfo, maxKey, pTsdbReadHandle->outputCapacity, &cur->win, pTsdbReadHandle);
pTsdbReadHandle->realNumOfRows = cur->rows;
// update the last key value
pCheckInfo->lastKey = cur->win.ekey + step;
if (!ASCENDING_TRAVERSE(pTsdbReadHandle->order)) {
if (!ascScan) {
TSWAP(cur->win.skey, cur->win.ekey);
}
@ -1334,14 +1342,12 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*
/*
* no data in cache, only load data from file
* during the query processing, data in cache will not be checked anymore.
*
* Here the buffer is not enough, so only part of file block can be loaded into memory buffer
*/
assert(pTsdbReadHandle->outputCapacity >= binfo.rows);
int32_t endPos = getEndPosInDataBlock(pTsdbReadHandle, &binfo);
if ((cur->pos == 0 && endPos == binfo.rows - 1 && ascScan) ||
(cur->pos == (binfo.rows - 1) && endPos == 0 && (!ascScan))) {
bool wholeBlockReturned = ((abs(cur->pos - endPos) + 1) == binfo.rows);
if (wholeBlockReturned) {
pTsdbReadHandle->realNumOfRows = binfo.rows;
cur->rows = binfo.rows;
@ -1357,11 +1363,23 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*
cur->pos = -1;
}
} else { // partially copy to dest buffer
// make sure to only load once
bool firstTimeExtract = ((cur->pos == 0 && ascScan) || (cur->pos == binfo.rows -1 && (!ascScan)));
if (pTsdbReadHandle->outputCapacity < binfo.rows && firstTimeExtract) {
code = doLoadFileDataBlock(pTsdbReadHandle, pBlock, pCheckInfo, cur->slot);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
copyAllRemainRowsFromFileBlock(pTsdbReadHandle, pCheckInfo, &binfo, endPos);
cur->mixBlock = true;
}
assert(cur->blockCompleted);
if (pTsdbReadHandle->outputCapacity >= binfo.rows) {
ASSERT(cur->blockCompleted);
}
if (cur->rows == binfo.rows) {
tsdbDebug("%p whole file block qualified, brange:%" PRId64 "-%" PRId64 ", rows:%d, lastKey:%" PRId64 ", %s",
pTsdbReadHandle, cur->win.skey, cur->win.ekey, cur->rows, cur->lastKey, pTsdbReadHandle->idStr);
@ -1858,15 +1876,14 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa
SDataCols* pCols = pTsdbReadHandle->rhelper.pDCols[0];
TSKEY* tsArray = pCols->cols[0].pData;
int32_t step = ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? 1 : -1;
int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pTsdbReadHandle));
bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);
int32_t pos = cur->pos;
int32_t step = ascScan? 1 : -1;
int32_t start = cur->pos;
int32_t end = endPos;
if (!ASCENDING_TRAVERSE(pTsdbReadHandle->order)) {
if (!ascScan) {
TSWAP(start, end);
}
@ -1877,10 +1894,10 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa
cur->win = (STimeWindow){.skey = tsArray[start], .ekey = tsArray[end]};
cur->mixBlock = (numOfRows != pBlockInfo->rows);
cur->lastKey = tsArray[endPos] + step;
cur->blockCompleted = true;
cur->blockCompleted = (ascScan? (endPos == pBlockInfo->rows - 1):(endPos == 0));
// The value of pos may be -1 or pBlockInfo->rows, and it is invalid in both cases.
pos = endPos + step;
int32_t pos = endPos + step;
updateInfoAfterMerge(pTsdbReadHandle, pCheckInfo, numOfRows, pos);
doCheckGeneratedBlockRange(pTsdbReadHandle);
@ -1892,15 +1909,17 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa
int32_t getEndPosInDataBlock(STsdbReadHandle* pTsdbReadHandle, SDataBlockInfo* pBlockInfo) {
// NOTE: reverse the order to find the end position in data block
int32_t endPos = -1;
int32_t order = ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);
int32_t order = ascScan? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
SQueryFilePos* cur = &pTsdbReadHandle->cur;
SDataCols* pCols = pTsdbReadHandle->rhelper.pDCols[0];
if (ASCENDING_TRAVERSE(pTsdbReadHandle->order) && pTsdbReadHandle->window.ekey >= pBlockInfo->window.ekey) {
if (pTsdbReadHandle->outputCapacity >= pBlockInfo->rows) {
if (ascScan && pTsdbReadHandle->window.ekey >= pBlockInfo->window.ekey) {
endPos = pBlockInfo->rows - 1;
cur->mixBlock = (cur->pos != 0);
} else if (!ASCENDING_TRAVERSE(pTsdbReadHandle->order) && pTsdbReadHandle->window.ekey <= pBlockInfo->window.skey) {
} else if ((!ascScan) && pTsdbReadHandle->window.ekey <= pBlockInfo->window.skey) {
endPos = 0;
cur->mixBlock = (cur->pos != pBlockInfo->rows - 1);
} else {
@ -1908,6 +1927,28 @@ int32_t getEndPosInDataBlock(STsdbReadHandle* pTsdbReadHandle, SDataBlockInfo* p
endPos = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pTsdbReadHandle->window.ekey, order);
cur->mixBlock = true;
}
} else {
if (ascScan && pTsdbReadHandle->window.ekey >= pBlockInfo->window.ekey) {
endPos = TMIN(cur->pos + pTsdbReadHandle->outputCapacity - 1, pBlockInfo->rows - 1);
} else if ((!ascScan) && pTsdbReadHandle->window.ekey <= pBlockInfo->window.skey) {
endPos = TMAX(cur->pos - pTsdbReadHandle->outputCapacity + 1, 0);
} else {
ASSERT(pCols->numOfRows > 0);
endPos = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pTsdbReadHandle->window.ekey, order);
// the qualified data exceeds the output capacity
int32_t size = abs(cur->pos - endPos) + 1;
if (size > pTsdbReadHandle->outputCapacity) {
int32_t delta = size - pTsdbReadHandle->outputCapacity;
if (ascScan) {
endPos -= delta;
} else {
endPos += delta;
}
}
}
cur->mixBlock = true;
}
return endPos;
}
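The capacity-limited branch above clamps the end position so the copied range never exceeds the output buffer. A small self-contained reduction of that arithmetic (parameter names simplified):

    #include <stdlib.h>

    /* Clamp the inclusive [pos, endPos] range to at most `capacity` rows,
     * trimming from the far end in the current scan direction. */
    static int clampEndPos(int pos, int endPos, int capacity, int ascending) {
      int size = abs(pos - endPos) + 1;
      if (size > capacity) {
        int delta = size - capacity;
        endPos += ascending ? -delta : delta;
      }
      return endPos;
    }
    /* e.g. pos = 0, endPos = 9999, capacity = 4096, ascending = 1  ->  endPos becomes 4095 */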
@ -2035,8 +2076,14 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
}
#endif
if (TD_SUPPORT_UPDATE(pCfg->update)) {
numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, pos, pos);
if (lastKeyAppend != key) {
if (lastKeyAppend != TSKEY_INITIAL_VAL) {
++curRow;
}
lastKeyAppend = key;
}
// load data from the file first
numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, pos, pos);
if (rv1 != TD_ROW_SVER(row1)) {
rv1 = TD_ROW_SVER(row1);
@ -2046,7 +2093,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
}
// still assign data into current row
mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend);
if (cur->win.skey == TSKEY_INITIAL_VAL) {
@ -2058,7 +2105,6 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
cur->mixBlock = true;
moveToNextRowInMem(pCheckInfo);
++curRow;
pos += step;
} else {
@ -2369,7 +2415,7 @@ static int32_t createDataBlocksInfo(STsdbReadHandle* pTsdbReadHandle, int32_t nu
static int32_t getFirstFileDataBlock(STsdbReadHandle* pTsdbReadHandle, bool* exists);
static int32_t getDataBlockRv(STsdbReadHandle* pTsdbReadHandle, STableBlockInfo* pNext, bool* exists) {
static int32_t getDataBlock(STsdbReadHandle* pTsdbReadHandle, STableBlockInfo* pNext, bool* exists) {
int32_t step = ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? 1 : -1;
SQueryFilePos* cur = &pTsdbReadHandle->cur;
@ -2478,7 +2524,7 @@ static int32_t getFirstFileDataBlock(STsdbReadHandle* pTsdbReadHandle, bool* exi
cur->fid = pTsdbReadHandle->pFileGroup->fid;
STableBlockInfo* pBlockInfo = &pTsdbReadHandle->pDataBlockInfo[cur->slot];
return getDataBlockRv(pTsdbReadHandle, pBlockInfo, exists);
return getDataBlock(pTsdbReadHandle, pBlockInfo, exists);
}
static bool isEndFileDataBlock(SQueryFilePos* cur, int32_t numOfBlocks, bool ascTrav) {
@ -2643,7 +2689,7 @@ static int32_t getDataBlocksInFiles(STsdbReadHandle* pTsdbReadHandle, bool* exis
} else {
moveToNextDataBlockInCurrentFile(pTsdbReadHandle);
STableBlockInfo* pNext = &pTsdbReadHandle->pDataBlockInfo[cur->slot];
return getDataBlockRv(pTsdbReadHandle, pNext, exists);
return getDataBlock(pTsdbReadHandle, pNext, exists);
}
}
}

View File

@ -180,8 +180,6 @@ void vnodeClose(SVnode *pVnode) {
// start the sync timer after the queue is ready
int32_t vnodeStart(SVnode *pVnode) {
vnodeSyncSetQ(pVnode, NULL);
vnodeSyncSetRpc(pVnode, NULL);
vnodeSyncStart(pVnode);
return 0;
}

View File

@ -118,11 +118,6 @@ int vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg
break;
}
if (tqPushMsg(pVnode->pTq, pMsg->pCont, pMsg->contLen, pMsg->msgType, version) < 0) {
vError("vgId:%d failed to push msg to TQ since %s", TD_VID(pVnode), tstrerror(terrno));
return -1;
}
vDebug("vgId:%d process %s request success, version: %" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), version);
if (tqPushMsg(pVnode->pTq, pMsg->pCont, pMsg->contLen, pMsg->msgType, version) < 0) {
@ -687,7 +682,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in
submitBlkRsp.uid = createTbReq.uid;
submitBlkRsp.tblFName = taosMemoryMalloc(strlen(pVnode->config.dbname) + strlen(createTbReq.name) + 2);
sprintf(submitBlkRsp.tblFName, "%s.%s", pVnode->config.dbname, createTbReq.name);
sprintf(submitBlkRsp.tblFName, "%s.", pVnode->config.dbname);
msgIter.uid = createTbReq.uid;
if (createTbReq.type == TSDB_CHILD_TABLE) {

View File

@ -13,90 +13,62 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "vnd.h"
static int32_t vnodeSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg);
static int32_t vnodeSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg);
static SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode);
static void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta);
static void vnodeSyncPreCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta);
static void vnodeSyncRollBackMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta);
static int32_t vnodeSyncGetSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot);
int32_t vnodeSyncOpen(SVnode *pVnode, char *path) {
SSyncInfo syncInfo;
syncInfo.vgId = pVnode->config.vgId;
SSyncCfg *pCfg = &(syncInfo.syncCfg);
pCfg->replicaNum = pVnode->config.syncCfg.replicaNum;
pCfg->myIndex = pVnode->config.syncCfg.myIndex;
memcpy(pCfg->nodeInfo, pVnode->config.syncCfg.nodeInfo, sizeof(pCfg->nodeInfo));
SSyncInfo syncInfo = {
.vgId = pVnode->config.vgId,
.syncCfg = pVnode->config.syncCfg,
.pWal = pVnode->pWal,
.msgcb = NULL,
.FpSendMsg = vnodeSyncSendMsg,
.FpEqMsg = vnodeSyncEqMsg,
};
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s/sync", path);
syncInfo.pWal = pVnode->pWal;
syncInfo.pFsm = syncVnodeMakeFsm(pVnode);
syncInfo.rpcClient = NULL;
syncInfo.FpSendMsg = vnodeSendMsg;
syncInfo.queue = NULL;
syncInfo.FpEqMsg = vnodeSyncEqMsg;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s%ssync", path, TD_DIRSEP);
syncInfo.pFsm = vnodeSyncMakeFsm(pVnode);
pVnode->sync = syncOpen(&syncInfo);
assert(pVnode->sync > 0);
if (pVnode->sync <= 0) {
vError("vgId:%d, failed to open sync since %s", pVnode->config.vgId, terrstr());
return -1;
}
// for test
setPingTimerMS(pVnode->sync, 3000);
setElectTimerMS(pVnode->sync, 500);
setHeartbeatTimerMS(pVnode->sync, 100);
return 0;
}
int32_t vnodeSyncStart(SVnode *pVnode) {
void vnodeSyncStart(SVnode *pVnode) {
syncSetMsgCb(pVnode->sync, &pVnode->msgCb);
syncStart(pVnode->sync);
}
void vnodeSyncClose(SVnode *pVnode) { syncStop(pVnode->sync); }
int32_t vnodeSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) { return tmsgPutToQueue(msgcb, SYNC_QUEUE, pMsg); }
int32_t vnodeSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) { return tmsgSendReq(pEpSet, pMsg); }
int32_t vnodeSyncGetSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot) {
vnodeGetSnapshot(pFsm->data, pSnapshot);
return 0;
}
void vnodeSyncClose(SVnode *pVnode) {
// stop by ref id
syncStop(pVnode->sync);
}
void vnodeSyncSetQ(SVnode *pVnode, void *qHandle) { syncSetQ(pVnode->sync, (void *)(&(pVnode->msgCb))); }
void vnodeSyncSetRpc(SVnode *pVnode, void *rpcHandle) { syncSetRpc(pVnode->sync, (void *)(&(pVnode->msgCb))); }
int32_t vnodeSyncEqMsg(void *qHandle, SRpcMsg *pMsg) {
int32_t ret = 0;
SMsgCb *pMsgCb = qHandle;
if (pMsgCb->queueFps[SYNC_QUEUE] != NULL) {
tmsgPutToQueue(qHandle, SYNC_QUEUE, pMsg);
} else {
vError("vnodeSyncEqMsg queue is NULL, SYNC_QUEUE:%d", SYNC_QUEUE);
}
return ret;
}
int32_t vnodeSendMsg(void *rpcHandle, const SEpSet *pEpSet, SRpcMsg *pMsg) {
int32_t ret = 0;
SMsgCb *pMsgCb = rpcHandle;
if (pMsgCb->queueFps[SYNC_QUEUE] != NULL) {
pMsg->info.noResp = 1;
tmsgSendReq(pEpSet, pMsg);
} else {
vError("vnodeSendMsg queue is NULL, SYNC_QUEUE:%d", SYNC_QUEUE);
}
return ret;
}
int32_t vnodeSyncGetSnapshotCb(struct SSyncFSM *pFsm, SSnapshot *pSnapshot) {
SVnode *pVnode = (SVnode *)(pFsm->data);
vnodeGetSnapshot(pVnode, pSnapshot);
/*
pSnapshot->data = NULL;
pSnapshot->lastApplyIndex = 0;
pSnapshot->lastApplyTerm = 0;
*/
return 0;
}
void vnodeSyncCommitCb(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
SyncIndex beginIndex = SYNC_INDEX_INVALID;
if (pFsm->FpGetSnapshot != NULL) {
SSnapshot snapshot;
SSnapshot snapshot = {0};
pFsm->FpGetSnapshot(pFsm, &snapshot);
beginIndex = snapshot.lastApplyIndex;
}
@ -147,7 +119,7 @@ void vnodeSyncCommitCb(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cb
}
}
void vnodeSyncPreCommitCb(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
void vnodeSyncPreCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
char logBuf[256];
snprintf(logBuf, sizeof(logBuf),
"==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", pFsm, cbMeta.index,
@ -155,19 +127,19 @@ void vnodeSyncPreCommitCb(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta
syncRpcMsgLog2(logBuf, (SRpcMsg *)pMsg);
}
void vnodeSyncRollBackCb(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
void vnodeSyncRollBackMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
char logBuf[256];
snprintf(logBuf, sizeof(logBuf), "==callback== ==RollBackCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n",
pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state));
syncRpcMsgLog2(logBuf, (SRpcMsg *)pMsg);
}
SSyncFSM *syncVnodeMakeFsm(SVnode *pVnode) {
SSyncFSM *pFsm = (SSyncFSM *)taosMemoryMalloc(sizeof(SSyncFSM));
SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) {
SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM));
pFsm->data = pVnode;
pFsm->FpCommitCb = vnodeSyncCommitCb;
pFsm->FpPreCommitCb = vnodeSyncPreCommitCb;
pFsm->FpRollBackCb = vnodeSyncRollBackCb;
pFsm->FpGetSnapshot = vnodeSyncGetSnapshotCb;
pFsm->FpCommitCb = vnodeSyncCommitMsg;
pFsm->FpPreCommitCb = vnodeSyncPreCommitMsg;
pFsm->FpRollBackCb = vnodeSyncRollBackMsg;
pFsm->FpGetSnapshot = vnodeSyncGetSnapshot;
return pFsm;
}

View File

@ -125,19 +125,10 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, void* streamReadHandle) {
return pTaskInfo;
}
int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
// traverse to the stream scanner node to add this table id
SOperatorInfo* pInfo = pTaskInfo->pRoot;
while (pInfo->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
pInfo = pInfo->pDownstream[0];
}
SStreamBlockScanInfo* pScanInfo = pInfo->info;
if (isAdd) {
static SArray* filterQualifiedChildTables(const SStreamBlockScanInfo* pScanInfo, const SArray* tableIdList) {
SArray* qa = taosArrayInit(4, sizeof(tb_uid_t));
// discard the tables that were not created from the queried super table.
SMetaReader mr = {0};
metaReaderInit(&mr, pScanInfo->readHandle.meta, 0);
for (int32_t i = 0; i < taosArrayGetSize(tableIdList); ++i) {
@ -158,17 +149,36 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
}
metaReaderClear(&mr);
return qa;
}
int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
// traverse to the stream scanner node to add this table id
SOperatorInfo* pInfo = pTaskInfo->pRoot;
while (pInfo->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
pInfo = pInfo->pDownstream[0];
}
int32_t code = 0;
SStreamBlockScanInfo* pScanInfo = pInfo->info;
if (isAdd) { // add new table id
SArray* qa = filterQualifiedChildTables(pScanInfo, tableIdList);
qDebug(" %d qualified child tables added into stream scanner", (int32_t)taosArrayGetSize(qa));
int32_t code = tqReadHandleAddTbUidList(pScanInfo->streamBlockReader, qa);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
} else {
assert(0);
code = tqReadHandleAddTbUidList(pScanInfo->streamBlockReader, qa);
taosArrayDestroy(qa);
} else { // remove the table id in current list
SArray* qa = filterQualifiedChildTables(pScanInfo, tableIdList);
qDebug(" %d remove child tables from the stream scanner", (int32_t)taosArrayGetSize(tableIdList));
code = tqReadHandleAddTbUidList(pScanInfo->streamBlockReader, tableIdList);
taosArrayDestroy(qa);
}
return TSDB_CODE_SUCCESS;
return code;
}
int32_t qGetQueriedTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion, int32_t* tversion) {

View File

@ -2062,15 +2062,7 @@ void setExecutionContext(int32_t numOfOutput, uint64_t groupId, SExecTaskInfo* p
pAggInfo->groupId = groupId;
}
/**
* For interval query of both super table and table, copy the data in ascending order, since the output results are
* ordered in SWindowResutl already. While handling the group by query for both table and super table,
* all group result are completed already.
*
* @param pQInfo
* @param result
*/
int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo,
int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo,
int32_t* rowCellOffset, SqlFunctionCtx* pCtx, int32_t numOfExprs) {
int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
int32_t start = pGroupResInfo->index;
@ -2087,6 +2079,15 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprIn
continue;
}
if (pBlock->info.groupId == 0) {
pBlock->info.groupId = pPos->groupId;
} else {
// the current value belongs to a different group; it cannot be packed into the same data block
if (pBlock->info.groupId != pPos->groupId) {
break;
}
}
if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) {
break;
}
@ -2100,9 +2101,8 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprIn
if (pCtx[j].fpSet.finalize) {
int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
if (TAOS_FAILED(code)) {
qError("%s build result data block error, code %s", GET_TASKID(taskInfo), tstrerror(code));
taskInfo->code = code;
longjmp(taskInfo->env, code);
qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
longjmp(pTaskInfo->env, code);
}
} else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) {
// do nothing, todo refactor
@ -2124,7 +2124,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprIn
}
}
// qDebug("QInfo:0x%"PRIx64" copy data to query buf completed", GET_TASKID(pRuntimeEnv));
qDebug("%s result generated, rows:%d, groupId:%"PRIu64, GET_TASKID(pTaskInfo), pBlock->info.rows, pBlock->info.groupId);
blockDataUpdateTsWindow(pBlock);
return 0;
}
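doCopyToSDataBlock above now stops filling the output block at a group boundary or when the block is full. A hedged stand-alone loop showing the same packing rule; Row and the capacity check are simplified stand-ins, and each row batch is assumed to fit the capacity on its own:

    #include <stdint.h>

    typedef struct { uint64_t groupId; int rows; } Row;   /* stand-in for a buffered result-row position */

    /* Pack row batches into one output block; returns the index at which to resume for the next block. */
    static int packOneBlock(const Row *rows, int numOfRows, int start, int capacity,
                            uint64_t *blockGroupId, int *blockRows) {
      int i = start;
      for (; i < numOfRows; ++i) {
        if (*blockRows == 0) {
          *blockGroupId = rows[i].groupId;     /* the first batch fixes the block's group id */
        } else if (*blockGroupId != rows[i].groupId) {
          break;                               /* different group: this block must be flushed first */
        }
        if (*blockRows + rows[i].rows > capacity) {
          break;                               /* block full */
        }
        *blockRows += rows[i].rows;
      }
      return i;
    }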
@ -2145,10 +2145,9 @@ void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SG
return;
}
// clear the existing group id
pBlock->info.groupId = 0;
doCopyToSDataBlock(pTaskInfo, pBlock, pExprInfo, pBuf, pGroupResInfo, rowCellOffset, pCtx, numOfExprs);
// add condition (pBlock->info.rows >= 1) just to keep the runtime happy
blockDataUpdateTsWindow(pBlock);
}
static void updateNumOfRowsInResultRows(SqlFunctionCtx* pCtx, int32_t numOfOutput, SResultRowInfo* pResultRowInfo,
@ -3546,11 +3545,12 @@ _error:
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag) {
// todo add more information about exchange operation
if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_EXCHANGE) {
int32_t type = pOperator->operatorType;
if (type == QUERY_NODE_PHYSICAL_PLAN_EXCHANGE || type == QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
*order = TSDB_ORDER_ASC;
*scanFlag = MAIN_SCAN;
return TSDB_CODE_SUCCESS;
} else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
} else if (type == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
STableScanInfo* pTableScanInfo = pOperator->info;
*order = pTableScanInfo->cond.order;
*scanFlag = pTableScanInfo->scanFlag;
@ -3655,7 +3655,6 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) {
doSetOperatorCompleted(pOperator);
}
doSetOperatorCompleted(pOperator);
return (blockDataGetNumOfRows(pInfo->pRes) != 0) ? pInfo->pRes : NULL;
}
@ -3910,6 +3909,9 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
// the pDataBlock are always the same one, no need to call this again
int32_t code = getTableScanInfo(pOperator->pDownstream[0], &order, &scanFlag);
if (code != TSDB_CODE_SUCCESS) {
longjmp(pTaskInfo->env, code);
}
setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order, scanFlag, false);
blockDataEnsureCapacity(pInfo->pRes, pInfo->pRes->info.rows + pBlock->info.rows);
@ -4203,7 +4205,7 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo*
pInfo->pScalarExprInfo = pScalarExprInfo;
pInfo->numOfScalarExpr = numOfScalarExpr;
if (pInfo->pScalarExprInfo != NULL) {
pInfo->pScalarCtx = createSqlFunctionCtx(pScalarExprInfo, numOfCols, &pInfo->rowCellInfoOffset);
pInfo->pScalarCtx = createSqlFunctionCtx(pScalarExprInfo, numOfScalarExpr, &pInfo->rowCellInfoOffset);
}
pOperator->name = "TableAggregate";
@ -4311,11 +4313,17 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* p
int32_t numOfRows = 4096;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
// Make sure the size of the SSDataBlock never exceeds 2MB.
int32_t TWOMB = 2 * 1024 * 1024;
if (numOfRows * pResBlock->info.rowSize > TWOMB) {
numOfRows = TWOMB / pResBlock->info.rowSize;
}
initResultSizeInfo(pOperator, numOfRows);
initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResBlock, keyBufSize, pTaskInfo->id.str);
setFunctionResultOutput(&pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, numOfCols, pTaskInfo);
pInfo->pPseudoColInfo = setRowTsColumnOutputInfo(pInfo->binfo.pCtx, numOfCols);
pInfo->pPseudoColInfo = setRowTsColumnOutputInfo(pInfo->binfo.pCtx, numOfCols);
pOperator->name = "ProjectOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_PROJECT;
pOperator->blocking = false;
@ -4323,11 +4331,11 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* p
pOperator->info = pInfo;
pOperator->pExpr = pExprInfo;
pOperator->numOfExprs = num;
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doProjectOperation, NULL, NULL,
destroyProjectOperatorInfo, NULL, NULL, NULL);
pOperator->pTaskInfo = pTaskInfo;
int32_t code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
goto _error;

View File

@ -300,10 +300,26 @@ void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock)
if (fmIsScanPseudoColumnFunc(functionId)) {
setTbNameColData(pTableScanInfo->readHandle.meta, pBlock, pColInfoData, functionId);
} else { // these are tags
const char* p = metaGetTableTagVal(&mr.me, pExpr->base.pParam[0].pCol->colId);
const char* p = NULL;
if(pColInfoData->info.type == TSDB_DATA_TYPE_JSON){
const uint8_t *tmp = mr.me.ctbEntry.pTags;
char *data = taosMemoryCalloc(kvRowLen(tmp) + 1, 1);
if(data == NULL){
qError("doTagScan calloc error:%d", kvRowLen(tmp) + 1);
return;
}
*data = TSDB_DATA_TYPE_JSON;
memcpy(data+1, tmp, kvRowLen(tmp));
p = data;
}else{
p = metaGetTableTagVal(&mr.me, pExpr->base.pParam[0].pCol->colId);
}
for (int32_t i = 0; i < pBlock->info.rows; ++i) {
colDataAppend(pColInfoData, i, p, (p == NULL));
}
if(pColInfoData->info.type == TSDB_DATA_TYPE_JSON){
taosMemoryFree((void*)p);
}
}
}
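The JSON branch above hands colDataAppend a buffer whose first byte is the data type followed by the raw kvRow payload. A minimal stand-alone version of that wrapping step; the type constant and payload layout are assumptions for illustration only:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* Prefix a raw tag payload with a one-byte type tag; the caller frees the result after use. */
    static uint8_t *wrapTypedPayload(uint8_t typeByte, const void *payload, size_t len) {
      uint8_t *buf = calloc(len + 1, 1);
      if (buf == NULL) return NULL;    /* out of memory: caller must bail out, as the code above does */
      buf[0] = typeByte;
      memcpy(buf + 1, payload, len);
      return buf;
    }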
@ -1587,10 +1603,23 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
STR_TO_VARSTR(str, mr.me.name);
colDataAppend(pDst, count, str, false);
} else { // it is a tag value
if(pDst->info.type == TSDB_DATA_TYPE_JSON){
const uint8_t *tmp = mr.me.ctbEntry.pTags;
char *data = taosMemoryCalloc(kvRowLen(tmp) + 1, 1);
if(data == NULL){
qError("doTagScan calloc error:%d", kvRowLen(tmp) + 1);
return NULL;
}
*data = TSDB_DATA_TYPE_JSON;
memcpy(data+1, tmp, kvRowLen(tmp));
colDataAppend(pDst, count, data, false);
taosMemoryFree(data);
}else{
const char* p = metaGetTableTagVal(&mr.me, pExprInfo[j].base.pParam[0].pCol->colId);
colDataAppend(pDst, count, p, (p == NULL));
}
}
}
count += 1;
if (++pInfo->curPos >= pInfo->pTableGroups->numOfTables) {

View File

@ -120,7 +120,13 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx);
bool getTailFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
bool tailFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
int32_t tailFunction(SqlFunctionCtx* pCtx);
int32_t tailFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
//int32_t tailFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
bool getUniqueFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
bool uniqueFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
int32_t uniqueFunction(SqlFunctionCtx *pCtx);
//int32_t uniqueFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
bool getSelectivityFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv);

View File

@ -493,6 +493,21 @@ static int32_t translateFirstLast(SFunctionNode* pFunc, char* pErrBuf, int32_t l
return TSDB_CODE_SUCCESS;
}
static int32_t translateUnique(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
return TSDB_CODE_SUCCESS;
}
SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
if (QUERY_NODE_COLUMN != nodeType(pPara)) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
"The parameters of UNIQUE can only be columns");
}
pFunc->node.resType = ((SExprNode*)pPara)->resType;
return TSDB_CODE_SUCCESS;
}
static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
int32_t paraLen = LIST_LENGTH(pFunc->pParameterList);
if (paraLen == 0 || paraLen > 2) {
@ -877,16 +892,6 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.processFunc = lastFunction,
.finalizeFunc = lastFinalize
},
{
.name = "diff",
.type = FUNCTION_TYPE_DIFF,
.classification = FUNC_MGT_NONSTANDARD_SQL_FUNC | FUNC_MGT_TIMELINE_FUNC,
.translateFunc = translateDiff,
.getEnvFunc = getDiffFuncEnv,
.initFunc = diffFunctionSetup,
.processFunc = diffFunction,
.finalizeFunc = functionFinalize
},
{
.name = "histogram",
.type = FUNCTION_TYPE_HISTOGRAM,
@ -907,6 +912,16 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.processFunc = hllFunction,
.finalizeFunc = hllFinalize
},
{
.name = "diff",
.type = FUNCTION_TYPE_DIFF,
.classification = FUNC_MGT_NONSTANDARD_SQL_FUNC | FUNC_MGT_TIMELINE_FUNC,
.translateFunc = translateDiff,
.getEnvFunc = getDiffFuncEnv,
.initFunc = diffFunctionSetup,
.processFunc = diffFunction,
.finalizeFunc = functionFinalize
},
{
.name = "state_count",
.type = FUNCTION_TYPE_STATE_COUNT,
@ -965,7 +980,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.getEnvFunc = getTailFuncEnv,
.initFunc = tailFunctionSetup,
.processFunc = tailFunction,
.finalizeFunc = tailFinalize
.finalizeFunc = NULL
},
{
.name = "unique",
.type = FUNCTION_TYPE_UNIQUE,
.classification = FUNC_MGT_NONSTANDARD_SQL_FUNC | FUNC_MGT_TIMELINE_FUNC,
.translateFunc = translateUnique,
.getEnvFunc = getUniqueFuncEnv,
.initFunc = uniqueFunctionSetup,
.processFunc = uniqueFunction,
.finalizeFunc = NULL
},
{
.name = "abs",

View File

@ -28,12 +28,15 @@
#define TAIL_MAX_POINTS_NUM 100
#define TAIL_MAX_OFFSET 100
#define UNIQUE_MAX_RESULT_SIZE (1024*1024*10)
#define HLL_BUCKET_BITS 14 // The bits of the bucket
#define HLL_DATA_BITS (64-HLL_BUCKET_BITS)
#define HLL_BUCKETS (1<<HLL_BUCKET_BITS)
#define HLL_BUCKET_MASK (HLL_BUCKETS-1)
#define HLL_ALPHA_INF 0.721347520444481703680 // constant for 0.5/ln(2)
typedef struct SSumRes {
union {
int64_t isum;
@ -197,6 +200,20 @@ typedef struct STailInfo {
STailItem **pItems;
} STailInfo;
typedef struct SUniqueItem {
int64_t timestamp;
bool isNull;
char data[];
} SUniqueItem;
typedef struct SUniqueInfo {
int32_t numOfPoints;
uint8_t colType;
int16_t colBytes;
SHashObj *pHash;
char pItems[];
} SUniqueInfo;
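The pItems flexible array above is indexed by hand: every stored point occupies sizeof(SUniqueItem) plus the column width. A brief sketch of that layout arithmetic, with UItem as a stand-in:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct { int64_t timestamp; bool isNull; char data[]; } UItem;  /* mirrors SUniqueItem */

    /* The i-th point lives at pItems + i * entrySize, where entrySize covers the header plus colBytes. */
    static size_t uniqueEntrySize(int16_t colBytes) { return sizeof(UItem) + (size_t)colBytes; }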
#define SET_VAL(_info, numOfElem, res) \
do { \
if ((numOfElem) <= 0) { \
@ -216,6 +233,18 @@ typedef struct STailInfo {
} \
} while (0);
#define DO_UPDATE_SUBSID_RES(ctx, ts) \
do { \
for (int32_t _i = 0; _i < (ctx)->subsidiaries.num; ++_i) { \
SqlFunctionCtx* __ctx = (ctx)->subsidiaries.pCtx[_i]; \
if (__ctx->functionId == FUNCTION_TS_DUMMY) { \
__ctx->tag.i = (ts); \
__ctx->tag.nType = TSDB_DATA_TYPE_BIGINT; \
} \
__ctx->fpSet.process(__ctx); \
} \
} while (0)
#define UPDATE_DATA(ctx, left, right, num, sign, _ts) \
do { \
if (((left) < (right)) ^ (sign)) { \
@ -748,50 +777,6 @@ bool getMinmaxFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
return true;
}
#define GET_TS_LIST(x) ((TSKEY*)((x)->ptsList))
#define GET_TS_DATA(x, y) (GET_TS_LIST(x)[(y)])
#define DO_UPDATE_TAG_COLUMNS_WITHOUT_TS(ctx) \
do { \
for (int32_t _i = 0; _i < (ctx)->tagInfo.numOfTagCols; ++_i) { \
SqlFunctionCtx* __ctx = (ctx)->tagInfo.pTagCtxList[_i]; \
__ctx->fpSet.process(__ctx); \
} \
} while (0);
#define DO_UPDATE_SUBSID_RES(ctx, ts) \
do { \
for (int32_t _i = 0; _i < (ctx)->subsidiaries.num; ++_i) { \
SqlFunctionCtx* __ctx = (ctx)->subsidiaries.pCtx[_i]; \
if (__ctx->functionId == FUNCTION_TS_DUMMY) { \
__ctx->tag.i = (ts); \
__ctx->tag.nType = TSDB_DATA_TYPE_BIGINT; \
} \
__ctx->fpSet.process(__ctx); \
} \
} while (0)
#define UPDATE_DATA(ctx, left, right, num, sign, _ts) \
do { \
if (((left) < (right)) ^ (sign)) { \
(left) = (right); \
DO_UPDATE_SUBSID_RES(ctx, _ts); \
(num) += 1; \
} \
} while (0)
#define LOOPCHECK_N(val, _col, ctx, _t, _nrow, _start, sign, num) \
do { \
_t* d = (_t*)((_col)->pData); \
for (int32_t i = (_start); i < (_nrow) + (_start); ++i) { \
if (((_col)->hasNull) && colDataIsNull_f((_col)->nullbitmap, i)) { \
continue; \
} \
TSKEY ts = (ctx)->ptsList != NULL ? GET_TS_DATA(ctx, i) : 0; \
UPDATE_DATA(ctx, val, d[i], num, sign, ts); \
} \
} while (0)
static void saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
static void copyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
@ -3581,3 +3566,92 @@ int32_t tailFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
return pEntryInfo->numOfRes;
}
bool getUniqueFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
pEnv->calcMemSize = sizeof(SUniqueInfo) + UNIQUE_MAX_RESULT_SIZE;
return true;
}
bool uniqueFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
if (!functionSetup(pCtx, pResInfo)) {
return false;
}
SUniqueInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
pInfo->numOfPoints = 0;
pInfo->colType = pCtx->resDataInfo.type;
pInfo->colBytes = pCtx->resDataInfo.bytes;
if (pInfo->pHash != NULL) {
taosHashClear(pInfo->pHash);
} else {
pInfo->pHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
}
return true;
}
static void doUniqueAdd(SUniqueInfo* pInfo, char *data, TSKEY ts, bool isNull) {
int32_t hashKeyBytes = IS_VAR_DATA_TYPE(pInfo->colType) ? varDataTLen(data) : pInfo->colBytes;
SUniqueItem *pHashItem = taosHashGet(pInfo->pHash, data, hashKeyBytes);
if (pHashItem == NULL) {
int32_t size = sizeof(SUniqueItem) + pInfo->colBytes;
SUniqueItem *pItem = (SUniqueItem *)(pInfo->pItems + pInfo->numOfPoints * size);
pItem->timestamp = ts;
memcpy(pItem->data, data, pInfo->colBytes);
taosHashPut(pInfo->pHash, data, hashKeyBytes, (char *)pItem, sizeof(SUniqueItem*));
pInfo->numOfPoints++;
} else if (pHashItem->timestamp > ts) {
pHashItem->timestamp = ts;
}
}
int32_t uniqueFunction(SqlFunctionCtx* pCtx) {
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SUniqueInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
SInputColumnInfoData* pInput = &pCtx->input;
TSKEY* tsList = (int64_t*)pInput->pPTS->pData;
SColumnInfoData* pInputCol = pInput->pData[0];
SColumnInfoData* pTsOutput = pCtx->pTsOutput;
SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput;
int32_t startOffset = pCtx->offset;
for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; ++i) {
char* data = colDataGetData(pInputCol, i);
doUniqueAdd(pInfo, data, tsList[i], colDataIsNull_s(pInputCol, i));
if (sizeof(SUniqueInfo) + pInfo->numOfPoints * (sizeof(SUniqueItem) + pInfo->colBytes) >= UNIQUE_MAX_RESULT_SIZE) {
taosHashCleanup(pInfo->pHash);
return 0;
}
}
for (int32_t i = 0; i < pInfo->numOfPoints; ++i) {
SUniqueItem *pItem = (SUniqueItem *)(pInfo->pItems + i * (sizeof(SUniqueItem) + pInfo->colBytes));
colDataAppend(pOutput, i, pItem->data, false);
if (pTsOutput != NULL) {
colDataAppendInt64(pTsOutput, i, &pItem->timestamp);
}
}
return pInfo->numOfPoints;
}
int32_t uniqueFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SUniqueInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
int32_t slotId = pCtx->pExpr->base.resSchema.slotId;
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
for (int32_t i = 0; i < pResInfo->numOfRes; ++i) {
SUniqueItem *pItem = (SUniqueItem *)(pInfo->pItems + i * (sizeof(SUniqueItem) + pInfo->colBytes));
colDataAppend(pCol, i, pItem->data, false);
//TODO: handle ts output
}
return pResInfo->numOfRes;
}
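The unique implementation above keeps one entry per distinct value and remembers the earliest timestamp seen for it. A self-contained sketch of that rule for a single int32 column, using a linear scan in place of the SHashObj lookup:

    #include <stdint.h>

    #define MAX_POINTS 100

    typedef struct { int64_t ts; int32_t val; } Item;

    /* Add one (value, timestamp) pair: new values are appended, repeated values keep the earliest ts. */
    static int uniqueAdd(Item *items, int *num, int32_t val, int64_t ts) {
      for (int i = 0; i < *num; ++i) {
        if (items[i].val == val) {
          if (ts < items[i].ts) items[i].ts = ts;   /* same value, earlier row wins */
          return 0;
        }
      }
      if (*num >= MAX_POINTS) return -1;            /* result area full, like UNIQUE_MAX_RESULT_SIZE */
      items[*num].val = val;
      items[*num].ts  = ts;
      (*num)++;
      return 0;
    }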

View File

@ -145,7 +145,8 @@ void udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) {
char path[PATH_MAX] = {0};
snprintf(path, sizeof(path), "%s/lib%s.so", "/tmp", pFuncInfo->name);
TdFilePtr file = taosOpenFile(path, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_READ | TD_FILE_TRUNC | TD_FILE_AUTO_DEL);
TdFilePtr file =
taosOpenFile(path, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_READ | TD_FILE_TRUNC | TD_FILE_AUTO_DEL);
// TODO check for failure of flush to disk
taosWriteFile(file, pFuncInfo->pCode, pFuncInfo->codeSize);
taosCloseFile(&file);
@ -341,8 +342,7 @@ void udfdProcessSetupRequest(SUvUdfWork* uvUdf, SUdfRequest* request) {
void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
SUdfCallRequest *call = &request->call;
fnDebug("%" PRId64 "call request. call type %d, handle: %" PRIx64, request->seqNum, call->callType,
call->udfHandle);
fnDebug("%" PRId64 "call request. call type %d, handle: %" PRIx64, request->seqNum, call->callType, call->udfHandle);
SUdfcFuncHandle * handle = (SUdfcFuncHandle *)(call->udfHandle);
SUdf * udf = handle->udf;
SUdfResponse response = {0};
@ -363,9 +363,7 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
break;
}
case TSDB_UDF_CALL_AGG_INIT: {
SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize),
.bufLen= udf->bufSize,
.numOfResult = 0};
SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize), .bufLen = udf->bufSize, .numOfResult = 0};
udf->aggStartFunc(&outBuf);
subRsp->resultBuf = outBuf;
break;
@ -373,9 +371,7 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
case TSDB_UDF_CALL_AGG_PROC: {
SUdfDataBlock input = {0};
convertDataBlockToUdfDataBlock(&call->block, &input);
SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize),
.bufLen= udf->bufSize,
.numOfResult = 0};
SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize), .bufLen = udf->bufSize, .numOfResult = 0};
code = udf->aggProcFunc(&input, &call->interBuf, &outBuf);
freeUdfInterBuf(&call->interBuf);
freeUdfDataDataBlock(&input);
@ -384,9 +380,7 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
break;
}
case TSDB_UDF_CALL_AGG_FIN: {
SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize),
.bufLen= udf->bufSize,
.numOfResult = 0};
SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize), .bufLen = udf->bufSize, .numOfResult = 0};
code = udf->aggFinishFunc(&call->interBuf, &outBuf);
freeUdfInterBuf(&call->interBuf);
subRsp->resultBuf = outBuf;
@ -429,7 +423,6 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
}
default:
break;
}
taosMemoryFree(uvUdf->input.base);
@ -694,7 +687,6 @@ int initEpSetFromCfg(const char* firstEp, const char* secondEp, SCorEpSet* pEpSe
return 0;
}
int32_t udfdOpenClientRpc() {
SRpcInit rpcInit = {0};
rpcInit.label = "UDFD";
@ -704,15 +696,9 @@ int32_t udfdOpenClientRpc() {
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.user = TSDB_DEFAULT_USER;
rpcInit.ckey = "key";
rpcInit.spi = 1;
rpcInit.parent = &global;
rpcInit.rfp = udfdRpcRfp;
char pass[TSDB_PASSWORD_LEN + 1] = {0};
taosEncryptPass_c((uint8_t *)(TSDB_DEFAULT_PASS), strlen(TSDB_DEFAULT_PASS), pass);
rpcInit.secret = pass;
global.clientRpc = rpcOpen(&rpcInit);
if (global.clientRpc == NULL) {
fnError("failed to init dnode rpc client");

View File

@ -26,7 +26,7 @@ int32_t udf2_start(SUdfInterBuf *buf) {
int32_t udf2(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) {
double sumSquares = *(double*)interBuf->buf;
int8_t numOutput = 0;
int8_t numNotNull = 0;
for (int32_t i = 0; i < block->numOfCols; ++i) {
SUdfColumn* col = block->udfCols[i];
if (!(col->colMeta.type == TSDB_DATA_TYPE_INT ||
@ -56,15 +56,18 @@ int32_t udf2(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInte
default:
break;
}
numOutput = 1;
++numNotNull;
}
}
if (numOutput == 1) {
*(double*)(newInterBuf->buf) = sumSquares;
newInterBuf->bufLen = sizeof(double);
if (interBuf->numOfResult == 0 && numNotNull == 0) {
newInterBuf->numOfResult = 0;
} else {
newInterBuf->numOfResult = 1;
}
newInterBuf->numOfResult = numOutput;
return 0;
}
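A simplified scalar version of the udf2 step above, assuming a single double column; InterBuf stands in for SUdfInterBuf, and numOfResult stays at 0 only while neither the previous buffer nor the current batch contributed a non-NULL value:

    #include <stdbool.h>

    typedef struct { double sumSquares; int numOfResult; } InterBuf;  /* stand-in for SUdfInterBuf */

    static void udf2Step(const InterBuf *prev, const double *vals, const bool *isNull, int n,
                         InterBuf *next) {
      double sum = (prev->numOfResult > 0) ? prev->sumSquares : 0.0;
      int numNotNull = 0;
      for (int i = 0; i < n; ++i) {
        if (isNull[i]) continue;
        sum += vals[i] * vals[i];                  /* accumulate the sum of squares */
        ++numNotNull;
      }
      next->sumSquares  = sum;
      next->numOfResult = (prev->numOfResult == 0 && numNotNull == 0) ? 0 : 1;
    }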

View File

@ -22,7 +22,7 @@
#define MAX_INDEX_KEY_LEN 256 // test only, change later
#define MEM_TERM_LIMIT 10 * 10000
#define MEM_THRESHOLD 1024 * 1024
#define MEM_THRESHOLD 64 * 1024
#define MEM_ESTIMATE_RADIO 1.5
static void indexMemRef(MemTable* tbl);

View File

@ -99,7 +99,7 @@ void fstUnFinishedNodesAddSuffix(FstUnFinishedNodes* nodes, FstSlice bs, Output
if (fstSliceIsEmpty(s)) {
return;
}
size_t sz = taosArrayGetSize(nodes->stack) - 1;
int32_t sz = taosArrayGetSize(nodes->stack) - 1;
FstBuilderNodeUnfinished* un = taosArrayGet(nodes->stack, sz);
assert(un->last == NULL);
@ -130,11 +130,11 @@ void fstUnFinishedNodesAddSuffix(FstUnFinishedNodes* nodes, FstSlice bs, Output
uint64_t fstUnFinishedNodesFindCommPrefix(FstUnFinishedNodes* node, FstSlice bs) {
FstSlice* s = &bs;
size_t ssz = taosArrayGetSize(node->stack); // stack size
int32_t ssz = taosArrayGetSize(node->stack); // stack size
uint64_t count = 0;
int32_t lsz; // data len
uint8_t* data = fstSliceData(s, &lsz);
for (size_t i = 0; i < ssz && i < lsz; i++) {
for (int32_t i = 0; i < ssz && i < lsz; i++) {
FstBuilderNodeUnfinished* un = taosArrayGet(node->stack, i);
if (un->last->inp == data[i]) {
count++;
@ -147,8 +147,8 @@ uint64_t fstUnFinishedNodesFindCommPrefix(FstUnFinishedNodes* node, FstSlice bs)
uint64_t fstUnFinishedNodesFindCommPrefixAndSetOutput(FstUnFinishedNodes* node, FstSlice bs, Output in, Output* out) {
FstSlice* s = &bs;
size_t lsz = (size_t)(s->end - s->start + 1); // data len
size_t ssz = taosArrayGetSize(node->stack); // stack size
int32_t lsz = (size_t)(s->end - s->start + 1); // data len
int32_t ssz = taosArrayGetSize(node->stack); // stack size
*out = in;
uint64_t i = 0;
for (i = 0; i < lsz && i < ssz; i++) {
@ -245,7 +245,7 @@ void fstStateCompileForOneTrans(FstCountingWriter* w, CompiledAddr addr, FstTran
return;
}
void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuilderNode* node) {
size_t sz = taosArrayGetSize(node->trans);
int32_t sz = taosArrayGetSize(node->trans);
assert(sz <= 256);
uint8_t tSize = 0;
@ -253,7 +253,7 @@ void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuil
// finalOutput.is_zero()
bool anyOuts = (node->finalOutput != 0);
for (size_t i = 0; i < sz; i++) {
for (int32_t i = 0; i < sz; i++) {
FstTransition* t = taosArrayGet(node->trans, i);
tSize = TMAX(tSize, packDeltaSize(addr, t->addr));
oSize = TMAX(oSize, packSize(t->out));
@ -301,7 +301,7 @@ void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuil
/// for (uint8_t i = 0; i < 256; i++) {
// index[i] = 255;
///}
for (size_t i = 0; i < sz; i++) {
for (int32_t i = 0; i < sz; i++) {
FstTransition* t = taosArrayGet(node->trans, i);
index[t->inp] = i;
// fstPackDeltaIn(w, addr, t->addr, tSize);
@ -731,7 +731,7 @@ bool fstNodeFindInput(FstNode* node, uint8_t b, uint64_t* res) {
}
bool fstNodeCompile(FstNode* node, void* w, CompiledAddr lastAddr, CompiledAddr addr, FstBuilderNode* builderNode) {
size_t sz = taosArrayGetSize(builderNode->trans);
int32_t sz = taosArrayGetSize(builderNode->trans);
assert(sz < 256);
if (sz == 0 && builderNode->isFinal && builderNode->finalOutput == 0) {
return true;
@ -959,8 +959,8 @@ void fstBuilderNodeUnfinishedAddOutputPrefix(FstBuilderNodeUnfinished* unNode, O
if (FST_BUILDER_NODE_IS_FINAL(unNode->node)) {
unNode->node->finalOutput += out;
}
size_t sz = taosArrayGetSize(unNode->node->trans);
for (size_t i = 0; i < sz; i++) {
int32_t sz = taosArrayGetSize(unNode->node->trans);
for (int32_t i = 0; i < sz; i++) {
FstTransition* trn = taosArrayGet(unNode->node->trans, i);
trn->out += out;
}
@ -1077,7 +1077,7 @@ bool fstGet(Fst* fst, FstSlice* b, Output* out) {
tOut = tOut + FST_NODE_FINAL_OUTPUT(root);
}
for (size_t i = 0; i < taosArrayGetSize(nodes); i++) {
for (int32_t i = 0; i < taosArrayGetSize(nodes); i++) {
FstNode** node = (FstNode**)taosArrayGet(nodes, i);
fstNodeDestroy(*node);
}
@ -1352,7 +1352,7 @@ StreamWithStateResult* streamWithStateNextWith(StreamWithState* sws, StreamCallb
StreamState s2 = {.node = nextNode, .trans = 0, .out = {.null = false, .out = out}, .autState = nextState};
taosArrayPush(sws->stack, &s2);
size_t isz = taosArrayGetSize(sws->inp);
int32_t isz = taosArrayGetSize(sws->inp);
uint8_t* buf = (uint8_t*)taosMemoryMalloc(isz * sizeof(uint8_t));
for (uint32_t i = 0; i < isz; i++) {
buf[i] = *(uint8_t*)taosArrayGet(sws->inp, i);
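
The size_t to int32_t switches in this file avoid signed/unsigned mixups: several of these lengths come from expressions like taosArrayGetSize(...) - 1, which can be negative, and a negative value silently wraps to a huge number once it meets an unsigned loop counter. A small, self-contained illustration of the pitfall (not TDengine code):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int32_t len = -1;          /* e.g. "array size minus one" on an empty array */
  size_t  i   = 0;
  if (i < (size_t)len) {     /* -1 wraps to SIZE_MAX: the bound check passes */
    printf("unsigned comparison lets a negative length through\n");
  }
  if ((int32_t)i < len) {    /* signed comparison rejects it */
    printf("not reached\n");
  }
  return 0;
}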

View File

@ -116,7 +116,7 @@ TFileCache* tfileCacheCreate(const char* path) {
continue;
}
TFileHeader* header = &reader->header;
ICacheKey key = {.suid = header->suid, .colName = header->colName, .nColName = strlen(header->colName)};
ICacheKey key = {.suid = header->suid, .colName = header->colName, .nColName = (int32_t)strlen(header->colName)};
char buf[128] = {0};
int32_t sz = indexSerialCacheKey(&key, buf);
@ -230,7 +230,7 @@ static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
indexInfo("index: %" PRIu64 ", col: %s, colVal: %s, found table info in tindex, time cost: %" PRIu64 "us",
tem->suid, tem->colName, tem->colVal, cost);
ret = tfileReaderLoadTableIds((TFileReader*)reader, offset, tr->total);
ret = tfileReaderLoadTableIds((TFileReader*)reader, (int32_t)offset, tr->total);
cost = taosGetTimestampUs() - et;
indexInfo("index: %" PRIu64 ", col: %s, colVal: %s, load all table info, time cost: %" PRIu64 "us", tem->suid,
tem->colName, tem->colVal, cost);
@ -890,7 +890,7 @@ static int tfileWriteFooter(TFileWriter* write) {
char buf[sizeof(tfileMagicNumber) + 1] = {0};
void* pBuf = (void*)buf;
taosEncodeFixedU64((void**)(void*)&pBuf, tfileMagicNumber);
int nwrite = write->ctx->write(write->ctx, buf, strlen(buf));
int nwrite = write->ctx->write(write->ctx, buf, (int32_t)strlen(buf));
indexInfo("tfile write footer size: %d", write->ctx->size(write->ctx));
assert(nwrite == sizeof(tfileMagicNumber));

View File

@ -37,14 +37,14 @@ static int iBinarySearch(SArray *arr, int s, int e, uint64_t k) {
}
void iIntersection(SArray *inters, SArray *final) {
int32_t sz = taosArrayGetSize(inters);
int32_t sz = (int32_t)taosArrayGetSize(inters);
if (sz <= 0) {
return;
}
MergeIndex *mi = taosMemoryCalloc(sz, sizeof(MergeIndex));
for (int i = 0; i < sz; i++) {
SArray *t = taosArrayGetP(inters, i);
mi[i].len = taosArrayGetSize(t);
mi[i].len = (int32_t)taosArrayGetSize(t);
mi[i].idx = 0;
}
@ -70,7 +70,7 @@ void iIntersection(SArray *inters, SArray *final) {
taosMemoryFreeClear(mi);
}
void iUnion(SArray *inters, SArray *final) {
int32_t sz = taosArrayGetSize(inters);
int32_t sz = (int32_t)taosArrayGetSize(inters);
if (sz <= 0) {
return;
}
@ -82,7 +82,7 @@ void iUnion(SArray *inters, SArray *final) {
MergeIndex *mi = taosMemoryCalloc(sz, sizeof(MergeIndex));
for (int i = 0; i < sz; i++) {
SArray *t = taosArrayGetP(inters, i);
mi[i].len = taosArrayGetSize(t);
mi[i].len = (int32_t)taosArrayGetSize(t);
mi[i].idx = 0;
}
while (1) {
@ -117,8 +117,8 @@ void iUnion(SArray *inters, SArray *final) {
}
void iExcept(SArray *total, SArray *except) {
int32_t tsz = taosArrayGetSize(total);
int32_t esz = taosArrayGetSize(except);
int32_t tsz = (int32_t)taosArrayGetSize(total);
int32_t esz = (int32_t)taosArrayGetSize(except);
if (esz == 0 || tsz == 0) {
return;
}
@ -141,7 +141,10 @@ int uidCompare(const void *a, const void *b) {
// add more version compare
uint64_t u1 = *(uint64_t *)a;
uint64_t u2 = *(uint64_t *)b;
return u1 - u2;
if (u1 == u2) {
return 0;
}
return u1 < u2 ? -1 : 1;
}
int verdataCompare(const void *a, const void *b) {
SIdxVerdata *va = (SIdxVerdata *)a;
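
The uidCompare rewrite above exists because returning u1 - u2 from an int-valued comparator is unsafe for 64-bit unsigned keys: the difference is truncated, so a large positive gap can compare as zero or negative. A minimal standalone demonstration, for illustration only:

#include <stdint.h>
#include <stdio.h>

static int badCompare(uint64_t u1, uint64_t u2) { return (int)(u1 - u2); }

static int goodCompare(uint64_t u1, uint64_t u2) {
  if (u1 == u2) return 0;
  return u1 < u2 ? -1 : 1;
}

int main(void) {
  uint64_t a = 0x100000000ULL;  /* 2^32 */
  uint64_t b = 0;
  /* On typical platforms the truncation makes badCompare report "equal". */
  printf("bad=%d good=%d\n", badCompare(a, b), goodCompare(a, b));
  return 0;
}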

View File

@ -92,7 +92,19 @@ target_link_libraries (jsonUT
index
)
#add_test(
# NAME index_test
# COMMAND indexTest
#)
add_test(
NAME idxtest
COMMAND indexTest
)
add_test(
NAME idxJsonUT
COMMAND jsonUT
)
add_test(
NAME idxUtilUT
COMMAND UtilUT
)
add_test(
NAME idxFstUT
COMMAND fstUT
)

View File

@ -48,7 +48,7 @@ class FstWriter {
class FstReadMemory {
public:
FstReadMemory(size_t size, const std::string& fileName = "/tmp/tindex.tindex") {
FstReadMemory(int32_t size, const std::string& fileName = "/tmp/tindex.tindex") {
_wc = writerCtxCreate(TFile, fileName.c_str(), true, 64 * 1024);
_w = fstCountingWriterCreate(_wc);
_size = size;
@ -152,7 +152,7 @@ class FstReadMemory {
Fst* _fst;
FstSlice _s;
WriterCtx* _wc;
size_t _size;
int32_t _size;
};
#define L 100

View File

@ -714,7 +714,7 @@ class IndexObj {
return numOfTable;
}
int ReadMultiMillonData(const std::string& colName, const std::string& colVal = "Hello world",
size_t numOfTable = 100 * 10000) {
size_t numOfTable = 100) {
std::string tColVal = colVal;
int colValSize = tColVal.size();
@ -896,7 +896,7 @@ TEST_F(IndexEnv2, testIndex_TrigeFlush) {
// r
std::cout << "failed to init" << std::endl;
}
int numOfTable = 100 * 10000;
int numOfTable = 100 * 100;
index->WriteMillonData("tag1", "Hello Wolrd", numOfTable);
int target = index->SearchOne("tag1", "Hello Wolrd");
std::cout << "Get Index: " << target << std::endl;
@ -910,8 +910,8 @@ static void single_write_and_search(IndexObj* idx) {
static void multi_write_and_search(IndexObj* idx) {
int target = idx->SearchOne("tag1", "Hello");
target = idx->SearchOne("tag2", "Test");
idx->WriteMultiMillonData("tag1", "hello world test", 100 * 10000);
idx->WriteMultiMillonData("tag2", "world test nothing", 100 * 10000);
idx->WriteMultiMillonData("tag1", "hello world test", 100 * 100);
idx->WriteMultiMillonData("tag2", "world test nothing", 100 * 10);
}
TEST_F(IndexEnv2, testIndex_serarch_cache_and_tfile) {
std::string path = "/tmp/cache_and_tfile";
@ -920,8 +920,8 @@ TEST_F(IndexEnv2, testIndex_serarch_cache_and_tfile) {
}
index->PutOne("tag1", "Hello");
index->PutOne("tag2", "Test");
index->WriteMultiMillonData("tag1", "Hello", 100 * 10000);
index->WriteMultiMillonData("tag2", "Test", 100 * 10000);
index->WriteMultiMillonData("tag1", "Hello", 100 * 100);
index->WriteMultiMillonData("tag2", "Test", 100 * 100);
std::thread threads[NUM_OF_THREAD];
for (int i = 0; i < NUM_OF_THREAD; i++) {
@ -949,49 +949,49 @@ TEST_F(IndexEnv2, testIndex_MultiWrite_and_MultiRead) {
}
}
TEST_F(IndexEnv2, testIndex_restart) {
std::string path = "/tmp/cache_and_tfile";
if (index->Init(path) != 0) {
}
index->SearchOneTarget("tag1", "Hello", 10);
index->SearchOneTarget("tag2", "Test", 10);
}
TEST_F(IndexEnv2, testIndex_restart1) {
std::string path = "/tmp/cache_and_tfile";
if (index->Init(path) != 0) {
}
index->ReadMultiMillonData("tag1", "coding");
index->SearchOneTarget("tag1", "Hello", 10);
index->SearchOneTarget("tag2", "Test", 10);
}
// TEST_F(IndexEnv2, testIndex_restart) {
// std::string path = "/tmp/cache_and_tfile";
// if (index->Init(path) != 0) {
// }
// index->SearchOneTarget("tag1", "Hello", 10);
// index->SearchOneTarget("tag2", "Test", 10);
//}
// TEST_F(IndexEnv2, testIndex_restart1) {
// std::string path = "/tmp/cache_and_tfile";
// if (index->Init(path) != 0) {
// }
// index->ReadMultiMillonData("tag1", "coding");
// index->SearchOneTarget("tag1", "Hello", 10);
// index->SearchOneTarget("tag2", "Test", 10);
//}
TEST_F(IndexEnv2, testIndex_read_performance) {
std::string path = "/tmp/cache_and_tfile";
if (index->Init(path) != 0) {
}
index->PutOneTarge("tag1", "Hello", 12);
index->PutOneTarge("tag1", "Hello", 15);
index->ReadMultiMillonData("tag1", "Hello");
std::cout << "reader sz: " << index->SearchOne("tag1", "Hello") << std::endl;
assert(3 == index->SearchOne("tag1", "Hello"));
}
TEST_F(IndexEnv2, testIndexMultiTag) {
std::string path = "/tmp/multi_tag";
if (index->Init(path) != 0) {
}
int64_t st = taosGetTimestampUs();
int32_t num = 1000 * 10000;
index->WriteMultiMillonData("tag1", "xxxxxxxxxxxxxxx", num);
std::cout << "numOfRow: " << num << "\ttime cost:" << taosGetTimestampUs() - st << std::endl;
// index->WriteMultiMillonData("tag2", "xxxxxxxxxxxxxxxxxxxxxxxxx", 100 * 10000);
}
// TEST_F(IndexEnv2, testIndex_read_performance) {
// std::string path = "/tmp/cache_and_tfile";
// if (index->Init(path) != 0) {
// }
// index->PutOneTarge("tag1", "Hello", 12);
// index->PutOneTarge("tag1", "Hello", 15);
// index->ReadMultiMillonData("tag1", "Hello");
// std::cout << "reader sz: " << index->SearchOne("tag1", "Hello") << std::endl;
// assert(3 == index->SearchOne("tag1", "Hello"));
//}
// TEST_F(IndexEnv2, testIndexMultiTag) {
// std::string path = "/tmp/multi_tag";
// if (index->Init(path) != 0) {
// }
// int64_t st = taosGetTimestampUs();
// int32_t num = 1000 * 10000;
// index->WriteMultiMillonData("tag1", "xxxxxxxxxxxxxxx", num);
// std::cout << "numOfRow: " << num << "\ttime cost:" << taosGetTimestampUs() - st << std::endl;
// // index->WriteMultiMillonData("tag2", "xxxxxxxxxxxxxxxxxxxxxxxxx", 100 * 10000);
//}
TEST_F(IndexEnv2, testLongComVal1) {
std::string path = "/tmp/long_colVal";
if (index->Init(path) != 0) {
}
// gen colVal by randstr
std::string randstr = "xxxxxxxxxxxxxxxxx";
index->WriteMultiMillonData("tag1", randstr, 100 * 10000);
index->WriteMultiMillonData("tag1", randstr, 100 * 1000);
}
TEST_F(IndexEnv2, testLongComVal2) {
@ -1000,7 +1000,7 @@ TEST_F(IndexEnv2, testLongComVal2) {
}
// gen colVal by randstr
std::string randstr = "abcccc fdadfafdafda";
index->WriteMultiMillonData("tag1", randstr, 100 * 10000);
index->WriteMultiMillonData("tag1", randstr, 100 * 1000);
}
TEST_F(IndexEnv2, testLongComVal3) {
std::string path = "/tmp/long_colVal";
@ -1008,7 +1008,7 @@ TEST_F(IndexEnv2, testLongComVal3) {
}
// gen colVal by randstr
std::string randstr = "Yes, coding and coding and coding";
index->WriteMultiMillonData("tag1", randstr, 100 * 10000);
index->WriteMultiMillonData("tag1", randstr, 100 * 1000);
}
TEST_F(IndexEnv2, testLongComVal4) {
std::string path = "/tmp/long_colVal";
@ -1016,7 +1016,7 @@ TEST_F(IndexEnv2, testLongComVal4) {
}
// gen colVal by randstr
std::string randstr = "111111 bac fdadfa";
index->WriteMultiMillonData("tag1", randstr, 100 * 10000);
index->WriteMultiMillonData("tag1", randstr, 100 * 100);
}
TEST_F(IndexEnv2, testIndex_read_performance1) {
std::string path = "/tmp/cache_and_tfile";
@ -1026,7 +1026,7 @@ TEST_F(IndexEnv2, testIndex_read_performance1) {
index->PutOneTarge("tag1", "Hello", 15);
index->ReadMultiMillonData("tag1", "Hello", 1000);
std::cout << "reader sz: " << index->SearchOne("tag1", "Hello") << std::endl;
assert(3 == index->SearchOne("tag1", "Hello"));
EXPECT_EQ(2, index->SearchOne("tag1", "Hello"));
}
TEST_F(IndexEnv2, testIndex_read_performance2) {
std::string path = "/tmp/cache_and_tfile";
@ -1034,9 +1034,9 @@ TEST_F(IndexEnv2, testIndex_read_performance2) {
}
index->PutOneTarge("tag1", "Hello", 12);
index->PutOneTarge("tag1", "Hello", 15);
index->ReadMultiMillonData("tag1", "Hello", 1000 * 10);
index->ReadMultiMillonData("tag1", "Hello", 1000);
std::cout << "reader sz: " << index->SearchOne("tag1", "Hello") << std::endl;
assert(3 == index->SearchOne("tag1", "Hello"));
EXPECT_EQ(2, index->SearchOne("tag1", "Hello"));
}
TEST_F(IndexEnv2, testIndex_read_performance3) {
std::string path = "/tmp/cache_and_tfile";
@ -1044,9 +1044,9 @@ TEST_F(IndexEnv2, testIndex_read_performance3) {
}
index->PutOneTarge("tag1", "Hello", 12);
index->PutOneTarge("tag1", "Hello", 15);
index->ReadMultiMillonData("tag1", "Hello", 1000 * 100);
index->ReadMultiMillonData("tag1", "Hello", 1000);
std::cout << "reader sz: " << index->SearchOne("tag1", "Hello") << std::endl;
assert(3 == index->SearchOne("tag1", "Hello"));
EXPECT_EQ(2, index->SearchOne("tag1", "Hello"));
}
TEST_F(IndexEnv2, testIndex_read_performance4) {
std::string path = "/tmp/cache_and_tfile";
@ -1054,9 +1054,9 @@ TEST_F(IndexEnv2, testIndex_read_performance4) {
}
index->PutOneTarge("tag10", "Hello", 12);
index->PutOneTarge("tag12", "Hello", 15);
index->ReadMultiMillonData("tag10", "Hello", 1000 * 100);
index->ReadMultiMillonData("tag10", "Hello", 1000);
std::cout << "reader sz: " << index->SearchOne("tag1", "Hello") << std::endl;
assert(3 == index->SearchOne("tag10", "Hello"));
EXPECT_EQ(1, index->SearchOne("tag10", "Hello"));
}
TEST_F(IndexEnv2, testIndex_cache_del) {
std::string path = "/tmp/cache_and_tfile";
@ -1108,7 +1108,7 @@ TEST_F(IndexEnv2, testIndex_del) {
index->Del("tag10", "Hello", 11);
EXPECT_EQ(98, index->SearchOne("tag10", "Hello"));
index->WriteMultiMillonData("tag10", "xxxxxxxxxxxxxx", 100 * 10000);
index->WriteMultiMillonData("tag10", "xxxxxxxxxxxxxx", 100 * 100);
index->Del("tag10", "Hello", 17);
EXPECT_EQ(97, index->SearchOne("tag10", "Hello"));
}

View File

@ -154,7 +154,7 @@ TEST_F(JsonEnv, testWriteMillonData) {
SIndexMultiTerm* terms = indexMultiTermCreate();
indexMultiTermAdd(terms, term);
for (size_t i = 0; i < 100; i++) {
for (size_t i = 0; i < 10; i++) {
tIndexJsonPut(index, terms, i);
}
indexMultiTermDestroy(terms);
@ -162,14 +162,14 @@ TEST_F(JsonEnv, testWriteMillonData) {
{
std::string colName("voltagefdadfa");
std::string colVal("abxxxxxxxxxxxx");
for (int i = 0; i < 1000; i++) {
for (int i = 0; i < 10; i++) {
colVal[i % colVal.size()] = '0' + i % 128;
SIndexTerm* term = indexTermCreate(1, ADD_VALUE, TSDB_DATA_TYPE_BINARY, colName.c_str(), colName.size(),
colVal.c_str(), colVal.size());
SIndexMultiTerm* terms = indexMultiTermCreate();
indexMultiTermAdd(terms, term);
for (size_t i = 0; i < 1000; i++) {
for (size_t i = 0; i < 100; i++) {
tIndexJsonPut(index, terms, i);
}
indexMultiTermDestroy(terms);
@ -199,7 +199,7 @@ TEST_F(JsonEnv, testWriteMillonData) {
SArray* result = taosArrayInit(1, sizeof(uint64_t));
indexMultiTermQueryAdd(mq, q, QUERY_TERM);
tIndexJsonSearch(index, mq, result);
assert(100 == taosArrayGetSize(result));
EXPECT_EQ(10, taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
}
{
@ -229,7 +229,7 @@ TEST_F(JsonEnv, testWriteMillonData) {
SArray* result = taosArrayInit(1, sizeof(uint64_t));
indexMultiTermQueryAdd(mq, q, QUERY_GREATER_EQUAL);
tIndexJsonSearch(index, mq, result);
assert(100 == taosArrayGetSize(result));
EXPECT_EQ(10, taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
}
}
@ -385,7 +385,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
SIndexMultiTerm* terms = indexMultiTermCreate();
indexMultiTermAdd(terms, term);
for (size_t i = 0; i < 100000; i++) {
for (size_t i = 0; i < 1000; i++) {
tIndexJsonPut(index, terms, i);
}
indexMultiTermDestroy(terms);
@ -523,7 +523,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT2) {
{
int val = 10;
std::string colName("test1");
for (int i = 0; i < 10000; i++) {
for (int i = 0; i < 1000; i++) {
val += 1;
WriteData(index, colName, TSDB_DATA_TYPE_INT, &val, sizeof(val), i);
}
@ -532,7 +532,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT2) {
int val = 10;
std::string colName("test2xxx");
std::string colVal("xxxxxxxxxxxxxxx");
for (int i = 0; i < 100000; i++) {
for (int i = 0; i < 1000; i++) {
val += 1;
WriteData(index, colName, TSDB_DATA_TYPE_BINARY, (void*)(colVal.c_str()), colVal.size(), i);
}
@ -542,14 +542,14 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT2) {
std::string colName("test1");
int val = 9;
Search(index, colName, TSDB_DATA_TYPE_INT, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
EXPECT_EQ(10000, taosArrayGetSize(res));
EXPECT_EQ(1000, taosArrayGetSize(res));
}
{
SArray* res = NULL;
std::string colName("test2xxx");
std::string colVal("xxxxxxxxxxxxxxx");
Search(index, colName, TSDB_DATA_TYPE_BINARY, (void*)(colVal.c_str()), colVal.size(), QUERY_TERM, &res);
EXPECT_EQ(100000, taosArrayGetSize(res));
EXPECT_EQ(1000, taosArrayGetSize(res));
}
}
TEST_F(JsonEnv, testWriteJsonTfileAndCache_FLOAT) {

View File

@ -1771,6 +1771,7 @@ static const char* jkSubplanId = "Id";
static const char* jkSubplanType = "SubplanType";
static const char* jkSubplanMsgType = "MsgType";
static const char* jkSubplanLevel = "Level";
static const char* jkSubplanDbFName = "DbFName";
static const char* jkSubplanNodeAddr = "NodeAddr";
static const char* jkSubplanRootNode = "RootNode";
static const char* jkSubplanDataSink = "DataSink";
@ -1788,6 +1789,9 @@ static int32_t subplanToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkSubplanLevel, pNode->level);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkSubplanDbFName, pNode->dbFName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkSubplanNodeAddr, queryNodeAddrToJson, &pNode->execNode);
}
@ -1815,6 +1819,9 @@ static int32_t jsonToSubplan(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetIntValue(pJson, jkSubplanLevel, &pNode->level);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkSubplanDbFName, pNode->dbFName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonToObject(pJson, jkSubplanNodeAddr, jsonToQueryNodeAddr, &pNode->execNode);
}
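
The two hunks above add DbFName to the subplan's JSON round trip. A sketch of the same pattern with plain cJSON, on the assumption that the tjson helpers wrap calls like these; the field value here is made up for illustration:

#include <stdio.h>
#include <string.h>
#include "cJSON.h"

int main(void) {
  cJSON *plan = cJSON_CreateObject();
  cJSON_AddStringToObject(plan, "DbFName", "1.test_db");   /* hypothetical value */

  char dbFName[64] = {0};
  const cJSON *item = cJSON_GetObjectItem(plan, "DbFName");
  if (cJSON_IsString(item) && item->valuestring != NULL) {
    strncpy(dbFName, item->valuestring, sizeof(dbFName) - 1);
  }
  printf("DbFName restored: %s\n", dbFName);
  cJSON_Delete(plan);
  return 0;
}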

View File

@ -1137,10 +1137,6 @@ bool nodesIsRegularOp(const SOperatorNode* pOp) {
return false;
}
bool nodesIsTimeorderQuery(const SNode* pQuery) { return false; }
bool nodesIsTimelineQuery(const SNode* pQuery) { return false; }
typedef struct SCollectColumnsCxt {
int32_t errCode;
const char* pTableAlias;

View File

@ -1110,7 +1110,9 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
NEXT_TOKEN(pCxt->pSql, sToken);
autoCreateTbl = true;
} else {
CHECK_CODE(getTableMeta(pCxt, &name, tbFName));
char dbFName[TSDB_DB_FNAME_LEN];
tNameGetFullDbName(&name, dbFName);
CHECK_CODE(getTableMeta(pCxt, &name, dbFName));
}
STableDataBlocks* dataBuf = NULL;

View File

@ -53,6 +53,8 @@ static bool afterGroupBy(ESqlClause clause) { return clause > SQL_CLAUSE_GROUP_B
static bool beforeHaving(ESqlClause clause) { return clause < SQL_CLAUSE_HAVING; }
static bool afterHaving(ESqlClause clause) { return clause > SQL_CLAUSE_HAVING; }
static int32_t addNamespace(STranslateContext* pCxt, void* pTable) {
size_t currTotalLevel = taosArrayGetSize(pCxt->pNsLevel);
if (currTotalLevel > pCxt->currLevel) {
@ -276,6 +278,10 @@ static bool isScanPseudoColumnFunc(const SNode* pNode) {
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsScanPseudoColumnFunc(((SFunctionNode*)pNode)->funcId));
}
static bool isNonstandardSQLFunc(const SNode* pNode) {
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsNonstandardSQLFunc(((SFunctionNode*)pNode)->funcId));
}
static bool isDistinctOrderBy(STranslateContext* pCxt) {
return (SQL_CLAUSE_ORDER_BY == pCxt->currClause && pCxt->pCurrStmt->isDistinct);
}
@ -376,6 +382,35 @@ static bool isInternalPrimaryKey(const SColumnNode* pCol) {
return PRIMARYKEY_TIMESTAMP_COL_ID == pCol->colId && 0 == strcmp(pCol->colName, PK_TS_COL_INTERNAL_NAME);
}
static bool isTimeOrderQuery(SNode* pStmt) {
if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
return ((SSelectStmt*)pStmt)->isTimeOrderQuery;
} else {
return false;
}
}
static bool isPrimaryKeyImpl(STempTableNode* pTable, SNode* pExpr) {
if (QUERY_NODE_COLUMN == nodeType(pExpr)) {
return (PRIMARYKEY_TIMESTAMP_COL_ID == ((SColumnNode*)pExpr)->colId);
} else if (QUERY_NODE_FUNCTION == nodeType(pExpr)) {
SFunctionNode* pFunc = (SFunctionNode*)pExpr;
if (FUNCTION_TYPE_SELECT_VALUE == pFunc->funcType) {
return isPrimaryKeyImpl(pTable, nodesListGetNode(pFunc->pParameterList, 0));
} else if (FUNCTION_TYPE_WSTARTTS == pFunc->funcType || FUNCTION_TYPE_WENDTS == pFunc->funcType) {
return true;
}
}
return false;
}
static bool isPrimaryKey(STempTableNode* pTable, SNode* pExpr) {
if (!isTimeOrderQuery(pTable->pSubquery)) {
return false;
}
return isPrimaryKeyImpl(pTable, pExpr);
}
static bool findAndSetColumn(SColumnNode* pCol, const STableNode* pTable) {
bool found = false;
if (QUERY_NODE_REAL_TABLE == nodeType(pTable)) {
@ -398,8 +433,7 @@ static bool findAndSetColumn(SColumnNode* pCol, const STableNode* pTable) {
FOREACH(pNode, pProjectList) {
SExprNode* pExpr = (SExprNode*)pNode;
if (0 == strcmp(pCol->colName, pExpr->aliasName) ||
((QUERY_NODE_COLUMN == nodeType(pExpr) && PRIMARYKEY_TIMESTAMP_COL_ID == ((SColumnNode*)pExpr)->colId) &&
isInternalPrimaryKey(pCol))) {
(isPrimaryKey((STempTableNode*)pTable, pNode) && isInternalPrimaryKey(pCol))) {
setColumnInfoByExpr(pTable, pExpr, pCol);
found = true;
break;
@ -433,6 +467,7 @@ static EDealRes translateColumnWithoutPrefix(STranslateContext* pCxt, SColumnNod
SArray* pTables = taosArrayGetP(pCxt->pNsLevel, pCxt->currLevel);
size_t nums = taosArrayGetSize(pTables);
bool found = false;
bool isInternalPk = isInternalPrimaryKey(pCol);
for (size_t i = 0; i < nums; ++i) {
STableNode* pTable = taosArrayGetP(pTables, i);
if (findAndSetColumn(pCol, pTable)) {
@ -440,10 +475,16 @@ static EDealRes translateColumnWithoutPrefix(STranslateContext* pCxt, SColumnNod
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_AMBIGUOUS_COLUMN, pCol->colName);
}
found = true;
if (isInternalPk) {
break;
}
}
}
if (!found) {
if (isInternalPrimaryKey(pCol)) {
if (isInternalPk) {
if (NULL != pCxt->pCurrStmt->pWindow) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_NOT_ALLOWED_WIN_QUERY);
}
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_INTERNAL_PK);
} else {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, pCol->colName);
@ -703,10 +744,13 @@ static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) {
return DEAL_RES_CONTINUE;
}
static EDealRes haveAggFunction(SNode* pNode, void* pContext) {
static EDealRes haveAggOrNonstdFunction(SNode* pNode, void* pContext) {
if (isAggFunc(pNode)) {
*((bool*)pContext) = true;
return DEAL_RES_END;
} else if (isNonstandardSQLFunc(pNode)) {
*((bool*)pContext) = true;
return DEAL_RES_END;
}
return DEAL_RES_CONTINUE;
}
@ -743,6 +787,12 @@ static int32_t rewriteCountStar(STranslateContext* pCxt, SFunctionNode* pCount)
return code;
}
static bool hasInvalidFuncNesting(SNodeList* pParameterList) {
bool hasInvalidFunc = false;
nodesWalkExprs(pParameterList, haveAggOrNonstdFunction, &hasInvalidFunc);
return hasInvalidFunc;
}
static EDealRes translateFunction(STranslateContext* pCxt, SFunctionNode* pFunc) {
SFmGetFuncInfoParam param = {.pCtg = pCxt->pParseCxt->pCatalog,
.pRpc = pCxt->pParseCxt->pTransporter,
@ -754,14 +804,14 @@ static EDealRes translateFunction(STranslateContext* pCxt, SFunctionNode* pFunc)
if (beforeHaving(pCxt->currClause)) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_ILLEGAL_USE_AGG_FUNCTION);
}
bool haveAggFunc = false;
nodesWalkExprs(pFunc->pParameterList, haveAggFunction, &haveAggFunc);
if (haveAggFunc) {
if (hasInvalidFuncNesting(pFunc->pParameterList)) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_AGG_FUNC_NESTING);
}
if (pCxt->pCurrStmt->hasNonstdSQLFunc) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
}
pCxt->pCurrStmt->hasAggFuncs = true;
pCxt->pCurrStmt->isTimeOrderQuery = false;
if (isCountStar(pFunc)) {
pCxt->errCode = rewriteCountStar(pCxt, pFunc);
}
@ -784,6 +834,15 @@ static EDealRes translateFunction(STranslateContext* pCxt, SFunctionNode* pFunc)
}
}
}
if (TSDB_CODE_SUCCESS == pCxt->errCode && fmIsNonstandardSQLFunc(pFunc->funcId)) {
if (SQL_CLAUSE_SELECT != pCxt->currClause || pCxt->pCurrStmt->hasNonstdSQLFunc || pCxt->pCurrStmt->hasAggFuncs) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
}
if (hasInvalidFuncNesting(pFunc->pParameterList)) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_AGG_FUNC_NESTING);
}
pCxt->pCurrStmt->hasNonstdSQLFunc = true;
}
return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
}
@ -952,6 +1011,7 @@ typedef struct CheckAggColCoexistCxt {
STranslateContext* pTranslateCxt;
bool existAggFunc;
bool existCol;
bool existNonstdFunc;
int32_t selectFuncNum;
} CheckAggColCoexistCxt;
@ -962,6 +1022,10 @@ static EDealRes doCheckAggColCoexist(SNode* pNode, void* pContext) {
pCxt->existAggFunc = true;
return DEAL_RES_IGNORE_CHILD;
}
if (isNonstandardSQLFunc(pNode)) {
pCxt->existNonstdFunc = true;
return DEAL_RES_IGNORE_CHILD;
}
if (isScanPseudoColumnFunc(pNode) || QUERY_NODE_COLUMN == nodeType(pNode)) {
pCxt->existCol = true;
}
@ -972,16 +1036,21 @@ static int32_t checkAggColCoexist(STranslateContext* pCxt, SSelectStmt* pSelect)
if (NULL != pSelect->pGroupByList) {
return TSDB_CODE_SUCCESS;
}
CheckAggColCoexistCxt cxt = {.pTranslateCxt = pCxt, .existAggFunc = false, .existCol = false};
CheckAggColCoexistCxt cxt = {
.pTranslateCxt = pCxt, .existAggFunc = false, .existCol = false, .existNonstdFunc = false};
nodesWalkExprs(pSelect->pProjectionList, doCheckAggColCoexist, &cxt);
if (!pSelect->isDistinct) {
nodesWalkExprs(pSelect->pOrderByList, doCheckAggColCoexist, &cxt);
}
if (1 == cxt.selectFuncNum) {
return rewriteColsToSelectValFunc(pCxt, pSelect);
} else if ((cxt.selectFuncNum > 1 || cxt.existAggFunc || NULL != pSelect->pWindow) && cxt.existCol) {
}
if ((cxt.selectFuncNum > 1 || cxt.existAggFunc || NULL != pSelect->pWindow) && cxt.existCol) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_SINGLE_GROUP);
}
if (cxt.existNonstdFunc && cxt.existCol) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
}
return TSDB_CODE_SUCCESS;
}

View File

@ -164,6 +164,11 @@ static char* getSyntaxErrFormat(int32_t errCode) {
return "Invalid function name";
case TSDB_CODE_PAR_COMMENT_TOO_LONG:
return "Comment too long";
case TSDB_CODE_PAR_NOT_ALLOWED_FUNC:
return "Some functions are allowed only in the SELECT list of a query. "
"And, cannot be mixed with other non scalar functions or columns.";
case TSDB_CODE_PAR_NOT_ALLOWED_WIN_QUERY:
return "Window query not supported, since the result of subquery not include valid timestamp column";
case TSDB_CODE_OUT_OF_MEMORY:
return "Out of memory";
default:
@ -362,8 +367,8 @@ int parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* p
if (keyLen == 0 || taosHashGet(keyHash, jsonKey, keyLen) != NULL) {
continue;
}
// key: keyLen + VARSTR_HEADER_SIZE, value type: CHAR_BYTES, value reserved: LONG_BYTES
tagKV = taosMemoryCalloc(keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES + LONG_BYTES, 1);
// key: keyLen + VARSTR_HEADER_SIZE, value type: CHAR_BYTES, value reserved: DOUBLE_BYTES
tagKV = taosMemoryCalloc(keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES + DOUBLE_BYTES, 1);
if (!tagKV) {
retCode = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto end;
@ -408,13 +413,9 @@ int parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* p
}
char* valueType = POINTER_SHIFT(tagKV, keyLen + VARSTR_HEADER_SIZE);
char* valueData = POINTER_SHIFT(tagKV, keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES);
*valueType =
(item->valuedouble - (int64_t)(item->valuedouble) == 0) ? TSDB_DATA_TYPE_BIGINT : TSDB_DATA_TYPE_DOUBLE;
if (*valueType == TSDB_DATA_TYPE_DOUBLE)
*valueType = TSDB_DATA_TYPE_DOUBLE;
*((double*)valueData) = item->valuedouble;
else if (*valueType == TSDB_DATA_TYPE_BIGINT)
*((int64_t*)valueData) = item->valueint;
tdAddColToKVRow(kvRowBuilder, jsonIndex++, tagKV, keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES + LONG_BYTES);
tdAddColToKVRow(kvRowBuilder, jsonIndex++, tagKV, keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES + DOUBLE_BYTES);
} else if (item->type == cJSON_True || item->type == cJSON_False) {
char* valueType = POINTER_SHIFT(tagKV, keyLen + VARSTR_HEADER_SIZE);
char* valueData = POINTER_SHIFT(tagKV, keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES);
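
With the change above every numeric JSON tag value is stored as a double, so the cell reserves DOUBLE_BYTES after the one-byte type tag instead of LONG_BYTES. A rough sketch of that cell layout using stand-in constants; the real SKVRow encoding and type ids are not reproduced here:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VARSTR_HEADER_SIZE 2
#define CHAR_BYTES         1
#define DOUBLE_BYTES       8

enum { TAG_TYPE_DOUBLE = 1 };  /* placeholder, not the real TSDB type id */

/* Pack one key/value cell: [2-byte key len][key bytes][type byte][8-byte double]. */
static size_t packJsonNumericTag(char *out, const char *key, double v) {
  uint16_t keyLen = (uint16_t)strlen(key);
  memcpy(out, &keyLen, VARSTR_HEADER_SIZE);
  memcpy(out + VARSTR_HEADER_SIZE, key, keyLen);
  out[VARSTR_HEADER_SIZE + keyLen] = TAG_TYPE_DOUBLE;
  memcpy(out + VARSTR_HEADER_SIZE + keyLen + CHAR_BYTES, &v, DOUBLE_BYTES);
  return VARSTR_HEADER_SIZE + keyLen + CHAR_BYTES + DOUBLE_BYTES;
}

int main(void) {
  char   buf[64];
  size_t n = packJsonNumericTag(buf, "voltage", 3.14);
  printf("cell size: %zu bytes\n", n);
  return 0;
}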

View File

@ -121,10 +121,29 @@ TEST_F(ParserSelectTest, selectFunc) {
run("SELECT MAX(c1), c2 FROM t1 STATE_WINDOW(c3)");
}
TEST_F(ParserSelectTest, clause) {
TEST_F(ParserSelectTest, nonstdFunc) {
useDb("root", "test");
run("SELECT DIFF(c1) FROM t1");
}
TEST_F(ParserSelectTest, nonstdFuncSemanticCheck) {
useDb("root", "test");
run("SELECT DIFF(c1), c2 FROM t1", TSDB_CODE_PAR_NOT_ALLOWED_FUNC, PARSER_STAGE_TRANSLATE);
run("SELECT DIFF(c1), tbname FROM t1", TSDB_CODE_PAR_NOT_ALLOWED_FUNC, PARSER_STAGE_TRANSLATE);
run("SELECT DIFF(c1), count(*) FROM t1", TSDB_CODE_PAR_NOT_ALLOWED_FUNC, PARSER_STAGE_TRANSLATE);
run("SELECT DIFF(c1), CSUM(c1) FROM t1", TSDB_CODE_PAR_NOT_ALLOWED_FUNC, PARSER_STAGE_TRANSLATE);
// run("SELECT DIFF(c1) FROM t1 INTERVAL(10s)");
}
TEST_F(ParserSelectTest, groupBy) {
useDb("root", "test");
// GROUP BY clause
run("SELECT COUNT(*) cnt FROM t1 WHERE c1 > 0");
run("SELECT COUNT(*), c2 cnt FROM t1 WHERE c1 > 0 GROUP BY c2");
@ -134,13 +153,19 @@ TEST_F(ParserSelectTest, clause) {
run("SELECT COUNT(*), c1, c2 + 10, c1 + c2 cnt FROM t1 WHERE c1 > 0 GROUP BY c2, c1");
run("SELECT COUNT(*), c1 + 10, c2 cnt FROM t1 WHERE c1 > 0 GROUP BY c1 + 10, c2");
}
TEST_F(ParserSelectTest, orderBy) {
useDb("root", "test");
// order by clause
run("SELECT COUNT(*) cnt FROM t1 WHERE c1 > 0 GROUP BY c2 order by cnt");
run("SELECT COUNT(*) cnt FROM t1 WHERE c1 > 0 GROUP BY c2 order by 1");
}
TEST_F(ParserSelectTest, distinct) {
useDb("root", "test");
// distinct clause
// run("SELECT distinct c1, c2 FROM t1 WHERE c1 > 0 order by c1");
// run("SELECT distinct c1 + 10, c2 FROM t1 WHERE c1 > 0 order by c1 + 10, c2");
@ -174,6 +199,25 @@ TEST_F(ParserSelectTest, intervalSemanticCheck) {
PARSER_STAGE_TRANSLATE);
}
TEST_F(ParserSelectTest, subquery) {
useDb("root", "test");
run("SELECT SUM(a) FROM (SELECT MAX(c1) a, ts FROM st1s1 INTERVAL(1m)) INTERVAL(1n)");
run("SELECT SUM(a) FROM (SELECT MAX(c1) a, _wstartts FROM st1s1 INTERVAL(1m)) INTERVAL(1n)");
run("SELECT SUM(a) FROM (SELECT MAX(c1) a, ts FROM st1s1 PARTITION BY TBNAME INTERVAL(1m)) INTERVAL(1n)");
run("SELECT SUM(a) FROM (SELECT MAX(c1) a, _wstartts FROM st1s1 PARTITION BY TBNAME INTERVAL(1m)) INTERVAL(1n)");
}
TEST_F(ParserSelectTest, subquerySemanticError) {
useDb("root", "test");
run("SELECT SUM(a) FROM (SELECT MAX(c1) a FROM st1s1 INTERVAL(1m)) INTERVAL(1n)", TSDB_CODE_PAR_NOT_ALLOWED_WIN_QUERY,
PARSER_STAGE_TRANSLATE);
}
TEST_F(ParserSelectTest, semanticError) {
useDb("root", "test");

View File

@ -24,7 +24,7 @@
#define SPLIT_FLAG_TEST_MASK(val, mask) (((val) & (mask)) != 0)
typedef struct SSplitContext {
int32_t queryId;
uint64_t queryId;
int32_t groupId;
bool split;
} SSplitContext;

View File

@ -287,7 +287,7 @@ int32_t qwRegisterQueryBrokenLinkArg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn) {
req->taskId = htobe64(tId);
req->refId = htobe64(rId);
SRpcMsg pMsg = {
SRpcMsg brokenMsg = {
.msgType = TDMT_VND_DROP_TASK,
.pCont = req,
.contLen = sizeof(STaskDropReq),
@ -295,7 +295,7 @@ int32_t qwRegisterQueryBrokenLinkArg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn) {
.info = *pConn,
};
tmsgRegisterBrokenLinkArg(&pMsg);
tmsgRegisterBrokenLinkArg(&brokenMsg);
return TSDB_CODE_SUCCESS;
}
@ -321,7 +321,7 @@ int32_t qwRegisterHbBrokenLinkArg(SQWorker *mgmt, uint64_t sId, SRpcHandleInfo *
QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
SRpcMsg pMsg = {
SRpcMsg brokenMsg = {
.msgType = TDMT_VND_QUERY_HEARTBEAT,
.pCont = msg,
.contLen = msgSize,
@ -329,7 +329,7 @@ int32_t qwRegisterHbBrokenLinkArg(SQWorker *mgmt, uint64_t sId, SRpcHandleInfo *
.info = *pConn,
};
tmsgRegisterBrokenLinkArg(&pMsg);
tmsgRegisterBrokenLinkArg(&brokenMsg);
return TSDB_CODE_SUCCESS;
}

View File

@ -899,7 +899,7 @@ int32_t scalarCalculate(SNode *pNode, SArray *pBlockList, SScalarParam *pDst) {
}
int32_t code = 0;
SScalarCtx ctx = {.code = 0, .pBlockList = pBlockList, .param = pDst->param};
SScalarCtx ctx = {.code = 0, .pBlockList = pBlockList, .param = pDst ? pDst->param : NULL};
// TODO: OPT performance
ctx.pRes = taosHashInit(SCL_DEFAULT_OP_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);

View File

@ -923,6 +923,7 @@ static void doReleaseVec(SColumnInfoData* pCol, int32_t type) {
}
char *getJsonValue(char *json, char *key){ //todo
json++; // skip the leading type byte
int16_t cols = kvRowNCols(json);
for (int i = 0; i < cols; ++i) {
SColIdx *pColIdx = kvRowColIdxAt(json, i);
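
The increment at the top of getJsonValue reflects a new convention in which the serialized KV row that carries a JSON tag is prefixed with a single type byte; the test changes further down build that prefix with a realloc plus memmove. A standalone sketch of the prefixing step, using the standard allocator and a placeholder type id rather than the TDengine helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { TYPE_JSON_PLACEHOLDER = 15 };  /* stands in for TSDB_DATA_TYPE_JSON */

/* Prefix a serialized row of rowLen bytes with one type byte. */
static char *prependTypeByte(char *row, int rowLen) {
  char *tagged = realloc(row, rowLen + 1);
  if (tagged == NULL) return NULL;
  memmove(tagged + 1, tagged, rowLen);
  tagged[0] = TYPE_JSON_PLACEHOLDER;
  return tagged;
}

int main(void) {
  char *row = malloc(4);
  if (row == NULL) return 1;
  memcpy(row, "\x01key", 4);          /* fake serialized row */
  row = prependTypeByte(row, 4);
  if (row == NULL) return 1;
  printf("type byte: %d\n", row[0]);
  free(row);
  return 0;
}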

View File

@ -1035,7 +1035,7 @@ void makeJsonArrow(SSDataBlock **src, SNode **opNode, void *json, char *key){
SNode *pLeft = NULL, *pRight = NULL;
scltMakeValueNode(&pRight, TSDB_DATA_TYPE_BINARY, keyVar);
scltMakeColumnNode(&pLeft, src, TSDB_DATA_TYPE_JSON, varDataLen(json), 1, json);
scltMakeColumnNode(&pLeft, src, TSDB_DATA_TYPE_JSON, kvRowLen(json), 1, json);
scltMakeOpNode(opNode, OP_TYPE_JSON_GET_VALUE, TSDB_DATA_TYPE_JSON, pLeft, pRight);
}
@ -1088,18 +1088,17 @@ void makeCalculate(void *json, void *key, int32_t rightType, void *rightData, do
}else if(opType == OP_TYPE_ADD || opType == OP_TYPE_SUB || opType == OP_TYPE_MULTI || opType == OP_TYPE_DIV ||
opType == OP_TYPE_MOD || opType == OP_TYPE_MINUS){
double tmp = *((double *)colDataGetData(column, 0));
ASSERT_TRUE(tmp == exceptValue);
printf("result:%lf\n", tmp);
printf("1result:%f,except:%f\n", *((double *)colDataGetData(column, 0)), exceptValue);
ASSERT_TRUE(abs(*((double *)colDataGetData(column, 0)) - exceptValue) < 1e-15);
}else if(opType == OP_TYPE_BIT_AND || opType == OP_TYPE_BIT_OR){
printf("2result:%ld,except:%f\n", *((int64_t *)colDataGetData(column, 0)), exceptValue);
ASSERT_EQ(*((int64_t *)colDataGetData(column, 0)), exceptValue);
printf("result:%ld\n", *((int64_t *)colDataGetData(column, 0)));
}else if(opType == OP_TYPE_GREATER_THAN || opType == OP_TYPE_GREATER_EQUAL || opType == OP_TYPE_LOWER_THAN ||
opType == OP_TYPE_LOWER_EQUAL || opType == OP_TYPE_EQUAL || opType == OP_TYPE_NOT_EQUAL ||
opType == OP_TYPE_IS_NULL || opType == OP_TYPE_IS_NOT_NULL || opType == OP_TYPE_IS_TRUE ||
opType == OP_TYPE_LIKE || opType == OP_TYPE_NOT_LIKE || opType == OP_TYPE_MATCH || opType == OP_TYPE_NMATCH){
printf("3result:%d,except:%f\n", *((bool *)colDataGetData(column, 0)), exceptValue);
ASSERT_EQ(*((bool *)colDataGetData(column, 0)), exceptValue);
printf("result:%d\n", *((bool *)colDataGetData(column, 0)));
}
taosArrayDestroyEx(blockList, scltFreeDataBlock);
@ -1114,6 +1113,13 @@ TEST(columnTest, json_column_arith_op) {
tdInitKVRowBuilder(&kvRowBuilder);
parseJsontoTagData(rightv, &kvRowBuilder, NULL, 0);
SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder);
char *tmp = (char *)taosMemoryRealloc(row, kvRowLen(row)+1);
if(tmp == NULL){
ASSERT_TRUE(0);
}
memmove(tmp+1, tmp, kvRowLen(tmp));
*tmp = TSDB_DATA_TYPE_JSON;
row = tmp;
const int32_t len = 8;
EOperatorType op[len] = {OP_TYPE_ADD, OP_TYPE_SUB, OP_TYPE_MULTI, OP_TYPE_DIV,
@ -1166,6 +1172,9 @@ TEST(columnTest, json_column_arith_op) {
for(int i = 0; i < len; i++){
makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes5[i], op[i]);
}
tdDestroyKVRowBuilder(&kvRowBuilder);
taosMemoryFree(row);
}
void *prepareNchar(char* rightData){
@ -1186,6 +1195,13 @@ TEST(columnTest, json_column_logic_op) {
tdInitKVRowBuilder(&kvRowBuilder);
parseJsontoTagData(rightv, &kvRowBuilder, NULL, 0);
SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder);
char *tmp = (char *)taosMemoryRealloc(row, kvRowLen(row)+1);
if(tmp == NULL){
ASSERT_TRUE(0);
}
memmove(tmp+1, tmp, kvRowLen(tmp));
*tmp = TSDB_DATA_TYPE_JSON;
row = tmp;
const int32_t len = 9;
const int32_t len1 = 4;
@ -1223,7 +1239,7 @@ TEST(columnTest, json_column_logic_op) {
printf("--------------------json null---------------------\n");
key = "k3";
double eRes2[len+len1] = {DBL_MAX, DBL_MAX, DBL_MAX, DBL_MAX, DBL_MAX, DBL_MAX, true, false, DBL_MAX, DBL_MAX, DBL_MAX, DBL_MAX, DBL_MAX};
bool eRes2[len+len1] = {false, false, false, false, false, false, true, false, false, false, false, false, false};
for(int i = 0; i < len; i++){
makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes2[i], op[i]);
}
@ -1262,7 +1278,7 @@ TEST(columnTest, json_column_logic_op) {
printf("--------------------json double---------------------\n");
key = "k6";
bool eRes5[len+len1] = {true, false, false, false, false, true, false, true, true, false, false, false, true};
bool eRes5[len+len1] = {true, false, false, false, false, true, false, true, true, false, true, false, true};
for(int i = 0; i < len; i++){
makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes5[i], op[i]);
}
@ -1275,7 +1291,7 @@ TEST(columnTest, json_column_logic_op) {
printf("---------------------json not exist--------------------\n");
key = "k10";
double eRes10[len+len1] = {DBL_MAX, DBL_MAX, DBL_MAX, DBL_MAX, DBL_MAX, DBL_MAX, true, false, DBL_MAX, DBL_MAX, DBL_MAX, DBL_MAX, DBL_MAX};
double eRes10[len+len1] = {false, false, false, false, false, false, true, false, false, false, false, false, false};
for(int i = 0; i < len; i++){
makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes10[i], op[i]);
}
@ -1284,6 +1300,9 @@ TEST(columnTest, json_column_logic_op) {
makeCalculate(row, key, TSDB_DATA_TYPE_NCHAR, rightData, eRes10[i], op[i]);
taosMemoryFree(rightData);
}
tdDestroyKVRowBuilder(&kvRowBuilder);
taosMemoryFree(row);
}
TEST(columnTest, smallint_value_add_int_column) {

View File

@ -247,6 +247,19 @@ int32_t streamTaskExec2(SStreamTask* pTask, SMsgCb* pMsgCb) {
void* data = NULL;
taosGetQitem(pTask->inputQAll, &data);
if (data == NULL) break;
streamTaskExecImpl(pTask, data, pRes);
taosFreeQitem(data);
if (taosArrayGetSize(pRes) != 0) {
SStreamDataBlock* resQ = taosAllocateQitem(sizeof(void**), DEF_QITEM);
resQ->type = STREAM_INPUT__DATA_BLOCK;
resQ->blocks = pRes;
taosWriteQitem(pTask->outputQ, resQ);
pRes = taosArrayInit(0, sizeof(SSDataBlock));
if (pRes == NULL) goto FAIL;
}
}
atomic_store_8(&pTask->status, TASK_STATUS__IDLE);
@ -298,6 +311,9 @@ int32_t streamTaskSink(SStreamTask* pTask, SMsgCb* pMsgCb) {
}
// dispatch
// TODO dispatch guard
int8_t outputStatus = atomic_load_8(&pTask->outputStatus);
if (outputStatus == TASK_OUTPUT_STATUS__NORMAL) {
if (pTask->dispatchType == TASK_DISPATCH__INPLACE) {
SRpcMsg dispatchMsg = {0};
if (streamBuildExecMsg(pTask, pRes, &dispatchMsg, NULL) < 0) {
@ -356,6 +372,7 @@ int32_t streamTaskSink(SStreamTask* pTask, SMsgCb* pMsgCb) {
ASSERT(pTask->dispatchType == TASK_DISPATCH__NONE);
}
}
}
return 0;
}
@ -406,11 +423,32 @@ int32_t streamTaskProcessDispatchReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStream
return 0;
}
int32_t streamTaskProcessDispatchRsp(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchRsp* pRsp) {
atomic_store_8(&pTask->inputStatus, pRsp->inputStatus);
if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) {
// TODO: init recover timer
}
// continue dispatch
streamTaskSink(pTask, pMsgCb);
return 0;
}
int32_t streamTaskProcessRunReq(SStreamTask* pTask, SMsgCb* pMsgCb) {
streamTaskExec2(pTask, pMsgCb);
streamTaskSink(pTask, pMsgCb);
return 0;
}
int32_t streamTaskProcessRecoverReq(SStreamTask* pTask, char* msg) {
//
return 0;
}
int32_t streamTaskProcessRecoverRsp(SStreamTask* pTask, char* msg) {
//
return 0;
}
int32_t streamExecTask(SStreamTask* pTask, SMsgCb* pMsgCb, const void* input, int32_t inputType, int32_t workId) {
SArray* pRes = NULL;
// source
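
The sink changes above gate dispatch on the task's output status and let the dispatch response either re-arm the sink or, when the downstream input queue reports blocked, back off until a recover timer fires. A toy model of that handshake with invented names, not the stream API:

#include <stdio.h>

enum { STATUS_NORMAL = 0, STATUS_BLOCKED = 1 };

typedef struct {
  int outputStatus;  /* last known status of the downstream input queue */
  int pending;       /* result blocks waiting in the output queue */
} ToyTask;

static void sink(ToyTask *t) {
  if (t->pending > 0 && t->outputStatus == STATUS_NORMAL) {
    --t->pending;                       /* "dispatch" one block */
    printf("dispatched, %d left\n", t->pending);
  }                                     /* then wait for the response */
}

static void onDispatchRsp(ToyTask *t, int downstreamInputStatus) {
  t->outputStatus = downstreamInputStatus;
  if (downstreamInputStatus == STATUS_BLOCKED) {
    return;                             /* real code would start a recover timer */
  }
  sink(t);                              /* continue dispatching */
}

int main(void) {
  ToyTask t = {STATUS_NORMAL, 3};
  sink(&t);
  onDispatchRsp(&t, STATUS_NORMAL);
  onDispatchRsp(&t, STATUS_BLOCKED);
  return 0;
}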

View File

@ -36,10 +36,10 @@ typedef struct SSyncIO {
STaosQueue *pMsgQ;
STaosQset * pQset;
TdThread consumerTid;
void *serverRpc;
void *clientRpc;
SEpSet myAddr;
SMsgCb msgcb;
tmr_h qTimer;
int32_t qTimerMS;
@ -65,8 +65,8 @@ extern SSyncIO *gSyncIO;
int32_t syncIOStart(char *host, uint16_t port);
int32_t syncIOStop();
int32_t syncIOSendMsg(void *clientRpc, const SEpSet *pEpSet, SRpcMsg *pMsg);
int32_t syncIOEqMsg(void *queue, SRpcMsg *pMsg);
int32_t syncIOSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg);
int32_t syncIOEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg);
int32_t syncIOQTimerStart();
int32_t syncIOQTimerStop();

View File

@ -20,134 +20,40 @@
extern "C" {
#endif
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "cJSON.h"
#include "sync.h"
#include "syncTools.h"
#include "taosdef.h"
#include "tglobal.h"
#include "tlog.h"
#include "ttimer.h"
#define sFatal(...) \
{ \
if (sDebugFlag & DEBUG_FATAL) { \
taosPrintLog("SYN FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); \
} \
}
#define sError(...) \
{ \
if (sDebugFlag & DEBUG_ERROR) { \
taosPrintLog("SYN ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); \
} \
}
#define sWarn(...) \
{ \
if (sDebugFlag & DEBUG_WARN) { \
taosPrintLog("SYN WARN ", DEBUG_WARN, 255, __VA_ARGS__); \
} \
}
#define sInfo(...) \
{ \
if (sDebugFlag & DEBUG_INFO) { \
taosPrintLog("SYN INFO ", DEBUG_INFO, 255, __VA_ARGS__); \
} \
}
#define sDebug(...) \
{ \
if (sDebugFlag & DEBUG_DEBUG) { \
taosPrintLog("SYN DEBUG ", DEBUG_DEBUG, sDebugFlag, __VA_ARGS__); \
} \
}
#define sTrace(...) \
{ \
if (sDebugFlag & DEBUG_TRACE) { \
taosPrintLog("SYN TRACE ", DEBUG_TRACE, sDebugFlag, __VA_ARGS__); \
} \
}
// clang-format off
#define sFatal(...) do { if (sDebugFlag & DEBUG_FATAL) { taosPrintLog("SYN FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
#define sError(...) do { if (sDebugFlag & DEBUG_ERROR) { taosPrintLog("SYN ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)
#define sWarn(...) do { if (sDebugFlag & DEBUG_WARN) { taosPrintLog("SYN WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0)
#define sInfo(...) do { if (sDebugFlag & DEBUG_INFO) { taosPrintLog("SYN ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
#define sDebug(...) do { if (sDebugFlag & DEBUG_DEBUG) { taosPrintLog("SYN ", DEBUG_DEBUG, sDebugFlag, __VA_ARGS__); }} while(0)
#define sTrace(...) do { if (sDebugFlag & DEBUG_TRACE) { taosPrintLog("SYN ", DEBUG_TRACE, sDebugFlag, __VA_ARGS__); }} while(0)
#define sFatalLong(...) do { if (sDebugFlag & DEBUG_FATAL) { taosPrintLongString("SYN FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
#define sErrorLong(...) do { if (sDebugFlag & DEBUG_ERROR) { taosPrintLongString("SYN ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)
#define sWarnLong(...) do { if (sDebugFlag & DEBUG_WARN) { taosPrintLongString("SYN WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0)
#define sInfoLong(...) do { if (sDebugFlag & DEBUG_INFO) { taosPrintLongString("SYN ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
#define sDebugLong(...) do { if (sDebugFlag & DEBUG_DEBUG) { taosPrintLongString("SYN ", DEBUG_DEBUG, sDebugFlag, __VA_ARGS__); }} while(0)
#define sTraceLong(...) do { if (sDebugFlag & DEBUG_TRACE) { taosPrintLongString("SYN ", DEBUG_TRACE, sDebugFlag, __VA_ARGS__); }} while(0)
// clang-format on
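
Beyond the one-line layout, the reworked macros above wrap their bodies in do { ... } while (0) instead of bare braces, which keeps them safe in unbraced if/else chains. A short illustration with stand-in macros:

#include <stdio.h>

#define LOG_BRACE(msg)  { printf("%s\n", msg); }
#define LOG_DO(msg)     do { printf("%s\n", msg); } while (0)

int main(void) {
  int ok = 0;
  /* if (ok) LOG_BRACE("yes"); else LOG_BRACE("no");   <- the stray ';' breaks the else */
  if (ok) LOG_DO("yes"); else LOG_DO("no");            /* compiles and pairs correctly */
  return 0;
}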
#define sFatalLong(...) \
{ \
if (sDebugFlag & DEBUG_FATAL) { \
taosPrintLongString("SYN FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); \
} \
}
#define sErrorLong(...) \
{ \
if (sDebugFlag & DEBUG_ERROR) { \
taosPrintLongString("SYN ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); \
} \
}
#define sWarnLong(...) \
{ \
if (sDebugFlag & DEBUG_WARN) { \
taosPrintLongString("SYN WARN ", DEBUG_WARN, 255, __VA_ARGS__); \
} \
}
#define sInfoLong(...) \
{ \
if (sDebugFlag & DEBUG_INFO) { \
taosPrintLongString("SYN INFO ", DEBUG_INFO, 255, __VA_ARGS__); \
} \
}
#define sDebugLong(...) \
{ \
if (sDebugFlag & DEBUG_DEBUG) { \
taosPrintLongString("SYN DEBUG ", DEBUG_DEBUG, sDebugFlag, __VA_ARGS__); \
} \
}
#define sTraceLong(...) \
{ \
if (sDebugFlag & DEBUG_TRACE) { \
taosPrintLongString("SYN TRACE ", DEBUG_TRACE, sDebugFlag, __VA_ARGS__); \
} \
}
struct SyncTimeout;
typedef struct SyncTimeout SyncTimeout;
struct SyncClientRequest;
typedef struct SyncClientRequest SyncClientRequest;
struct SyncPing;
typedef struct SyncPing SyncPing;
struct SyncPingReply;
typedef struct SyncPingReply SyncPingReply;
struct SyncRequestVote;
typedef struct SyncRequestVote SyncRequestVote;
struct SyncRequestVoteReply;
typedef struct SyncRequestVoteReply SyncRequestVoteReply;
struct SyncAppendEntries;
typedef struct SyncAppendEntries SyncAppendEntries;
struct SyncAppendEntriesReply;
typedef struct SyncAppendEntriesReply SyncAppendEntriesReply;
struct SSyncEnv;
typedef struct SSyncEnv SSyncEnv;
struct SRaftStore;
typedef struct SRaftStore SRaftStore;
struct SVotesGranted;
typedef struct SVotesGranted SVotesGranted;
struct SVotesRespond;
typedef struct SVotesRespond SVotesRespond;
struct SSyncIndexMgr;
typedef struct SSyncIndexMgr SSyncIndexMgr;
struct SRaftCfg;
typedef struct SRaftCfg SRaftCfg;
struct SSyncRespMgr;
typedef struct SSyncRespMgr SSyncRespMgr;
typedef struct SSyncNode {
@ -160,10 +66,9 @@ typedef struct SSyncNode {
// sync io
SWal* pWal;
void* rpcClient;
int32_t (*FpSendMsg)(void* rpcClient, const SEpSet* pEpSet, SRpcMsg* pMsg);
void* queue;
int32_t (*FpEqMsg)(void* queue, SRpcMsg* pMsg);
const SMsgCb* msgcb;
int32_t (*FpSendMsg)(const SEpSet* pEpSet, SRpcMsg* pMsg);
int32_t (*FpEqMsg)(const SMsgCb* msgcb, SRpcMsg* pMsg);
// init internal
SNodeInfo myNodeInfo;

View File

@ -66,7 +66,7 @@ int32_t syncIOStop() {
return ret;
}
int32_t syncIOSendMsg(void *clientRpc, const SEpSet *pEpSet, SRpcMsg *pMsg) {
int32_t syncIOSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) {
assert(pEpSet->inUse == 0);
assert(pEpSet->numOfEps == 1);
@ -83,11 +83,11 @@ int32_t syncIOSendMsg(void *clientRpc, const SEpSet *pEpSet, SRpcMsg *pMsg) {
pMsg->info.handle = NULL;
pMsg->info.noResp = 1;
rpcSendRequest(clientRpc, pEpSet, pMsg, NULL);
rpcSendRequest(gSyncIO->clientRpc, pEpSet, pMsg, NULL);
return ret;
}
int32_t syncIOEqMsg(void *queue, SRpcMsg *pMsg) {
int32_t syncIOEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
int32_t ret = 0;
char logBuf[128];
syncRpcMsgLog2((char *)"==syncIOEqMsg==", pMsg);
@ -96,7 +96,7 @@ int32_t syncIOEqMsg(void *queue, SRpcMsg *pMsg) {
pTemp = taosAllocateQitem(sizeof(SRpcMsg), DEF_QITEM);
memcpy(pTemp, pMsg, sizeof(SRpcMsg));
STaosQueue *pMsgQ = queue;
STaosQueue *pMsgQ = gSyncIO->pMsgQ;
taosWriteQitem(pMsgQ, pTemp);
return ret;
@ -183,9 +183,6 @@ static int32_t syncIOStartInternal(SSyncIO *io) {
rpcInit.sessions = 100;
rpcInit.idleTime = 100;
rpcInit.user = "sync-io";
rpcInit.secret = "sync-io";
rpcInit.ckey = "key";
rpcInit.spi = 0;
rpcInit.connType = TAOS_CONN_CLIENT;
io->clientRpc = rpcOpen(&rpcInit);
@ -206,7 +203,6 @@ static int32_t syncIOStartInternal(SSyncIO *io) {
rpcInit.cfp = syncIOProcessRequest;
rpcInit.sessions = 1000;
rpcInit.idleTime = 2 * 1500;
rpcInit.afp = syncIOAuth;
rpcInit.parent = io;
rpcInit.connType = TAOS_CONN_SERVER;

View File

@ -240,26 +240,14 @@ int32_t syncGetAndDelRespRpc(int64_t rid, uint64_t index, SRpcMsg* msg) {
return ret;
}
void syncSetQ(int64_t rid, void* queue) {
void syncSetMsgCb(int64_t rid, const SMsgCb *msgcb) {
SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
if (pSyncNode == NULL) {
sTrace("syncSetQ get pSyncNode is NULL, rid:%ld", rid);
return;
}
assert(rid == pSyncNode->rid);
pSyncNode->queue = queue;
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
}
void syncSetRpc(int64_t rid, void* rpcHandle) {
SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
if (pSyncNode == NULL) {
sTrace("syncSetRpc get pSyncNode is NULL, rid:%ld", rid);
return;
}
assert(rid == pSyncNode->rid);
pSyncNode->rpcClient = rpcHandle;
pSyncNode->msgcb = msgcb;
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
}
@ -332,7 +320,7 @@ int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak) {
SRpcMsg rpcMsg;
syncClientRequest2RpcMsg(pSyncMsg, &rpcMsg);
if (pSyncNode->FpEqMsg != NULL) {
pSyncNode->FpEqMsg(pSyncNode->queue, &rpcMsg);
pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg);
} else {
sTrace("syncPropose pSyncNode->FpEqMsg is NULL");
}
@ -375,9 +363,8 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pSyncInfo) {
snprintf(pSyncNode->configPath, sizeof(pSyncNode->configPath), "%s/raft_config.json", pSyncInfo->path);
pSyncNode->pWal = pSyncInfo->pWal;
pSyncNode->rpcClient = pSyncInfo->rpcClient;
pSyncNode->msgcb = pSyncInfo->msgcb;
pSyncNode->FpSendMsg = pSyncInfo->FpSendMsg;
pSyncNode->queue = pSyncInfo->queue;
pSyncNode->FpEqMsg = pSyncInfo->FpEqMsg;
// init raft config
@ -687,11 +674,11 @@ int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRp
SEpSet epSet;
syncUtilraftId2EpSet(destRaftId, &epSet);
if (pSyncNode->FpSendMsg != NULL) {
pMsg->info.noResp = 1;
// htonl
syncUtilMsgHtoN(pMsg->pCont);
pSyncNode->FpSendMsg(pSyncNode->rpcClient, &epSet, pMsg);
pMsg->info.noResp = 1;
pSyncNode->FpSendMsg(&epSet, pMsg);
} else {
sTrace("syncNodeSendMsgById pSyncNode->FpSendMsg is NULL");
}
@ -702,11 +689,11 @@ int32_t syncNodeSendMsgByInfo(const SNodeInfo* nodeInfo, SSyncNode* pSyncNode, S
SEpSet epSet;
syncUtilnodeInfo2EpSet(nodeInfo, &epSet);
if (pSyncNode->FpSendMsg != NULL) {
pMsg->info.noResp = 1;
// htonl
syncUtilMsgHtoN(pMsg->pCont);
pSyncNode->FpSendMsg(pSyncNode->rpcClient, &epSet, pMsg);
pMsg->info.noResp = 1;
pSyncNode->FpSendMsg(&epSet, pMsg);
} else {
sTrace("syncNodeSendMsgByInfo pSyncNode->FpSendMsg is NULL");
}
@ -728,12 +715,12 @@ cJSON* syncNode2Json(const SSyncNode* pSyncNode) {
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->pWal);
cJSON_AddStringToObject(pRoot, "pWal", u64buf);
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->rpcClient);
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->msgcb);
cJSON_AddStringToObject(pRoot, "rpcClient", u64buf);
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpSendMsg);
cJSON_AddStringToObject(pRoot, "FpSendMsg", u64buf);
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->queue);
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->msgcb);
cJSON_AddStringToObject(pRoot, "queue", u64buf);
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpEqMsg);
cJSON_AddStringToObject(pRoot, "FpEqMsg", u64buf);
@ -1095,7 +1082,7 @@ static void syncNodeEqPingTimer(void* param, void* tmrId) {
syncTimeout2RpcMsg(pSyncMsg, &rpcMsg);
syncRpcMsgLog2((char*)"==syncNodeEqPingTimer==", &rpcMsg);
if (pSyncNode->FpEqMsg != NULL) {
pSyncNode->FpEqMsg(pSyncNode->queue, &rpcMsg);
pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg);
} else {
sTrace("syncNodeEqPingTimer pSyncNode->FpEqMsg is NULL");
}
@ -1118,7 +1105,7 @@ static void syncNodeEqElectTimer(void* param, void* tmrId) {
syncTimeout2RpcMsg(pSyncMsg, &rpcMsg);
syncRpcMsgLog2((char*)"==syncNodeEqElectTimer==", &rpcMsg);
if (pSyncNode->FpEqMsg != NULL) {
pSyncNode->FpEqMsg(pSyncNode->queue, &rpcMsg);
pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg);
} else {
sTrace("syncNodeEqElectTimer pSyncNode->FpEqMsg is NULL");
}
@ -1145,7 +1132,7 @@ static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) {
syncTimeout2RpcMsg(pSyncMsg, &rpcMsg);
syncRpcMsgLog2((char*)"==syncNodeEqHeartbeatTimer==", &rpcMsg);
if (pSyncNode->FpEqMsg != NULL) {
pSyncNode->FpEqMsg(pSyncNode->queue, &rpcMsg);
pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg);
} else {
sTrace("syncNodeEqHeartbeatTimer pSyncNode->FpEqMsg is NULL");
}
@ -1175,10 +1162,10 @@ static int32_t syncNodeEqNoop(SSyncNode* ths) {
assert(pSyncMsg->dataLen == entryLen);
memcpy(pSyncMsg->data, serialized, entryLen);
SRpcMsg rpcMsg;
SRpcMsg rpcMsg = {0};
syncClientRequest2RpcMsg(pSyncMsg, &rpcMsg);
if (ths->FpEqMsg != NULL) {
ths->FpEqMsg(ths->queue, &rpcMsg);
ths->FpEqMsg(ths->msgcb, &rpcMsg);
} else {
sTrace("syncNodeEqNoop pSyncNode->FpEqMsg is NULL");
}

View File

@ -100,9 +100,8 @@ SWal* createWal(char* path, int32_t vgId) {
int64_t createSyncNode(int32_t replicaNum, int32_t myIndex, int32_t vgId, SWal* pWal, char* path, bool isStandBy) {
SSyncInfo syncInfo;
syncInfo.vgId = vgId;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = createFsm();
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s_sync_replica%d_index%d", path, replicaNum, myIndex);

View File

@ -44,9 +44,8 @@ SWal* createWal(char* path, int32_t vgId) {
SSyncNode* createSyncNode(int32_t replicaNum, int32_t myIndex, int32_t vgId, SWal* pWal, char* path) {
SSyncInfo syncInfo;
syncInfo.vgId = vgId;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = NULL;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s_sync_replica%d_index%d", path, replicaNum, myIndex);

View File

@ -31,9 +31,8 @@ SSyncNode *pSyncNode;
SSyncNode *syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./");

View File

@ -25,9 +25,7 @@ SSyncFSM* pFsm;
SSyncNode* syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./");
@ -99,7 +97,7 @@ int main(int argc, char** argv) {
SyncPingReply* pSyncMsg = syncPingReplyBuild2(&pSyncNode->myRaftId, &pSyncNode->myRaftId, 1000, "syncEnqTest");
SRpcMsg rpcMsg;
syncPingReply2RpcMsg(pSyncMsg, &rpcMsg);
pSyncNode->FpEqMsg(pSyncNode->queue, &rpcMsg);
pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg);
taosMsleep(1000);
}

View File

@ -43,7 +43,7 @@ int main() {
SRpcMsg rpcMsg;
syncPingReply2RpcMsg(pSyncMsg, &rpcMsg);
syncIOSendMsg(gSyncIO->clientRpc, &epSet, &rpcMsg);
syncIOSendMsg(&epSet, &rpcMsg);
taosSsleep(1);
}

View File

@ -25,9 +25,8 @@ SSyncFSM* pFsm;
SSyncNode* syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./");
@ -98,12 +97,13 @@ int main(int argc, char** argv) {
for (int i = 0; i < 10; ++i) {
SyncPingReply* pSyncMsg =
syncPingReplyBuild2(&pSyncNode->myRaftId, &pSyncNode->myRaftId, 1000, "syncIOSendMsgTest");
SRpcMsg rpcMsg;
SRpcMsg rpcMsg = {0};
syncPingReply2RpcMsg(pSyncMsg, &rpcMsg);
SEpSet epSet;
syncUtilnodeInfo2EpSet(&pSyncNode->myNodeInfo, &epSet);
pSyncNode->FpSendMsg(pSyncNode->rpcClient, &epSet, &rpcMsg);
rpcMsg.info.noResp = 1;
pSyncNode->FpSendMsg(&epSet, &rpcMsg);
taosMsleep(1000);
}

View File

@ -28,9 +28,8 @@ SSyncNode* pSyncNode;
SSyncNode* syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./");

View File

@ -25,9 +25,8 @@ SSyncFSM* pFsm;
SSyncNode* syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./sync_init_test");

View File

@ -25,9 +25,8 @@ SSyncFSM* pFsm;
SSyncNode* syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./");

View File

@ -25,9 +25,8 @@ SSyncFSM* pFsm;
SSyncNode* syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./");

View File

@ -25,9 +25,8 @@ SSyncFSM* pFsm;
SSyncNode* syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./");

View File

@ -97,9 +97,8 @@ SWal* createWal(char* path, int32_t vgId) {
int64_t createSyncNode(int32_t replicaNum, int32_t myIndex, int32_t vgId, SWal* pWal, char* path) {
SSyncInfo syncInfo;
syncInfo.vgId = vgId;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = createFsm();
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s_sync_replica%d_index%d", path, replicaNum, myIndex);

View File

@ -83,9 +83,8 @@ void initFsm() {
SSyncNode *syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", pDir);
@ -200,7 +199,7 @@ int main(int argc, char **argv) {
SyncClientRequest *pSyncClientRequest = pMsg1;
SRpcMsg rpcMsg;
syncClientRequest2RpcMsg(pSyncClientRequest, &rpcMsg);
gSyncNode->FpEqMsg(gSyncNode->queue, &rpcMsg);
gSyncNode->FpEqMsg(gSyncNode->msgcb, &rpcMsg);
taosMsleep(1000);
}

View File

@ -27,9 +27,8 @@ SSyncNode* pSyncNode;
SSyncNode* syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./");

Some files were not shown because too many files have changed in this diff Show More