Merge pull request #10618 from taosdata/feature/3.0_query_integrate_wxy
merge 3.0
This commit is contained in:
commit
d3138fff43
|
@ -24,7 +24,6 @@ mac/
|
|||
*.orig
|
||||
src/connector/nodejs/node_modules/
|
||||
src/connector/nodejs/out/
|
||||
tests/test/
|
||||
tests/taoshebei/
|
||||
tests/taoscsv/
|
||||
tests/taosdalipu/
|
||||
|
|
|
@ -10,10 +10,3 @@
|
|||
[submodule "deps/TSZ"]
|
||||
path = deps/TSZ
|
||||
url = https://github.com/taosdata/TSZ.git
|
||||
[submodule "tests"]
|
||||
path = tests
|
||||
url = https://github.com/taosdata/tests
|
||||
branch = 3.0
|
||||
[submodule "examples/rust"]
|
||||
path = examples/rust
|
||||
url = https://github.com/songtianyi/tdengine-rust-bindings.git
|
||||
|
|
|
@ -9,6 +9,7 @@ project(
|
|||
set(CMAKE_SUPPORT_DIR "${CMAKE_SOURCE_DIR}/cmake")
|
||||
set(CMAKE_CONTRIB_DIR "${CMAKE_SOURCE_DIR}/contrib")
|
||||
include(${CMAKE_SUPPORT_DIR}/cmake.options)
|
||||
include(${CMAKE_SUPPORT_DIR}/cmake.version)
|
||||
|
||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -fPIC -gdwarf-2 -msse4.2 -mfma -g3")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -fPIC -gdwarf-2 -msse4.2 -mfma -g3")
|
||||
|
|
31
Jenkinsfile2
31
Jenkinsfile2
|
@ -74,37 +74,9 @@ def pre_test(){
|
|||
git pull >/dev/null
|
||||
git fetch origin +refs/pull/${CHANGE_ID}/merge
|
||||
git checkout -qf FETCH_HEAD
|
||||
git submodule update --init --recursive --remote
|
||||
git submodule update --init --recursive
|
||||
'''
|
||||
script {
|
||||
if (env.CHANGE_TARGET == 'master') {
|
||||
sh '''
|
||||
cd ${WKCT}
|
||||
git checkout master
|
||||
'''
|
||||
}
|
||||
else if(env.CHANGE_TARGET == '2.0'){
|
||||
sh '''
|
||||
cd ${WKCT}
|
||||
git checkout 2.0
|
||||
'''
|
||||
}
|
||||
else if(env.CHANGE_TARGET == '3.0'){
|
||||
sh '''
|
||||
cd ${WKCT}
|
||||
git checkout 3.0
|
||||
'''
|
||||
}
|
||||
else{
|
||||
sh '''
|
||||
cd ${WKCT}
|
||||
git checkout develop
|
||||
'''
|
||||
}
|
||||
}
|
||||
sh'''
|
||||
cd ${WKCT}
|
||||
git pull >/dev/null
|
||||
cd ${WKC}
|
||||
export TZ=Asia/Harbin
|
||||
date
|
||||
|
@ -123,7 +95,6 @@ pipeline {
|
|||
environment{
|
||||
WK = '/var/lib/jenkins/workspace/TDinternal'
|
||||
WKC= '/var/lib/jenkins/workspace/TDengine'
|
||||
WKCT= '/var/lib/jenkins/workspace/TDengine/tests'
|
||||
}
|
||||
stages {
|
||||
stage('pre_build'){
|
||||
|
|
|
@ -0,0 +1,77 @@
|
|||
|
||||
IF (DEFINED VERNUMBER)
|
||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||
ELSE ()
|
||||
SET(TD_VER_NUMBER "3.0.0.0")
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED VERCOMPATIBLE)
|
||||
SET(TD_VER_COMPATIBLE ${VERCOMPATIBLE})
|
||||
ELSE ()
|
||||
SET(TD_VER_COMPATIBLE "3.0.0.0")
|
||||
ENDIF ()
|
||||
|
||||
find_program(HAVE_GIT NAMES git)
|
||||
|
||||
IF (DEFINED GITINFO)
|
||||
SET(TD_VER_GIT ${GITINFO})
|
||||
ELSEIF (HAVE_GIT)
|
||||
execute_process(COMMAND git log -1 --format=%H WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE GIT_COMMITID)
|
||||
#message(STATUS "git log result:${GIT_COMMITID}")
|
||||
IF (GIT_COMMITID)
|
||||
string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID})
|
||||
SET(TD_VER_GIT ${GIT_COMMITID})
|
||||
ELSE ()
|
||||
message(STATUS "not a git repository")
|
||||
SET(TD_VER_GIT "no git commit id")
|
||||
ENDIF ()
|
||||
ELSE ()
|
||||
message(STATUS "no git cmd")
|
||||
SET(TD_VER_GIT "no git commit id")
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED VERDATE)
|
||||
SET(TD_VER_DATE ${VERDATE})
|
||||
ELSE ()
|
||||
STRING(TIMESTAMP TD_VER_DATE "%Y-%m-%d %H:%M:%S")
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED VERTYPE)
|
||||
SET(TD_VER_VERTYPE ${VERTYPE})
|
||||
ELSE ()
|
||||
SET(TD_VER_VERTYPE "stable")
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED CPUTYPE)
|
||||
SET(TD_VER_CPUTYPE ${CPUTYPE})
|
||||
ELSE ()
|
||||
IF (TD_WINDOWS_32)
|
||||
SET(TD_VER_CPUTYPE "x86")
|
||||
ELSEIF (TD_LINUX_32)
|
||||
SET(TD_VER_CPUTYPE "x86")
|
||||
ELSEIF (TD_ARM_32)
|
||||
SET(TD_VER_CPUTYPE "x86")
|
||||
ELSEIF (TD_MIPS_32)
|
||||
SET(TD_VER_CPUTYPE "x86")
|
||||
ELSE ()
|
||||
SET(TD_VER_CPUTYPE "x64")
|
||||
ENDIF ()
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED OSTYPE)
|
||||
SET(TD_VER_OSTYPE ${OSTYPE})
|
||||
ELSE ()
|
||||
SET(TD_VER_OSTYPE "Linux")
|
||||
ENDIF ()
|
||||
|
||||
MESSAGE(STATUS "============= compile version parameter information start ============= ")
|
||||
MESSAGE(STATUS "ver number:" ${TD_VER_NUMBER})
|
||||
MESSAGE(STATUS "compatible ver number:" ${TD_VER_COMPATIBLE})
|
||||
MESSAGE(STATUS "communit commit id:" ${TD_VER_GIT})
|
||||
MESSAGE(STATUS "build date:" ${TD_VER_DATE})
|
||||
MESSAGE(STATUS "ver type:" ${TD_VER_VERTYPE})
|
||||
MESSAGE(STATUS "ver cpu:" ${TD_VER_CPUTYPE})
|
||||
MESSAGE(STATUS "os type:" ${TD_VER_OSTYPE})
|
||||
MESSAGE(STATUS "============= compile version parameter information end ============= ")
|
||||
|
||||
STRING(REPLACE "." "_" TD_LIB_VER_NUMBER ${TD_VER_NUMBER})
|
|
@ -1 +0,0 @@
|
|||
Subproject commit 1c8924dc668e6aa848214c2fc54e3ace3f5bf8df
|
|
@ -58,6 +58,7 @@ extern int32_t tsMonitorInterval;
|
|||
extern char tsMonitorFqdn[];
|
||||
extern uint16_t tsMonitorPort;
|
||||
extern int32_t tsMonitorMaxLogs;
|
||||
extern bool tsMonitorComp;
|
||||
|
||||
// query buffer management
|
||||
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
|
||||
|
@ -83,7 +84,6 @@ extern int64_t tsMaxRetentWindow;
|
|||
extern char version[];
|
||||
extern char compatible_version[];
|
||||
extern char gitinfo[];
|
||||
extern char gitinfoOfInternal[];
|
||||
extern char buildinfo[];
|
||||
|
||||
// lossy
|
||||
|
|
|
@ -754,7 +754,7 @@ typedef struct {
|
|||
int32_t fsyncPeriod;
|
||||
uint32_t hashBegin;
|
||||
uint32_t hashEnd;
|
||||
int8_t hashMethod;
|
||||
int8_t hashMethod;
|
||||
int8_t walLevel;
|
||||
int8_t precision;
|
||||
int8_t compression;
|
||||
|
@ -765,7 +765,7 @@ typedef struct {
|
|||
int8_t selfIndex;
|
||||
int8_t streamMode;
|
||||
SReplica replicas[TSDB_MAX_REPLICA];
|
||||
|
||||
|
||||
} SCreateVnodeReq, SAlterVnodeReq;
|
||||
|
||||
int32_t tSerializeSCreateVnodeReq(void* buf, int32_t bufLen, SCreateVnodeReq* pReq);
|
||||
|
@ -1400,7 +1400,7 @@ typedef struct {
|
|||
typedef struct SMqCMGetSubEpReq {
|
||||
int64_t consumerId;
|
||||
int32_t epoch;
|
||||
char cgroup[TSDB_CONSUMER_GROUP_LEN];
|
||||
char cgroup[TSDB_CGROUP_LEN];
|
||||
} SMqCMGetSubEpReq;
|
||||
|
||||
static FORCE_INLINE int32_t tEncodeSMsgHead(void** buf, const SMsgHead* pMsg) {
|
||||
|
@ -1700,7 +1700,7 @@ typedef struct {
|
|||
int32_t vgId;
|
||||
int64_t consumerId;
|
||||
char topicName[TSDB_TOPIC_FNAME_LEN];
|
||||
char cgroup[TSDB_CONSUMER_GROUP_LEN];
|
||||
char cgroup[TSDB_CGROUP_LEN];
|
||||
char* sql;
|
||||
char* logicalPlan;
|
||||
char* physicalPlan;
|
||||
|
@ -1763,7 +1763,7 @@ typedef struct {
|
|||
int32_t vgId;
|
||||
int64_t consumerId;
|
||||
char topicName[TSDB_TOPIC_FNAME_LEN];
|
||||
char cgroup[TSDB_CONSUMER_GROUP_LEN];
|
||||
char cgroup[TSDB_CGROUP_LEN];
|
||||
} SMqSetCVgRsp;
|
||||
|
||||
typedef struct {
|
||||
|
@ -1771,14 +1771,14 @@ typedef struct {
|
|||
int32_t vgId;
|
||||
int64_t consumerId;
|
||||
char topicName[TSDB_TOPIC_FNAME_LEN];
|
||||
char cgroup[TSDB_CONSUMER_GROUP_LEN];
|
||||
char cgroup[TSDB_CGROUP_LEN];
|
||||
} SMqMVRebRsp;
|
||||
|
||||
typedef struct {
|
||||
int32_t vgId;
|
||||
int64_t offset;
|
||||
char topicName[TSDB_TOPIC_FNAME_LEN];
|
||||
char cgroup[TSDB_CONSUMER_GROUP_LEN];
|
||||
char cgroup[TSDB_CGROUP_LEN];
|
||||
} SMqOffset;
|
||||
|
||||
typedef struct {
|
||||
|
@ -1883,15 +1883,27 @@ typedef struct {
|
|||
} STSma; // Time-range-wise SMA
|
||||
|
||||
typedef struct {
|
||||
int8_t msgType; // 0 create, 1 recreate
|
||||
STSma tSma;
|
||||
STimeWindow window;
|
||||
} SCreateTSmaMsg;
|
||||
int64_t ver; // use a general definition
|
||||
STSma tSma;
|
||||
} SVCreateTSmaReq;
|
||||
|
||||
typedef struct {
|
||||
STimeWindow window;
|
||||
char indexName[TSDB_INDEX_NAME_LEN + 1];
|
||||
} SDropTSmaMsg;
|
||||
int8_t type; // 0 status report, 1 update data
|
||||
char indexName[TSDB_INDEX_NAME_LEN + 1]; //
|
||||
STimeWindow windows;
|
||||
} STSmaMsg;
|
||||
|
||||
typedef struct {
|
||||
int64_t ver; // use a general definition
|
||||
char indexName[TSDB_INDEX_NAME_LEN + 1];
|
||||
} SVDropTSmaReq;
|
||||
typedef struct {
|
||||
} SVCreateTSmaRsp, SVDropTSmaRsp;
|
||||
|
||||
int32_t tSerializeSVCreateTSmaReq(void** buf, SVCreateTSmaReq* pReq);
|
||||
void* tDeserializeSVCreateTSmaReq(void* buf, SVCreateTSmaReq* pReq);
|
||||
int32_t tSerializeSVDropTSmaReq(void** buf, SVDropTSmaReq* pReq);
|
||||
void* tDeserializeSVDropTSmaReq(void* buf, SVDropTSmaReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
STimeWindow tsWindow; // [skey, ekey]
|
||||
|
@ -1913,22 +1925,18 @@ static FORCE_INLINE void tdDestroySmaData(STSmaData* pSmaData) {
|
|||
}
|
||||
}
|
||||
|
||||
// RSma: Time-range-wise Rollup SMA
|
||||
// TODO: refactor when rSma grammar defined finally =>
|
||||
// RSma: Rollup SMA
|
||||
typedef struct {
|
||||
int64_t interval;
|
||||
int32_t retention; // unit: day
|
||||
uint16_t days; // unit: day
|
||||
int8_t intervalUnit;
|
||||
} SSmaParams;
|
||||
// TODO: refactor when rSma grammar defined finally <=
|
||||
|
||||
typedef struct {
|
||||
// TODO: refactor to use the real schema =>
|
||||
STSma tsma;
|
||||
float xFilesFactor;
|
||||
SArray* smaParams; // SSmaParams
|
||||
// TODO: refactor to use the real schema <=
|
||||
} SRSma;
|
||||
|
||||
typedef struct {
|
||||
|
@ -1936,27 +1944,21 @@ typedef struct {
|
|||
STSma* tSma;
|
||||
} STSmaWrapper;
|
||||
|
||||
static FORCE_INLINE void tdDestroyTSma(STSma* pSma, bool releaseSelf) {
|
||||
static FORCE_INLINE void tdDestroyTSma(STSma* pSma) {
|
||||
if (pSma) {
|
||||
tfree(pSma->colIds);
|
||||
tfree(pSma->funcIds);
|
||||
if (releaseSelf) {
|
||||
free(pSma);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static FORCE_INLINE void tdDestroyTSmaWrapper(STSmaWrapper* pSW, bool releaseSelf) {
|
||||
static FORCE_INLINE void tdDestroyTSmaWrapper(STSmaWrapper* pSW) {
|
||||
if (pSW) {
|
||||
if (pSW->tSma) {
|
||||
for (uint32_t i = 0; i < pSW->number; ++i) {
|
||||
tdDestroyTSma(pSW->tSma + i, false);
|
||||
tdDestroyTSma(pSW->tSma + i);
|
||||
}
|
||||
tfree(pSW->tSma);
|
||||
}
|
||||
if (releaseSelf) {
|
||||
free(pSW);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2043,7 +2045,7 @@ static FORCE_INLINE void* tDecodeTSmaWrapper(void* buf, STSmaWrapper* pSW) {
|
|||
for (uint32_t i = 0; i < pSW->number; ++i) {
|
||||
if ((buf = tDecodeTSma(buf, pSW->tSma + i)) == NULL) {
|
||||
for (uint32_t j = i; j >= 0; --i) {
|
||||
tdDestroyTSma(pSW->tSma + j, false);
|
||||
tdDestroyTSma(pSW->tSma + j);
|
||||
}
|
||||
free(pSW->tSma);
|
||||
return NULL;
|
||||
|
@ -2092,7 +2094,7 @@ typedef struct {
|
|||
int64_t consumerId;
|
||||
int64_t blockingTime;
|
||||
int32_t epoch;
|
||||
char cgroup[TSDB_CONSUMER_GROUP_LEN];
|
||||
char cgroup[TSDB_CGROUP_LEN];
|
||||
|
||||
int64_t currentOffset;
|
||||
char topic[TSDB_TOPIC_FNAME_LEN];
|
||||
|
@ -2111,7 +2113,7 @@ typedef struct {
|
|||
|
||||
typedef struct {
|
||||
int64_t consumerId;
|
||||
char cgroup[TSDB_CONSUMER_GROUP_LEN];
|
||||
char cgroup[TSDB_CGROUP_LEN];
|
||||
SArray* topics; // SArray<SMqSubTopicEp>
|
||||
} SMqCMGetSubEpRsp;
|
||||
|
||||
|
|
|
@ -16,6 +16,9 @@
|
|||
#ifndef _TD_SNODE_H_
|
||||
#define _TD_SNODE_H_
|
||||
|
||||
#include "tmsg.h"
|
||||
#include "trpc.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
@ -88,4 +91,4 @@ void sndDestroy(const char *path);
|
|||
}
|
||||
#endif
|
||||
|
||||
#endif /*_TD_SNODE_H_*/
|
||||
#endif /*_TD_SNODE_H_*/
|
||||
|
|
|
@ -32,6 +32,8 @@ extern "C" {
|
|||
typedef struct {
|
||||
int32_t dnode_id;
|
||||
char dnode_ep[TSDB_EP_LEN];
|
||||
int64_t cluster_id;
|
||||
int32_t protocol;
|
||||
} SMonBasicInfo;
|
||||
|
||||
typedef struct {
|
||||
|
@ -130,6 +132,7 @@ typedef struct {
|
|||
const char *server;
|
||||
uint16_t port;
|
||||
int32_t maxLogs;
|
||||
bool comp;
|
||||
} SMonCfg;
|
||||
|
||||
int32_t monInit(const SMonCfg *pCfg);
|
||||
|
|
|
@ -22,7 +22,9 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
int32_t taosSendHttpReport(const char* server, uint16_t port, const char* pCont, int32_t contLen);
|
||||
typedef enum { HTTP_GZIP, HTTP_FLAT } EHttpCompFlag;
|
||||
|
||||
int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
|
@ -38,8 +38,6 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef USE_UV
|
||||
|
||||
#define TAOS_EPOLL_WAIT_TIME 500
|
||||
typedef int32_t SOCKET;
|
||||
typedef SOCKET EpollFd;
|
||||
|
@ -59,8 +57,6 @@ int32_t taosCloseSocket(SocketFd fd);
|
|||
void taosShutDownSocketRD(SOCKET fd);
|
||||
void taosShutDownSocketWR(SOCKET fd);
|
||||
int32_t taosSetNonblocking(SOCKET sock, int32_t on);
|
||||
void taosIgnSIGPIPE();
|
||||
void taosSetMaskSIGPIPE();
|
||||
int32_t taosSetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t optlen);
|
||||
int32_t taosGetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t *optlen);
|
||||
|
||||
|
@ -90,14 +86,13 @@ SOCKET taosOpenTcpClientSocket(uint32_t ip, uint16_t port, uint32_t localIp);
|
|||
SOCKET taosOpenTcpServerSocket(uint32_t ip, uint16_t port);
|
||||
int32_t taosKeepTcpAlive(SOCKET sockFd);
|
||||
|
||||
int32_t taosGetFqdn(char *);
|
||||
void taosBlockSIGPIPE();
|
||||
uint32_t taosGetIpv4FromFqdn(const char *);
|
||||
int32_t taosGetFqdn(char *);
|
||||
void tinet_ntoa(char *ipstr, uint32_t ip);
|
||||
uint32_t ip2uint(const char *const ip_addr);
|
||||
|
||||
#endif
|
||||
|
||||
void taosBlockSIGPIPE();
|
||||
void taosIgnSIGPIPE();
|
||||
void taosSetMaskSIGPIPE();
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -84,6 +84,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_INVALID_VERSION_NUMBER TAOS_DEF_ERROR_CODE(0, 0x0120)
|
||||
#define TSDB_CODE_INVALID_VERSION_STRING TAOS_DEF_ERROR_CODE(0, 0x0121)
|
||||
#define TSDB_CODE_VERSION_NOT_COMPATIBLE TAOS_DEF_ERROR_CODE(0, 0x0122)
|
||||
#define TSDB_CODE_COMPRESS_ERROR TAOS_DEF_ERROR_CODE(0, 0x0123)
|
||||
|
||||
//client
|
||||
#define TSDB_CODE_TSC_INVALID_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0200)
|
||||
|
|
|
@ -195,6 +195,7 @@ typedef enum ELogicConditionType {
|
|||
#define TSDB_NODE_NAME_LEN 64
|
||||
#define TSDB_TABLE_NAME_LEN 193 // it is a null-terminated string
|
||||
#define TSDB_TOPIC_NAME_LEN 193 // it is a null-terminated string
|
||||
#define TSDB_CGROUP_LEN 193 // it is a null-terminated string
|
||||
#define TSDB_DB_NAME_LEN 65
|
||||
#define TSDB_DB_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
|
||||
|
||||
|
@ -210,9 +211,8 @@ typedef enum ELogicConditionType {
|
|||
#define TSDB_TYPE_STR_MAX_LEN 32
|
||||
#define TSDB_TABLE_FNAME_LEN (TSDB_DB_FNAME_LEN + TSDB_TABLE_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
|
||||
#define TSDB_TOPIC_FNAME_LEN TSDB_TABLE_FNAME_LEN
|
||||
#define TSDB_CONSUMER_GROUP_LEN 192
|
||||
#define TSDB_SUBSCRIBE_KEY_LEN (TSDB_CONSUMER_GROUP_LEN + TSDB_TOPIC_FNAME_LEN + 2)
|
||||
#define TSDB_PARTITION_KEY_LEN (TSDB_CONSUMER_GROUP_LEN + TSDB_TOPIC_FNAME_LEN + 2)
|
||||
#define TSDB_SUBSCRIBE_KEY_LEN (TSDB_CGROUP_LEN + TSDB_TOPIC_FNAME_LEN + 2)
|
||||
#define TSDB_PARTITION_KEY_LEN (TSDB_SUBSCRIBE_KEY_LEN + 20)
|
||||
#define TSDB_COL_NAME_LEN 65
|
||||
#define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 64
|
||||
#define TSDB_MAX_SQL_LEN TSDB_PAYLOAD_SIZE
|
||||
|
@ -442,6 +442,8 @@ typedef struct {
|
|||
int32_t primary;
|
||||
} SDiskCfg;
|
||||
|
||||
#define TMQ_SEPARATOR ':'
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -23,7 +23,6 @@ extern "C" {
|
|||
extern char version[];
|
||||
extern char compatible_version[];
|
||||
extern char gitinfo[];
|
||||
extern char gitinfoOfInternal[];
|
||||
extern char buildinfo[];
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -46,7 +46,7 @@ struct tmq_topic_vgroup_list_t {
|
|||
|
||||
struct tmq_conf_t {
|
||||
char clientId[256];
|
||||
char groupId[256];
|
||||
char groupId[TSDB_CGROUP_LEN];
|
||||
int8_t auto_commit;
|
||||
int8_t resetOffset;
|
||||
tmq_commit_cb* commit_cb;
|
||||
|
@ -56,7 +56,7 @@ struct tmq_conf_t {
|
|||
|
||||
struct tmq_t {
|
||||
// conf
|
||||
char groupId[256];
|
||||
char groupId[TSDB_CGROUP_LEN];
|
||||
char clientId[256];
|
||||
int8_t autoCommit;
|
||||
int8_t inWaiting;
|
||||
|
|
|
@ -48,10 +48,11 @@ bool tsPrintAuth = 0;
|
|||
|
||||
// monitor
|
||||
bool tsEnableMonitor = 1;
|
||||
int32_t tsMonitorInterval = 5;
|
||||
int32_t tsMonitorInterval = 30;
|
||||
char tsMonitorFqdn[TSDB_FQDN_LEN] = {0};
|
||||
uint16_t tsMonitorPort = 6043;
|
||||
int32_t tsMonitorMaxLogs = 100;
|
||||
bool tsMonitorComp = false;
|
||||
|
||||
/*
|
||||
* denote if the server needs to compress response message at the application layer to client, including query rsp,
|
||||
|
@ -313,7 +314,6 @@ static int32_t taosAddSystemCfg(SConfig *pCfg) {
|
|||
if (cfgAddString(pCfg, "version", version, 1) != 0) return -1;
|
||||
if (cfgAddString(pCfg, "compatible_version", compatible_version, 1) != 0) return -1;
|
||||
if (cfgAddString(pCfg, "gitinfo", gitinfo, 1) != 0) return -1;
|
||||
if (cfgAddString(pCfg, "gitinfoOfInternal", gitinfoOfInternal, 1) != 0) return -1;
|
||||
if (cfgAddString(pCfg, "buildinfo", buildinfo, 1) != 0) return -1;
|
||||
return 0;
|
||||
}
|
||||
|
@ -346,6 +346,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
|||
if (cfgAddString(pCfg, "monitorFqdn", tsMonitorFqdn, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "monitorPort", tsMonitorPort, 1, 65056, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "monitorMaxLogs", tsMonitorMaxLogs, 1, 1000000, 0) != 0) return -1;
|
||||
if (cfgAddBool(pCfg, "monitorComp", tsMonitorComp, 0) != 0) return -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -462,6 +463,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
|
|||
tstrncpy(tsMonitorFqdn, cfgGetItem(pCfg, "monitorFqdn")->str, TSDB_FQDN_LEN);
|
||||
tsMonitorPort = (uint16_t)cfgGetItem(pCfg, "monitorPort")->i32;
|
||||
tsMonitorMaxLogs = cfgGetItem(pCfg, "monitorMaxLogs")->i32;
|
||||
tsMonitorComp = cfgGetItem(pCfg, "monitorComp")->bval;
|
||||
|
||||
if (tsQueryBufferSize >= 0) {
|
||||
tsQueryBufferSizeBytes = tsQueryBufferSize * 1048576UL;
|
||||
|
|
|
@ -2415,3 +2415,36 @@ int32_t tDecodeSMqCMCommitOffsetReq(SCoder *decoder, SMqCMCommitOffsetReq *pReq)
|
|||
tEndDecode(decoder);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t tSerializeSVCreateTSmaReq(void **buf, SVCreateTSmaReq *pReq) {
|
||||
int32_t tlen = 0;
|
||||
|
||||
tlen += taosEncodeFixedI64(buf, pReq->ver);
|
||||
tlen += tEncodeTSma(buf, &pReq->tSma);
|
||||
|
||||
return tlen;
|
||||
}
|
||||
|
||||
void *tDeserializeSVCreateTSmaReq(void *buf, SVCreateTSmaReq *pReq) {
|
||||
buf = taosDecodeFixedI64(buf, &(pReq->ver));
|
||||
|
||||
if ((buf = tDecodeTSma(buf, &pReq->tSma)) == NULL) {
|
||||
tdDestroyTSma(&pReq->tSma);
|
||||
}
|
||||
return buf;
|
||||
}
|
||||
|
||||
int32_t tSerializeSVDropTSmaReq(void **buf, SVDropTSmaReq *pReq) {
|
||||
int32_t tlen = 0;
|
||||
|
||||
tlen += taosEncodeFixedI64(buf, pReq->ver);
|
||||
tlen += taosEncodeString(buf, pReq->indexName);
|
||||
|
||||
return tlen;
|
||||
}
|
||||
void *tDeserializeSVDropTSmaReq(void *buf, SVDropTSmaReq *pReq) {
|
||||
buf = taosDecodeFixedI64(buf, &(pReq->ver));
|
||||
buf = taosDecodeStringTo(buf, pReq->indexName);
|
||||
|
||||
return buf;
|
||||
}
|
||||
|
|
|
@ -30,6 +30,5 @@ void dmnPrintVersion() {
|
|||
#endif
|
||||
printf("%s version: %s compatible_version: %s\n", releaseName, version, compatible_version);
|
||||
printf("gitinfo: %s\n", gitinfo);
|
||||
printf("gitinfoI: %s\n", gitinfoOfInternal);
|
||||
printf("builuInfo: %s\n", buildinfo);
|
||||
}
|
||||
|
|
|
@ -298,7 +298,7 @@ int32_t dndInit() {
|
|||
return -1;
|
||||
}
|
||||
|
||||
SMonCfg monCfg = {.maxLogs = tsMonitorMaxLogs, .port = tsMonitorPort, .server = tsMonitorFqdn};
|
||||
SMonCfg monCfg = {.maxLogs = tsMonitorMaxLogs, .port = tsMonitorPort, .server = tsMonitorFqdn, .comp = tsMonitorComp};
|
||||
if (monInit(&monCfg) != 0) {
|
||||
dError("failed to init monitor since %s", terrstr());
|
||||
dndCleanup();
|
||||
|
|
|
@ -477,6 +477,8 @@ void dndProcessStartupReq(SDnode *pDnode, SRpcMsg *pReq) {
|
|||
static void dndGetMonitorBasicInfo(SDnode *pDnode, SMonBasicInfo *pInfo) {
|
||||
pInfo->dnode_id = dndGetDnodeId(pDnode);
|
||||
tstrncpy(pInfo->dnode_ep, tsLocalEp, TSDB_EP_LEN);
|
||||
pInfo->cluster_id = dndGetClusterId(pDnode);
|
||||
pInfo->protocol = 1;
|
||||
}
|
||||
|
||||
static void dndGetMonitorDnodeInfo(SDnode *pDnode, SMonDnodeInfo *pInfo) {
|
||||
|
|
|
@ -594,7 +594,7 @@ typedef struct {
|
|||
int64_t consumerId;
|
||||
int64_t connId;
|
||||
SRWLatch lock;
|
||||
char cgroup[TSDB_CONSUMER_GROUP_LEN];
|
||||
char cgroup[TSDB_CGROUP_LEN];
|
||||
SArray* currentTopics; // SArray<char*>
|
||||
SArray* recentRemovedTopics; // SArray<char*>
|
||||
int32_t epoch;
|
||||
|
@ -661,6 +661,9 @@ static FORCE_INLINE void* tDecodeSMqConsumerObj(void* buf, SMqConsumerObj* pCons
|
|||
return buf;
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
} SStreamScheduler;
|
||||
|
||||
typedef struct SMnodeMsg {
|
||||
char user[TSDB_USER_LEN];
|
||||
char db[TSDB_DB_FNAME_LEN];
|
||||
|
|
|
@ -0,0 +1,34 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef _TD_MND_SCHEDULER_H_
|
||||
#define _TD_MND_SCHEDULER_H_
|
||||
|
||||
#include "mndInt.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
int32_t mndInitScheduler(SMnode* pMnode);
|
||||
void mndCleanupScheduler(SMnode* pMnode);
|
||||
|
||||
int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscribeObj* pSub);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /*_TD_MND_SCHEDULER_H_ */
|
|
@ -340,9 +340,16 @@ static int32_t mndProcessStatusReq(SMnodeMsg *pReq) {
|
|||
pVgroup->compStorage = pVload->compStorage;
|
||||
pVgroup->pointsWritten = pVload->pointsWritten;
|
||||
}
|
||||
bool roleChanged = false;
|
||||
for (int32_t vg = 0; vg < pVgroup->replica; ++vg) {
|
||||
if (pVgroup->vnodeGid[vg].role != pVload->role) {
|
||||
roleChanged = true;
|
||||
}
|
||||
pVgroup->vnodeGid[vg].role = pVload->role;
|
||||
}
|
||||
if (roleChanged) {
|
||||
// notify scheduler role has changed
|
||||
}
|
||||
}
|
||||
|
||||
mndReleaseVgroup(pMnode, pVgroup);
|
||||
|
@ -631,13 +638,13 @@ static int32_t mndGetConfigMeta(SMnodeMsg *pReq, SShowObj *pShow, STableMetaRsp
|
|||
|
||||
pShow->bytes[cols] = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
|
||||
strcpy(pSchema[cols].name, "name");
|
||||
strcpy(pSchema[cols].name, "name");
|
||||
pSchema[cols].bytes = pShow->bytes[cols];
|
||||
cols++;
|
||||
|
||||
pShow->bytes[cols] = TSDB_CONIIG_VALUE_LEN + VARSTR_HEADER_SIZE;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
|
||||
strcpy(pSchema[cols].name, "value");
|
||||
strcpy(pSchema[cols].name, "value");
|
||||
pSchema[cols].bytes = pShow->bytes[cols];
|
||||
cols++;
|
||||
|
||||
|
@ -823,4 +830,4 @@ static int32_t mndRetrieveDnodes(SMnodeMsg *pReq, SShowObj *pShow, char *data, i
|
|||
static void mndCancelGetNextDnode(SMnode *pMnode, void *pIter) {
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
sdbCancelFetch(pSdb, pIter);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,78 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "mndScheduler.h"
|
||||
#include "mndConsumer.h"
|
||||
#include "mndDb.h"
|
||||
#include "mndDnode.h"
|
||||
#include "mndMnode.h"
|
||||
#include "mndOffset.h"
|
||||
#include "mndShow.h"
|
||||
#include "mndStb.h"
|
||||
#include "mndSubscribe.h"
|
||||
#include "mndTopic.h"
|
||||
#include "mndTrans.h"
|
||||
#include "mndUser.h"
|
||||
#include "mndVgroup.h"
|
||||
#include "tcompare.h"
|
||||
#include "tname.h"
|
||||
|
||||
int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscribeObj* pSub) {
|
||||
SSdb* pSdb = pMnode->pSdb;
|
||||
SVgObj* pVgroup = NULL;
|
||||
SQueryPlan* pPlan = qStringToQueryPlan(pTopic->physicalPlan);
|
||||
SArray* pAray = NULL;
|
||||
SArray* unassignedVg = pSub->unassignedVg;
|
||||
|
||||
ASSERT(pSub->vgNum == 0);
|
||||
|
||||
int32_t levelNum = LIST_LENGTH(pPlan->pSubplans);
|
||||
if (levelNum != 1) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
SNodeListNode* inner = nodesListGetNode(pPlan->pSubplans, 0);
|
||||
|
||||
int32_t opNum = LIST_LENGTH(inner->pNodeList);
|
||||
if (opNum != 1) {
|
||||
return -1;
|
||||
}
|
||||
SSubplan* plan = nodesListGetNode(inner->pNodeList, 0);
|
||||
|
||||
void* pIter = NULL;
|
||||
while (1) {
|
||||
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void**)&pVgroup);
|
||||
if (pIter == NULL) break;
|
||||
if (pVgroup->dbUid != pTopic->dbUid) {
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
continue;
|
||||
}
|
||||
|
||||
pSub->vgNum++;
|
||||
plan->execNode.nodeId = pVgroup->vgId;
|
||||
plan->execNode.epset = mndGetVgroupEpset(pMnode, pVgroup);
|
||||
|
||||
SMqConsumerEp consumerEp = {0};
|
||||
consumerEp.status = 0;
|
||||
consumerEp.consumerId = -1;
|
||||
consumerEp.epSet = plan->execNode.epset;
|
||||
consumerEp.vgId = plan->execNode.nodeId;
|
||||
int32_t msgLen;
|
||||
int32_t code = qSubPlanToString(plan, &consumerEp.qmsg, &msgLen);
|
||||
taosArrayPush(unassignedVg, &consumerEp);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -20,6 +20,7 @@
|
|||
#include "mndDnode.h"
|
||||
#include "mndMnode.h"
|
||||
#include "mndOffset.h"
|
||||
#include "mndScheduler.h"
|
||||
#include "mndShow.h"
|
||||
#include "mndStb.h"
|
||||
#include "mndTopic.h"
|
||||
|
@ -39,7 +40,7 @@ enum {
|
|||
MQ_SUBSCRIBE_STATUS__DELETED,
|
||||
};
|
||||
|
||||
static char *mndMakeSubscribeKey(const char *cgroup, const char *topicName);
|
||||
static int32_t mndMakeSubscribeKey(char *key, const char *cgroup, const char *topicName);
|
||||
|
||||
static SSdbRaw *mndSubActionEncode(SMqSubscribeObj *);
|
||||
static SSdbRow *mndSubActionDecode(SSdbRaw *pRaw);
|
||||
|
@ -87,22 +88,25 @@ static SMqSubscribeObj *mndCreateSubscription(SMnode *pMnode, const SMqTopicObj
|
|||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
char *key = mndMakeSubscribeKey(cgroup, pTopic->name);
|
||||
if (key == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
char key[TSDB_SUBSCRIBE_KEY_LEN];
|
||||
mndMakeSubscribeKey(key, cgroup, pTopic->name);
|
||||
strcpy(pSub->key, key);
|
||||
|
||||
if (mndSchedInitSubEp(pMnode, pTopic, pSub) < 0) {
|
||||
terrno = TSDB_CODE_MND_UNSUPPORTED_TOPIC;
|
||||
tDeleteSMqSubscribeObj(pSub);
|
||||
free(pSub);
|
||||
return NULL;
|
||||
}
|
||||
strcpy(pSub->key, key);
|
||||
free(key);
|
||||
|
||||
#if 0
|
||||
if (mndInitUnassignedVg(pMnode, pTopic, pSub) < 0) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
tDeleteSMqSubscribeObj(pSub);
|
||||
free(pSub);
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
// TODO: disable alter subscribed table
|
||||
return pSub;
|
||||
}
|
||||
|
@ -325,15 +329,14 @@ static int32_t mndProcessGetSubEpReq(SMnodeMsg *pMsg) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int32_t mndSplitSubscribeKey(char *key, char **topic, char **cgroup) {
|
||||
static int32_t mndSplitSubscribeKey(const char *key, char *topic, char *cgroup) {
|
||||
int32_t i = 0;
|
||||
while (key[i] != ':') {
|
||||
while (key[i] != TMQ_SEPARATOR) {
|
||||
i++;
|
||||
}
|
||||
key[i] = 0;
|
||||
*cgroup = strdup(key);
|
||||
key[i] = ':';
|
||||
*topic = strdup(&key[i + 1]);
|
||||
memcpy(topic, key, i - 1);
|
||||
topic[i] = 0;
|
||||
strcpy(cgroup, &key[i + 1]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -369,8 +372,9 @@ static int32_t mndProcessMqTimerMsg(SMnodeMsg *pMsg) {
|
|||
// get all topics of that topic
|
||||
int32_t sz = taosArrayGetSize(pConsumer->currentTopics);
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
char *topic = taosArrayGetP(pConsumer->currentTopics, i);
|
||||
char *key = mndMakeSubscribeKey(pConsumer->cgroup, topic);
|
||||
char *topic = taosArrayGetP(pConsumer->currentTopics, i);
|
||||
char key[TSDB_SUBSCRIBE_KEY_LEN];
|
||||
mndMakeSubscribeKey(key, pConsumer->cgroup, topic);
|
||||
SMqRebSubscribe *pRebSub = mndGetOrCreateRebSub(pRebMsg->rebSubHash, key);
|
||||
taosArrayPush(pRebSub->lostConsumers, &pConsumer->consumerId);
|
||||
}
|
||||
|
@ -386,8 +390,9 @@ static int32_t mndProcessMqTimerMsg(SMnodeMsg *pMsg) {
|
|||
}
|
||||
int32_t sz = taosArrayGetSize(rebSubs);
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
char *topic = taosArrayGetP(rebSubs, i);
|
||||
char *key = mndMakeSubscribeKey(pConsumer->cgroup, topic);
|
||||
char *topic = taosArrayGetP(rebSubs, i);
|
||||
char key[TSDB_SUBSCRIBE_KEY_LEN];
|
||||
mndMakeSubscribeKey(key, pConsumer->cgroup, topic);
|
||||
SMqRebSubscribe *pRebSub = mndGetOrCreateRebSub(pRebMsg->rebSubHash, key);
|
||||
if (status == MQ_CONSUMER_STATUS__INIT) {
|
||||
taosArrayPush(pRebSub->newConsumers, &pConsumer->consumerId);
|
||||
|
@ -520,9 +525,9 @@ static int32_t mndProcessDoRebalanceMsg(SMnodeMsg *pMsg) {
|
|||
taosArrayPush(pSubConsumer->vgInfo, pConsumerEp);
|
||||
|
||||
if (pConsumerEp->oldConsumerId == -1) {
|
||||
char *topic;
|
||||
char *cgroup;
|
||||
mndSplitSubscribeKey(pSub->key, &topic, &cgroup);
|
||||
char topic[TSDB_TOPIC_FNAME_LEN];
|
||||
char cgroup[TSDB_CGROUP_LEN];
|
||||
mndSplitSubscribeKey(pSub->key, topic, cgroup);
|
||||
SMqTopicObj *pTopic = mndAcquireTopic(pMnode, topic);
|
||||
|
||||
mInfo("mq set conn: assign vgroup %d of topic %s to consumer %ld", pConsumerEp->vgId, topic,
|
||||
|
@ -530,8 +535,6 @@ static int32_t mndProcessDoRebalanceMsg(SMnodeMsg *pMsg) {
|
|||
|
||||
mndPersistMqSetConnReq(pMnode, pTrans, pTopic, cgroup, pConsumerEp);
|
||||
mndReleaseTopic(pMnode, pTopic);
|
||||
free(topic);
|
||||
free(cgroup);
|
||||
} else {
|
||||
mInfo("mq rebalance: assign vgroup %d, from consumer %ld to consumer %ld", pConsumerEp->vgId,
|
||||
pConsumerEp->oldConsumerId, pConsumerEp->consumerId);
|
||||
|
@ -759,6 +762,7 @@ static int32_t mndProcessDoRebalanceMsg(SMnodeMsg *pMsg) {
|
|||
}
|
||||
#endif
|
||||
|
||||
#if 0
|
||||
static int32_t mndInitUnassignedVg(SMnode *pMnode, const SMqTopicObj *pTopic, SMqSubscribeObj *pSub) {
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
SVgObj *pVgroup = NULL;
|
||||
|
@ -804,6 +808,7 @@ static int32_t mndInitUnassignedVg(SMnode *pMnode, const SMqTopicObj *pTopic, SM
|
|||
/*qDestroyQueryDag(pDag);*/
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static int32_t mndPersistMqSetConnReq(SMnode *pMnode, STrans *pTrans, const SMqTopicObj *pTopic, const char *cgroup,
|
||||
const SMqConsumerEp *pConsumerEp) {
|
||||
|
@ -949,23 +954,19 @@ static int32_t mndSubActionUpdate(SSdb *pSdb, SMqSubscribeObj *pOldSub, SMqSubsc
|
|||
return 0;
|
||||
}
|
||||
|
||||
static char *mndMakeSubscribeKey(const char *cgroup, const char *topicName) {
|
||||
char *key = malloc(TSDB_SHOW_SUBQUERY_LEN);
|
||||
if (key == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
static int32_t mndMakeSubscribeKey(char *key, const char *cgroup, const char *topicName) {
|
||||
int32_t tlen = strlen(cgroup);
|
||||
memcpy(key, cgroup, tlen);
|
||||
key[tlen] = ':';
|
||||
key[tlen] = TMQ_SEPARATOR;
|
||||
strcpy(key + tlen + 1, topicName);
|
||||
return key;
|
||||
return 0;
|
||||
}
|
||||
|
||||
SMqSubscribeObj *mndAcquireSubscribe(SMnode *pMnode, const char *cgroup, const char *topicName) {
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
char *key = mndMakeSubscribeKey(cgroup, topicName);
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
char key[TSDB_SUBSCRIBE_KEY_LEN];
|
||||
mndMakeSubscribeKey(key, cgroup, topicName);
|
||||
SMqSubscribeObj *pSub = sdbAcquire(pSdb, SDB_SUBSCRIBE, key);
|
||||
free(key);
|
||||
if (pSub == NULL) {
|
||||
terrno = TSDB_CODE_MND_SUBSCRIBE_NOT_EXIST;
|
||||
}
|
||||
|
|
|
@ -87,7 +87,7 @@ static int32_t mndProcessTelemTimer(SMnodeMsg* pReq) {
|
|||
taosWLockLatch(&pMgmt->lock);
|
||||
char* pCont = mndBuildTelemetryReport(pMnode);
|
||||
if (pCont != NULL) {
|
||||
taosSendHttpReport(TELEMETRY_SERVER, TELEMETRY_PORT, pCont, strlen(pCont));
|
||||
taosSendHttpReport(TELEMETRY_SERVER, TELEMETRY_PORT, pCont, strlen(pCont), HTTP_FLAT);
|
||||
free(pCont);
|
||||
}
|
||||
taosWUnLockLatch(&pMgmt->lock);
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
|
||||
SSnode *sndOpen(const char *path, const SSnodeOpt *pOption) {
|
||||
SSnode *pSnode = calloc(1, sizeof(SSnode));
|
||||
memcpy(&pSnode->cfg, pOption, sizeof(SSnodeOpt));
|
||||
return pSnode;
|
||||
}
|
||||
|
||||
|
@ -29,4 +30,4 @@ int32_t sndProcessMsg(SSnode *pSnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
void sndDestroy(const char *path) {}
|
||||
void sndDestroy(const char *path) {}
|
||||
|
|
|
@ -40,24 +40,27 @@ typedef struct SMTbCursor SMTbCursor;
|
|||
typedef struct SMCtbCursor SMCtbCursor;
|
||||
typedef struct SMSmaCursor SMSmaCursor;
|
||||
|
||||
typedef SVCreateTbReq STbCfg;
|
||||
typedef STSma SSmaCfg;
|
||||
typedef SVCreateTbReq STbCfg;
|
||||
typedef SVCreateTSmaReq SSmaCfg;
|
||||
|
||||
// SMeta operations
|
||||
SMeta *metaOpen(const char *path, const SMetaCfg *pMetaCfg, SMemAllocatorFactory *pMAF);
|
||||
void metaClose(SMeta *pMeta);
|
||||
void metaRemove(const char *path);
|
||||
int metaCreateTable(SMeta *pMeta, STbCfg *pTbCfg);
|
||||
int metaDropTable(SMeta *pMeta, tb_uid_t uid);
|
||||
int metaCommit(SMeta *pMeta);
|
||||
SMeta * metaOpen(const char *path, const SMetaCfg *pMetaCfg, SMemAllocatorFactory *pMAF);
|
||||
void metaClose(SMeta *pMeta);
|
||||
void metaRemove(const char *path);
|
||||
int metaCreateTable(SMeta *pMeta, STbCfg *pTbCfg);
|
||||
int metaDropTable(SMeta *pMeta, tb_uid_t uid);
|
||||
int metaCommit(SMeta *pMeta);
|
||||
int32_t metaCreateTSma(SMeta *pMeta, SSmaCfg *pCfg);
|
||||
int32_t metaDropTSma(SMeta *pMeta, char *indexName);
|
||||
|
||||
// For Query
|
||||
STbCfg * metaGetTbInfoByUid(SMeta *pMeta, tb_uid_t uid);
|
||||
STbCfg * metaGetTbInfoByName(SMeta *pMeta, char *tbname, tb_uid_t *uid);
|
||||
SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, bool isinline);
|
||||
STSchema * metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver);
|
||||
SSmaCfg * metaGetSmaInfoByName(SMeta *pMeta, const char *indexName);
|
||||
STSma * metaGetSmaInfoByName(SMeta *pMeta, const char *indexName);
|
||||
STSmaWrapper * metaGetSmaInfoByUid(SMeta *pMeta, tb_uid_t uid);
|
||||
SArray * metaGetSmaTbUids(SMeta *pMeta, bool isDup);
|
||||
|
||||
SMTbCursor *metaOpenTbCursor(SMeta *pMeta);
|
||||
void metaCloseTbCursor(SMTbCursor *pTbCur);
|
||||
|
|
|
@ -33,7 +33,7 @@ int metaOpenDB(SMeta* pMeta);
|
|||
void metaCloseDB(SMeta* pMeta);
|
||||
int metaSaveTableToDB(SMeta* pMeta, STbCfg* pTbCfg);
|
||||
int metaRemoveTableFromDb(SMeta* pMeta, tb_uid_t uid);
|
||||
int metaSaveSmaToDB(SMeta* pMeta, SSmaCfg* pTbCfg);
|
||||
int metaSaveSmaToDB(SMeta* pMeta, STSma* pTbCfg);
|
||||
int metaRemoveSmaFromDb(SMeta* pMeta, const char* indexName);
|
||||
|
||||
// SMetaCache
|
||||
|
|
|
@ -41,55 +41,4 @@ static FORCE_INLINE int32_t tsdbEncodeTSmaKey(uint64_t tableUid, col_id_t colId,
|
|||
return len;
|
||||
}
|
||||
|
||||
#if 0
|
||||
|
||||
typedef struct {
|
||||
int minFid;
|
||||
int midFid;
|
||||
int maxFid;
|
||||
TSKEY minKey;
|
||||
} SRtn;
|
||||
|
||||
typedef struct {
|
||||
uint64_t uid;
|
||||
int64_t offset;
|
||||
int64_t size;
|
||||
} SKVRecord;
|
||||
|
||||
void tsdbGetRtnSnap(STsdb *pRepo, SRtn *pRtn);
|
||||
|
||||
static FORCE_INLINE int TSDB_KEY_FID(TSKEY key, int32_t days, int8_t precision) {
|
||||
if (key < 0) {
|
||||
return (int)((key + 1) / tsTickPerDay[precision] / days - 1);
|
||||
} else {
|
||||
return (int)((key / tsTickPerDay[precision] / days));
|
||||
}
|
||||
}
|
||||
|
||||
static FORCE_INLINE int tsdbGetFidLevel(int fid, SRtn *pRtn) {
|
||||
if (fid >= pRtn->maxFid) {
|
||||
return 0;
|
||||
} else if (fid >= pRtn->midFid) {
|
||||
return 1;
|
||||
} else if (fid >= pRtn->minFid) {
|
||||
return 2;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
#define TSDB_DEFAULT_BLOCK_ROWS(maxRows) ((maxRows)*4 / 5)
|
||||
|
||||
int tsdbEncodeKVRecord(void **buf, SKVRecord *pRecord);
|
||||
void *tsdbDecodeKVRecord(void *buf, SKVRecord *pRecord);
|
||||
void *tsdbCommitData(STsdbRepo *pRepo);
|
||||
int tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn);
|
||||
int tsdbWriteBlockInfoImpl(SDFile *pHeadf, STable *pTable, SArray *pSupA, SArray *pSubA, void **ppBuf, SBlockIdx *pIdx);
|
||||
int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf);
|
||||
int tsdbWriteBlockImpl(STsdbRepo *pRepo, STable *pTable, SDFile *pDFile, SDataCols *pDataCols, SBlock *pBlock,
|
||||
bool isLast, bool isSuper, void **ppBuf, void **ppCBuf);
|
||||
int tsdbApplyRtn(STsdbRepo *pRepo);
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* _TD_TSDB_SMA_H_ */
|
|
@ -226,7 +226,7 @@ int metaRemoveTableFromDb(SMeta *pMeta, tb_uid_t uid) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int metaSaveSmaToDB(SMeta *pMeta, SSmaCfg *pSmaCfg) {
|
||||
int metaSaveSmaToDB(SMeta *pMeta, STSma *pSmaCfg) {
|
||||
char buf[512] = {0}; // TODO: may overflow
|
||||
void *pBuf = NULL;
|
||||
DBT key1 = {0}, value1 = {0};
|
||||
|
@ -485,7 +485,7 @@ static int metaCtbIdxCb(DB *pIdx, const DBT *pKey, const DBT *pValue, DBT *pSKey
|
|||
}
|
||||
|
||||
static int metaSmaIdxCb(DB *pIdx, const DBT *pKey, const DBT *pValue, DBT *pSKey) {
|
||||
SSmaCfg *pSmaCfg = (SSmaCfg *)(pValue->app_data);
|
||||
STSma *pSmaCfg = (STSma *)(pValue->app_data);
|
||||
|
||||
memset(pSKey, 0, sizeof(*pSKey));
|
||||
pSKey->data = &(pSmaCfg->tableUid);
|
||||
|
@ -609,8 +609,8 @@ STbCfg *metaGetTbInfoByName(SMeta *pMeta, char *tbname, tb_uid_t *uid) {
|
|||
return pTbCfg;
|
||||
}
|
||||
|
||||
SSmaCfg *metaGetSmaInfoByName(SMeta *pMeta, const char *indexName) {
|
||||
SSmaCfg *pCfg = NULL;
|
||||
STSma *metaGetSmaInfoByName(SMeta *pMeta, const char *indexName) {
|
||||
STSma * pCfg = NULL;
|
||||
SMetaDB *pDB = pMeta->pDB;
|
||||
DBT key = {0};
|
||||
DBT value = {0};
|
||||
|
@ -629,7 +629,7 @@ SSmaCfg *metaGetSmaInfoByName(SMeta *pMeta, const char *indexName) {
|
|||
}
|
||||
|
||||
// Decode
|
||||
pCfg = (SSmaCfg *)malloc(sizeof(SSmaCfg));
|
||||
pCfg = (STSma *)malloc(sizeof(STSma));
|
||||
if (pCfg == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
@ -885,8 +885,8 @@ STSmaWrapper *metaGetSmaInfoByUid(SMeta *pMeta, tb_uid_t uid) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
DBT skey = {.data = &(pCur->uid)};
|
||||
DBT pval = {.size = sizeof(pCur->uid)};
|
||||
DBT skey = {.data = &(pCur->uid), .size = sizeof(pCur->uid)};
|
||||
DBT pval = {0};
|
||||
void *pBuf = NULL;
|
||||
|
||||
while (true) {
|
||||
|
@ -896,14 +896,16 @@ STSmaWrapper *metaGetSmaInfoByUid(SMeta *pMeta, tb_uid_t uid) {
|
|||
STSma *tptr = (STSma *)realloc(pSW->tSma, pSW->number * sizeof(STSma));
|
||||
if (tptr == NULL) {
|
||||
metaCloseSmaCurosr(pCur);
|
||||
tdDestroyTSmaWrapper(pSW, true);
|
||||
tdDestroyTSmaWrapper(pSW);
|
||||
tfree(pSW);
|
||||
return NULL;
|
||||
}
|
||||
pSW->tSma = tptr;
|
||||
pBuf = pval.data;
|
||||
if (tDecodeTSma(pBuf, pSW->tSma + pSW->number - 1) == NULL) {
|
||||
metaCloseSmaCurosr(pCur);
|
||||
tdDestroyTSmaWrapper(pSW, true);
|
||||
tdDestroyTSmaWrapper(pSW);
|
||||
tfree(pSW);
|
||||
return NULL;
|
||||
}
|
||||
continue;
|
||||
|
@ -912,10 +914,49 @@ STSmaWrapper *metaGetSmaInfoByUid(SMeta *pMeta, tb_uid_t uid) {
|
|||
}
|
||||
|
||||
metaCloseSmaCurosr(pCur);
|
||||
|
||||
|
||||
return pSW;
|
||||
}
|
||||
|
||||
SArray *metaGetSmaTbUids(SMeta *pMeta, bool isDup) {
|
||||
SArray * pUids = NULL;
|
||||
SMetaDB *pDB = pMeta->pDB;
|
||||
DBC * pCur = NULL;
|
||||
DBT pkey = {0}, pval = {0};
|
||||
int ret;
|
||||
|
||||
pUids = taosArrayInit(16, sizeof(tb_uid_t));
|
||||
|
||||
if (!pUids) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// TODO: lock?
|
||||
ret = pDB->pCtbIdx->cursor(pDB->pSmaIdx, NULL, &pCur, 0);
|
||||
if (ret != 0) {
|
||||
taosArrayDestroy(pUids);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void *pBuf = NULL;
|
||||
|
||||
// TODO: lock?
|
||||
while (true) {
|
||||
ret = pCur->get(pCur, &pkey, &pval, isDup ? DB_NEXT_DUP : DB_NEXT_NODUP);
|
||||
if(ret == 0) {
|
||||
taosArrayPush(pUids, pkey.data);
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (pCur) {
|
||||
pCur->close(pCur);
|
||||
}
|
||||
|
||||
return pUids;
|
||||
}
|
||||
|
||||
static void metaDBWLock(SMetaDB *pDB) {
|
||||
#if IMPL_WITH_LOCK
|
||||
pthread_rwlock_wrlock(&(pDB->rwlock));
|
||||
|
|
|
@ -107,19 +107,27 @@ int metaRemoveTableFromIdx(SMeta *pMeta, tb_uid_t uid) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int metaCreateSma(SMeta *pMeta, SSmaCfg *pSmaCfg) {
|
||||
// Validate the tbOptions
|
||||
// if (metaValidateTbCfg(pMeta, pTbCfg) < 0) {
|
||||
// // TODO: handle error
|
||||
// return -1;
|
||||
// }
|
||||
int32_t metaCreateTSma(SMeta *pMeta, SSmaCfg *pCfg) {
|
||||
// TODO: Validate the cfg
|
||||
// The table uid should exists and be super table or common table.
|
||||
// Check other cfg value
|
||||
|
||||
// TODO: add atomicity
|
||||
|
||||
if (metaSaveSmaToDB(pMeta, pSmaCfg) < 0) {
|
||||
if (metaSaveSmaToDB(pMeta, &pCfg->tSma) < 0) {
|
||||
// TODO: handle error
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t metaDropTSma(SMeta *pMeta, char* indexName) {
|
||||
// TODO: Validate the cfg
|
||||
// TODO: add atomicity
|
||||
|
||||
if (metaRemoveSmaFromDb(pMeta, indexName) < 0) {
|
||||
// TODO: handle error
|
||||
return -1;
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
|
@ -50,3 +50,4 @@ int metaDropTable(SMeta *pMeta, tb_uid_t uid) {
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -376,7 +376,10 @@ int32_t tqProcessSetConnReq(STQ* pTq, char* msg) {
|
|||
for (int i = 0; i < TQ_BUFFER_SIZE; i++) {
|
||||
pTopic->buffer.output[i].status = 0;
|
||||
STqReadHandle* pReadHandle = tqInitSubmitMsgScanner(pTq->pVnodeMeta);
|
||||
SReadHandle handle = {.reader = pReadHandle, .meta = pTq->pVnodeMeta};
|
||||
SReadHandle handle = {
|
||||
.reader = pReadHandle,
|
||||
.meta = pTq->pVnodeMeta,
|
||||
};
|
||||
pTopic->buffer.output[i].pReadHandle = pReadHandle;
|
||||
pTopic->buffer.output[i].task = qCreateStreamExecTaskInfo(req.qmsg, &handle);
|
||||
}
|
||||
|
|
|
@ -28,6 +28,7 @@ STqReadHandle* tqInitSubmitMsgScanner(SMeta* pMeta) {
|
|||
pReadHandle->sver = -1;
|
||||
pReadHandle->pSchema = NULL;
|
||||
pReadHandle->pSchemaWrapper = NULL;
|
||||
pReadHandle->tbIdHash = NULL;
|
||||
return pReadHandle;
|
||||
}
|
||||
|
||||
|
|
|
@ -69,7 +69,7 @@ int vnodeApplyWMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
|
|||
// TODO: handle error
|
||||
}
|
||||
|
||||
// TODO: maybe need to clear the requst struct
|
||||
// TODO: maybe need to clear the request struct
|
||||
free(vCreateTbReq.stbCfg.pSchema);
|
||||
free(vCreateTbReq.stbCfg.pTagSchema);
|
||||
free(vCreateTbReq.name);
|
||||
|
@ -133,13 +133,44 @@ int vnodeApplyWMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
|
|||
}
|
||||
} break;
|
||||
case TDMT_VND_CREATE_SMA: { // timeRangeSMA
|
||||
// 1. tdCreateSmaMeta(pVnode->pMeta,...);
|
||||
// 2. tdCreateSmaDataInit();
|
||||
// 3. tdCreateSmaData
|
||||
SSmaCfg vCreateSmaReq = {0};
|
||||
if (tDeserializeSVCreateTSmaReq(POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), &vCreateSmaReq) == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (metaCreateTSma(pVnode->pMeta, &vCreateSmaReq) < 0) {
|
||||
// TODO: handle error
|
||||
tdDestroyTSma(&vCreateSmaReq.tSma);
|
||||
return -1;
|
||||
}
|
||||
// TODO: send msg to stream computing to create tSma
|
||||
// if ((send msg to stream computing) < 0) {
|
||||
// tdDestroyTSma(&vCreateSmaReq);
|
||||
// return -1;
|
||||
// }
|
||||
tdDestroyTSma(&vCreateSmaReq.tSma);
|
||||
// TODO: return directly or go on follow steps?
|
||||
} break;
|
||||
case TDMT_VND_CANCEL_SMA: { // timeRangeSMA
|
||||
} break;
|
||||
case TDMT_VND_DROP_SMA: { // timeRangeSMA
|
||||
SVDropTSmaReq vDropSmaReq = {0};
|
||||
if (tDeserializeSVDropTSmaReq(POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), &vDropSmaReq) == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (metaDropTSma(pVnode->pMeta, vDropSmaReq.indexName) < 0) {
|
||||
// TODO: handle error
|
||||
return -1;
|
||||
}
|
||||
// TODO: send msg to stream computing to drop tSma
|
||||
// if ((send msg to stream computing) < 0) {
|
||||
// tdDestroyTSma(&vCreateSmaReq);
|
||||
// return -1;
|
||||
// }
|
||||
// TODO: return directly or go on follow steps?
|
||||
} break;
|
||||
default:
|
||||
ASSERT(0);
|
||||
|
|
|
@ -94,8 +94,8 @@ TEST(testCase, tSmaEncodeDecodeTest) {
|
|||
}
|
||||
|
||||
// resource release
|
||||
tdDestroyTSma(&tSma, false);
|
||||
tdDestroyTSmaWrapper(&dstTSmaWrapper, false);
|
||||
tdDestroyTSma(&tSma);
|
||||
tdDestroyTSmaWrapper(&dstTSmaWrapper);
|
||||
}
|
||||
|
||||
TEST(testCase, tSma_DB_Put_Get_Del_Test) {
|
||||
|
@ -103,6 +103,7 @@ TEST(testCase, tSma_DB_Put_Get_Del_Test) {
|
|||
const char *smaIndexName2 = "sma_index_test_2";
|
||||
const char *smaTestDir = "./smaTest";
|
||||
const uint64_t tbUid = 1234567890;
|
||||
const uint32_t nCntTSma = 2;
|
||||
// encode
|
||||
STSma tSma = {0};
|
||||
tSma.version = 0;
|
||||
|
@ -125,7 +126,7 @@ TEST(testCase, tSma_DB_Put_Get_Del_Test) {
|
|||
}
|
||||
|
||||
SMeta * pMeta = NULL;
|
||||
SSmaCfg * pSmaCfg = &tSma;
|
||||
STSma * pSmaCfg = &tSma;
|
||||
const SMetaCfg *pMetaCfg = &defaultMetaOptions;
|
||||
|
||||
taosRemoveDir(smaTestDir);
|
||||
|
@ -146,20 +147,22 @@ TEST(testCase, tSma_DB_Put_Get_Del_Test) {
|
|||
metaSaveSmaToDB(pMeta, pSmaCfg);
|
||||
|
||||
// get value by indexName
|
||||
SSmaCfg *qSmaCfg = NULL;
|
||||
STSma *qSmaCfg = NULL;
|
||||
qSmaCfg = metaGetSmaInfoByName(pMeta, smaIndexName1);
|
||||
assert(qSmaCfg != NULL);
|
||||
printf("name1 = %s\n", qSmaCfg->indexName);
|
||||
EXPECT_STRCASEEQ(qSmaCfg->indexName, smaIndexName1);
|
||||
EXPECT_EQ(qSmaCfg->tableUid, tSma.tableUid);
|
||||
tdDestroyTSma(qSmaCfg, true);
|
||||
tdDestroyTSma(qSmaCfg);
|
||||
tfree(qSmaCfg);
|
||||
|
||||
qSmaCfg = metaGetSmaInfoByName(pMeta, smaIndexName2);
|
||||
assert(qSmaCfg != NULL);
|
||||
printf("name2 = %s\n", qSmaCfg->indexName);
|
||||
EXPECT_STRCASEEQ(qSmaCfg->indexName, smaIndexName2);
|
||||
EXPECT_EQ(qSmaCfg->interval, tSma.interval);
|
||||
tdDestroyTSma(qSmaCfg, true);
|
||||
tdDestroyTSma(qSmaCfg);
|
||||
tfree(qSmaCfg);
|
||||
|
||||
// get index name by table uid
|
||||
SMSmaCursor *pSmaCur = metaOpenSmaCursor(pMeta, tbUid);
|
||||
|
@ -173,23 +176,36 @@ TEST(testCase, tSma_DB_Put_Get_Del_Test) {
|
|||
printf("indexName = %s\n", indexName);
|
||||
++indexCnt;
|
||||
}
|
||||
EXPECT_EQ(indexCnt, 2);
|
||||
EXPECT_EQ(indexCnt, nCntTSma);
|
||||
metaCloseSmaCurosr(pSmaCur);
|
||||
|
||||
// get wrapper by table uid
|
||||
STSmaWrapper *pSW = metaGetSmaInfoByUid(pMeta, tbUid);
|
||||
assert(pSW != NULL);
|
||||
EXPECT_EQ(pSW->number, 2);
|
||||
EXPECT_EQ(pSW->number, nCntTSma);
|
||||
EXPECT_STRCASEEQ(pSW->tSma->indexName, smaIndexName1);
|
||||
EXPECT_EQ(pSW->tSma->tableUid, tSma.tableUid);
|
||||
EXPECT_STRCASEEQ((pSW->tSma + 1)->indexName, smaIndexName2);
|
||||
EXPECT_EQ((pSW->tSma + 1)->tableUid, tSma.tableUid);
|
||||
|
||||
tdDestroyTSmaWrapper(pSW);
|
||||
tfree(pSW);
|
||||
|
||||
// get all sma table uids
|
||||
SArray *pUids = metaGetSmaTbUids(pMeta, false);
|
||||
assert(pUids != NULL);
|
||||
for (uint32_t i = 0; i < taosArrayGetSize(pUids); ++i) {
|
||||
printf("metaGetSmaTbUids: uid[%" PRIu32 "] = %" PRIi64 "\n", i, *(tb_uid_t *)taosArrayGet(pUids, i));
|
||||
// printf("metaGetSmaTbUids: index[%" PRIu32 "] = %s", i, (char *)taosArrayGet(pUids, i));
|
||||
}
|
||||
EXPECT_EQ(taosArrayGetSize(pUids), 1);
|
||||
taosArrayDestroy(pUids);
|
||||
|
||||
// resource release
|
||||
metaRemoveSmaFromDb(pMeta, smaIndexName1);
|
||||
metaRemoveSmaFromDb(pMeta, smaIndexName2);
|
||||
|
||||
tdDestroyTSma(&tSma, false);
|
||||
tdDestroyTSma(&tSma);
|
||||
metaClose(pMeta);
|
||||
}
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@ TARGET_INCLUDE_DIRECTORIES(
|
|||
PRIVATE "${CMAKE_SOURCE_DIR}/source/libs/catalog/inc"
|
||||
)
|
||||
|
||||
add_test(
|
||||
NAME catalogTest
|
||||
COMMAND catalogTest
|
||||
)
|
||||
# add_test(
|
||||
# NAME catalogTest
|
||||
# COMMAND catalogTest
|
||||
# )
|
||||
|
|
|
@ -1058,6 +1058,45 @@ TEST_F(IndexEnv2, testIndex_read_performance4) {
|
|||
std::cout << "reader sz: " << index->SearchOne("tag1", "Hello") << std::endl;
|
||||
assert(3 == index->SearchOne("tag10", "Hello"));
|
||||
}
|
||||
TEST_F(IndexEnv2, testIndex_cache_del) {
|
||||
std::string path = "/tmp/cache_and_tfile";
|
||||
if (index->Init(path) != 0) {
|
||||
}
|
||||
for (int i = 0; i < 100; i++) {
|
||||
index->PutOneTarge("tag10", "Hello", i);
|
||||
}
|
||||
index->Del("tag10", "Hello", 12);
|
||||
index->Del("tag10", "Hello", 11);
|
||||
|
||||
// index->WriteMultiMillonData("tag10", "xxxxxxxxxxxxxx", 100 * 10000);
|
||||
index->Del("tag10", "Hello", 17);
|
||||
EXPECT_EQ(97, index->SearchOne("tag10", "Hello"));
|
||||
|
||||
index->PutOneTarge("tag10", "Hello", 17); // add again
|
||||
EXPECT_EQ(98, index->SearchOne("tag10", "Hello"));
|
||||
|
||||
// del all
|
||||
for (int i = 0; i < 200; i++) {
|
||||
index->Del("tag10", "Hello", i);
|
||||
}
|
||||
EXPECT_EQ(0, index->SearchOne("tag10", "Hello"));
|
||||
|
||||
// add other item
|
||||
for (int i = 0; i < 2000; i++) {
|
||||
index->PutOneTarge("tag10", "World", i);
|
||||
}
|
||||
|
||||
for (int i = 0; i < 2000; i++) {
|
||||
index->PutOneTarge("tag10", "Hello", i);
|
||||
}
|
||||
EXPECT_EQ(2000, index->SearchOne("tag10", "Hello"));
|
||||
|
||||
for (int i = 0; i < 2000; i++) {
|
||||
index->Del("tag10", "Hello", i);
|
||||
}
|
||||
EXPECT_EQ(0, index->SearchOne("tag10", "Hello"));
|
||||
}
|
||||
|
||||
TEST_F(IndexEnv2, testIndex_del) {
|
||||
std::string path = "/tmp/cache_and_tfile";
|
||||
if (index->Init(path) != 0) {
|
||||
|
@ -1069,8 +1108,6 @@ TEST_F(IndexEnv2, testIndex_del) {
|
|||
index->Del("tag10", "Hello", 11);
|
||||
|
||||
index->WriteMultiMillonData("tag10", "xxxxxxxxxxxxxx", 100 * 10000);
|
||||
|
||||
EXPECT_EQ(98, index->SearchOne("tag10", "Hello"));
|
||||
// std::cout << "reader sz: " << index->SearchOne("tag1", "Hello") << std::endl;
|
||||
// assert(3 == index->SearchOne("tag10", "Hello"));
|
||||
index->Del("tag10", "Hello", 17);
|
||||
EXPECT_EQ(97, index->SearchOne("tag10", "Hello"));
|
||||
}
|
||||
|
|
|
@ -3,10 +3,11 @@ add_library(monitor STATIC ${MONITOR_SRC})
|
|||
target_include_directories(
|
||||
monitor
|
||||
PUBLIC "${CMAKE_SOURCE_DIR}/include/libs/monitor"
|
||||
PUBLIC "${CMAKE_SOURCE_DIR}/include/libs/transport"
|
||||
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
|
||||
)
|
||||
|
||||
target_link_libraries(monitor os util common)
|
||||
target_link_libraries(monitor os util common transport)
|
||||
|
||||
if(${BUILD_TEST})
|
||||
add_subdirectory(test)
|
||||
|
|
|
@ -54,6 +54,7 @@ typedef struct {
|
|||
int32_t maxLogs;
|
||||
const char *server;
|
||||
uint16_t port;
|
||||
bool comp;
|
||||
SMonState state;
|
||||
} SMonitor;
|
||||
|
||||
|
|
|
@ -45,6 +45,7 @@ int32_t monInit(const SMonCfg *pCfg) {
|
|||
tsMonitor.maxLogs = pCfg->maxLogs;
|
||||
tsMonitor.server = pCfg->server;
|
||||
tsMonitor.port = pCfg->port;
|
||||
tsMonitor.comp = pCfg->comp;
|
||||
tsLogFp = monRecordLog;
|
||||
tsMonitor.state.time = taosGetTimestampMs();
|
||||
pthread_mutex_init(&tsMonitor.lock, NULL);
|
||||
|
@ -98,6 +99,9 @@ void monSetBasicInfo(SMonInfo *pMonitor, SMonBasicInfo *pInfo) {
|
|||
tjsonAddStringToObject(pJson, "ts", buf);
|
||||
tjsonAddDoubleToObject(pJson, "dnode_id", pInfo->dnode_id);
|
||||
tjsonAddStringToObject(pJson, "dnode_ep", pInfo->dnode_ep);
|
||||
snprintf(buf, sizeof(buf), "%" PRId64, pInfo->cluster_id);
|
||||
tjsonAddStringToObject(pJson, "cluster_id", buf);
|
||||
tjsonAddDoubleToObject(pJson, "protocol", pInfo->protocol);
|
||||
}
|
||||
|
||||
void monSetClusterInfo(SMonInfo *pMonitor, SMonClusterInfo *pInfo) {
|
||||
|
@ -375,7 +379,8 @@ void monSendReport(SMonInfo *pMonitor) {
|
|||
|
||||
char *pCont = tjsonToString(pMonitor->pJson);
|
||||
if (pCont != NULL) {
|
||||
taosSendHttpReport(tsMonitor.server, tsMonitor.port, pCont, strlen(pCont));
|
||||
EHttpCompFlag flag = tsMonitor.comp ? HTTP_GZIP : HTTP_FLAT;
|
||||
taosSendHttpReport(tsMonitor.server, tsMonitor.port, pCont, strlen(pCont), flag);
|
||||
free(pCont);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -22,6 +22,7 @@ class MonitorTest : public ::testing::Test {
|
|||
cfg.maxLogs = 2;
|
||||
cfg.port = 80;
|
||||
cfg.server = "localhost";
|
||||
cfg.comp = 0;
|
||||
monInit(&cfg);
|
||||
}
|
||||
|
||||
|
@ -44,6 +45,8 @@ class MonitorTest : public ::testing::Test {
|
|||
void MonitorTest::GetBasicInfo(SMonInfo *pMonitor, SMonBasicInfo *pInfo) {
|
||||
pInfo->dnode_id = 1;
|
||||
strcpy(pInfo->dnode_ep, "localhost");
|
||||
pInfo->cluster_id = 6980428120398645172;
|
||||
pInfo->protocol = 1;
|
||||
}
|
||||
|
||||
void MonitorTest::GetClusterInfo(SMonInfo *pMonitor, SMonClusterInfo *pInfo) {
|
||||
|
|
|
@ -28,9 +28,72 @@ extern "C" {
|
|||
#include "syncRaft.h"
|
||||
#include "taosdef.h"
|
||||
|
||||
void appendEntries(SRaft *pRaft, const SyncAppendEntries *pMsg);
|
||||
|
||||
void onAppendEntries(SRaft *pRaft, const SyncAppendEntries *pMsg);
|
||||
// TLA+ Spec
|
||||
// HandleAppendEntriesRequest(i, j, m) ==
|
||||
// LET logOk == \/ m.mprevLogIndex = 0
|
||||
// \/ /\ m.mprevLogIndex > 0
|
||||
// /\ m.mprevLogIndex <= Len(log[i])
|
||||
// /\ m.mprevLogTerm = log[i][m.mprevLogIndex].term
|
||||
// IN /\ m.mterm <= currentTerm[i]
|
||||
// /\ \/ /\ \* reject request
|
||||
// \/ m.mterm < currentTerm[i]
|
||||
// \/ /\ m.mterm = currentTerm[i]
|
||||
// /\ state[i] = Follower
|
||||
// /\ \lnot logOk
|
||||
// /\ Reply([mtype |-> AppendEntriesResponse,
|
||||
// mterm |-> currentTerm[i],
|
||||
// msuccess |-> FALSE,
|
||||
// mmatchIndex |-> 0,
|
||||
// msource |-> i,
|
||||
// mdest |-> j],
|
||||
// m)
|
||||
// /\ UNCHANGED <<serverVars, logVars>>
|
||||
// \/ \* return to follower state
|
||||
// /\ m.mterm = currentTerm[i]
|
||||
// /\ state[i] = Candidate
|
||||
// /\ state' = [state EXCEPT ![i] = Follower]
|
||||
// /\ UNCHANGED <<currentTerm, votedFor, logVars, messages>>
|
||||
// \/ \* accept request
|
||||
// /\ m.mterm = currentTerm[i]
|
||||
// /\ state[i] = Follower
|
||||
// /\ logOk
|
||||
// /\ LET index == m.mprevLogIndex + 1
|
||||
// IN \/ \* already done with request
|
||||
// /\ \/ m.mentries = << >>
|
||||
// \/ /\ m.mentries /= << >>
|
||||
// /\ Len(log[i]) >= index
|
||||
// /\ log[i][index].term = m.mentries[1].term
|
||||
// \* This could make our commitIndex decrease (for
|
||||
// \* example if we process an old, duplicated request),
|
||||
// \* but that doesn't really affect anything.
|
||||
// /\ commitIndex' = [commitIndex EXCEPT ![i] =
|
||||
// m.mcommitIndex]
|
||||
// /\ Reply([mtype |-> AppendEntriesResponse,
|
||||
// mterm |-> currentTerm[i],
|
||||
// msuccess |-> TRUE,
|
||||
// mmatchIndex |-> m.mprevLogIndex +
|
||||
// Len(m.mentries),
|
||||
// msource |-> i,
|
||||
// mdest |-> j],
|
||||
// m)
|
||||
// /\ UNCHANGED <<serverVars, log>>
|
||||
// \/ \* conflict: remove 1 entry
|
||||
// /\ m.mentries /= << >>
|
||||
// /\ Len(log[i]) >= index
|
||||
// /\ log[i][index].term /= m.mentries[1].term
|
||||
// /\ LET new == [index2 \in 1..(Len(log[i]) - 1) |->
|
||||
// log[i][index2]]
|
||||
// IN log' = [log EXCEPT ![i] = new]
|
||||
// /\ UNCHANGED <<serverVars, commitIndex, messages>>
|
||||
// \/ \* no conflict: append entry
|
||||
// /\ m.mentries /= << >>
|
||||
// /\ Len(log[i]) = m.mprevLogIndex
|
||||
// /\ log' = [log EXCEPT ![i] =
|
||||
// Append(log[i], m.mentries[1])]
|
||||
// /\ UNCHANGED <<serverVars, commitIndex, messages>>
|
||||
// /\ UNCHANGED <<candidateVars, leaderVars>>
|
||||
//
|
||||
int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -28,7 +28,20 @@ extern "C" {
|
|||
#include "syncRaft.h"
|
||||
#include "taosdef.h"
|
||||
|
||||
void onAppendEntriesReply(SRaft *pRaft, const SyncAppendEntriesReply *pMsg);
|
||||
// TLA+ Spec
|
||||
// HandleAppendEntriesResponse(i, j, m) ==
|
||||
// /\ m.mterm = currentTerm[i]
|
||||
// /\ \/ /\ m.msuccess \* successful
|
||||
// /\ nextIndex' = [nextIndex EXCEPT ![i][j] = m.mmatchIndex + 1]
|
||||
// /\ matchIndex' = [matchIndex EXCEPT ![i][j] = m.mmatchIndex]
|
||||
// \/ /\ \lnot m.msuccess \* not successful
|
||||
// /\ nextIndex' = [nextIndex EXCEPT ![i][j] =
|
||||
// Max({nextIndex[i][j] - 1, 1})]
|
||||
// /\ UNCHANGED <<matchIndex>>
|
||||
// /\ Discard(m)
|
||||
// /\ UNCHANGED <<serverVars, candidateVars, logVars, elections>>
|
||||
//
|
||||
int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -26,8 +26,22 @@ extern "C" {
|
|||
#include "syncInt.h"
|
||||
#include "taosdef.h"
|
||||
|
||||
void syncNodeElect(SSyncNode* pSyncNode);
|
||||
void syncNodeRequestVotePeers(SSyncNode* pSyncNode);
|
||||
// TLA+ Spec
|
||||
// RequestVote(i, j) ==
|
||||
// /\ state[i] = Candidate
|
||||
// /\ j \notin votesResponded[i]
|
||||
// /\ Send([mtype |-> RequestVoteRequest,
|
||||
// mterm |-> currentTerm[i],
|
||||
// mlastLogTerm |-> LastTerm(log[i]),
|
||||
// mlastLogIndex |-> Len(log[i]),
|
||||
// msource |-> i,
|
||||
// mdest |-> j])
|
||||
// /\ UNCHANGED <<serverVars, candidateVars, leaderVars, logVars>>
|
||||
//
|
||||
int32_t syncNodeRequestVotePeers(SSyncNode* pSyncNode);
|
||||
|
||||
int32_t syncNodeElect(SSyncNode* pSyncNode);
|
||||
int32_t syncNodeRequestVote(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncRequestVote* pMsg);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -30,23 +30,25 @@ extern "C" {

#define TIMER_MAX_MS 0x7FFFFFFF
#define PING_TIMER_MS 1000
#define ELECT_TIMER_MS_MIN 150
#define ELECT_TIMER_MS_MAX 300
#define ELECT_TIMER_MS_RANGE (ELECT_TIMER_MS_MAX - ELECT_TIMER_MS_MIN)
#define HEARTBEAT_TIMER_MS 30

#define EMPTY_RAFT_ID ((SRaftId){.addr = 0, .vgId = 0})

typedef struct SSyncEnv {
  tmr_h pEnvTickTimer;
  tmr_h pTimerManager;
  char  name[128];

} SSyncEnv;

extern SSyncEnv* gSyncEnv;

int32_t syncEnvStart();

int32_t syncEnvStop();

tmr_h syncEnvStartTimer(TAOS_TMR_CALLBACK fp, int mseconds, void* param);

void syncEnvStopTimer(tmr_h* pTimer);
tmr_h syncEnvStartTimer(TAOS_TMR_CALLBACK fp, int mseconds, void* param);
void syncEnvStopTimer(tmr_h* pTimer);

#ifdef __cplusplus
}

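ELECT_TIMER_MS_MIN/MAX/RANGE implement Raft's randomized election timeout: each follower waits a different interval in [150, 300) ms before starting an election, which keeps split votes rare. A standalone sketch of picking such a timeout, mirroring the syncUtilElectRandomMS helper added later in this commit (seeding and printing are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ELECT_TIMER_MS_MIN   150
#define ELECT_TIMER_MS_MAX   300
#define ELECT_TIMER_MS_RANGE (ELECT_TIMER_MS_MAX - ELECT_TIMER_MS_MIN)

/* pick a random election timeout in [MIN, MAX) so followers do not all time out at once */
static int electRandomMS(void) { return ELECT_TIMER_MS_MIN + rand() % ELECT_TIMER_MS_RANGE; }

int main() {
  srand((unsigned)time(NULL));
  for (int i = 0; i < 5; ++i) {
    printf("election timeout candidate %d: %d ms\n", i, electRandomMS());
  }
  return 0;
}
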
|
@ -154,9 +154,8 @@ typedef struct SSyncNode {
|
|||
SyncIndex commitIndex;
|
||||
|
||||
// timer
|
||||
tmr_h pPingTimer;
|
||||
int32_t pingTimerMS;
|
||||
// uint8_t pingTimerEnable;
|
||||
tmr_h pPingTimer;
|
||||
int32_t pingTimerMS;
|
||||
uint64_t pingTimerLogicClock;
|
||||
uint64_t pingTimerLogicClockUser;
|
||||
TAOS_TMR_CALLBACK FpPingTimer; // Timer Fp
|
||||
|
@ -164,13 +163,15 @@ typedef struct SSyncNode {
|
|||
|
||||
tmr_h pElectTimer;
|
||||
int32_t electTimerMS;
|
||||
uint8_t electTimerEnable;
|
||||
uint64_t electTimerLogicClock;
|
||||
uint64_t electTimerLogicClockUser;
|
||||
TAOS_TMR_CALLBACK FpElectTimer; // Timer Fp
|
||||
uint64_t electTimerCounter;
|
||||
|
||||
tmr_h pHeartbeatTimer;
|
||||
int32_t heartbeatTimerMS;
|
||||
uint8_t heartbeatTimerEnable;
|
||||
uint64_t heartbeatTimerLogicClock;
|
||||
uint64_t heartbeatTimerLogicClockUser;
|
||||
TAOS_TMR_CALLBACK FpHeartbeatTimer; // Timer Fp
|
||||
uint64_t heartbeatTimerCounter;
|
||||
|
||||
|
@ -187,26 +188,22 @@ typedef struct SSyncNode {
|
|||
|
||||
SSyncNode* syncNodeOpen(const SSyncInfo* pSyncInfo);
|
||||
void syncNodeClose(SSyncNode* pSyncNode);
|
||||
void syncNodePingAll(SSyncNode* pSyncNode);
|
||||
void syncNodePingPeers(SSyncNode* pSyncNode);
|
||||
void syncNodePingSelf(SSyncNode* pSyncNode);
|
||||
|
||||
int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRpcMsg* pMsg);
|
||||
int32_t syncNodeSendMsgByInfo(const SNodeInfo* nodeInfo, SSyncNode* pSyncNode, SRpcMsg* pMsg);
|
||||
int32_t syncNodePing(SSyncNode* pSyncNode, const SRaftId* destRaftId, SyncPing* pMsg);
|
||||
int32_t syncNodePingAll(SSyncNode* pSyncNode);
|
||||
int32_t syncNodePingPeers(SSyncNode* pSyncNode);
|
||||
int32_t syncNodePingSelf(SSyncNode* pSyncNode);
|
||||
|
||||
int32_t syncNodeStartPingTimer(SSyncNode* pSyncNode);
|
||||
int32_t syncNodeStopPingTimer(SSyncNode* pSyncNode);
|
||||
|
||||
int32_t syncNodeStartElectTimer(SSyncNode* pSyncNode);
|
||||
int32_t syncNodeStartElectTimer(SSyncNode* pSyncNode, int32_t ms);
|
||||
int32_t syncNodeStopElectTimer(SSyncNode* pSyncNode);
|
||||
|
||||
int32_t syncNodeRestartElectTimer(SSyncNode* pSyncNode, int32_t ms);
|
||||
int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode);
|
||||
int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode);
|
||||
|
||||
int32_t syncNodeRequestVote(SSyncNode* ths, const SyncRequestVote* pMsg);
|
||||
int32_t syncNodeOnRequestVoteCb(SSyncNode* ths, SyncRequestVote* pMsg);
|
||||
int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg);
|
||||
int32_t syncNodeAppendEntries(SSyncNode* ths, const SyncAppendEntries* pMsg);
|
||||
int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg);
|
||||
int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -59,6 +59,7 @@ typedef struct SyncTimeout {
  uint32_t msgType;
  ESyncTimeoutType timeoutType;
  uint64_t logicClock;
  int32_t timerMS;
  void* data;
} SyncTimeout;

@ -69,7 +70,7 @@ void syncTimeoutDeserialize(const char* buf, uint32_t len, SyncTimeout*
void syncTimeout2RpcMsg(const SyncTimeout* pMsg, SRpcMsg* pRpcMsg);
void syncTimeoutFromRpcMsg(const SRpcMsg* pRpcMsg, SyncTimeout* pMsg);
cJSON* syncTimeout2Json(const SyncTimeout* pMsg);
SyncTimeout* syncTimeoutBuild2(ESyncTimeoutType timeoutType, uint64_t logicClock, void* data);
SyncTimeout* syncTimeoutBuild2(ESyncTimeoutType timeoutType, uint64_t logicClock, int32_t timerMS, void* data);

// ---------------------------------------------
typedef struct SyncPing {

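SyncTimeout now records the timerMS that produced the event, and syncTimeoutBuild2 gains the matching parameter, so the handler can tell a 1000 ms ping tick from a 150-300 ms election tick. A simplified, self-contained sketch of that message shape and builder (TimeoutMsg and timeoutBuild2 are stand-ins, not the real SyncTimeout API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef enum { SYNC_TIMEOUT_PING, SYNC_TIMEOUT_ELECTION, SYNC_TIMEOUT_HEARTBEAT } ETimeoutType;

/* simplified stand-in for SyncTimeout: type, logical clock and the interval that fired */
typedef struct {
  ETimeoutType timeoutType;
  uint64_t     logicClock;
  int32_t      timerMS;
  void        *data;
} TimeoutMsg;

static TimeoutMsg *timeoutBuild2(ETimeoutType type, uint64_t logicClock, int32_t timerMS, void *data) {
  TimeoutMsg *pMsg = malloc(sizeof(TimeoutMsg));
  if (pMsg == NULL) return NULL;
  pMsg->timeoutType = type;
  pMsg->logicClock = logicClock;
  pMsg->timerMS = timerMS;
  pMsg->data = data;
  return pMsg;
}

int main() {
  TimeoutMsg *pMsg = timeoutBuild2(SYNC_TIMEOUT_ELECTION, 7, 230, NULL);
  printf("type=%d clock=%llu ms=%d\n", pMsg->timeoutType, (unsigned long long)pMsg->logicClock, pMsg->timerMS);
  free(pMsg);
  return 0;
}
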
|
@ -26,8 +26,6 @@ extern "C" {
|
|||
#include "syncRaft.h"
|
||||
#include "taosdef.h"
|
||||
|
||||
void onMessage(SRaft *pRaft, void *pMsg);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -32,28 +32,18 @@ extern "C" {
|
|||
#define RAFT_STORE_PATH_LEN 128
|
||||
|
||||
typedef struct SRaftStore {
|
||||
SyncTerm currentTerm;
|
||||
SRaftId voteFor;
|
||||
// FileFd fd;
|
||||
SyncTerm currentTerm;
|
||||
SRaftId voteFor;
|
||||
TdFilePtr pFile;
|
||||
char path[RAFT_STORE_PATH_LEN];
|
||||
} SRaftStore;
|
||||
|
||||
SRaftStore *raftStoreOpen(const char *path);
|
||||
|
||||
static int32_t raftStoreInit(SRaftStore *pRaftStore);
|
||||
|
||||
int32_t raftStoreClose(SRaftStore *pRaftStore);
|
||||
|
||||
int32_t raftStorePersist(SRaftStore *pRaftStore);
|
||||
|
||||
static bool raftStoreFileExist(char *path);
|
||||
|
||||
int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len);
|
||||
|
||||
int32_t raftStoreDeserialize(SRaftStore *pRaftStore, char *buf, size_t len);
|
||||
|
||||
void raftStorePrint(SRaftStore *pRaftStore);
|
||||
int32_t raftStoreClose(SRaftStore *pRaftStore);
|
||||
int32_t raftStorePersist(SRaftStore *pRaftStore);
|
||||
int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len);
|
||||
int32_t raftStoreDeserialize(SRaftStore *pRaftStore, char *buf, size_t len);
|
||||
void raftStorePrint(SRaftStore *pRaftStore);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -26,7 +26,34 @@ extern "C" {
|
|||
#include "syncInt.h"
|
||||
#include "taosdef.h"
|
||||
|
||||
void syncNodeAppendEntriesPeers(SSyncNode* pSyncNode);
|
||||
// TLA+ Spec
|
||||
// AppendEntries(i, j) ==
|
||||
// /\ i /= j
|
||||
// /\ state[i] = Leader
|
||||
// /\ LET prevLogIndex == nextIndex[i][j] - 1
|
||||
// prevLogTerm == IF prevLogIndex > 0 THEN
|
||||
// log[i][prevLogIndex].term
|
||||
// ELSE
|
||||
// 0
|
||||
// \* Send up to 1 entry, constrained by the end of the log.
|
||||
// lastEntry == Min({Len(log[i]), nextIndex[i][j]})
|
||||
// entries == SubSeq(log[i], nextIndex[i][j], lastEntry)
|
||||
// IN Send([mtype |-> AppendEntriesRequest,
|
||||
// mterm |-> currentTerm[i],
|
||||
// mprevLogIndex |-> prevLogIndex,
|
||||
// mprevLogTerm |-> prevLogTerm,
|
||||
// mentries |-> entries,
|
||||
// \* mlog is used as a history variable for the proof.
|
||||
// \* It would not exist in a real implementation.
|
||||
// mlog |-> log[i],
|
||||
// mcommitIndex |-> Min({commitIndex[i], lastEntry}),
|
||||
// msource |-> i,
|
||||
// mdest |-> j])
|
||||
// /\ UNCHANGED <<serverVars, candidateVars, leaderVars, logVars>>
|
||||
//
|
||||
int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode);
|
||||
|
||||
int32_t syncNodeAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntries* pMsg);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -28,6 +28,30 @@ extern "C" {
|
|||
#include "syncRaft.h"
|
||||
#include "taosdef.h"
|
||||
|
||||
// TLA+ Spec
|
||||
// HandleRequestVoteRequest(i, j, m) ==
|
||||
// LET logOk == \/ m.mlastLogTerm > LastTerm(log[i])
|
||||
// \/ /\ m.mlastLogTerm = LastTerm(log[i])
|
||||
// /\ m.mlastLogIndex >= Len(log[i])
|
||||
// grant == /\ m.mterm = currentTerm[i]
|
||||
// /\ logOk
|
||||
// /\ votedFor[i] \in {Nil, j}
|
||||
// IN /\ m.mterm <= currentTerm[i]
|
||||
// /\ \/ grant /\ votedFor' = [votedFor EXCEPT ![i] = j]
|
||||
// \/ ~grant /\ UNCHANGED votedFor
|
||||
// /\ Reply([mtype |-> RequestVoteResponse,
|
||||
// mterm |-> currentTerm[i],
|
||||
// mvoteGranted |-> grant,
|
||||
// \* mlog is used just for the `elections' history variable for
|
||||
// \* the proof. It would not exist in a real implementation.
|
||||
// mlog |-> log[i],
|
||||
// msource |-> i,
|
||||
// mdest |-> j],
|
||||
// m)
|
||||
// /\ UNCHANGED <<state, currentTerm, candidateVars, leaderVars, logVars>>
|
||||
//
|
||||
int32_t syncNodeOnRequestVoteCb(SSyncNode* ths, SyncRequestVote* pMsg);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -28,6 +28,25 @@ extern "C" {
|
|||
#include "syncRaft.h"
|
||||
#include "taosdef.h"
|
||||
|
||||
// TLA+ Spec
|
||||
// HandleRequestVoteResponse(i, j, m) ==
|
||||
// \* This tallies votes even when the current state is not Candidate, but
|
||||
// \* they won't be looked at, so it doesn't matter.
|
||||
// /\ m.mterm = currentTerm[i]
|
||||
// /\ votesResponded' = [votesResponded EXCEPT ![i] =
|
||||
// votesResponded[i] \cup {j}]
|
||||
// /\ \/ /\ m.mvoteGranted
|
||||
// /\ votesGranted' = [votesGranted EXCEPT ![i] =
|
||||
// votesGranted[i] \cup {j}]
|
||||
// /\ voterLog' = [voterLog EXCEPT ![i] =
|
||||
// voterLog[i] @@ (j :> m.mlog)]
|
||||
// \/ /\ ~m.mvoteGranted
|
||||
// /\ UNCHANGED <<votesGranted, voterLog>>
|
||||
// /\ Discard(m)
|
||||
// /\ UNCHANGED <<serverVars, votedFor, leaderVars, logVars>>
|
||||
//
|
||||
int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -28,7 +28,19 @@ extern "C" {
|
|||
#include "syncRaft.h"
|
||||
#include "taosdef.h"
|
||||
|
||||
void onTimeout(SRaft *pRaft, void *pMsg);
|
||||
// TLA+ Spec
|
||||
// Timeout(i) == /\ state[i] \in {Follower, Candidate}
|
||||
// /\ state' = [state EXCEPT ![i] = Candidate]
|
||||
// /\ currentTerm' = [currentTerm EXCEPT ![i] = currentTerm[i] + 1]
|
||||
// \* Most implementations would probably just set the local vote
|
||||
// \* atomically, but messaging localhost for it is weaker.
|
||||
// /\ votedFor' = [votedFor EXCEPT ![i] = Nil]
|
||||
// /\ votesResponded' = [votesResponded EXCEPT ![i] = {}]
|
||||
// /\ votesGranted' = [votesGranted EXCEPT ![i] = {}]
|
||||
// /\ voterLog' = [voterLog EXCEPT ![i] = [j \in {} |-> <<>>]]
|
||||
// /\ UNCHANGED <<messages, leaderVars, logVars>>
|
||||
//
|
||||
int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -28,28 +28,23 @@ extern "C" {
|
|||
#include "taosdef.h"
|
||||
|
||||
// ---- encode / decode
|
||||
|
||||
uint64_t syncUtilAddr2U64(const char* host, uint16_t port);
|
||||
|
||||
void syncUtilU642Addr(uint64_t u64, char* host, size_t len, uint16_t* port);
|
||||
|
||||
void syncUtilnodeInfo2EpSet(const SNodeInfo* pNodeInfo, SEpSet* pEpSet);
|
||||
|
||||
void syncUtilraftId2EpSet(const SRaftId* raftId, SEpSet* pEpSet);
|
||||
|
||||
void syncUtilnodeInfo2raftId(const SNodeInfo* pNodeInfo, SyncGroupId vgId, SRaftId* raftId);
|
||||
|
||||
bool syncUtilSameId(const SRaftId* pId1, const SRaftId* pId2);
|
||||
void syncUtilU642Addr(uint64_t u64, char* host, size_t len, uint16_t* port);
|
||||
void syncUtilnodeInfo2EpSet(const SNodeInfo* pNodeInfo, SEpSet* pEpSet);
|
||||
void syncUtilraftId2EpSet(const SRaftId* raftId, SEpSet* pEpSet);
|
||||
void syncUtilnodeInfo2raftId(const SNodeInfo* pNodeInfo, SyncGroupId vgId, SRaftId* raftId);
|
||||
bool syncUtilSameId(const SRaftId* pId1, const SRaftId* pId2);
|
||||
|
||||
// ---- SSyncBuffer ----
|
||||
void syncUtilbufBuild(SSyncBuffer* syncBuf, size_t len);
|
||||
|
||||
void syncUtilbufDestroy(SSyncBuffer* syncBuf);
|
||||
|
||||
void syncUtilbufCopy(const SSyncBuffer* src, SSyncBuffer* dest);
|
||||
|
||||
void syncUtilbufCopyDeep(const SSyncBuffer* src, SSyncBuffer* dest);
|
||||
|
||||
// ---- misc ----
|
||||
int32_t syncUtilRand(int32_t max);
|
||||
int32_t syncUtilElectRandomMS();
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -15,97 +15,69 @@
|
|||
|
||||
#include "syncAppendEntries.h"
|
||||
|
||||
int32_t syncNodeAppendEntries(SSyncNode* ths, const SyncAppendEntries* pMsg) {
|
||||
// TLA+ Spec
|
||||
// AppendEntries(i, j) ==
|
||||
// /\ i /= j
|
||||
// /\ state[i] = Leader
|
||||
// /\ LET prevLogIndex == nextIndex[i][j] - 1
|
||||
// prevLogTerm == IF prevLogIndex > 0 THEN
|
||||
// log[i][prevLogIndex].term
|
||||
// ELSE
|
||||
// 0
|
||||
// \* Send up to 1 entry, constrained by the end of the log.
|
||||
// lastEntry == Min({Len(log[i]), nextIndex[i][j]})
|
||||
// entries == SubSeq(log[i], nextIndex[i][j], lastEntry)
|
||||
// IN Send([mtype |-> AppendEntriesRequest,
|
||||
// mterm |-> currentTerm[i],
|
||||
// mprevLogIndex |-> prevLogIndex,
|
||||
// mprevLogTerm |-> prevLogTerm,
|
||||
// mentries |-> entries,
|
||||
// \* mlog is used as a history variable for the proof.
|
||||
// \* It would not exist in a real implementation.
|
||||
// mlog |-> log[i],
|
||||
// mcommitIndex |-> Min({commitIndex[i], lastEntry}),
|
||||
// msource |-> i,
|
||||
// mdest |-> j])
|
||||
// /\ UNCHANGED <<serverVars, candidateVars, leaderVars, logVars>>
|
||||
}
|
||||
|
||||
int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
|
||||
// TLA+ Spec
|
||||
// HandleAppendEntriesRequest(i, j, m) ==
|
||||
// LET logOk == \/ m.mprevLogIndex = 0
|
||||
// \/ /\ m.mprevLogIndex > 0
|
||||
// /\ m.mprevLogIndex <= Len(log[i])
|
||||
// /\ m.mprevLogTerm = log[i][m.mprevLogIndex].term
|
||||
// IN /\ m.mterm <= currentTerm[i]
|
||||
// /\ \/ /\ \* reject request
|
||||
// \/ m.mterm < currentTerm[i]
|
||||
// \/ /\ m.mterm = currentTerm[i]
|
||||
// /\ state[i] = Follower
|
||||
// /\ \lnot logOk
|
||||
// /\ Reply([mtype |-> AppendEntriesResponse,
|
||||
// mterm |-> currentTerm[i],
|
||||
// msuccess |-> FALSE,
|
||||
// mmatchIndex |-> 0,
|
||||
// msource |-> i,
|
||||
// mdest |-> j],
|
||||
// m)
|
||||
// /\ UNCHANGED <<serverVars, logVars>>
|
||||
// \/ \* return to follower state
|
||||
// /\ m.mterm = currentTerm[i]
|
||||
// /\ state[i] = Candidate
|
||||
// /\ state' = [state EXCEPT ![i] = Follower]
|
||||
// /\ UNCHANGED <<currentTerm, votedFor, logVars, messages>>
|
||||
// \/ \* accept request
|
||||
// /\ m.mterm = currentTerm[i]
|
||||
// /\ state[i] = Follower
|
||||
// /\ logOk
|
||||
// /\ LET index == m.mprevLogIndex + 1
|
||||
// IN \/ \* already done with request
|
||||
// /\ \/ m.mentries = << >>
|
||||
// \/ /\ m.mentries /= << >>
|
||||
// /\ Len(log[i]) >= index
|
||||
// /\ log[i][index].term = m.mentries[1].term
|
||||
// \* This could make our commitIndex decrease (for
|
||||
// \* example if we process an old, duplicated request),
|
||||
// \* but that doesn't really affect anything.
|
||||
// /\ commitIndex' = [commitIndex EXCEPT ![i] =
|
||||
// m.mcommitIndex]
|
||||
// /\ Reply([mtype |-> AppendEntriesResponse,
|
||||
// mterm |-> currentTerm[i],
|
||||
// msuccess |-> TRUE,
|
||||
// mmatchIndex |-> m.mprevLogIndex +
|
||||
// Len(m.mentries),
|
||||
// msource |-> i,
|
||||
// mdest |-> j],
|
||||
// m)
|
||||
// /\ UNCHANGED <<serverVars, log>>
|
||||
// \/ \* conflict: remove 1 entry
|
||||
// /\ m.mentries /= << >>
|
||||
// /\ Len(log[i]) >= index
|
||||
// /\ log[i][index].term /= m.mentries[1].term
|
||||
// /\ LET new == [index2 \in 1..(Len(log[i]) - 1) |->
|
||||
// log[i][index2]]
|
||||
// IN log' = [log EXCEPT ![i] = new]
|
||||
// /\ UNCHANGED <<serverVars, commitIndex, messages>>
|
||||
// \/ \* no conflict: append entry
|
||||
// /\ m.mentries /= << >>
|
||||
// /\ Len(log[i]) = m.mprevLogIndex
|
||||
// /\ log' = [log EXCEPT ![i] =
|
||||
// Append(log[i], m.mentries[1])]
|
||||
// /\ UNCHANGED <<serverVars, commitIndex, messages>>
|
||||
// /\ UNCHANGED <<candidateVars, leaderVars>>
|
||||
//
|
||||
}
|
||||
// TLA+ Spec
|
||||
// HandleAppendEntriesRequest(i, j, m) ==
|
||||
// LET logOk == \/ m.mprevLogIndex = 0
|
||||
// \/ /\ m.mprevLogIndex > 0
|
||||
// /\ m.mprevLogIndex <= Len(log[i])
|
||||
// /\ m.mprevLogTerm = log[i][m.mprevLogIndex].term
|
||||
// IN /\ m.mterm <= currentTerm[i]
|
||||
// /\ \/ /\ \* reject request
|
||||
// \/ m.mterm < currentTerm[i]
|
||||
// \/ /\ m.mterm = currentTerm[i]
|
||||
// /\ state[i] = Follower
|
||||
// /\ \lnot logOk
|
||||
// /\ Reply([mtype |-> AppendEntriesResponse,
|
||||
// mterm |-> currentTerm[i],
|
||||
// msuccess |-> FALSE,
|
||||
// mmatchIndex |-> 0,
|
||||
// msource |-> i,
|
||||
// mdest |-> j],
|
||||
// m)
|
||||
// /\ UNCHANGED <<serverVars, logVars>>
|
||||
// \/ \* return to follower state
|
||||
// /\ m.mterm = currentTerm[i]
|
||||
// /\ state[i] = Candidate
|
||||
// /\ state' = [state EXCEPT ![i] = Follower]
|
||||
// /\ UNCHANGED <<currentTerm, votedFor, logVars, messages>>
|
||||
// \/ \* accept request
|
||||
// /\ m.mterm = currentTerm[i]
|
||||
// /\ state[i] = Follower
|
||||
// /\ logOk
|
||||
// /\ LET index == m.mprevLogIndex + 1
|
||||
// IN \/ \* already done with request
|
||||
// /\ \/ m.mentries = << >>
|
||||
// \/ /\ m.mentries /= << >>
|
||||
// /\ Len(log[i]) >= index
|
||||
// /\ log[i][index].term = m.mentries[1].term
|
||||
// \* This could make our commitIndex decrease (for
|
||||
// \* example if we process an old, duplicated request),
|
||||
// \* but that doesn't really affect anything.
|
||||
// /\ commitIndex' = [commitIndex EXCEPT ![i] =
|
||||
// m.mcommitIndex]
|
||||
// /\ Reply([mtype |-> AppendEntriesResponse,
|
||||
// mterm |-> currentTerm[i],
|
||||
// msuccess |-> TRUE,
|
||||
// mmatchIndex |-> m.mprevLogIndex +
|
||||
// Len(m.mentries),
|
||||
// msource |-> i,
|
||||
// mdest |-> j],
|
||||
// m)
|
||||
// /\ UNCHANGED <<serverVars, log>>
|
||||
// \/ \* conflict: remove 1 entry
|
||||
// /\ m.mentries /= << >>
|
||||
// /\ Len(log[i]) >= index
|
||||
// /\ log[i][index].term /= m.mentries[1].term
|
||||
// /\ LET new == [index2 \in 1..(Len(log[i]) - 1) |->
|
||||
// log[i][index2]]
|
||||
// IN log' = [log EXCEPT ![i] = new]
|
||||
// /\ UNCHANGED <<serverVars, commitIndex, messages>>
|
||||
// \/ \* no conflict: append entry
|
||||
// /\ m.mentries /= << >>
|
||||
// /\ Len(log[i]) = m.mprevLogIndex
|
||||
// /\ log' = [log EXCEPT ![i] =
|
||||
// Append(log[i], m.mentries[1])]
|
||||
// /\ UNCHANGED <<serverVars, commitIndex, messages>>
|
||||
// /\ UNCHANGED <<candidateVars, leaderVars>>
|
||||
//
|
||||
int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {}
|
||||
|
|
|
@ -15,17 +15,17 @@
|
|||
|
||||
#include "syncAppendEntriesReply.h"
|
||||
|
||||
int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg) {
|
||||
// TLA+ Spec
|
||||
// HandleAppendEntriesResponse(i, j, m) ==
|
||||
// /\ m.mterm = currentTerm[i]
|
||||
// /\ \/ /\ m.msuccess \* successful
|
||||
// /\ nextIndex' = [nextIndex EXCEPT ![i][j] = m.mmatchIndex + 1]
|
||||
// /\ matchIndex' = [matchIndex EXCEPT ![i][j] = m.mmatchIndex]
|
||||
// \/ /\ \lnot m.msuccess \* not successful
|
||||
// /\ nextIndex' = [nextIndex EXCEPT ![i][j] =
|
||||
// Max({nextIndex[i][j] - 1, 1})]
|
||||
// /\ UNCHANGED <<matchIndex>>
|
||||
// /\ Discard(m)
|
||||
// /\ UNCHANGED <<serverVars, candidateVars, logVars, elections>>
|
||||
}
|
||||
// TLA+ Spec
|
||||
// HandleAppendEntriesResponse(i, j, m) ==
|
||||
// /\ m.mterm = currentTerm[i]
|
||||
// /\ \/ /\ m.msuccess \* successful
|
||||
// /\ nextIndex' = [nextIndex EXCEPT ![i][j] = m.mmatchIndex + 1]
|
||||
// /\ matchIndex' = [matchIndex EXCEPT ![i][j] = m.mmatchIndex]
|
||||
// \/ /\ \lnot m.msuccess \* not successful
|
||||
// /\ nextIndex' = [nextIndex EXCEPT ![i][j] =
|
||||
// Max({nextIndex[i][j] - 1, 1})]
|
||||
// /\ UNCHANGED <<matchIndex>>
|
||||
// /\ Discard(m)
|
||||
// /\ UNCHANGED <<serverVars, candidateVars, logVars, elections>>
|
||||
//
|
||||
int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg) {}
|
||||
|
|
|
@ -14,7 +14,33 @@
 */

#include "syncElection.h"
#include "syncMessage.h"

void syncNodeElect(SSyncNode* pSyncNode) {}
// TLA+ Spec
// RequestVote(i, j) ==
//    /\ state[i] = Candidate
//    /\ j \notin votesResponded[i]
//    /\ Send([mtype         |-> RequestVoteRequest,
//             mterm         |-> currentTerm[i],
//             mlastLogTerm  |-> LastTerm(log[i]),
//             mlastLogIndex |-> Len(log[i]),
//             msource       |-> i,
//             mdest         |-> j])
//    /\ UNCHANGED <<serverVars, candidateVars, leaderVars, logVars>>
//
int32_t syncNodeRequestVotePeers(SSyncNode* pSyncNode) {}

void syncNodeRequestVotePeers(SSyncNode* pSyncNode) {}
int32_t syncNodeElect(SSyncNode* pSyncNode) {
  // start election
  syncNodeRequestVotePeers(pSyncNode);
}

int32_t syncNodeRequestVote(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncRequestVote* pMsg) {
  sTrace("syncNodeRequestVote pSyncNode:%p ", pSyncNode);
  int32_t ret = 0;

  SRpcMsg rpcMsg;
  syncRequestVote2RpcMsg(pMsg, &rpcMsg);
  syncNodeSendMsgById(destRaftId, pSyncNode, &rpcMsg);
  return ret;
}

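Per the RequestVote(i, j) spec quoted above, a candidate only (re)sends vote requests to peers that are not yet in votesResponded[i]; in the code this ends up as syncRequestVote2RpcMsg plus syncNodeSendMsgById per peer. A small standalone sketch of the skip-already-responded loop (peer count and bookkeeping arrays are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define PEER_NUM 3

/* illustration of RequestVote(i, j): only peers that have not responded yet are asked again */
int main() {
  bool votesResponded[PEER_NUM] = {true, false, false};

  for (int j = 0; j < PEER_NUM; ++j) {
    if (votesResponded[j]) continue;  /* j \notin votesResponded[i] */
    printf("send RequestVote{term, lastLogTerm, lastLogIndex} to peer %d\n", j);
  }
  return 0;
}
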
@ -28,6 +28,7 @@ static void doSyncEnvStopTimer(SSyncEnv *pSyncEnv, tmr_h *pTimer);

int32_t syncEnvStart() {
  int32_t ret;
  srand(time(NULL));
  gSyncEnv = (SSyncEnv *)malloc(sizeof(SSyncEnv));
  assert(gSyncEnv != NULL);
  ret = doSyncEnvStart(gSyncEnv);

@ -44,6 +44,7 @@ int32_t syncIOStart(char *host, uint16_t port) {
  gSyncIO = syncIOCreate(host, port);
  assert(gSyncIO != NULL);

  srand(time(NULL));
  int32_t ret = syncIOStartInternal(gSyncIO);
  assert(ret == 0);

|
|
@ -15,25 +15,25 @@
|
|||
|
||||
#include <stdint.h>
|
||||
#include "sync.h"
|
||||
#include "syncAppendEntries.h"
|
||||
#include "syncAppendEntriesReply.h"
|
||||
#include "syncEnv.h"
|
||||
#include "syncInt.h"
|
||||
#include "syncRaft.h"
|
||||
#include "syncRequestVote.h"
|
||||
#include "syncRequestVoteReply.h"
|
||||
#include "syncTimeout.h"
|
||||
#include "syncUtil.h"
|
||||
|
||||
static int32_t tsNodeRefId = -1;
|
||||
|
||||
// ------ local function ---------
|
||||
static int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRpcMsg* pMsg);
|
||||
static int32_t syncNodeSendMsgByInfo(const SNodeInfo* nodeInfo, SSyncNode* pSyncNode, SRpcMsg* pMsg);
|
||||
|
||||
static void syncNodeEqPingTimer(void* param, void* tmrId);
|
||||
static void syncNodeEqElectTimer(void* param, void* tmrId);
|
||||
static void syncNodeEqHeartbeatTimer(void* param, void* tmrId);
|
||||
|
||||
static int32_t syncNodePing(SSyncNode* pSyncNode, const SRaftId* destRaftId, SyncPing* pMsg);
|
||||
static int32_t syncNodeOnPingCb(SSyncNode* ths, SyncPing* pMsg);
|
||||
static int32_t syncNodeOnPingReplyCb(SSyncNode* ths, SyncPingReply* pMsg);
|
||||
static int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg);
|
||||
|
||||
static void syncNodeBecomeFollower(SSyncNode* pSyncNode);
|
||||
static void syncNodeBecomeLeader(SSyncNode* pSyncNode);
|
||||
|
@ -41,9 +41,6 @@ static void syncNodeFollower2Candidate(SSyncNode* pSyncNode);
|
|||
static void syncNodeCandidate2Leader(SSyncNode* pSyncNode);
|
||||
static void syncNodeLeader2Follower(SSyncNode* pSyncNode);
|
||||
static void syncNodeCandidate2Follower(SSyncNode* pSyncNode);
|
||||
|
||||
void syncNodeRequestVotePeers(SSyncNode* pSyncNode);
|
||||
void syncNodeAppendEntriesPeers(SSyncNode* pSyncNode);
|
||||
// ---------------------------------
|
||||
|
||||
int32_t syncInit() {
|
||||
|
@ -98,6 +95,7 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pSyncInfo) {
|
|||
pSyncNode->state = TAOS_SYNC_STATE_FOLLOWER;
|
||||
syncUtilnodeInfo2raftId(&pSyncNode->me, pSyncNode->vgId, &pSyncNode->raftId);
|
||||
|
||||
// init ping timer
|
||||
pSyncNode->pPingTimer = NULL;
|
||||
pSyncNode->pingTimerMS = PING_TIMER_MS;
|
||||
atomic_store_64(&pSyncNode->pingTimerLogicClock, 0);
|
||||
|
@ -105,6 +103,22 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pSyncInfo) {
|
|||
pSyncNode->FpPingTimer = syncNodeEqPingTimer;
|
||||
pSyncNode->pingTimerCounter = 0;
|
||||
|
||||
// init elect timer
|
||||
pSyncNode->pElectTimer = NULL;
|
||||
pSyncNode->electTimerMS = syncUtilElectRandomMS();
|
||||
atomic_store_64(&pSyncNode->electTimerLogicClock, 0);
|
||||
atomic_store_64(&pSyncNode->electTimerLogicClockUser, 0);
|
||||
pSyncNode->FpElectTimer = syncNodeEqElectTimer;
|
||||
pSyncNode->electTimerCounter = 0;
|
||||
|
||||
// init heartbeat timer
|
||||
pSyncNode->pHeartbeatTimer = NULL;
|
||||
pSyncNode->heartbeatTimerMS = HEARTBEAT_TIMER_MS;
|
||||
atomic_store_64(&pSyncNode->heartbeatTimerLogicClock, 0);
|
||||
atomic_store_64(&pSyncNode->heartbeatTimerLogicClockUser, 0);
|
||||
pSyncNode->FpHeartbeatTimer = syncNodeEqHeartbeatTimer;
|
||||
pSyncNode->heartbeatTimerCounter = 0;
|
||||
|
||||
pSyncNode->FpOnPing = syncNodeOnPingCb;
|
||||
pSyncNode->FpOnPingReply = syncNodeOnPingReplyCb;
|
||||
pSyncNode->FpOnRequestVote = syncNodeOnRequestVoteCb;
|
||||
|
@ -121,100 +135,21 @@ void syncNodeClose(SSyncNode* pSyncNode) {
|
|||
free(pSyncNode);
|
||||
}
|
||||
|
||||
void syncNodePingAll(SSyncNode* pSyncNode) {
|
||||
sTrace("syncNodePingAll pSyncNode:%p ", pSyncNode);
|
||||
int32_t ret = 0;
|
||||
for (int i = 0; i < pSyncNode->syncCfg.replicaNum; ++i) {
|
||||
SRaftId destId;
|
||||
syncUtilnodeInfo2raftId(&pSyncNode->syncCfg.nodeInfo[i], pSyncNode->vgId, &destId);
|
||||
SyncPing* pMsg = syncPingBuild3(&pSyncNode->raftId, &destId);
|
||||
ret = syncNodePing(pSyncNode, &destId, pMsg);
|
||||
assert(ret == 0);
|
||||
syncPingDestroy(pMsg);
|
||||
}
|
||||
}
|
||||
|
||||
void syncNodePingPeers(SSyncNode* pSyncNode) {
|
||||
int32_t ret = 0;
|
||||
for (int i = 0; i < pSyncNode->peersNum; ++i) {
|
||||
SRaftId destId;
|
||||
syncUtilnodeInfo2raftId(&pSyncNode->peers[i], pSyncNode->vgId, &destId);
|
||||
SyncPing* pMsg = syncPingBuild3(&pSyncNode->raftId, &destId);
|
||||
ret = syncNodePing(pSyncNode, &destId, pMsg);
|
||||
assert(ret == 0);
|
||||
syncPingDestroy(pMsg);
|
||||
}
|
||||
}
|
||||
|
||||
void syncNodePingSelf(SSyncNode* pSyncNode) {
|
||||
int32_t ret;
|
||||
SyncPing* pMsg = syncPingBuild3(&pSyncNode->raftId, &pSyncNode->raftId);
|
||||
ret = syncNodePing(pSyncNode, &pMsg->destId, pMsg);
|
||||
assert(ret == 0);
|
||||
syncPingDestroy(pMsg);
|
||||
}
|
||||
|
||||
int32_t syncNodeStartPingTimer(SSyncNode* pSyncNode) {
|
||||
atomic_store_64(&pSyncNode->pingTimerLogicClock, pSyncNode->pingTimerLogicClockUser);
|
||||
pSyncNode->pingTimerMS = PING_TIMER_MS;
|
||||
|
||||
if (pSyncNode->pPingTimer == NULL) {
|
||||
pSyncNode->pPingTimer =
|
||||
taosTmrStart(pSyncNode->FpPingTimer, pSyncNode->pingTimerMS, pSyncNode, gSyncEnv->pTimerManager);
|
||||
} else {
|
||||
taosTmrReset(pSyncNode->FpPingTimer, pSyncNode->pingTimerMS, pSyncNode, gSyncEnv->pTimerManager,
|
||||
&pSyncNode->pPingTimer);
|
||||
}
|
||||
|
||||
int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRpcMsg* pMsg) {
|
||||
SEpSet epSet;
|
||||
syncUtilraftId2EpSet(destRaftId, &epSet);
|
||||
pSyncNode->FpSendMsg(pSyncNode->rpcClient, &epSet, pMsg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t syncNodeStopPingTimer(SSyncNode* pSyncNode) {
|
||||
atomic_add_fetch_64(&pSyncNode->pingTimerLogicClockUser, 1);
|
||||
pSyncNode->pingTimerMS = TIMER_MAX_MS;
|
||||
int32_t syncNodeSendMsgByInfo(const SNodeInfo* nodeInfo, SSyncNode* pSyncNode, SRpcMsg* pMsg) {
|
||||
SEpSet epSet;
|
||||
syncUtilnodeInfo2EpSet(nodeInfo, &epSet);
|
||||
pSyncNode->FpSendMsg(pSyncNode->rpcClient, &epSet, pMsg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t syncNodeStartElectTimer(SSyncNode* pSyncNode) {
|
||||
if (pSyncNode->pElectTimer == NULL) {
|
||||
pSyncNode->pElectTimer =
|
||||
taosTmrStart(pSyncNode->FpElectTimer, pSyncNode->electTimerMS, pSyncNode, gSyncEnv->pTimerManager);
|
||||
} else {
|
||||
taosTmrReset(pSyncNode->FpElectTimer, pSyncNode->electTimerMS, pSyncNode, gSyncEnv->pTimerManager,
|
||||
&pSyncNode->pElectTimer);
|
||||
}
|
||||
|
||||
atomic_store_8(&pSyncNode->electTimerEnable, 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t syncNodeStopElectTimer(SSyncNode* pSyncNode) {
|
||||
atomic_store_8(&pSyncNode->electTimerEnable, 0);
|
||||
pSyncNode->electTimerMS = TIMER_MAX_MS;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode) {
|
||||
if (pSyncNode->pHeartbeatTimer == NULL) {
|
||||
pSyncNode->pHeartbeatTimer =
|
||||
taosTmrStart(pSyncNode->FpHeartbeatTimer, pSyncNode->heartbeatTimerMS, pSyncNode, gSyncEnv->pTimerManager);
|
||||
} else {
|
||||
taosTmrReset(pSyncNode->FpHeartbeatTimer, pSyncNode->heartbeatTimerMS, pSyncNode, gSyncEnv->pTimerManager,
|
||||
&pSyncNode->pHeartbeatTimer);
|
||||
}
|
||||
|
||||
atomic_store_8(&pSyncNode->heartbeatTimerEnable, 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode) {
|
||||
atomic_store_8(&pSyncNode->heartbeatTimerEnable, 0);
|
||||
pSyncNode->heartbeatTimerMS = TIMER_MAX_MS;
|
||||
return 0;
|
||||
}
|
||||
|
||||
// ------ local function ---------
|
||||
static int32_t syncNodePing(SSyncNode* pSyncNode, const SRaftId* destRaftId, SyncPing* pMsg) {
|
||||
int32_t syncNodePing(SSyncNode* pSyncNode, const SRaftId* destRaftId, SyncPing* pMsg) {
|
||||
sTrace("syncNodePing pSyncNode:%p ", pSyncNode);
|
||||
int32_t ret = 0;
|
||||
|
||||
|
@ -242,20 +177,102 @@ static int32_t syncNodePing(SSyncNode* pSyncNode, const SRaftId* destRaftId, Syn
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRpcMsg* pMsg) {
|
||||
SEpSet epSet;
|
||||
syncUtilraftId2EpSet(destRaftId, &epSet);
|
||||
pSyncNode->FpSendMsg(pSyncNode->rpcClient, &epSet, pMsg);
|
||||
int32_t syncNodePingAll(SSyncNode* pSyncNode) {
|
||||
sTrace("syncNodePingAll pSyncNode:%p ", pSyncNode);
|
||||
int32_t ret = 0;
|
||||
for (int i = 0; i < pSyncNode->syncCfg.replicaNum; ++i) {
|
||||
SRaftId destId;
|
||||
syncUtilnodeInfo2raftId(&pSyncNode->syncCfg.nodeInfo[i], pSyncNode->vgId, &destId);
|
||||
SyncPing* pMsg = syncPingBuild3(&pSyncNode->raftId, &destId);
|
||||
ret = syncNodePing(pSyncNode, &destId, pMsg);
|
||||
assert(ret == 0);
|
||||
syncPingDestroy(pMsg);
|
||||
}
|
||||
  return ret;
}
|
||||
|
||||
int32_t syncNodePingPeers(SSyncNode* pSyncNode) {
|
||||
int32_t ret = 0;
|
||||
for (int i = 0; i < pSyncNode->peersNum; ++i) {
|
||||
SRaftId destId;
|
||||
syncUtilnodeInfo2raftId(&pSyncNode->peers[i], pSyncNode->vgId, &destId);
|
||||
SyncPing* pMsg = syncPingBuild3(&pSyncNode->raftId, &destId);
|
||||
ret = syncNodePing(pSyncNode, &destId, pMsg);
|
||||
assert(ret == 0);
|
||||
syncPingDestroy(pMsg);
|
||||
}
|
||||
  return ret;
}
|
||||
|
||||
int32_t syncNodePingSelf(SSyncNode* pSyncNode) {
|
||||
int32_t ret;
|
||||
SyncPing* pMsg = syncPingBuild3(&pSyncNode->raftId, &pSyncNode->raftId);
|
||||
ret = syncNodePing(pSyncNode, &pMsg->destId, pMsg);
|
||||
assert(ret == 0);
|
||||
syncPingDestroy(pMsg);
|
||||
  return ret;
}
|
||||
|
||||
int32_t syncNodeStartPingTimer(SSyncNode* pSyncNode) {
|
||||
atomic_store_64(&pSyncNode->pingTimerLogicClock, pSyncNode->pingTimerLogicClockUser);
|
||||
pSyncNode->pingTimerMS = PING_TIMER_MS;
|
||||
if (pSyncNode->pPingTimer == NULL) {
|
||||
pSyncNode->pPingTimer =
|
||||
taosTmrStart(pSyncNode->FpPingTimer, pSyncNode->pingTimerMS, pSyncNode, gSyncEnv->pTimerManager);
|
||||
} else {
|
||||
taosTmrReset(pSyncNode->FpPingTimer, pSyncNode->pingTimerMS, pSyncNode, gSyncEnv->pTimerManager,
|
||||
&pSyncNode->pPingTimer);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int32_t syncNodeSendMsgByInfo(const SNodeInfo* nodeInfo, SSyncNode* pSyncNode, SRpcMsg* pMsg) {
|
||||
SEpSet epSet;
|
||||
syncUtilnodeInfo2EpSet(nodeInfo, &epSet);
|
||||
pSyncNode->FpSendMsg(pSyncNode->rpcClient, &epSet, pMsg);
|
||||
int32_t syncNodeStopPingTimer(SSyncNode* pSyncNode) {
|
||||
atomic_add_fetch_64(&pSyncNode->pingTimerLogicClockUser, 1);
|
||||
pSyncNode->pingTimerMS = TIMER_MAX_MS;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t syncNodeStartElectTimer(SSyncNode* pSyncNode, int32_t ms) {
|
||||
pSyncNode->electTimerMS = ms;
|
||||
atomic_store_64(&pSyncNode->electTimerLogicClock, pSyncNode->electTimerLogicClockUser);
|
||||
if (pSyncNode->pElectTimer == NULL) {
|
||||
pSyncNode->pElectTimer =
|
||||
taosTmrStart(pSyncNode->FpElectTimer, pSyncNode->electTimerMS, pSyncNode, gSyncEnv->pTimerManager);
|
||||
} else {
|
||||
taosTmrReset(pSyncNode->FpElectTimer, pSyncNode->electTimerMS, pSyncNode, gSyncEnv->pTimerManager,
|
||||
&pSyncNode->pElectTimer);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t syncNodeStopElectTimer(SSyncNode* pSyncNode) {
|
||||
atomic_add_fetch_64(&pSyncNode->electTimerLogicClockUser, 1);
|
||||
pSyncNode->electTimerMS = TIMER_MAX_MS;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t syncNodeRestartElectTimer(SSyncNode* pSyncNode, int32_t ms) {
|
||||
syncNodeStopElectTimer(pSyncNode);
|
||||
syncNodeStartElectTimer(pSyncNode, ms);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode) {
|
||||
atomic_store_64(&pSyncNode->heartbeatTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser);
|
||||
if (pSyncNode->pHeartbeatTimer == NULL) {
|
||||
pSyncNode->pHeartbeatTimer =
|
||||
taosTmrStart(pSyncNode->FpHeartbeatTimer, pSyncNode->heartbeatTimerMS, pSyncNode, gSyncEnv->pTimerManager);
|
||||
} else {
|
||||
taosTmrReset(pSyncNode->FpHeartbeatTimer, pSyncNode->heartbeatTimerMS, pSyncNode, gSyncEnv->pTimerManager,
|
||||
&pSyncNode->pHeartbeatTimer);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode) {
|
||||
atomic_add_fetch_64(&pSyncNode->heartbeatTimerLogicClockUser, 1);
|
||||
pSyncNode->heartbeatTimerMS = TIMER_MAX_MS;
|
||||
return 0;
|
||||
}
|
||||
|
||||
// ------ local function ---------
|
||||
static int32_t syncNodeOnPingCb(SSyncNode* ths, SyncPing* pMsg) {
|
||||
int32_t ret = 0;
|
||||
sTrace("<-- syncNodeOnPingCb -->");
|
||||
|
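The start/stop pairs above never cancel the underlying tmr handle directly: stopping bumps the *LogicClockUser counter, and any timeout event that still carries an older clock value is simply ignored when it is dequeued. A self-contained sketch of that invalidation pattern with C11 atomics (names are illustrative; the real code uses atomic_store_64/atomic_load_64):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t timerLogicClock;      /* value captured when the timer is (re)started */
static _Atomic uint64_t timerLogicClockUser;  /* bumped on every stop, invalidating older events */

static void startTimer(void) { atomic_store(&timerLogicClock, atomic_load(&timerLogicClockUser)); }
static void stopTimer(void)  { atomic_fetch_add(&timerLogicClockUser, 1); }

/* a timeout event is acted on only if it is not stale */
static void onTimeout(uint64_t eventClock) {
  if (atomic_load(&timerLogicClockUser) <= eventClock) {
    printf("clock %llu: fire\n", (unsigned long long)eventClock);
  } else {
    printf("clock %llu: stale, dropped\n", (unsigned long long)eventClock);
  }
}

int main() {
  startTimer();
  uint64_t staleEvent = atomic_load(&timerLogicClock);
  stopTimer();                               /* anything queued before this point is now stale */
  startTimer();
  onTimeout(staleEvent);                     /* dropped */
  onTimeout(atomic_load(&timerLogicClock));  /* fires */
  return 0;
}
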
@ -291,45 +308,19 @@ static int32_t syncNodeOnPingReplyCb(SSyncNode* ths, SyncPingReply* pMsg) {
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg) {
|
||||
int32_t ret = 0;
|
||||
sTrace("<-- syncNodeOnTimeoutCb -->");
|
||||
|
||||
{
|
||||
cJSON* pJson = syncTimeout2Json(pMsg);
|
||||
char* serialized = cJSON_Print(pJson);
|
||||
sTrace("process syncMessage recv: syncNodeOnTimeoutCb pMsg:%s ", serialized);
|
||||
free(serialized);
|
||||
cJSON_Delete(pJson);
|
||||
}
|
||||
|
||||
if (pMsg->timeoutType == SYNC_TIMEOUT_PING) {
|
||||
if (atomic_load_64(&ths->pingTimerLogicClockUser) <= pMsg->logicClock) {
|
||||
++(ths->pingTimerCounter);
|
||||
syncNodePingAll(ths);
|
||||
}
|
||||
|
||||
} else if (pMsg->timeoutType == SYNC_TIMEOUT_ELECTION) {
|
||||
} else if (pMsg->timeoutType == SYNC_TIMEOUT_HEARTBEAT) {
|
||||
} else {
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void syncNodeEqPingTimer(void* param, void* tmrId) {
|
||||
SSyncNode* pSyncNode = (SSyncNode*)param;
|
||||
if (atomic_load_64(&pSyncNode->pingTimerLogicClockUser) <= atomic_load_64(&pSyncNode->pingTimerLogicClock)) {
|
||||
// pSyncNode->pingTimerMS += 100;
|
||||
|
||||
SyncTimeout* pSyncMsg =
|
||||
syncTimeoutBuild2(SYNC_TIMEOUT_PING, atomic_load_64(&pSyncNode->pingTimerLogicClock), pSyncNode);
|
||||
|
||||
SRpcMsg rpcMsg;
|
||||
SyncTimeout* pSyncMsg = syncTimeoutBuild2(SYNC_TIMEOUT_PING, atomic_load_64(&pSyncNode->pingTimerLogicClock),
|
||||
pSyncNode->pingTimerMS, pSyncNode);
|
||||
SRpcMsg rpcMsg;
|
||||
syncTimeout2RpcMsg(pSyncMsg, &rpcMsg);
|
||||
pSyncNode->FpEqMsg(pSyncNode->queue, &rpcMsg);
|
||||
syncTimeoutDestroy(pSyncMsg);
|
||||
|
||||
// reset timer ms
|
||||
// pSyncNode->pingTimerMS += 100;
|
||||
|
||||
taosTmrReset(syncNodeEqPingTimer, pSyncNode->pingTimerMS, pSyncNode, &gSyncEnv->pTimerManager,
|
||||
&pSyncNode->pPingTimer);
|
||||
} else {
|
||||
|
@ -338,20 +329,58 @@ static void syncNodeEqPingTimer(void* param, void* tmrId) {
|
|||
}
|
||||
}
|
||||
|
||||
static void syncNodeEqElectTimer(void* param, void* tmrId) {}
|
||||
static void syncNodeEqElectTimer(void* param, void* tmrId) {
|
||||
SSyncNode* pSyncNode = (SSyncNode*)param;
|
||||
if (atomic_load_64(&pSyncNode->electTimerLogicClockUser) <= atomic_load_64(&pSyncNode->electTimerLogicClock)) {
|
||||
SyncTimeout* pSyncMsg = syncTimeoutBuild2(SYNC_TIMEOUT_ELECTION, atomic_load_64(&pSyncNode->electTimerLogicClock),
|
||||
pSyncNode->electTimerMS, pSyncNode);
|
||||
|
||||
SRpcMsg rpcMsg;
|
||||
syncTimeout2RpcMsg(pSyncMsg, &rpcMsg);
|
||||
pSyncNode->FpEqMsg(pSyncNode->queue, &rpcMsg);
|
||||
syncTimeoutDestroy(pSyncMsg);
|
||||
|
||||
// reset timer ms
|
||||
pSyncNode->electTimerMS = syncUtilElectRandomMS();
|
||||
|
||||
    taosTmrReset(syncNodeEqElectTimer, pSyncNode->electTimerMS, pSyncNode, &gSyncEnv->pTimerManager,
                 &pSyncNode->pElectTimer);
|
||||
} else {
|
||||
sTrace("syncNodeEqElectTimer: electTimerLogicClock:%lu, electTimerLogicClockUser:%lu",
|
||||
pSyncNode->electTimerLogicClock, pSyncNode->electTimerLogicClockUser);
|
||||
}
|
||||
}
|
||||
|
||||
static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) {}
|
||||
|
||||
static void syncNodeBecomeFollower(SSyncNode* pSyncNode) {
|
||||
if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
|
||||
pSyncNode->leaderCache.addr = 0;
|
||||
pSyncNode->leaderCache.vgId = 0;
|
||||
pSyncNode->leaderCache = EMPTY_RAFT_ID;
|
||||
}
|
||||
|
||||
syncNodeStopHeartbeatTimer(pSyncNode);
|
||||
syncNodeStartElectTimer(pSyncNode);
|
||||
int32_t electMS = syncUtilElectRandomMS();
|
||||
syncNodeStartElectTimer(pSyncNode, electMS);
|
||||
}
|
||||
|
||||
// TLA+ Spec
|
||||
// \* Candidate i transitions to leader.
|
||||
// BecomeLeader(i) ==
|
||||
// /\ state[i] = Candidate
|
||||
// /\ votesGranted[i] \in Quorum
|
||||
// /\ state' = [state EXCEPT ![i] = Leader]
|
||||
// /\ nextIndex' = [nextIndex EXCEPT ![i] =
|
||||
// [j \in Server |-> Len(log[i]) + 1]]
|
||||
// /\ matchIndex' = [matchIndex EXCEPT ![i] =
|
||||
// [j \in Server |-> 0]]
|
||||
// /\ elections' = elections \cup
|
||||
// {[eterm |-> currentTerm[i],
|
||||
// eleader |-> i,
|
||||
// elog |-> log[i],
|
||||
// evotes |-> votesGranted[i],
|
||||
// evoterLog |-> voterLog[i]]}
|
||||
// /\ UNCHANGED <<messages, currentTerm, votedFor, candidateVars, logVars>>
|
||||
//
|
||||
static void syncNodeBecomeLeader(SSyncNode* pSyncNode) {
|
||||
pSyncNode->state = TAOS_SYNC_STATE_LEADER;
|
||||
pSyncNode->leaderCache = pSyncNode->raftId;
|
||||
|
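The BecomeLeader spec above requires votesGranted[i] to form a quorum before the candidate switches to leader; for an N-replica vgroup that is a strict majority, N/2 + 1 votes including the candidate's own. A standalone sketch of that check (replica count and loop are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* BecomeLeader fires only when the granted votes form a quorum (strict majority) */
static bool hasQuorum(int votesGranted, int replicaNum) { return votesGranted >= replicaNum / 2 + 1; }

int main() {
  int replicaNum = 3;
  for (int votes = 0; votes <= replicaNum; ++votes) {
    printf("%d/%d votes -> %s\n", votes, replicaNum, hasQuorum(votes, replicaNum) ? "leader" : "candidate");
  }
  return 0;
}
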
@ -375,7 +404,3 @@ static void syncNodeCandidate2Leader(SSyncNode* pSyncNode) {}
|
|||
static void syncNodeLeader2Follower(SSyncNode* pSyncNode) {}
|
||||
|
||||
static void syncNodeCandidate2Follower(SSyncNode* pSyncNode) {}
|
||||
|
||||
void syncNodeRequestVotePeers(SSyncNode* pSyncNode) {}
|
||||
|
||||
void syncNodeAppendEntriesPeers(SSyncNode* pSyncNode) {}
|
|
@ -18,8 +18,6 @@
|
|||
#include "syncUtil.h"
|
||||
#include "tcoding.h"
|
||||
|
||||
void onMessage(SRaft* pRaft, void* pMsg) {}
|
||||
|
||||
// ---------------------------------------------
|
||||
cJSON* syncRpcMsg2Json(SRpcMsg* pRpcMsg) {
|
||||
cJSON* pRoot;
|
||||
|
@ -125,6 +123,7 @@ cJSON* syncTimeout2Json(const SyncTimeout* pMsg) {
|
|||
cJSON_AddNumberToObject(pRoot, "timeoutType", pMsg->timeoutType);
|
||||
snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->logicClock);
|
||||
cJSON_AddStringToObject(pRoot, "logicClock", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "timerMS", pMsg->timerMS);
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pMsg->data);
|
||||
cJSON_AddStringToObject(pRoot, "data", u64buf);
|
||||
|
||||
|
@ -133,10 +132,11 @@ cJSON* syncTimeout2Json(const SyncTimeout* pMsg) {
|
|||
return pJson;
|
||||
}
|
||||
|
||||
SyncTimeout* syncTimeoutBuild2(ESyncTimeoutType timeoutType, uint64_t logicClock, void* data) {
|
||||
SyncTimeout* syncTimeoutBuild2(ESyncTimeoutType timeoutType, uint64_t logicClock, int32_t timerMS, void* data) {
|
||||
SyncTimeout* pMsg = syncTimeoutBuild();
|
||||
pMsg->timeoutType = timeoutType;
|
||||
pMsg->logicClock = logicClock;
|
||||
pMsg->timerMS = timerMS;
|
||||
pMsg->data = data;
|
||||
return pMsg;
|
||||
}
|
||||
|
|
|
@ -20,7 +20,30 @@ int32_t raftLogAppendEntry(struct SSyncLogStore* pLogStore, SSyncBuffer* pBuf) {
|
|||
// get one log entry, user need to free pBuf->data
|
||||
int32_t raftLogGetEntry(struct SSyncLogStore* pLogStore, SyncIndex index, SSyncBuffer* pBuf) { return 0; }
|
||||
|
||||
// update log store commit index with "index"
|
||||
// TLA+ Spec
|
||||
// \* Leader i advances its commitIndex.
|
||||
// \* This is done as a separate step from handling AppendEntries responses,
|
||||
// \* in part to minimize atomic regions, and in part so that leaders of
|
||||
// \* single-server clusters are able to mark entries committed.
|
||||
// AdvanceCommitIndex(i) ==
|
||||
// /\ state[i] = Leader
|
||||
// /\ LET \* The set of servers that agree up through index.
|
||||
// Agree(index) == {i} \cup {k \in Server :
|
||||
// matchIndex[i][k] >= index}
|
||||
// \* The maximum indexes for which a quorum agrees
|
||||
// agreeIndexes == {index \in 1..Len(log[i]) :
|
||||
// Agree(index) \in Quorum}
|
||||
// \* New value for commitIndex'[i]
|
||||
// newCommitIndex ==
|
||||
// IF /\ agreeIndexes /= {}
|
||||
// /\ log[i][Max(agreeIndexes)].term = currentTerm[i]
|
||||
// THEN
|
||||
// Max(agreeIndexes)
|
||||
// ELSE
|
||||
// commitIndex[i]
|
||||
// IN commitIndex' = [commitIndex EXCEPT ![i] = newCommitIndex]
|
||||
// /\ UNCHANGED <<messages, serverVars, candidateVars, leaderVars, log>>
|
||||
//
|
||||
int32_t raftLogupdateCommitIndex(struct SSyncLogStore* pLogStore, SyncIndex index) { return 0; }
|
||||
|
||||
// truncate log with index, entries after the given index (>index) will be deleted
|
||||
|
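raftLogupdateCommitIndex is still a stub, but the AdvanceCommitIndex comment above pins down what it must do: move commitIndex to the largest log index that a quorum has replicated, provided that entry belongs to the current term. A simplified standalone sketch of the quorum scan over matchIndex (the current-term check is omitted and all names are illustrative):

#include <stdio.h>

#define PEER_NUM 2  /* followers only; the leader itself always counts */

/* return the largest index > commitIndex that a strict majority of replicas has stored */
static int advanceCommitIndex(const int matchIndex[PEER_NUM], int logLen, int commitIndex) {
  for (int index = logLen; index > commitIndex; --index) {
    int agree = 1;  /* the leader's own log */
    for (int k = 0; k < PEER_NUM; ++k) {
      if (matchIndex[k] >= index) ++agree;
    }
    if (agree >= (PEER_NUM + 1) / 2 + 1) return index;  /* quorum reached */
  }
  return commitIndex;  /* nothing new is safely replicated */
}

int main() {
  int matchIndex[PEER_NUM] = {5, 3};
  printf("new commitIndex = %d\n", advanceCommitIndex(matchIndex, 7, 2));  /* -> 5 */
  return 0;
}
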
|
|
@ -16,8 +16,11 @@
#include "syncRaftStore.h"
#include "cJSON.h"

// to compile successfully: FileIO interface is modified
// private function
static int32_t raftStoreInit(SRaftStore *pRaftStore);
static bool raftStoreFileExist(char *path);

// public function
SRaftStore *raftStoreOpen(const char *path) {
  int32_t ret;

|
@ -137,121 +140,3 @@ void raftStorePrint(SRaftStore *pRaftStore) {
|
|||
raftStoreSerialize(pRaftStore, storeBuf, sizeof(storeBuf));
|
||||
printf("%s\n", storeBuf);
|
||||
}
|
||||
|
||||
#if 0
|
||||
|
||||
SRaftStore *raftStoreOpen(const char *path) {
|
||||
int32_t ret;
|
||||
|
||||
SRaftStore *pRaftStore = malloc(sizeof(SRaftStore));
|
||||
if (pRaftStore == NULL) {
|
||||
sError("raftStoreOpen malloc error");
|
||||
return NULL;
|
||||
}
|
||||
memset(pRaftStore, 0, sizeof(*pRaftStore));
|
||||
snprintf(pRaftStore->path, sizeof(pRaftStore->path), "%s", path);
|
||||
|
||||
char storeBuf[RAFT_STORE_BLOCK_SIZE];
|
||||
memset(storeBuf, 0, sizeof(storeBuf));
|
||||
|
||||
if (!raftStoreFileExist(pRaftStore->path)) {
|
||||
ret = raftStoreInit(pRaftStore);
|
||||
assert(ret == 0);
|
||||
}
|
||||
|
||||
pRaftStore->fd = taosOpenFileReadWrite(pRaftStore->path);
|
||||
if (pRaftStore->fd < 0) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int len = taosReadFile(pRaftStore->fd, storeBuf, sizeof(storeBuf));
|
||||
assert(len == RAFT_STORE_BLOCK_SIZE);
|
||||
|
||||
ret = raftStoreDeserialize(pRaftStore, storeBuf, len);
|
||||
assert(ret == 0);
|
||||
|
||||
return pRaftStore;
|
||||
}
|
||||
|
||||
static int32_t raftStoreInit(SRaftStore *pRaftStore) {
|
||||
pRaftStore->fd = taosOpenFileCreateWrite(pRaftStore->path);
|
||||
if (pRaftStore->fd < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
pRaftStore->currentTerm = 0;
|
||||
pRaftStore->voteFor.addr = 0;
|
||||
pRaftStore->voteFor.vgId = 0;
|
||||
|
||||
int32_t ret = raftStorePersist(pRaftStore);
|
||||
assert(ret == 0);
|
||||
|
||||
taosCloseFile(pRaftStore->fd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t raftStoreClose(SRaftStore *pRaftStore) {
|
||||
taosCloseFile(pRaftStore->fd);
|
||||
free(pRaftStore);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t raftStorePersist(SRaftStore *pRaftStore) {
|
||||
int32_t ret;
|
||||
char storeBuf[RAFT_STORE_BLOCK_SIZE];
|
||||
|
||||
ret = raftStoreSerialize(pRaftStore, storeBuf, sizeof(storeBuf));
|
||||
assert(ret == 0);
|
||||
|
||||
taosLSeekFile(pRaftStore->fd, 0, SEEK_SET);
|
||||
|
||||
ret = taosWriteFile(pRaftStore->fd, storeBuf, sizeof(storeBuf));
|
||||
assert(ret == RAFT_STORE_BLOCK_SIZE);
|
||||
|
||||
fsync(pRaftStore->fd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool raftStoreFileExist(char *path) { return taosStatFile(path, NULL, NULL) >= 0; }
|
||||
|
||||
int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len) {
|
||||
cJSON *pRoot = cJSON_CreateObject();
|
||||
cJSON_AddNumberToObject(pRoot, "current_term", pRaftStore->currentTerm);
|
||||
cJSON_AddNumberToObject(pRoot, "vote_for_addr", pRaftStore->voteFor.addr);
|
||||
cJSON_AddNumberToObject(pRoot, "vote_for_vgid", pRaftStore->voteFor.vgId);
|
||||
|
||||
char *serialized = cJSON_Print(pRoot);
|
||||
int len2 = strlen(serialized);
|
||||
assert(len2 < len);
|
||||
memset(buf, 0, len);
|
||||
snprintf(buf, len, "%s", serialized);
|
||||
free(serialized);
|
||||
|
||||
cJSON_Delete(pRoot);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t raftStoreDeserialize(SRaftStore *pRaftStore, char *buf, size_t len) {
|
||||
assert(len > 0 && len <= RAFT_STORE_BLOCK_SIZE);
|
||||
cJSON *pRoot = cJSON_Parse(buf);
|
||||
|
||||
cJSON *pCurrentTerm = cJSON_GetObjectItem(pRoot, "current_term");
|
||||
pRaftStore->currentTerm = pCurrentTerm->valueint;
|
||||
|
||||
cJSON *pVoteForAddr = cJSON_GetObjectItem(pRoot, "vote_for_addr");
|
||||
pRaftStore->voteFor.addr = pVoteForAddr->valueint;
|
||||
|
||||
cJSON *pVoteForVgid = cJSON_GetObjectItem(pRoot, "vote_for_vgid");
|
||||
pRaftStore->voteFor.vgId = pVoteForVgid->valueint;
|
||||
|
||||
cJSON_Delete(pRoot);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void raftStorePrint(SRaftStore *pRaftStore) {
|
||||
char storeBuf[RAFT_STORE_BLOCK_SIZE];
|
||||
raftStoreSerialize(pRaftStore, storeBuf, sizeof(storeBuf));
|
||||
printf("%s\n", storeBuf);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -14,5 +14,41 @@
|
|||
*/
|
||||
|
||||
#include "syncReplication.h"
|
||||
#include "syncMessage.h"
|
||||
|
||||
void syncNodeAppendEntriesPeers(SSyncNode* pSyncNode) {}
|
||||
// TLA+ Spec
|
||||
// AppendEntries(i, j) ==
|
||||
// /\ i /= j
|
||||
// /\ state[i] = Leader
|
||||
// /\ LET prevLogIndex == nextIndex[i][j] - 1
|
||||
// prevLogTerm == IF prevLogIndex > 0 THEN
|
||||
// log[i][prevLogIndex].term
|
||||
// ELSE
|
||||
// 0
|
||||
// \* Send up to 1 entry, constrained by the end of the log.
|
||||
// lastEntry == Min({Len(log[i]), nextIndex[i][j]})
|
||||
// entries == SubSeq(log[i], nextIndex[i][j], lastEntry)
|
||||
// IN Send([mtype |-> AppendEntriesRequest,
|
||||
// mterm |-> currentTerm[i],
|
||||
// mprevLogIndex |-> prevLogIndex,
|
||||
// mprevLogTerm |-> prevLogTerm,
|
||||
// mentries |-> entries,
|
||||
// \* mlog is used as a history variable for the proof.
|
||||
// \* It would not exist in a real implementation.
|
||||
// mlog |-> log[i],
|
||||
// mcommitIndex |-> Min({commitIndex[i], lastEntry}),
|
||||
// msource |-> i,
|
||||
// mdest |-> j])
|
||||
// /\ UNCHANGED <<serverVars, candidateVars, leaderVars, logVars>>
|
||||
//
|
||||
int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode) {}
|
||||
|
||||
int32_t syncNodeAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntries* pMsg) {
|
||||
sTrace("syncNodeAppendEntries pSyncNode:%p ", pSyncNode);
|
||||
int32_t ret = 0;
|
||||
|
||||
SRpcMsg rpcMsg;
|
||||
syncAppendEntries2RpcMsg(pMsg, &rpcMsg);
|
||||
syncNodeSendMsgById(destRaftId, pSyncNode, &rpcMsg);
|
||||
return ret;
|
||||
}
|
|
@ -15,40 +15,26 @@
|
|||
|
||||
#include "syncRequestVote.h"
|
||||
|
||||
int32_t syncNodeRequestVote(SSyncNode* ths, const SyncRequestVote* pMsg) {
|
||||
// TLA+ Spec
|
||||
// RequestVote(i, j) ==
|
||||
// /\ state[i] = Candidate
|
||||
// /\ j \notin votesResponded[i]
|
||||
// /\ Send([mtype |-> RequestVoteRequest,
|
||||
// mterm |-> currentTerm[i],
|
||||
// mlastLogTerm |-> LastTerm(log[i]),
|
||||
// mlastLogIndex |-> Len(log[i]),
|
||||
// msource |-> i,
|
||||
// mdest |-> j])
|
||||
// /\ UNCHANGED <<serverVars, candidateVars, leaderVars, logVars>>
|
||||
}
|
||||
|
||||
int32_t syncNodeOnRequestVoteCb(SSyncNode* ths, SyncRequestVote* pMsg) {
|
||||
// TLA+ Spec
|
||||
// HandleRequestVoteRequest(i, j, m) ==
|
||||
// LET logOk == \/ m.mlastLogTerm > LastTerm(log[i])
|
||||
// \/ /\ m.mlastLogTerm = LastTerm(log[i])
|
||||
// /\ m.mlastLogIndex >= Len(log[i])
|
||||
// grant == /\ m.mterm = currentTerm[i]
|
||||
// /\ logOk
|
||||
// /\ votedFor[i] \in {Nil, j}
|
||||
// IN /\ m.mterm <= currentTerm[i]
|
||||
// /\ \/ grant /\ votedFor' = [votedFor EXCEPT ![i] = j]
|
||||
// \/ ~grant /\ UNCHANGED votedFor
|
||||
// /\ Reply([mtype |-> RequestVoteResponse,
|
||||
// mterm |-> currentTerm[i],
|
||||
// mvoteGranted |-> grant,
|
||||
// \* mlog is used just for the `elections' history variable for
|
||||
// \* the proof. It would not exist in a real implementation.
|
||||
// mlog |-> log[i],
|
||||
// msource |-> i,
|
||||
// mdest |-> j],
|
||||
// m)
|
||||
// /\ UNCHANGED <<state, currentTerm, candidateVars, leaderVars, logVars>>
|
||||
}
|
||||
// TLA+ Spec
|
||||
// HandleRequestVoteRequest(i, j, m) ==
|
||||
// LET logOk == \/ m.mlastLogTerm > LastTerm(log[i])
|
||||
// \/ /\ m.mlastLogTerm = LastTerm(log[i])
|
||||
// /\ m.mlastLogIndex >= Len(log[i])
|
||||
// grant == /\ m.mterm = currentTerm[i]
|
||||
// /\ logOk
|
||||
// /\ votedFor[i] \in {Nil, j}
|
||||
// IN /\ m.mterm <= currentTerm[i]
|
||||
// /\ \/ grant /\ votedFor' = [votedFor EXCEPT ![i] = j]
|
||||
// \/ ~grant /\ UNCHANGED votedFor
|
||||
// /\ Reply([mtype |-> RequestVoteResponse,
|
||||
// mterm |-> currentTerm[i],
|
||||
// mvoteGranted |-> grant,
|
||||
// \* mlog is used just for the `elections' history variable for
|
||||
// \* the proof. It would not exist in a real implementation.
|
||||
// mlog |-> log[i],
|
||||
// msource |-> i,
|
||||
// mdest |-> j],
|
||||
// m)
|
||||
// /\ UNCHANGED <<state, currentTerm, candidateVars, leaderVars, logVars>>
|
||||
//
|
||||
int32_t syncNodeOnRequestVoteCb(SSyncNode* ths, SyncRequestVote* pMsg) {}
|
||||
|
|
|
@ -15,21 +15,21 @@
|
|||
|
||||
#include "syncRequestVoteReply.h"
|
||||
|
||||
int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg) {
|
||||
// TLA+ Spec
|
||||
// HandleRequestVoteResponse(i, j, m) ==
|
||||
// \* This tallies votes even when the current state is not Candidate, but
|
||||
// \* they won't be looked at, so it doesn't matter.
|
||||
// /\ m.mterm = currentTerm[i]
|
||||
// /\ votesResponded' = [votesResponded EXCEPT ![i] =
|
||||
// votesResponded[i] \cup {j}]
|
||||
// /\ \/ /\ m.mvoteGranted
|
||||
// /\ votesGranted' = [votesGranted EXCEPT ![i] =
|
||||
// votesGranted[i] \cup {j}]
|
||||
// /\ voterLog' = [voterLog EXCEPT ![i] =
|
||||
// voterLog[i] @@ (j :> m.mlog)]
|
||||
// \/ /\ ~m.mvoteGranted
|
||||
// /\ UNCHANGED <<votesGranted, voterLog>>
|
||||
// /\ Discard(m)
|
||||
// /\ UNCHANGED <<serverVars, votedFor, leaderVars, logVars>>
|
||||
}
|
||||
// TLA+ Spec
|
||||
// HandleRequestVoteResponse(i, j, m) ==
|
||||
// \* This tallies votes even when the current state is not Candidate, but
|
||||
// \* they won't be looked at, so it doesn't matter.
|
||||
// /\ m.mterm = currentTerm[i]
|
||||
// /\ votesResponded' = [votesResponded EXCEPT ![i] =
|
||||
// votesResponded[i] \cup {j}]
|
||||
// /\ \/ /\ m.mvoteGranted
|
||||
// /\ votesGranted' = [votesGranted EXCEPT ![i] =
|
||||
// votesGranted[i] \cup {j}]
|
||||
// /\ voterLog' = [voterLog EXCEPT ![i] =
|
||||
// voterLog[i] @@ (j :> m.mlog)]
|
||||
// \/ /\ ~m.mvoteGranted
|
||||
// /\ UNCHANGED <<votesGranted, voterLog>>
|
||||
// /\ Discard(m)
|
||||
// /\ UNCHANGED <<serverVars, votedFor, leaderVars, logVars>>
|
||||
//
|
||||
int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg) {}
|
||||
|
|
|
@ -14,5 +14,41 @@
|
|||
*/
|
||||
|
||||
#include "syncTimeout.h"
|
||||
#include "syncElection.h"
|
||||
#include "syncReplication.h"
|
||||
|
||||
void onTimeout(SRaft *pRaft, void *pMsg) {}
|
||||
int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg) {
|
||||
int32_t ret = 0;
|
||||
sTrace("<-- syncNodeOnTimeoutCb -->");
|
||||
|
||||
{
|
||||
cJSON* pJson = syncTimeout2Json(pMsg);
|
||||
char* serialized = cJSON_Print(pJson);
|
||||
sTrace("process syncMessage recv: syncNodeOnTimeoutCb pMsg:%s ", serialized);
|
||||
free(serialized);
|
||||
cJSON_Delete(pJson);
|
||||
}
|
||||
|
||||
if (pMsg->timeoutType == SYNC_TIMEOUT_PING) {
|
||||
if (atomic_load_64(&ths->pingTimerLogicClockUser) <= pMsg->logicClock) {
|
||||
++(ths->pingTimerCounter);
|
||||
syncNodePingAll(ths);
|
||||
}
|
||||
|
||||
} else if (pMsg->timeoutType == SYNC_TIMEOUT_ELECTION) {
|
||||
if (atomic_load_64(&ths->electTimerLogicClockUser) <= pMsg->logicClock) {
|
||||
++(ths->electTimerCounter);
|
||||
syncNodeElect(ths);
|
||||
}
|
||||
|
||||
} else if (pMsg->timeoutType == SYNC_TIMEOUT_HEARTBEAT) {
|
||||
if (atomic_load_64(&ths->heartbeatTimerLogicClockUser) <= pMsg->logicClock) {
|
||||
++(ths->heartbeatTimerCounter);
|
||||
syncNodeAppendEntriesPeers(ths);
|
||||
}
|
||||
} else {
|
||||
sTrace("unknown timeoutType:%d", pMsg->timeoutType);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
|
@ -17,6 +17,7 @@
|
|||
#include <arpa/inet.h>
|
||||
#include <netinet/in.h>
|
||||
#include <sys/socket.h>
|
||||
#include "syncEnv.h"
|
||||
|
||||
// ---- encode / decode
|
||||
uint64_t syncUtilAddr2U64(const char* host, uint16_t port) {
|
||||
|
@ -91,3 +92,9 @@ void syncUtilbufCopyDeep(const SSyncBuffer* src, SSyncBuffer* dest) {
|
|||
dest->data = malloc(dest->len);
|
||||
memcpy(dest->data, src->data, dest->len);
|
||||
}
|
||||
|
||||
// ---- misc ----
|
||||
|
||||
int32_t syncUtilRand(int32_t max) { return rand() % max; }
|
||||
|
||||
int32_t syncUtilElectRandomMS() { return ELECT_TIMER_MS_MIN + syncUtilRand(ELECT_TIMER_MS_RANGE); }
|
|
@ -13,17 +13,21 @@ void print(SHashObj *pNextIndex) {
|
|||
}
|
||||
}
|
||||
|
||||
void logTest() {
|
||||
sTrace("--- sync log test: trace");
|
||||
sDebug("--- sync log test: debug");
|
||||
sInfo("--- sync log test: info");
|
||||
sWarn("--- sync log test: warn");
|
||||
sError("--- sync log test: error");
|
||||
sFatal("--- sync log test: fatal");
|
||||
}
|
||||
|
||||
int main() {
|
||||
// taosInitLog((char *)"syncTest.log", 100000, 10);
|
||||
tsAsyncLog = 0;
|
||||
sDebugFlag = 143 + 64;
|
||||
|
||||
sTrace("sync log test: trace");
|
||||
sDebug("sync log test: debug");
|
||||
sInfo("sync log test: info");
|
||||
sWarn("sync log test: warn");
|
||||
sError("sync log test: error");
|
||||
sFatal("sync log test: fatal");
|
||||
logTest();
|
||||
|
||||
SRaftId me;
|
||||
SRaftId peer1;
|
||||
|
|
|
@ -4,14 +4,13 @@
|
|||
#include "syncIO.h"
|
||||
#include "syncInt.h"
|
||||
|
||||
void *pingFunc(void *param) {
|
||||
SSyncIO *io = (SSyncIO *)param;
|
||||
while (1) {
|
||||
sDebug("io->ping");
|
||||
// io->ping(io);
|
||||
sleep(1);
|
||||
}
|
||||
return NULL;
|
||||
void logTest() {
|
||||
sTrace("--- sync log test: trace");
|
||||
sDebug("--- sync log test: debug");
|
||||
sInfo("--- sync log test: info");
|
||||
sWarn("--- sync log test: warn");
|
||||
sError("--- sync log test: error");
|
||||
sFatal("--- sync log test: fatal");
|
||||
}
|
||||
|
||||
int main() {
|
||||
|
@ -19,12 +18,7 @@ int main() {
|
|||
tsAsyncLog = 0;
|
||||
sDebugFlag = 143 + 64;
|
||||
|
||||
sTrace("sync log test: trace");
|
||||
sDebug("sync log test: debug");
|
||||
sInfo("sync log test: info");
|
||||
sWarn("sync log test: warn");
|
||||
sError("sync log test: error");
|
||||
sFatal("sync log test: fatal");
|
||||
logTest();
|
||||
|
||||
SRaftStore *pRaftStore = raftStoreOpen("./raft_store.json");
|
||||
assert(pRaftStore != NULL);
|
||||
|
|
|
@ -4,55 +4,20 @@
|
|||
#include "syncInt.h"
|
||||
#include "syncRaftStore.h"
|
||||
|
||||
void *pingFunc(void *param) {
|
||||
SSyncIO *io = (SSyncIO *)param;
|
||||
while (1) {
|
||||
sDebug("io->ping");
|
||||
// io->ping(io);
|
||||
sleep(1);
|
||||
}
|
||||
return NULL;
|
||||
void logTest() {
|
||||
sTrace("--- sync log test: trace");
|
||||
sDebug("--- sync log test: debug");
|
||||
sInfo("--- sync log test: info");
|
||||
sWarn("--- sync log test: warn");
|
||||
sError("--- sync log test: error");
|
||||
sFatal("--- sync log test: fatal");
|
||||
}
|
||||
|
||||
int main() {
|
||||
// taosInitLog((char *)"syncTest.log", 100000, 10);
|
||||
tsAsyncLog = 0;
|
||||
sDebugFlag = 143 + 64;
|
||||
logTest();
|
||||
|
||||
sTrace("sync log test: trace");
|
||||
sDebug("sync log test: debug");
|
||||
sInfo("sync log test: info");
|
||||
sWarn("sync log test: warn");
|
||||
sError("sync log test: error");
|
||||
sFatal("sync log test: fatal");
|
||||
|
||||
SRaftStore *pRaftStore = raftStoreOpen("./raft_store.json");
|
||||
// assert(pRaftStore != NULL);
|
||||
|
||||
// raftStorePrint(pRaftStore);
|
||||
|
||||
// pRaftStore->currentTerm = 100;
|
||||
// pRaftStore->voteFor.addr = 200;
|
||||
// pRaftStore->voteFor.vgId = 300;
|
||||
|
||||
// raftStorePrint(pRaftStore);
|
||||
|
||||
// raftStorePersist(pRaftStore);
|
||||
|
||||
// sDebug("sync test");
|
||||
|
||||
// SSyncIO *syncIO = syncIOCreate();
|
||||
// assert(syncIO != NULL);
|
||||
|
||||
// syncIO->start(syncIO);
|
||||
|
||||
// sleep(2);
|
||||
|
||||
// pthread_t tid;
|
||||
// pthread_create(&tid, NULL, pingFunc, syncIO);
|
||||
|
||||
// while (1) {
|
||||
// sleep(1);
|
||||
// }
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -0,0 +1,233 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#define _DEFAULT_SOURCE
|
||||
#include "thttp.h"
|
||||
#include "taoserror.h"
|
||||
#include "tlog.h"
|
||||
#include "zlib.h"
|
||||
|
||||
static int32_t taosBuildHttpHeader(const char* server, int32_t contLen, char* pHead, int32_t headLen,
|
||||
EHttpCompFlag flag) {
|
||||
if (flag == HTTP_FLAT) {
|
||||
return snprintf(pHead, headLen,
|
||||
"POST /report HTTP/1.1\n"
|
||||
"Host: %s\n"
|
||||
"Content-Type: application/json\n"
|
||||
"Content-Length: %d\n\n",
|
||||
server, contLen);
|
||||
} else if (flag == HTTP_GZIP) {
|
||||
return snprintf(pHead, headLen,
|
||||
"POST /report HTTP/1.1\n"
|
||||
"Host: %s\n"
|
||||
"Content-Type: application/json\n"
|
||||
"Content-Encoding: gzip\n"
|
||||
"Content-Length: %d\n\n",
|
||||
server, contLen);
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t taosCompressHttpRport(char* pSrc, int32_t srcLen) {
|
||||
int32_t code = -1;
|
||||
int32_t destLen = srcLen;
|
||||
void* pDest = malloc(destLen);
|
||||
|
||||
if (pDest == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
z_stream gzipStream = {0};
|
||||
gzipStream.zalloc = (alloc_func)0;
|
||||
gzipStream.zfree = (free_func)0;
|
||||
gzipStream.opaque = (voidpf)0;
|
||||
if (deflateInit2(&gzipStream, Z_DEFAULT_COMPRESSION, Z_DEFLATED, MAX_WBITS + 16, 8, Z_DEFAULT_STRATEGY) != Z_OK) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
gzipStream.next_in = (Bytef*)pSrc;
|
||||
gzipStream.avail_in = (uLong)srcLen;
|
||||
gzipStream.next_out = (Bytef*)pDest;
|
||||
gzipStream.avail_out = (uLong)(destLen);
|
||||
|
||||
while (gzipStream.avail_in != 0 && gzipStream.total_out < (uLong)(destLen)) {
|
||||
if (deflate(&gzipStream, Z_FULL_FLUSH) != Z_OK) {
|
||||
terrno = TSDB_CODE_COMPRESS_ERROR;
|
||||
goto _OVER;
|
||||
}
|
||||
}
|
||||
|
||||
if (gzipStream.avail_in != 0) {
|
||||
terrno = TSDB_CODE_COMPRESS_ERROR;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
int32_t err = 0;
|
||||
while (1) {
|
||||
if ((err = deflate(&gzipStream, Z_FINISH)) == Z_STREAM_END) {
|
||||
break;
|
||||
}
|
||||
if (err != Z_OK) {
|
||||
terrno = TSDB_CODE_COMPRESS_ERROR;
|
||||
goto _OVER;
|
||||
}
|
||||
}
|
||||
|
||||
if (deflateEnd(&gzipStream) != Z_OK) {
|
||||
terrno = TSDB_CODE_COMPRESS_ERROR;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
if (gzipStream.total_out >= srcLen) {
|
||||
terrno = TSDB_CODE_COMPRESS_ERROR;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
code = 0;
|
||||
|
||||
_OVER:
|
||||
if (code == 0) {
|
||||
memcpy(pSrc, pDest, gzipStream.total_out);
|
||||
code = gzipStream.total_out;
|
||||
}
|
||||
|
||||
free(pDest);
|
||||
return code;
|
||||
}
|
||||
|
||||
#ifdef USE_UV
|
||||
#include <uv.h>
|
||||
static void clientConnCb(uv_connect_t* req, int32_t status) {
|
||||
if (status < 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(status);
|
||||
uError("Connection error %s\n", uv_strerror(status));
|
||||
uv_close((uv_handle_t*)req->handle, NULL);
|
||||
return;
|
||||
}
|
||||
uv_buf_t* wb = req->data;
|
||||
assert(wb != NULL);
|
||||
uv_write_t write_req;
|
||||
uv_write(&write_req, req->handle, wb, 2, NULL);
|
||||
uv_close((uv_handle_t*)req->handle, NULL);
|
||||
}
|
||||
|
||||
int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag) {
|
||||
uint32_t ipv4 = taosGetIpv4FromFqdn(server);
|
||||
if (ipv4 == 0xffffffff) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
uError("failed to get http server:%s ip since %s", server, terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
||||
char ipv4Buf[128] = {0};
|
||||
tinet_ntoa(ipv4Buf, ipv4);
|
||||
|
||||
struct sockaddr_in dest = {0};
|
||||
uv_ip4_addr(ipv4Buf, port, &dest);
|
||||
|
||||
uv_tcp_t socket_tcp = {0};
|
||||
uv_loop_t* loop = uv_default_loop();
|
||||
uv_tcp_init(loop, &socket_tcp);
|
||||
uv_connect_t* connect = (uv_connect_t*)malloc(sizeof(uv_connect_t));
|
||||
|
||||
if (flag == HTTP_GZIP) {
|
||||
int32_t dstLen = taosCompressHttpRport(pCont, contLen);
|
||||
if (dstLen > 0) {
|
||||
contLen = dstLen;
|
||||
} else {
|
||||
flag = HTTP_FLAT;
|
||||
}
|
||||
}
|
||||
|
||||
char header[1024] = {0};
|
||||
int32_t headLen = taosBuildHttpHeader(server, contLen, header, sizeof(header), flag);
|
||||
|
||||
uv_buf_t wb[2];
|
||||
wb[0] = uv_buf_init((char*)header, headLen);
|
||||
wb[1] = uv_buf_init((char*)pCont, contLen);
|
||||
|
||||
connect->data = wb;
|
||||
uv_tcp_connect(connect, &socket_tcp, (const struct sockaddr*)&dest, clientConnCb);
|
||||
terrno = 0;
|
||||
uv_run(loop, UV_RUN_DEFAULT);
|
||||
uv_loop_close(loop);
|
||||
free(connect);
|
||||
return terrno;
|
||||
}
|
||||
|
||||
#else
|
||||
int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag) {
|
||||
int32_t code = -1;
|
||||
SOCKET fd = 0;
|
||||
|
||||
uint32_t ip = taosGetIpv4FromFqdn(server);
|
||||
if (ip == 0xffffffff) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
uError("failed to get http server:%s ip since %s", server, terrstr());
|
||||
goto SEND_OVER;
|
||||
}
|
||||
|
||||
fd = taosOpenTcpClientSocket(ip, port, 0);
|
||||
if (fd < 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
uError("failed to create http socket to %s:%u since %s", server, port, terrstr());
|
||||
goto SEND_OVER;
|
||||
}
|
||||
|
||||
if (flag == HTTP_GZIP) {
|
||||
int32_t dstLen = taosCompressHttpRport(pCont, contLen);
|
||||
if (dstLen > 0) {
|
||||
contLen = dstLen;
|
||||
} else {
|
||||
flag = HTTP_FLAT;
|
||||
}
|
||||
}
|
||||
|
||||
char header[1024] = {0};
|
||||
int32_t headLen = taosBuildHttpHeader(server, contLen, header, sizeof(header), flag);
|
||||
|
||||
if (taosWriteSocket(fd, header, headLen) < 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
uError("failed to send http header to %s:%u since %s", server, port, terrstr());
|
||||
goto SEND_OVER;
|
||||
}
|
||||
|
||||
if (taosWriteSocket(fd, (void*)pCont, contLen) < 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
uError("failed to send http content to %s:%u since %s", server, port, terrstr());
|
||||
goto SEND_OVER;
|
||||
}
|
||||
|
||||
// read something to avoid nginx error 499
|
||||
if (taosReadSocket(fd, header, 10) < 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
uError("failed to receive response from %s:%u since %s", server, port, terrstr());
|
||||
goto SEND_OVER;
|
||||
}
|
||||
|
||||
code = 0;
|
||||
|
||||
SEND_OVER:
|
||||
if (fd != 0) {
|
||||
taosCloseSocket(fd);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
#endif
|
|
@ -7,4 +7,4 @@ target_include_directories(
|
|||
)
|
||||
target_link_libraries(
|
||||
os pthread dl rt m
|
||||
)
|
||||
)
|
||||
|
|
|
@ -542,7 +542,7 @@ int32_t taosFsyncFile(TdFilePtr pFile) {
|
|||
}
|
||||
|
||||
if (pFile->fp != NULL) return fflush(pFile->fp);
|
||||
if (pFile->fp >= 0) return fsync(pFile->fd);
|
||||
if (pFile->fd >= 0) return fsync(pFile->fd);
|
||||
|
||||
return 0;
|
||||
#endif
|
||||
|
|
|
@ -34,8 +34,6 @@
|
|||
#include <unistd.h>
|
||||
#endif
|
||||
|
||||
#ifndef USE_UV
|
||||
|
||||
// typedef struct TdSocketServer {
|
||||
// #if SOCKET_WITH_LOCK
|
||||
// pthread_rwlock_t rwlock;
|
||||
|
@ -131,18 +129,8 @@ int32_t taosSetNonblocking(SOCKET sock, int32_t on) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
void taosIgnSIGPIPE() { signal(SIGPIPE, SIG_IGN); }
|
||||
|
||||
|
||||
void taosSetMaskSIGPIPE() {
|
||||
sigset_t signal_mask;
|
||||
sigemptyset(&signal_mask);
|
||||
sigaddset(&signal_mask, SIGPIPE);
|
||||
int32_t rc = pthread_sigmask(SIG_SETMASK, &signal_mask, NULL);
|
||||
if (rc != 0) {
|
||||
//printf("failed to setmask SIGPIPE");
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -223,9 +211,6 @@ int32_t taosSetNonblocking(SOCKET sock, int32_t on) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
void taosIgnSIGPIPE() {}
|
||||
void taosSetMaskSIGPIPE() {}
|
||||
|
||||
int32_t taosSetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t optlen) {
|
||||
if (level == SOL_SOCKET && optname == TCP_KEEPCNT) {
|
||||
return 0;
|
||||
|
@ -282,98 +267,6 @@ uint64_t htonll(uint64_t val) { return (((uint64_t)htonl(val)) << 32) + htonl(va
|
|||
|
||||
#define TCP_CONN_TIMEOUT 3000 // conn timeout
|
||||
|
||||
int32_t taosGetFqdn(char *fqdn) {
|
||||
char hostname[1024];
|
||||
hostname[1023] = '\0';
|
||||
if (gethostname(hostname, 1023) == -1) {
|
||||
//printf("failed to get hostname, reason:%s", strerror(errno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
struct addrinfo hints = {0};
|
||||
struct addrinfo *result = NULL;
|
||||
#ifdef __APPLE__
|
||||
// on macosx, hostname -f has the form of xxx.local
|
||||
// which will block getaddrinfo for a few seconds if AI_CANONNAME is set
|
||||
// thus, we choose AF_INET (ipv4 for the moment) to make getaddrinfo return
|
||||
// immediately
|
||||
hints.ai_family = AF_INET;
|
||||
#else // __APPLE__
|
||||
hints.ai_flags = AI_CANONNAME;
|
||||
#endif // __APPLE__
|
||||
int32_t ret = getaddrinfo(hostname, NULL, &hints, &result);
|
||||
if (!result) {
|
||||
//printf("failed to get fqdn, code:%d, reason:%s", ret, gai_strerror(ret));
|
||||
return -1;
|
||||
}
|
||||
|
||||
#ifdef __APPLE__
|
||||
// refer to comments above
|
||||
strcpy(fqdn, hostname);
|
||||
#else // __APPLE__
|
||||
strcpy(fqdn, result->ai_canonname);
|
||||
#endif // __APPLE__
|
||||
freeaddrinfo(result);
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t taosGetIpv4FromFqdn(const char *fqdn) {
|
||||
struct addrinfo hints = {0};
|
||||
hints.ai_family = AF_INET;
|
||||
hints.ai_socktype = SOCK_STREAM;
|
||||
|
||||
struct addrinfo *result = NULL;
|
||||
|
||||
int32_t ret = getaddrinfo(fqdn, NULL, &hints, &result);
|
||||
if (result) {
|
||||
struct sockaddr * sa = result->ai_addr;
|
||||
struct sockaddr_in *si = (struct sockaddr_in *)sa;
|
||||
struct in_addr ia = si->sin_addr;
|
||||
uint32_t ip = ia.s_addr;
|
||||
freeaddrinfo(result);
|
||||
return ip;
|
||||
} else {
|
||||
#ifdef EAI_SYSTEM
|
||||
if (ret == EAI_SYSTEM) {
|
||||
//printf("failed to get the ip address, fqdn:%s, since:%s", fqdn, strerror(errno));
|
||||
} else {
|
||||
//printf("failed to get the ip address, fqdn:%s, since:%s", fqdn, gai_strerror(ret));
|
||||
}
|
||||
#else
|
||||
//printf("failed to get the ip address, fqdn:%s, since:%s", fqdn, gai_strerror(ret));
|
||||
#endif
|
||||
return 0xFFFFFFFF;
|
||||
}
|
||||
}
|
||||
|
||||
// Function converting an IP address string to an uint32_t.
|
||||
uint32_t ip2uint(const char *const ip_addr) {
|
||||
char ip_addr_cpy[20];
|
||||
char ip[5];
|
||||
|
||||
tstrncpy(ip_addr_cpy, ip_addr, sizeof(ip_addr_cpy));
|
||||
|
||||
char *s_start, *s_end;
|
||||
s_start = ip_addr_cpy;
|
||||
s_end = ip_addr_cpy;
|
||||
|
||||
int32_t k;
|
||||
|
||||
for (k = 0; *s_start != '\0'; s_start = s_end) {
|
||||
for (s_end = s_start; *s_end != '.' && *s_end != '\0'; s_end++) {
|
||||
}
|
||||
if (*s_end == '.') {
|
||||
*s_end = '\0';
|
||||
s_end++;
|
||||
}
|
||||
ip[k++] = (char)atoi(s_start);
|
||||
}
|
||||
|
||||
ip[k] = '\0';
|
||||
|
||||
return *((uint32_t *)ip);
|
||||
}
|
||||
|
||||
int32_t taosWriteMsg(SOCKET fd, void *buf, int32_t nbytes) {
|
||||
int32_t nleft, nwritten;
|
||||
char * ptr = (char *)buf;
|
||||
|
@ -754,10 +647,6 @@ SOCKET taosOpenTcpServerSocket(uint32_t ip, uint16_t port) {
|
|||
return sockFd;
|
||||
}
|
||||
|
||||
void tinet_ntoa(char *ipstr, uint32_t ip) {
|
||||
sprintf(ipstr, "%d.%d.%d.%d", ip & 0xFF, (ip >> 8) & 0xFF, (ip >> 16) & 0xFF, ip >> 24);
|
||||
}
|
||||
|
||||
#define COPY_SIZE 32768
|
||||
// sendfile shall be used
|
||||
|
||||
|
@ -795,12 +684,9 @@ int64_t taosCopyFds(SOCKET sfd, int32_t dfd, int64_t len) {
|
|||
return len;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
#if !(defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32))
|
||||
void taosBlockSIGPIPE() {
|
||||
#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
|
||||
#else
|
||||
sigset_t signal_mask;
|
||||
sigemptyset(&signal_mask);
|
||||
sigaddset(&signal_mask, SIGPIPE);
|
||||
|
@ -808,7 +694,122 @@ void taosBlockSIGPIPE() {
|
|||
if (rc != 0) {
|
||||
//printf("failed to block SIGPIPE");
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
uint32_t taosGetIpv4FromFqdn(const char *fqdn) {
|
||||
struct addrinfo hints = {0};
|
||||
hints.ai_family = AF_INET;
|
||||
hints.ai_socktype = SOCK_STREAM;
|
||||
|
||||
struct addrinfo *result = NULL;
|
||||
|
||||
int32_t ret = getaddrinfo(fqdn, NULL, &hints, &result);
|
||||
if (result) {
|
||||
struct sockaddr * sa = result->ai_addr;
|
||||
struct sockaddr_in *si = (struct sockaddr_in *)sa;
|
||||
struct in_addr ia = si->sin_addr;
|
||||
uint32_t ip = ia.s_addr;
|
||||
freeaddrinfo(result);
|
||||
return ip;
|
||||
} else {
|
||||
#ifdef EAI_SYSTEM
|
||||
if (ret == EAI_SYSTEM) {
|
||||
//printf("failed to get the ip address, fqdn:%s, since:%s", fqdn, strerror(errno));
|
||||
} else {
|
||||
//printf("failed to get the ip address, fqdn:%s, since:%s", fqdn, gai_strerror(ret));
|
||||
}
|
||||
#else
|
||||
void taosBlockSIGPIPE() {}
|
||||
#endif
|
||||
//printf("failed to get the ip address, fqdn:%s, since:%s", fqdn, gai_strerror(ret));
|
||||
#endif
|
||||
return 0xFFFFFFFF;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t taosGetFqdn(char *fqdn) {
|
||||
char hostname[1024];
|
||||
hostname[1023] = '\0';
|
||||
if (gethostname(hostname, 1023) == -1) {
|
||||
//printf("failed to get hostname, reason:%s", strerror(errno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
struct addrinfo hints = {0};
|
||||
struct addrinfo *result = NULL;
|
||||
#ifdef __APPLE__
|
||||
// on macosx, hostname -f has the form of xxx.local
|
||||
// which will block getaddrinfo for a few seconds if AI_CANONNAME is set
|
||||
// thus, we choose AF_INET (ipv4 for the moment) to make getaddrinfo return
|
||||
// immediately
|
||||
hints.ai_family = AF_INET;
|
||||
#else // __APPLE__
|
||||
hints.ai_flags = AI_CANONNAME;
|
||||
#endif // __APPLE__
|
||||
int32_t ret = getaddrinfo(hostname, NULL, &hints, &result);
|
||||
if (!result) {
|
||||
//printf("failed to get fqdn, code:%d, reason:%s", ret, gai_strerror(ret));
|
||||
return -1;
|
||||
}
|
||||
|
||||
#ifdef __APPLE__
|
||||
// refer to comments above
|
||||
strcpy(fqdn, hostname);
|
||||
#else // __APPLE__
|
||||
strcpy(fqdn, result->ai_canonname);
|
||||
#endif // __APPLE__
|
||||
freeaddrinfo(result);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Function converting an IP address string to an uint32_t.
|
||||
uint32_t ip2uint(const char *const ip_addr) {
|
||||
char ip_addr_cpy[20];
|
||||
char ip[5];
|
||||
|
||||
tstrncpy(ip_addr_cpy, ip_addr, sizeof(ip_addr_cpy));
|
||||
|
||||
char *s_start, *s_end;
|
||||
s_start = ip_addr_cpy;
|
||||
s_end = ip_addr_cpy;
|
||||
|
||||
int32_t k;
|
||||
|
||||
for (k = 0; *s_start != '\0'; s_start = s_end) {
|
||||
for (s_end = s_start; *s_end != '.' && *s_end != '\0'; s_end++) {
|
||||
}
|
||||
if (*s_end == '.') {
|
||||
*s_end = '\0';
|
||||
s_end++;
|
||||
}
|
||||
ip[k++] = (char)atoi(s_start);
|
||||
}
|
||||
|
||||
ip[k] = '\0';
|
||||
|
||||
return *((uint32_t *)ip);
|
||||
}
|
||||
|
||||
void tinet_ntoa(char *ipstr, uint32_t ip) {
|
||||
sprintf(ipstr, "%d.%d.%d.%d", ip & 0xFF, (ip >> 8) & 0xFF, (ip >> 16) & 0xFF, ip >> 24);
|
||||
}
|
||||
|
||||
|
||||
void taosIgnSIGPIPE() {
|
||||
#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
|
||||
#else
|
||||
signal(SIGPIPE, SIG_IGN);
|
||||
#endif
|
||||
}
|
||||
|
||||
void taosSetMaskSIGPIPE() {
|
||||
#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
|
||||
#else
|
||||
sigset_t signal_mask;
|
||||
sigemptyset(&signal_mask);
|
||||
sigaddset(&signal_mask, SIGPIPE);
|
||||
int32_t rc = pthread_sigmask(SIG_SETMASK, &signal_mask, NULL);
|
||||
if (rc != 0) {
|
||||
//printf("failed to setmask SIGPIPE");
|
||||
}
|
||||
#endif
|
||||
}
|
|
@ -10,14 +10,8 @@ target_link_libraries(
|
|||
util
|
||||
PRIVATE os
|
||||
PUBLIC lz4_static
|
||||
PUBLIC api cjson
|
||||
PUBLIC api cjson zlib
|
||||
)
|
||||
if(${BUILD_WITH_UV})
|
||||
target_link_libraries(
|
||||
util
|
||||
PUBLIC uv_a
|
||||
)
|
||||
endif(${BUILD_WITH_UV})
|
||||
|
||||
if(${BUILD_TEST})
|
||||
ADD_SUBDIRECTORY(test)
|
||||
|
|
|
@ -68,6 +68,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_TIME_STAMP, "Client and server's t
|
|||
TAOS_DEFINE_ERROR(TSDB_CODE_APP_NOT_READY, "Database not ready")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_FQDN_ERROR, "Unable to resolve FQDN")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_VERSION, "Invalid app version")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_COMPRESS_ERROR, "Failed to compress msg")
|
||||
|
||||
//common & util
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_OPS_NOT_SUPPORT, "Operation not supported")
|
||||
|
@ -266,7 +267,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_INVALID_STAGE, "Invalid stage to kill
|
|||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CANT_PARALLEL, "Invalid stage to kill")
|
||||
|
||||
// mnode-topic
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_UNSUPPORTED_TOPIC, "Topic with STable not supported yet")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_UNSUPPORTED_TOPIC, "Topic with aggregation is unsupported")
|
||||
|
||||
// dnode
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_DND_ACTION_IN_PROGRESS, "Action in progress")
|
||||
|
|
|
@ -1,138 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#define _DEFAULT_SOURCE
|
||||
#include "thttp.h"
|
||||
#include "taoserror.h"
|
||||
#include "tlog.h"
|
||||
|
||||
#ifdef USE_UV
|
||||
|
||||
#include <uv.h>
|
||||
|
||||
void clientConnCb(uv_connect_t* req, int status) {
|
||||
if(status < 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(status);
|
||||
uError("Connection error %s\n",uv_strerror(status));
|
||||
return;
|
||||
}
|
||||
|
||||
// impl later
|
||||
uv_buf_t* wb = req->data;
|
||||
if (wb == NULL) {
|
||||
uv_close((uv_handle_t *)req->handle,NULL);
|
||||
}
|
||||
uv_write_t write_req;
|
||||
uv_write(&write_req, req->handle, wb, 2, NULL);
|
||||
uv_close((uv_handle_t *)req->handle,NULL);
|
||||
}
|
||||
|
||||
int32_t taosSendHttpReport(const char* server, uint16_t port, const char* pCont, int32_t contLen) {
|
||||
uint32_t ipv4 = taosGetIpv4FromFqdn(server);
|
||||
if (ipv4 == 0xffffffff) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
uError("failed to get http server:%s ip since %s", server, terrstr());
|
||||
return -1;
|
||||
// goto SEND_OVER;
|
||||
}
|
||||
char ipv4Buf[128];
|
||||
tinet_ntoa(ipv4Buf, ipv4);
|
||||
|
||||
struct sockaddr_in dest;
|
||||
uv_ip4_addr(ipv4Buf, port, &dest);
|
||||
|
||||
uv_tcp_t socket_tcp;
|
||||
uv_loop_t *loop = uv_default_loop();
|
||||
uv_tcp_init(loop, &socket_tcp);
|
||||
uv_connect_t* connect = (uv_connect_t*)malloc(sizeof(uv_connect_t));
|
||||
|
||||
char header[4096] = {0};
|
||||
int32_t headLen = snprintf(header, sizeof(header),
|
||||
"POST /report HTTP/1.1\n"
|
||||
"Host: %s\n"
|
||||
"Content-Type: application/json\n"
|
||||
"Content-Length: %d\n\n",
|
||||
server, contLen);
|
||||
uv_buf_t wb[2];
|
||||
wb[0] = uv_buf_init((char*)header, headLen);
|
||||
wb[1] = uv_buf_init((char*)pCont, contLen);
|
||||
|
||||
connect->data = wb;
|
||||
uv_tcp_connect(connect, &socket_tcp, (const struct sockaddr*)&dest, clientConnCb);
|
||||
terrno = 0;
|
||||
uv_run(loop,UV_RUN_DEFAULT);
|
||||
uv_loop_close(loop);
|
||||
free(connect);
|
||||
return terrno;
|
||||
}
|
||||
|
||||
#else
|
||||
int32_t taosSendHttpReport(const char* server, uint16_t port, const char* pCont, int32_t contLen) {
|
||||
int32_t code = -1;
|
||||
SOCKET fd = 0;
|
||||
|
||||
uint32_t ip = taosGetIpv4FromFqdn(server);
|
||||
if (ip == 0xffffffff) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
uError("failed to get http server:%s ip since %s", server, terrstr());
|
||||
goto SEND_OVER;
|
||||
}
|
||||
|
||||
fd = taosOpenTcpClientSocket(ip, port, 0);
|
||||
if (fd < 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
uError("failed to create http socket to %s:%u since %s", server, port, terrstr());
|
||||
goto SEND_OVER;
|
||||
}
|
||||
|
||||
char header[4096] = {0};
|
||||
int32_t headLen = snprintf(header, sizeof(header),
|
||||
"POST /report HTTP/1.1\n"
|
||||
"Host: %s\n"
|
||||
"Content-Type: application/json\n"
|
||||
"Content-Length: %d\n\n",
|
||||
server, contLen);
|
||||
|
||||
if (taosWriteSocket(fd, (void*)header, headLen) < 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
uError("failed to send http header to %s:%u since %s", server, port, terrstr());
|
||||
goto SEND_OVER;
|
||||
}
|
||||
|
||||
if (taosWriteSocket(fd, (void*)pCont, contLen) < 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
uError("failed to send http content to %s:%u since %s", server, port, terrstr());
|
||||
goto SEND_OVER;
|
||||
}
|
||||
|
||||
// read something to avoid nginx error 499
|
||||
if (taosReadSocket(fd, header, 10) < 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
uError("failed to receive response from %s:%u since %s", server, port, terrstr());
|
||||
goto SEND_OVER;
|
||||
}
|
||||
|
||||
uTrace("send http to %s:%u, len:%d content: %s", server, port, contLen, pCont);
|
||||
code = 0;
|
||||
|
||||
SEND_OVER:
|
||||
if (fd != 0) {
|
||||
taosCloseSocket(fd);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
#endif
|
|
@ -1,7 +1,6 @@
|
|||
char version[12] = "${TD_VER_NUMBER}";
|
||||
char compatible_version[12] = "${TD_VER_COMPATIBLE}";
|
||||
char gitinfo[48] = "${TD_VER_GIT}";
|
||||
char gitinfoOfInternal[48] = "${TD_VER_GIT_INTERNAL}";
|
||||
char buildinfo[64] = "Built at ${TD_VER_DATE}";
|
||||
|
||||
void libtaos_${TD_LIB_VER_NUMBER}_${TD_VER_OSTYPE}_${TD_VER_CPUTYPE}_${TD_VER_VERTYPE}() {};
|
||||
|
|
1
tests
1
tests
|
@ -1 +0,0 @@
|
|||
Subproject commit 904e6f0e152e8fe61edfe0a0a9ae497cfde2a72c
|
|
@ -0,0 +1,4 @@
|
|||
#ADD_SUBDIRECTORY(examples/c)
|
||||
ADD_SUBDIRECTORY(tsim)
|
||||
ADD_SUBDIRECTORY(test/c)
|
||||
#ADD_SUBDIRECTORY(comparisonTest/tdengine)
|
|
@ -0,0 +1,243 @@
|
|||
### Prepare development environment
|
||||
|
||||
1. sudo apt install
|
||||
build-essential cmake net-tools python-pip python-setuptools python3-pip
|
||||
python3-setuptools valgrind psmisc curl
|
||||
|
||||
2. git clone <https://github.com/taosdata/TDengine>; cd TDengine
|
||||
|
||||
3. mkdir debug; cd debug; cmake ..; make ; sudo make install
|
||||
|
||||
4. pip install ../src/connector/python ; pip3 install
|
||||
../src/connector/python
|
||||
|
||||
5. pip install numpy; pip3 install numpy (numpy is required only if you need to run querySort.py)
|
||||
|
||||
> Note: Both Python2 and Python3 are currently supported by the Python test
> framework. Since Python2 has not been officially supported by the Python
> Software Foundation since January 1, 2020, new test cases should be
> guaranteed to run correctly on Python3.
|
||||
|
||||
> For Python2, please keep test cases compatible where appropriate, as long as
> this adds no extra burden.
>
> If you use a newer Linux distribution such as Ubuntu 20.04 that no longer
> ships Python2, please do not install any Python2-related packages.
|
||||
>
|
||||
> <https://nakedsecurity.sophos.com/2020/01/03/python-is-dead-long-live-python/>
|
||||
|
||||
### How to run Python test suite
|
||||
|
||||
1. cd \<TDengine\>/tests/pytest
|
||||
|
||||
2. ./smoketest.sh \# for smoke test
|
||||
|
||||
3. ./smoketest.sh -g \# for memory leak detection test with valgrind
|
||||
|
||||
4. ./fulltest.sh \# for full test
|
||||
|
||||
> Note1: The TDengine daemon's configuration and data files are stored in the
> \<TDengine\>/sim directory. For historical reasons this is the same location
> used by the TSIM scripts, so once a TSIM script has run with sudo privileges
> the directory has been taken over by TSIM and a Python script run as a normal
> user can no longer write to it. Remove the directory completely before running
> the Python test cases. We should consider using two separate locations for the
> TSIM and Python scripts.
|
||||
|
||||
> Note2: If you need to debug a crash with a core dump, manually edit
> smoketest.sh or fulltest.sh and add "ulimit -c unlimited" before the script
> line. You can then look for the core file in \<TDengine\>/tests/pytest after
> the program crashes.
|
||||
|
||||
|
||||
### How to add a new test case
|
||||
|
||||
**1. TSIM test cases:**
|
||||
|
||||
TSIM is the testing framework that has been used internally. It is still used, as a legacy system, to run the test cases we developed in the past. We are moving to Python for new test cases and are gradually phasing out TSIM.
|
||||
|
||||
**2. Python test cases:**
|
||||
|
||||
**2.1 Please refer to \<TDengine\>/tests/pytest/insert/basic.py when adding a new
test case.** A new test case must implement three functions: self.init() and
self.stop() can simply be copied from insert/basic.py, while the actual test
logic is implemented in self.run(). You can refer to the code in the util
directory for more information; a sketch is shown below.
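A minimal sketch of such a case follows. It only illustrates the structure; it assumes the util helpers documented later in this section (tdLog from util/log.py, tdSql from util/sql.py), and the stop() body and the tdCases registration lines are taken from the existing basic cases rather than from this document, so treat them as assumptions.

# Minimal test case sketch (see the assumptions above); init() and stop()
# mirror insert/basic.py, the real test logic lives in run().
import sys
from util.log import *
from util.cases import *
from util.sql import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        tdSql.prepare()                       # creates and switches to database db
        tdSql.execute("create table tb (ts timestamp, speed int)")
        tdSql.execute("insert into tb values (now, 1)")
        tdSql.query("select * from tb")
        tdSql.checkRows(1)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


# register the case with the framework, as done in the existing cases
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())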
|
||||
|
||||
**2.2 Edit smoketest.sh to add the path and filename of the new test case**
|
||||
|
||||
Note: The Python test framework may continue to be improved in the future to
provide more functionality and make test cases easier to write, so the way of
writing test cases described above may change accordingly.
|
||||
|
||||
**2.3 What test.py does in detail:**
|
||||
|
||||
test.py is the entry program for test case execution and monitoring.

test.py supports the following options (an example invocation follows the list):

\-f --file, specifies the test case file to be executed
\-p --path, specifies the deployment path
\-m --master, specifies the master server IP for cluster deployment
\-c --cluster, tests cluster functionality
\-s --stop, terminates all running nodes
\-g --valgrind, loads valgrind for the memory leak detection test
\-h --help, displays help
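For example, assuming the working directory is \<TDengine\>/tests/pytest, a single case is typically launched with an invocation along the lines of `python3 test.py -f insert/basic.py`, and `-g` can be appended for a valgrind run; treat the exact command line as an assumption derived from the options above rather than a verified recipe.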
|
||||
|
||||
**2.4 What util/log.py does in detail:**
|
||||
|
||||
log.py is quite simple; its main purpose is to print output in different
colors as needed. Call success() when a test case executes successfully, and
it prints green text. Call exit() when a test fails; it prints red text and
exits the program.
|
||||
|
||||
**util/log.py**
|
||||
|
||||
...

def info(self, info):
    printf("%s %s" % (datetime.datetime.now(), info))

def sleep(self, sec):
    printf("%s sleep %d seconds" % (datetime.datetime.now(), sec))
    time.sleep(sec)

def debug(self, err):
    printf("\\033[1;36m%s %s\\033[0m" % (datetime.datetime.now(), err))

def success(self, info):
    printf("\\033[1;32m%s %s\\033[0m" % (datetime.datetime.now(), info))

def notice(self, err):
    printf("\\033[1;33m%s %s\\033[0m" % (datetime.datetime.now(), err))

def exit(self, err):
    printf("\\033[1;31m%s %s\\033[0m" % (datetime.datetime.now(), err))
    sys.exit(1)

def printNoPrefix(self, info):
    printf("\\033[1;36m%s\\033[0m" % (info))

...
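Inside a test case these methods are called on the shared logger object; the short sketch below assumes the tdLog instance exported by util.log, as used in the existing cases:

# Sketch only: typical logger calls inside a test case.
from util.log import *

tdLog.info("prepare database:db")              # plain progress message
tdLog.debug("start to execute %s" % __file__)  # cyan debug output
tdLog.success("case finished")                 # green text for a passing case
# tdLog.exit("taosd not found!")               # red text, then sys.exit(1)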
|
||||
|
||||
**2.5 What util/sql.py does in detail:**
|
||||
|
||||
sql.py is mainly used to execute SQL statements that manipulate the database;
the code is excerpted and commented as follows:
|
||||
|
||||
**util/sql.py**
|
||||
|
||||
\# prepare() is mainly used to set up the environment for the tables and data
under test; it creates the database db used for testing. Do not call prepare()
if you need to test database management commands themselves.

def prepare(self):
    tdLog.info("prepare database:db")
    self.cursor.execute('reset query cache')
    self.cursor.execute('drop database if exists db')
    self.cursor.execute('create database db')
    self.cursor.execute('use db')
|
||||
|
||||
...
|
||||
|
||||
\# query() is mainly used to execute select statements for normal syntax input
|
||||
|
||||
def query(self, sql):
|
||||
|
||||
...
|
||||
|
||||
\# error() is mainly used to execute a select statement with intentionally
wrong syntax; the resulting error is caught and treated as the expected
behavior, and if no error is raised the test is considered failed.
|
||||
|
||||
def error()
|
||||
|
||||
...
|
||||
|
||||
\# checkRows() is called after query(select ...) to check that the number of
rows in the returned result matches the expectation.
|
||||
|
||||
def checkRows(self, expectRows):
|
||||
|
||||
...
|
||||
|
||||
\# checkData() is called after query(select ...) to check the returned result
data; failure to meet the expectation means the test fails.
|
||||
|
||||
def checkData(self, row, col, data):
|
||||
|
||||
...
|
||||
|
||||
\# getData() returns the result data at the given row and column after
query(select ...) has been called.
|
||||
|
||||
def getData(self, row, col):
|
||||
|
||||
...
|
||||
|
||||
\# execute() executes a sql statement and returns the number of affected rows
|
||||
|
||||
def execute(self, sql):
|
||||
|
||||
...
|
||||
|
||||
\# executeTimes() executes the same sql statement multiple times
|
||||
|
||||
def executeTimes(self, sql, times):
|
||||
|
||||
...
|
||||
|
||||
\# checkAffectedRows() checks whether the number of affected rows matches the expectation
|
||||
|
||||
def checkAffectedRows(self, expectAffectedRows):
|
||||
|
||||
...
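Putting these helpers together, a run() body typically drives the database through the shared tdSql instance. The sketch below is only an illustration: the table and column values are made up for the example, and the error() call signature is assumed from its usage rather than shown in the excerpt above.

# Sketch: how run() typically exercises util/sql.py.
def run(self):
    tdSql.prepare()                                   # (re)create and use database db
    tdSql.execute("create table tb (ts timestamp, speed int)")
    tdSql.execute("insert into tb values (now, 10)")
    tdSql.query("select * from tb")
    tdSql.checkRows(1)                                # exactly one row expected
    tdSql.checkData(0, 1, 10)                         # row 0, column 1 should be 10
    tdSql.error("select * from table_that_does_not_exist")  # bad SQL must fail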
|
||||
|
||||
### CI submission adoption principle.
|
||||
|
||||
- Every commit / PR must compile. Warnings are currently treated as errors, so
  any warning must also be resolved.
|
||||
|
||||
- Test cases that already exist must pass.
|
||||
|
||||
- Because CI is essential for supporting the build and automated test
  procedure, a test case must be tested manually before it is added, with as
  many iterations as possible, to ensure that it provides stable and reliable
  results once added.
|
||||
|
||||
> Note: In the future, stress testing, performance testing, code style checks,
> and other features will be added on top of the functional testing, according
> to requirements and test development progress.
|
|
@ -0,0 +1,328 @@
|
|||
def pre_test(){
|
||||
|
||||
sh '''
|
||||
sudo rmtaos||echo 'no taosd installed'
|
||||
'''
|
||||
sh '''
|
||||
cd ${WKC}
|
||||
git reset --hard
|
||||
git checkout $BRANCH_NAME
|
||||
git pull
|
||||
git submodule update
|
||||
cd ${WK}
|
||||
git reset --hard
|
||||
git checkout $BRANCH_NAME
|
||||
git pull
|
||||
export TZ=Asia/Harbin
|
||||
date
|
||||
rm -rf ${WK}/debug
|
||||
mkdir debug
|
||||
cd debug
|
||||
cmake .. > /dev/null
|
||||
make > /dev/null
|
||||
make install > /dev/null
|
||||
pip3 install ${WKC}/src/connector/python
|
||||
'''
|
||||
return 1
|
||||
}
|
||||
def pre_test_p(){
|
||||
|
||||
sh '''
|
||||
sudo rmtaos||echo 'no taosd installed'
|
||||
'''
|
||||
sh '''
|
||||
cd ${WKC}
|
||||
git reset --hard
|
||||
git checkout $BRANCH_NAME
|
||||
git pull
|
||||
git submodule update
|
||||
cd ${WK}
|
||||
git reset --hard
|
||||
git checkout $BRANCH_NAME
|
||||
git pull
|
||||
export TZ=Asia/Harbin
|
||||
date
|
||||
rm -rf ${WK}/debug
|
||||
mkdir debug
|
||||
cd debug
|
||||
cmake .. > /dev/null
|
||||
make > /dev/null
|
||||
make install > /dev/null
|
||||
pip3 install ${WKC}/src/connector/python
|
||||
'''
|
||||
return 1
|
||||
}
|
||||
pipeline {
|
||||
agent none
|
||||
environment{
|
||||
|
||||
WK = '/data/lib/jenkins/workspace/TDinternal'
|
||||
WKC= '/data/lib/jenkins/workspace/TDinternal/community'
|
||||
}
|
||||
|
||||
stages {
|
||||
stage('Parallel test stage') {
|
||||
parallel {
|
||||
stage('pytest') {
|
||||
agent{label 'slad1'}
|
||||
steps {
|
||||
pre_test_p()
|
||||
sh '''
|
||||
cd ${WKC}/tests
|
||||
find pytest -name '*'sql|xargs rm -rf
|
||||
./test-all.sh pytest
|
||||
date'''
|
||||
}
|
||||
}
|
||||
stage('test_b1') {
|
||||
agent{label 'slad2'}
|
||||
steps {
|
||||
pre_test()
|
||||
|
||||
sh '''
|
||||
cd ${WKC}/tests
|
||||
./test-all.sh b1
|
||||
date'''
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
stage('test_crash_gen') {
|
||||
agent{label "slad3"}
|
||||
steps {
|
||||
pre_test()
|
||||
sh '''
|
||||
cd ${WKC}/tests/pytest
|
||||
'''
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${WKC}/tests/pytest
|
||||
./crash_gen.sh -a -p -t 4 -s 2000
|
||||
'''
|
||||
}
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${WKC}/tests/pytest
|
||||
rm -rf /var/lib/taos/*
|
||||
rm -rf /var/log/taos/*
|
||||
./handle_crash_gen_val_log.sh
|
||||
'''
|
||||
}
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${WKC}/tests/pytest
|
||||
rm -rf /var/lib/taos/*
|
||||
rm -rf /var/log/taos/*
|
||||
./handle_taosd_val_log.sh
|
||||
'''
|
||||
}
|
||||
|
||||
sh'''
|
||||
nohup taosd >/dev/null &
|
||||
sleep 10
|
||||
'''
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${WKC}/tests/gotest
|
||||
bash batchtest.sh
|
||||
'''
|
||||
}
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${WKC}/tests/examples/python/PYTHONConnectorChecker
|
||||
python3 PythonChecker.py
|
||||
'''
|
||||
}
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${WKC}/tests/examples/JDBC/JDBCDemo/
|
||||
mvn clean package >/dev/null
|
||||
java -jar target/JdbcRestfulDemo-jar-with-dependencies.jar
|
||||
'''
|
||||
}
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cp -rf ${WKC}/tests/examples/nodejs ${JENKINS_HOME}/workspace/
|
||||
cd ${JENKINS_HOME}/workspace/nodejs
|
||||
node nodejsChecker.js host=localhost
|
||||
'''
|
||||
}
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC#
|
||||
dotnet run
|
||||
'''
|
||||
}
|
||||
sh '''
|
||||
pkill -9 taosd || echo 1
|
||||
cd ${WKC}/tests
|
||||
./test-all.sh b2
|
||||
date
|
||||
'''
|
||||
sh '''
|
||||
cd ${WKC}/tests
|
||||
./test-all.sh full unit
|
||||
date'''
|
||||
}
|
||||
}
|
||||
|
||||
stage('test_valgrind') {
|
||||
agent{label "slad4"}
|
||||
|
||||
steps {
|
||||
pre_test()
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${WKC}/tests/pytest
|
||||
nohup taosd >/dev/null &
|
||||
sleep 10
|
||||
python3 concurrent_inquiry.py -c 1
|
||||
|
||||
'''
|
||||
}
|
||||
sh '''
|
||||
cd ${WKC}/tests
|
||||
./test-all.sh full jdbc
|
||||
date'''
|
||||
sh '''
|
||||
cd ${WKC}/tests/pytest
|
||||
./valgrind-test.sh 2>&1 > mem-error-out.log
|
||||
./handle_val_log.sh
|
||||
|
||||
date
|
||||
cd ${WKC}/tests
|
||||
./test-all.sh b3
|
||||
date'''
|
||||
sh '''
|
||||
date
|
||||
cd ${WKC}/tests
|
||||
./test-all.sh full example
|
||||
date'''
|
||||
}
|
||||
}
|
||||
|
||||
stage('arm64_build'){
|
||||
agent{label 'arm64'}
|
||||
steps{
|
||||
sh '''
|
||||
cd ${WK}
|
||||
git fetch
|
||||
git checkout develop
|
||||
git pull
|
||||
cd ${WKC}
|
||||
git fetch
|
||||
git checkout develop
|
||||
git pull
|
||||
git submodule update
|
||||
cd ${WKC}/packaging
|
||||
./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0
|
||||
|
||||
'''
|
||||
}
|
||||
}
|
||||
stage('arm32_build'){
|
||||
agent{label 'arm32'}
|
||||
steps{
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${WK}
|
||||
git fetch
|
||||
git checkout develop
|
||||
git pull
|
||||
cd ${WKC}
|
||||
git fetch
|
||||
git checkout develop
|
||||
git pull
|
||||
git submodule update
|
||||
cd ${WKC}/packaging
|
||||
./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0
|
||||
|
||||
'''
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
post {
|
||||
success {
|
||||
emailext (
|
||||
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' SUCCESS",
|
||||
body: """<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
</head>
|
||||
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
|
||||
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
|
||||
<tr>
|
||||
<td><br />
|
||||
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
|
||||
<hr size="2" width="100%" align="center" /></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<ul>
|
||||
<div style="font-size:18px">
|
||||
<li>构建名称>>分支:${env.BRANCH_NAME}</li>
|
||||
<li>构建结果:<span style="color:green"> Successful </span></li>
|
||||
<li>构建编号:${BUILD_NUMBER}</li>
|
||||
<li>触发用户:${env.CHANGE_AUTHOR}</li>
|
||||
<li>提交信息:${env.CHANGE_TITLE}</li>
|
||||
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
|
||||
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
|
||||
|
||||
</div>
|
||||
</ul>
|
||||
</td>
|
||||
</tr>
|
||||
</table></font>
|
||||
</body>
|
||||
</html>""",
|
||||
to: "yqliu@taosdata.com,pxiao@taosdata.com",
|
||||
from: "support@taosdata.com"
|
||||
)
|
||||
}
|
||||
failure {
|
||||
emailext (
|
||||
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' FAIL",
|
||||
body: """<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
</head>
|
||||
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
|
||||
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
|
||||
<tr>
|
||||
<td><br />
|
||||
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
|
||||
<hr size="2" width="100%" align="center" /></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<ul>
|
||||
<div style="font-size:18px">
|
||||
<li>构建名称>>分支:${env.BRANCH_NAME}</li>
|
||||
<li>构建结果:<span style="color:red"> Failure </span></li>
|
||||
<li>构建编号:${BUILD_NUMBER}</li>
|
||||
<li>触发用户:${env.CHANGE_AUTHOR}</li>
|
||||
<li>提交信息:${env.CHANGE_TITLE}</li>
|
||||
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
|
||||
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
|
||||
|
||||
</div>
|
||||
</ul>
|
||||
</td>
|
||||
</tr>
|
||||
</table></font>
|
||||
</body>
|
||||
</html>""",
|
||||
to: "yqliu@taosdata.com,pxiao@taosdata.com",
|
||||
from: "support@taosdata.com"
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,336 @@
|
|||
def pre_test(){
|
||||
|
||||
sh '''
|
||||
sudo rmtaos||echo 'no taosd installed'
|
||||
'''
|
||||
sh '''
|
||||
cd ${WKC}
|
||||
git reset --hard
|
||||
git checkout $BRANCH_NAME
|
||||
git pull
|
||||
git submodule update
|
||||
cd ${WK}
|
||||
git reset --hard
|
||||
git checkout $BRANCH_NAME
|
||||
git pull
|
||||
export TZ=Asia/Harbin
|
||||
date
|
||||
rm -rf ${WK}/debug
|
||||
mkdir debug
|
||||
cd debug
|
||||
cmake .. > /dev/null
|
||||
make > /dev/null
|
||||
make install > /dev/null
|
||||
pip3 install ${WKC}/src/connector/python/ || echo 0
|
||||
'''
|
||||
return 1
|
||||
}
|
||||
def pre_test_p(){
|
||||
|
||||
sh '''
|
||||
sudo rmtaos||echo 'no taosd installed'
|
||||
'''
|
||||
sh '''
|
||||
cd ${WKC}
|
||||
git reset --hard
|
||||
git checkout $BRANCH_NAME
|
||||
git pull
|
||||
git submodule update
|
||||
cd ${WK}
|
||||
git reset --hard
|
||||
git checkout $BRANCH_NAME
|
||||
git pull
|
||||
export TZ=Asia/Harbin
|
||||
date
|
||||
rm -rf ${WK}/debug
|
||||
mkdir debug
|
||||
cd debug
|
||||
cmake .. > /dev/null
|
||||
make > /dev/null
|
||||
make install > /dev/null
|
||||
pip3 install ${WKC}/src/connector/python/ || echo 0
|
||||
'''
|
||||
return 1
|
||||
}
|
||||
pipeline {
|
||||
agent none
|
||||
environment{
|
||||
|
||||
WK = '/data/lib/jenkins/workspace/TDinternal'
|
||||
WKC= '/data/lib/jenkins/workspace/TDinternal/community'
|
||||
}
|
||||
|
||||
stages {
|
||||
stage('Parallel test stage') {
|
||||
parallel {
|
||||
stage('pytest') {
|
||||
agent{label 'slam1'}
|
||||
steps {
|
||||
pre_test_p()
|
||||
sh '''
|
||||
cd ${WKC}/tests
|
||||
find pytest -name '*'sql|xargs rm -rf
|
||||
./test-all.sh pytest
|
||||
date'''
|
||||
}
|
||||
}
|
||||
stage('test_b1') {
|
||||
agent{label 'slam2'}
|
||||
steps {
|
||||
pre_test()
|
||||
|
||||
sh '''
|
||||
cd ${WKC}/tests
|
||||
./test-all.sh b1
|
||||
date'''
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
stage('test_crash_gen') {
|
||||
agent{label "slam3"}
|
||||
steps {
|
||||
pre_test()
|
||||
sh '''
|
||||
cd ${WKC}/tests/pytest
|
||||
'''
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${WKC}/tests/pytest
|
||||
./crash_gen.sh -a -p -t 4 -s 2000
|
||||
'''
|
||||
}
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${WKC}/tests/pytest
|
||||
rm -rf /var/lib/taos/*
|
||||
rm -rf /var/log/taos/*
|
||||
./handle_crash_gen_val_log.sh
|
||||
'''
|
||||
}
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${WKC}/tests/pytest
|
||||
rm -rf /var/lib/taos/*
|
||||
rm -rf /var/log/taos/*
|
||||
./handle_taosd_val_log.sh
|
||||
'''
|
||||
}
|
||||
|
||||
sh'''
|
||||
nohup taosd >/dev/null &
|
||||
sleep 10
|
||||
'''
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${WKC}/tests/gotest
|
||||
bash batchtest.sh
|
||||
'''
|
||||
}
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${WKC}/tests/examples/python/PYTHONConnectorChecker
|
||||
python3 PythonChecker.py
|
||||
'''
|
||||
}
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${WKC}/tests/examples/JDBC/JDBCDemo/
|
||||
mvn clean package assembly:single -DskipTests >/dev/null
|
||||
java -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
|
||||
'''
|
||||
}
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${WKC}/src/connector/jdbc
|
||||
mvn clean package -Dmaven.test.skip=true >/dev/null
|
||||
cd ${WKC}/tests/examples/JDBC/JDBCDemo/
|
||||
java --class-path=../../../../src/connector/jdbc/target:$JAVA_HOME/jre/lib/ext -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
|
||||
'''
|
||||
}
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cp -rf ${WKC}/tests/examples/nodejs ${JENKINS_HOME}/workspace/
|
||||
cd ${JENKINS_HOME}/workspace/nodejs
|
||||
node nodejsChecker.js host=localhost
|
||||
'''
|
||||
}
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC#
|
||||
dotnet run
|
||||
'''
|
||||
}
|
||||
sh '''
|
||||
pkill -9 taosd || echo 1
|
||||
cd ${WKC}/tests
|
||||
./test-all.sh b2
|
||||
date
|
||||
'''
|
||||
sh '''
|
||||
cd ${WKC}/tests
|
||||
./test-all.sh full unit
|
||||
date'''
|
||||
}
|
||||
}
|
||||
|
||||
stage('test_valgrind') {
|
||||
agent{label "slam4"}
|
||||
|
||||
steps {
|
||||
pre_test()
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${WKC}/tests/pytest
|
||||
nohup taosd >/dev/null &
|
||||
sleep 10
|
||||
python3 concurrent_inquiry.py -c 1
|
||||
|
||||
'''
|
||||
}
|
||||
sh '''
|
||||
cd ${WKC}/tests
|
||||
./test-all.sh full jdbc
|
||||
date'''
|
||||
sh '''
|
||||
cd ${WKC}/tests/pytest
|
||||
./valgrind-test.sh 2>&1 > mem-error-out.log
|
||||
./handle_val_log.sh
|
||||
|
||||
date
|
||||
cd ${WKC}/tests
|
||||
./test-all.sh b3
|
||||
date'''
|
||||
sh '''
|
||||
date
|
||||
cd ${WKC}/tests
|
||||
./test-all.sh full example
|
||||
date'''
|
||||
}
|
||||
}
|
||||
|
||||
stage('arm64_build'){
|
||||
agent{label 'arm64'}
|
||||
steps{
|
||||
sh '''
|
||||
cd ${WK}
|
||||
git fetch
|
||||
git checkout develop
|
||||
git pull
|
||||
cd ${WKC}
|
||||
git fetch
|
||||
git checkout develop
|
||||
git pull
|
||||
git submodule update
|
||||
cd ${WKC}/packaging
|
||||
./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0
|
||||
|
||||
'''
|
||||
}
|
||||
}
|
||||
stage('arm32_build'){
|
||||
agent{label 'arm32'}
|
||||
steps{
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
|
||||
sh '''
|
||||
cd ${WK}
|
||||
git fetch
|
||||
git checkout develop
|
||||
git pull
|
||||
cd ${WKC}
|
||||
git fetch
|
||||
git checkout develop
|
||||
git pull
|
||||
git submodule update
|
||||
cd ${WKC}/packaging
|
||||
./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0
|
||||
|
||||
'''
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
post {
|
||||
success {
|
||||
emailext (
|
||||
subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
|
||||
body: '''<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
</head>
|
||||
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
|
||||
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
|
||||
<tr>
|
||||
<td><br />
|
||||
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
|
||||
<hr size="2" width="100%" align="center" /></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<ul>
|
||||
<div style="font-size:18px">
|
||||
<li>构建名称>>分支:${PROJECT_NAME}</li>
|
||||
<li>构建结果:<span style="color:green"> Successful </span></li>
|
||||
<li>构建编号:${BUILD_NUMBER}</li>
|
||||
<li>触发用户:${CAUSE}</li>
|
||||
<li>变更概要:${CHANGES}</li>
|
||||
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
|
||||
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
|
||||
<li>变更集:${JELLY_SCRIPT}</li>
|
||||
</div>
|
||||
</ul>
|
||||
</td>
|
||||
</tr>
|
||||
</table></font>
|
||||
</body>
|
||||
</html>''',
|
||||
to: "yqliu@taosdata.com,pxiao@taosdata.com",
|
||||
from: "support@taosdata.com"
|
||||
)
|
||||
}
|
||||
failure {
|
||||
emailext (
|
||||
subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
|
||||
body: '''<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
</head>
|
||||
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
|
||||
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
|
||||
<tr>
|
||||
<td><br />
|
||||
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
|
||||
<hr size="2" width="100%" align="center" /></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<ul>
|
||||
<div style="font-size:18px">
|
||||
<li>构建名称>>分支:${PROJECT_NAME}</li>
|
||||
<li>构建结果:<span style="color:green"> Successful </span></li>
|
||||
<li>构建编号:${BUILD_NUMBER}</li>
|
||||
<li>触发用户:${CAUSE}</li>
|
||||
<li>变更概要:${CHANGES}</li>
|
||||
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
|
||||
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
|
||||
<li>变更集:${JELLY_SCRIPT}</li>
|
||||
</div>
|
||||
</ul>
|
||||
</td>
|
||||
</tr>
|
||||
</table></font>
|
||||
</body>
|
||||
</html>''',
|
||||
to: "yqliu@taosdata.com,pxiao@taosdata.com",
|
||||
from: "support@taosdata.com"
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,200 @@
|
|||
import hudson.model.Result
|
||||
import hudson.model.*;
|
||||
import jenkins.model.CauseOfInterruption
|
||||
node {
|
||||
}
|
||||
|
||||
def skipbuild=0
|
||||
def win_stop=0
|
||||
|
||||
def abortPreviousBuilds() {
|
||||
def currentJobName = env.JOB_NAME
|
||||
def currentBuildNumber = env.BUILD_NUMBER.toInteger()
|
||||
def jobs = Jenkins.instance.getItemByFullName(currentJobName)
|
||||
def builds = jobs.getBuilds()
|
||||
|
||||
for (build in builds) {
|
||||
if (!build.isBuilding()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (currentBuildNumber == build.getNumber().toInteger()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
build.doKill() //doTerm(),doKill(),doTerm()
|
||||
}
|
||||
}
|
||||
// abort previous build
|
||||
abortPreviousBuilds()
|
||||
def abort_previous(){
|
||||
def buildNumber = env.BUILD_NUMBER as int
|
||||
if (buildNumber > 1) milestone(buildNumber - 1)
|
||||
milestone(buildNumber)
|
||||
}
|
||||
def pre_test(){
|
||||
sh'hostname'
|
||||
sh '''
|
||||
sudo rmtaos || echo "taosd has not installed"
|
||||
'''
|
||||
sh '''
|
||||
killall -9 taosd ||echo "no taosd running"
|
||||
killall -9 gdb || echo "no gdb running"
|
||||
killall -9 python3.8 || echo "no python program running"
|
||||
cd ${WKC}
|
||||
'''
|
||||
script {
|
||||
if (env.CHANGE_TARGET == 'master') {
|
||||
sh '''
|
||||
cd ${WKC}
|
||||
git checkout master
|
||||
'''
|
||||
}
|
||||
else if(env.CHANGE_TARGET == '2.0'){
|
||||
sh '''
|
||||
cd ${WKC}
|
||||
git checkout 2.0
|
||||
'''
|
||||
}
|
||||
else if(env.CHANGE_TARGET == '3.0'){
|
||||
sh '''
|
||||
cd ${WKC}
|
||||
git checkout 3.0
|
||||
'''
|
||||
}
|
||||
else{
|
||||
sh '''
|
||||
cd ${WKC}
|
||||
git checkout develop
|
||||
'''
|
||||
}
|
||||
}
|
||||
sh'''
|
||||
cd ${WKC}
|
||||
git pull >/dev/null
|
||||
git fetch origin +refs/pull/${CHANGE_ID}/merge
|
||||
git checkout -qf FETCH_HEAD
|
||||
export TZ=Asia/Harbin
|
||||
date
|
||||
rm -rf debug
|
||||
mkdir debug
|
||||
cd debug
|
||||
cmake .. > /dev/null
|
||||
make -j4> /dev/null
|
||||
|
||||
'''
|
||||
return 1
|
||||
}
|
||||
|
||||
pipeline {
|
||||
agent none
|
||||
options { skipDefaultCheckout() }
|
||||
environment{
|
||||
WK = '/var/lib/jenkins/workspace/TDinternal'
|
||||
WKC= '/var/lib/jenkins/workspace/TDengine'
|
||||
}
|
||||
stages {
|
||||
stage('pre_build'){
|
||||
agent{label 'slave3_0'}
|
||||
options { skipDefaultCheckout() }
|
||||
when {
|
||||
changeRequest()
|
||||
}
|
||||
steps {
|
||||
script{
|
||||
abort_previous()
|
||||
abortPreviousBuilds()
|
||||
}
|
||||
timeout(time: 45, unit: 'MINUTES'){
|
||||
pre_test()
|
||||
sh'''
|
||||
cd ${WKC}/tests
|
||||
./test-all.sh b1fq
|
||||
'''
|
||||
sh'''
|
||||
cd ${WKC}/debug
|
||||
ctest
|
||||
'''
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
post {
|
||||
success {
|
||||
emailext (
|
||||
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' SUCCESS",
|
||||
body: """<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
</head>
|
||||
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
|
||||
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
|
||||
<tr>
|
||||
<td><br />
|
||||
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
|
||||
<hr size="2" width="100%" align="center" /></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<ul>
|
||||
<div style="font-size:18px">
|
||||
<li>构建名称>>分支:${env.BRANCH_NAME}</li>
|
||||
<li>构建结果:<span style="color:green"> Successful </span></li>
|
||||
<li>构建编号:${BUILD_NUMBER}</li>
|
||||
<li>触发用户:${env.CHANGE_AUTHOR}</li>
|
||||
<li>提交信息:${env.CHANGE_TITLE}</li>
|
||||
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
|
||||
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
|
||||
|
||||
</div>
|
||||
</ul>
|
||||
</td>
|
||||
</tr>
|
||||
</table></font>
|
||||
</body>
|
||||
</html>""",
|
||||
to: "${env.CHANGE_AUTHOR_EMAIL}",
|
||||
from: "support@taosdata.com"
|
||||
)
|
||||
}
|
||||
failure {
|
||||
emailext (
|
||||
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' FAIL",
|
||||
body: """<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
</head>
|
||||
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
|
||||
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
|
||||
<tr>
|
||||
<td><br />
|
||||
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
|
||||
<hr size="2" width="100%" align="center" /></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<ul>
|
||||
<div style="font-size:18px">
|
||||
<li>构建名称>>分支:${env.BRANCH_NAME}</li>
|
||||
<li>构建结果:<span style="color:red"> Failure </span></li>
|
||||
<li>构建编号:${BUILD_NUMBER}</li>
|
||||
<li>触发用户:${env.CHANGE_AUTHOR}</li>
|
||||
<li>提交信息:${env.CHANGE_TITLE}</li>
|
||||
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
|
||||
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
|
||||
|
||||
</div>
|
||||
</ul>
|
||||
</td>
|
||||
</tr>
|
||||
</table></font>
|
||||
</body>
|
||||
</html>""",
|
||||
to: "${env.CHANGE_AUTHOR_EMAIL}",
|
||||
from: "support@taosdata.com"
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,176 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import os
|
||||
import subprocess
|
||||
import time
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.dnodes import *
|
||||
import datetime
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root)-len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
buildPath = self.getBuildPath()
|
||||
if (buildPath == ""):
|
||||
tdLog.exit("taosd not found!")
|
||||
else:
|
||||
tdLog.info("taosd found in %s" % buildPath)
|
||||
binPath = buildPath+ "/build/bin/"
|
||||
|
||||
tdSql.execute("create database timezone")
|
||||
tdSql.execute("use timezone")
|
||||
tdSql.execute("create stable st (ts timestamp, id int ) tags (index int)")
|
||||
|
||||
tdSql.execute("insert into tb0 using st tags (1) values ('2021-07-01 00:00:00.000',0)")
|
||||
tdSql.query("select ts from tb0")
|
||||
tdSql.checkData(0, 0, "2021-07-01 00:00:00.000")
|
||||
|
||||
tdSql.execute("insert into tb1 using st tags (1) values ('2021-07-01T00:00:00.000+07:50',1)")
|
||||
tdSql.query("select ts from tb1")
|
||||
tdSql.checkData(0, 0, "2021-07-01 00:10:00.000")
|
||||
|
||||
tdSql.execute("insert into tb2 using st tags (1) values ('2021-07-01T00:00:00.000+08:00',2)")
|
||||
tdSql.query("select ts from tb2")
|
||||
tdSql.checkData(0, 0, "2021-07-01 00:00:00.000")
|
||||
|
||||
tdSql.execute("insert into tb3 using st tags (1) values ('2021-07-01T00:00:00.000Z',3)")
|
||||
tdSql.query("select ts from tb3")
|
||||
tdSql.checkData(0, 0, "2021-07-01 08:00:00.000")
|
||||
|
||||
tdSql.execute("insert into tb4 using st tags (1) values ('2021-07-01 00:00:00.000+07:50',4)")
|
||||
tdSql.query("select ts from tb4")
|
||||
tdSql.checkData(0, 0, "2021-07-01 00:10:00.000")
|
||||
|
||||
tdSql.execute("insert into tb5 using st tags (1) values ('2021-07-01 00:00:00.000Z',5)")
|
||||
tdSql.query("select ts from tb5")
|
||||
tdSql.checkData(0, 0, "2021-07-01 08:00:00.000")
|
||||
|
||||
tdSql.execute("insert into tb6 using st tags (1) values ('2021-07-01T00:00:00.000+0800',6)")
|
||||
tdSql.query("select ts from tb6")
|
||||
tdSql.checkData(0, 0, "2021-07-01 00:00:00.000")
|
||||
|
||||
tdSql.execute("insert into tb7 using st tags (1) values ('2021-07-01 00:00:00.000+0800',7)")
|
||||
tdSql.query("select ts from tb7")
|
||||
tdSql.checkData(0, 0, "2021-07-01 00:00:00.000")
|
||||
|
||||
tdSql.execute("insert into tb8 using st tags (1) values ('2021-07-0100:00:00.000',8)")
|
||||
tdSql.query("select ts from tb8")
|
||||
tdSql.checkData(0, 0, "2021-07-01 00:00:00.000")
|
||||
|
||||
tdSql.execute("insert into tb9 using st tags (1) values ('2021-07-0100:00:00.000+0800',9)")
|
||||
tdSql.query("select ts from tb9")
|
||||
tdSql.checkData(0, 0, "2021-07-01 00:00:00.000")
|
||||
|
||||
tdSql.execute("insert into tb10 using st tags (1) values ('2021-07-0100:00:00.000+08:00',10)")
|
||||
tdSql.query("select ts from tb10")
|
||||
tdSql.checkData(0, 0, "2021-07-01 00:00:00.000")
|
||||
|
||||
tdSql.execute("insert into tb11 using st tags (1) values ('2021-07-0100:00:00.000+07:00',11)")
|
||||
tdSql.query("select ts from tb11")
|
||||
tdSql.checkData(0, 0, "2021-07-01 01:00:00.000")
|
||||
|
||||
tdSql.execute("insert into tb12 using st tags (1) values ('2021-07-0100:00:00.000+0700',12)")
|
||||
tdSql.query("select ts from tb12")
|
||||
tdSql.checkData(0, 0, "2021-07-01 01:00:00.000")
|
||||
|
||||
tdSql.execute("insert into tb13 using st tags (1) values ('2021-07-0100:00:00.000+07:12',13)")
|
||||
tdSql.query("select ts from tb13")
|
||||
tdSql.checkData(0, 0, "2021-07-01 00:48:00.000")
|
||||
|
||||
tdSql.execute("insert into tb14 using st tags (1) values ('2021-07-0100:00:00.000+712',14)")
|
||||
tdSql.query("select ts from tb14")
|
||||
tdSql.checkData(0, 0, "2021-06-28 08:58:00.000")
|
||||
|
||||
tdSql.execute("insert into tb15 using st tags (1) values ('2021-07-0100:00:00.000Z',15)")
|
||||
tdSql.query("select ts from tb15")
|
||||
tdSql.checkData(0, 0, "2021-07-01 08:00:00.000")
|
||||
|
||||
tdSql.execute("insert into tb16 using st tags (1) values ('2021-7-1 00:00:00.000Z',16)")
|
||||
tdSql.query("select ts from tb16")
|
||||
tdSql.checkData(0, 0, "2021-07-01 08:00:00.000")
|
||||
|
||||
tdSql.execute("insert into tb17 using st tags (1) values ('2021-07-0100:00:00.000+0750',17)")
|
||||
tdSql.query("select ts from tb17")
|
||||
tdSql.checkData(0, 0, "2021-07-01 00:10:00.000")
|
||||
|
||||
tdSql.execute("insert into tb18 using st tags (1) values ('2021-07-0100:00:00.000+0752',18)")
|
||||
tdSql.query("select ts from tb18")
|
||||
tdSql.checkData(0, 0, "2021-07-01 00:08:00.000")
|
||||
|
||||
tdSql.execute("insert into tb19 using st tags (1) values ('2021-07-0100:00:00.000+075',19)")
|
||||
tdSql.query("select ts from tb19")
|
||||
tdSql.checkData(0, 0, "2021-07-01 00:55:00.000")
|
||||
|
||||
tdSql.execute("insert into tb20 using st tags (1) values ('2021-07-0100:00:00.000+75',20)")
|
||||
tdSql.query("select ts from tb20")
|
||||
tdSql.checkData(0, 0, "2021-06-28 05:00:00.000")
|
||||
|
||||
tdSql.execute("insert into tb21 using st tags (1) values ('2021-7-1 1:1:1.234+075',21)")
|
||||
tdSql.query("select ts from tb21")
|
||||
tdSql.checkData(0, 0, "2021-07-01 01:56:01.234")
|
||||
|
||||
tdSql.execute("insert into tb22 using st tags (1) values ('2021-7-1T1:1:1.234+075',22)")
|
||||
tdSql.query("select ts from tb22")
|
||||
tdSql.checkData(0, 0, "2021-07-01 01:56:01.234")
|
||||
|
||||
tdSql.execute("insert into tb23 using st tags (1) values ('2021-7-131:1:1.234+075',22)")
|
||||
tdSql.query("select ts from tb23")
|
||||
tdSql.checkData(0, 0, "2021-07-13 01:56:01.234")
|
||||
|
||||
|
||||
tdSql.error("insert into tberror using st tags (1) values ('20210701 00:00:00.000+0800',0)")
|
||||
tdSql.error("insert into tberror using st tags (1) values ('2021070100:00:00.000+0800',0)")
|
||||
tdSql.error("insert into tberror using st tags (1) values ('202171 00:00:00.000+0800',0)")
|
||||
tdSql.error("insert into tberror using st tags (1) values ('2021 07 01 00:00:00.000+0800',0)")
|
||||
tdSql.error("insert into tberror using st tags (1) values ('2021 -07-0100:00:00.000+0800',0)")
|
||||
tdSql.error("insert into tberror using st tags (1) values ('2021-7-11:1:1.234+075',0)")
|
||||
|
||||
os.system("rm -rf ./TimeZone/*.py.sql")
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,174 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import os
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.dnodes import *
|
||||
import datetime
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def checkCommunity(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
if ("community" in selfPath):
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosdump" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
|
||||
|
||||
def run(self):
|
||||
|
||||
# clear envs
|
||||
|
||||
tdSql.execute(" create database ZoneTime precision 'us' ")
|
||||
tdSql.execute(" use ZoneTime ")
|
||||
tdSql.execute(" create stable st (ts timestamp , id int , val float) tags (tag1 timestamp ,tag2 int) ")
|
||||
|
||||
# standard case for Timestamp
|
||||
|
||||
tdSql.execute(" insert into tb1 using st tags (\"2021-07-01 00:00:00.000\" , 2) values( \"2021-07-01 00:00:00.000\" , 1 , 1.0 ) ")
|
||||
case1 = (tdSql.getResult("select * from tb1"))
|
||||
print(case1)
|
||||
if case1 == [(datetime.datetime(2021, 7, 1, 0, 0), 1, 1.0)]:
|
||||
print ("check pass! ")
|
||||
else:
|
||||
print ("check failed about timestamp '2021-07-01 00:00:00.000' ")
|
||||
|
||||
# RFC 3339: the "T" separator may be replaced by a space
|
||||
|
||||
tdSql.execute(" insert into tb2 using st tags (\"2021-07-01T00:00:00.000+07:50\" , 2) values( \"2021-07-01T00:00:00.000+07:50\" , 2 , 2.0 ) ")
|
||||
case2 = (tdSql.getResult("select * from tb2"))
|
||||
print(case2)
|
||||
if case2 == [(datetime.datetime(2021, 7, 1, 0, 10), 2, 2.0)]:
|
||||
print ("check pass! ")
|
||||
else:
|
||||
print ("check failed about timestamp '2021-07-01T00:00:00.000+07:50'! ")
|
||||
|
||||
tdSql.execute(" insert into tb3 using st tags (\"2021-07-01T00:00:00.000+08:00\" , 3) values( \"2021-07-01T00:00:00.000+08:00\" , 3 , 3.0 ) ")
|
||||
case3 = (tdSql.getResult("select * from tb3"))
|
||||
print(case3)
|
||||
if case3 == [(datetime.datetime(2021, 7, 1, 0, 0), 3, 3.0)]:
|
||||
print ("check pass! ")
|
||||
else:
|
||||
print ("check failed about timestamp '2021-07-01T00:00:00.000+08:00'! ")
|
||||
|
||||
tdSql.execute(" insert into tb4 using st tags (\"2021-07-01T00:00:00.000Z\" , 4) values( \"2021-07-01T00:00:00.000Z\" , 4 , 4.0 ) ")
|
||||
case4 = (tdSql.getResult("select * from tb4"))
|
||||
print(case4)
|
||||
if case4 == [(datetime.datetime(2021, 7, 1, 8, 0), 4, 4.0)]:
|
||||
print ("check pass! ")
|
||||
else:
|
||||
print ("check failed about timestamp '2021-07-01T00:00:00.000Z'! ")
|
||||
|
||||
tdSql.execute(" insert into tb5 using st tags (\"2021-07-01 00:00:00.000+07:50\" , 5) values( \"2021-07-01 00:00:00.000+07:50\" , 5 , 5.0 ) ")
|
||||
case5 = (tdSql.getResult("select * from tb5"))
|
||||
print(case5)
|
||||
if case5 == [(datetime.datetime(2021, 7, 1, 0, 10), 5, 5.0)]:
|
||||
print ("check pass! ")
|
||||
else:
|
||||
print ("check failed about timestamp '2021-07-01 00:00:00.000+08:00 ")
|
||||
|
||||
tdSql.execute(" insert into tb6 using st tags (\"2021-07-01 00:00:00.000Z\" , 6) values( \"2021-07-01 00:00:00.000Z\" , 6 , 6.0 ) ")
|
||||
case6 = (tdSql.getResult("select * from tb6"))
|
||||
print(case6)
|
||||
if case6 == [(datetime.datetime(2021, 7, 1, 8, 0), 6, 6.0)]:
|
||||
print ("check pass! ")
|
||||
else:
|
||||
print ("check failed about timestamp '2021-07-01 00:00:00.000Z'! ")
|
||||
|
||||
# ISO 8601 timestamp format: the date and time parts must be separated by "T"
|
||||
|
||||
tdSql.execute(" insert into tb7 using st tags (\"2021-07-01T00:00:00.000+0800\" , 7) values( \"2021-07-01T00:00:00.000+0800\" , 7 , 7.0 ) ")
|
||||
case7 = (tdSql.getResult("select * from tb7"))
|
||||
print(case7)
|
||||
if case7 == [(datetime.datetime(2021, 7, 1, 0, 0), 7, 7.0)]:
|
||||
print ("check pass! ")
|
||||
else:
|
||||
print ("check failed about timestamp '2021-07-01T00:00:00.000+0800'! ")
|
||||
|
||||
tdSql.execute(" insert into tb8 using st tags (\"2021-07-01T00:00:00.000+08\" , 8) values( \"2021-07-01T00:00:00.000+08\" , 8 , 8.0 ) ")
|
||||
case8 = (tdSql.getResult("select * from tb8"))
|
||||
print(case8)
|
||||
if case8 == [(datetime.datetime(2021, 7, 1, 0, 0), 8, 8.0)]:
|
||||
print ("check pass! ")
|
||||
else:
|
||||
print ("check failed about timestamp '2021-07-01T00:00:00.000+08'! ")
|
||||
|
||||
# Non-standard case for Timestamp
|
||||
|
||||
tdSql.execute(" insert into tb9 using st tags (\"2021-07-01 00:00:00.000+0800\" , 9) values( \"2021-07-01 00:00:00.000+0800\" , 9 , 9.0 ) ")
|
||||
case9 = (tdSql.getResult("select * from tb9"))
|
||||
print(case9)
|
||||
|
||||
tdSql.execute(" insert into tb10 using st tags (\"2021-07-0100:00:00.000\" , 10) values( \"2021-07-0100:00:00.000\" , 10 , 10.0 ) ")
|
||||
case10 = (tdSql.getResult("select * from tb10"))
|
||||
print(case10)
|
||||
|
||||
tdSql.execute(" insert into tb11 using st tags (\"2021-07-0100:00:00.000+0800\" , 11) values( \"2021-07-0100:00:00.000+0800\" , 11 , 11.0 ) ")
|
||||
case11 = (tdSql.getResult("select * from tb11"))
|
||||
print(case11)
|
||||
|
||||
tdSql.execute(" insert into tb12 using st tags (\"2021-07-0100:00:00.000+08:00\" , 12) values( \"2021-07-0100:00:00.000+08:00\" , 12 , 12.0 ) ")
|
||||
case12 = (tdSql.getResult("select * from tb12"))
|
||||
print(case12)
|
||||
|
||||
tdSql.execute(" insert into tb13 using st tags (\"2021-07-0100:00:00.000Z\" , 13) values( \"2021-07-0100:00:00.000Z\" , 13 , 13.0 ) ")
|
||||
case13 = (tdSql.getResult("select * from tb13"))
|
||||
print(case13)
|
||||
|
||||
tdSql.execute(" insert into tb14 using st tags (\"2021-07-0100:00:00.000Z\" , 14) values( \"2021-07-0100:00:00.000Z\" , 14 , 14.0 ) ")
|
||||
case14 = (tdSql.getResult("select * from tb14"))
|
||||
print(case14)
|
||||
|
||||
tdSql.execute(" insert into tb15 using st tags (\"2021-07-0100:00:00.000+08\" , 15) values( \"2021-07-0100:00:00.000+08\" , 15 , 15.0 ) ")
|
||||
case15 = (tdSql.getResult("select * from tb15"))
|
||||
print(case15)
|
||||
|
||||
tdSql.execute(" insert into tb16 using st tags (\"2021-07-0100:00:00.000+07:50\" , 16) values( \"2021-07-0100:00:00.000+07:50\" , 16 , 16.0 ) ")
|
||||
case16 = (tdSql.getResult("select * from tb16"))
|
||||
print(case16)
|
||||
|
||||
os.system("rm -rf *.py.sql")
|
||||
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,53 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import tdLog
|
||||
from util.cases import tdCases
|
||||
from util.sql import tdSql
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def run(self):
|
||||
tdSql.query("show users")
|
||||
rows = tdSql.queryRows
|
||||
|
||||
tdSql.execute("create user test PASS 'test' ")
|
||||
tdSql.query("show users")
|
||||
tdSql.checkRows(rows + 1)
|
||||
|
||||
tdSql.error("create user tdenginetdenginetdengine PASS 'test' ")
|
||||
|
||||
tdSql.error("create user tdenginet PASS '1234512345123456' ")
|
||||
|
||||
try:
|
||||
tdSql.execute("create account a&cc PASS 'pass123'")
|
||||
except Exception as e:
|
||||
print("create account a&cc PASS 'pass123'")
|
||||
return
|
||||
|
||||
tdLog.exit("drop built-in user is error.")
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,52 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import tdLog
|
||||
from util.cases import tdCases
|
||||
from util.sql import tdSql
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def run(self):
|
||||
print("==========step1")
|
||||
print("drop built-in account")
|
||||
try:
|
||||
tdSql.execute("drop account root")
|
||||
except Exception as e:
|
||||
if len(e.args) > 0 and 'no rights' != e.args[0]:
|
||||
tdLog.exit(e)
|
||||
|
||||
print("==========step2")
|
||||
print("drop built-in user")
|
||||
try:
|
||||
tdSql.execute("drop user root")
|
||||
except Exception as e:
|
||||
if len(e.args) > 0 and 'no rights' != e.args[0]:
|
||||
tdLog.exit(e)
|
||||
return
|
||||
|
||||
tdLog.exit("drop built-in user is error.")
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,67 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import random
|
||||
import string
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.dnodes import *
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def genColList(self):
|
||||
'''
|
||||
generate column list
|
||||
'''
|
||||
col_list = list()
|
||||
for i in range(1, 18):
|
||||
col_list.append(f'c{i}')
|
||||
return col_list
|
||||
|
||||
def genIncreaseValue(self, input_value):
|
||||
'''
|
||||
append ', 1' inside the closing parenthesis of the value string on every loop
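e.g. '(now, 1)' -> '(now, 1, 1)' -> '(now, 1, 1, 1)'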
|
||||
'''
|
||||
value_list = list(input_value)
|
||||
value_list.insert(-1, ", 1")
|
||||
return ''.join(value_list)
|
||||
|
||||
def insertAlter(self):
|
||||
'''
|
||||
after each alter-and-insert round, executing 'select * from {tbname};' would make taosd core dump
|
||||
'''
|
||||
tbname = ''.join(random.choice(string.ascii_letters.lower()) for i in range(7))
|
||||
input_value = '(now, 1)'
|
||||
tdSql.execute(f'create table {tbname} (ts timestamp, c0 int);')
|
||||
tdSql.execute(f'insert into {tbname} values {input_value};')
|
||||
for col in self.genColList():
|
||||
input_value = self.genIncreaseValue(input_value)
|
||||
tdSql.execute(f'alter table {tbname} add column {col} int;')
|
||||
tdSql.execute(f'insert into {tbname} values {input_value};')
|
||||
tdSql.query(f'select * from {tbname};')
|
||||
tdSql.checkRows(18)
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
self.insertAlter()
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,85 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.dnodes import *
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug(f"start to execute {__file__}")
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def run(self):
|
||||
tdSql.execute("drop database if exists db")
|
||||
tdSql.execute("create database if not exists db keep 36500")
|
||||
tdSql.execute("use db")
|
||||
|
||||
tdLog.printNoPrefix("==========step1:create table && insert data")
|
||||
tdSql.execute("create table stbtag (ts timestamp, c1 int) TAGS(t1 int)")
|
||||
tdSql.execute("create table tag1 using stbtag tags(1)")
|
||||
|
||||
tdLog.printNoPrefix("==========step2:alter stb add tag create new chiltable")
|
||||
tdSql.execute("alter table stbtag add tag t2 int")
|
||||
tdSql.execute("alter table stbtag add tag t3 tinyint")
|
||||
tdSql.execute("alter table stbtag add tag t4 smallint ")
|
||||
tdSql.execute("alter table stbtag add tag t5 bigint")
|
||||
tdSql.execute("alter table stbtag add tag t6 float ")
|
||||
tdSql.execute("alter table stbtag add tag t7 double ")
|
||||
tdSql.execute("alter table stbtag add tag t8 bool ")
|
||||
tdSql.execute("alter table stbtag add tag t9 binary(10) ")
|
||||
tdSql.execute("alter table stbtag add tag t10 nchar(10)")
|
||||
|
||||
tdSql.execute("create table tag2 using stbtag tags(2, 22, 23, 24, 25, 26.1, 27.1, 1, 'binary9', 'nchar10')")
|
||||
tdSql.query( "select tbname, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10 from stbtag" )
|
||||
tdSql.checkData(1, 0, "tag2")
|
||||
tdSql.checkData(1, 1, 2)
|
||||
tdSql.checkData(1, 2, 22)
|
||||
tdSql.checkData(1, 3, 23)
|
||||
tdSql.checkData(1, 4, 24)
|
||||
tdSql.checkData(1, 5, 25)
|
||||
tdSql.checkData(1, 6, 26.1)
|
||||
tdSql.checkData(1, 7, 27.1)
|
||||
tdSql.checkData(1, 8, 1)
|
||||
tdSql.checkData(1, 9, "binary9")
|
||||
tdSql.checkData(1, 10, "nchar10")
|
||||
|
||||
tdLog.printNoPrefix("==========step3:alter stb drop tag create new chiltable")
|
||||
tdSql.execute("alter table stbtag drop tag t2 ")
|
||||
tdSql.execute("alter table stbtag drop tag t3 ")
|
||||
tdSql.execute("alter table stbtag drop tag t4 ")
|
||||
tdSql.execute("alter table stbtag drop tag t5 ")
|
||||
tdSql.execute("alter table stbtag drop tag t6 ")
|
||||
tdSql.execute("alter table stbtag drop tag t7 ")
|
||||
tdSql.execute("alter table stbtag drop tag t8 ")
|
||||
tdSql.execute("alter table stbtag drop tag t9 ")
|
||||
tdSql.execute("alter table stbtag drop tag t10 ")
|
||||
|
||||
tdSql.execute("create table tag3 using stbtag tags(3)")
|
||||
tdSql.query("select * from stbtag where tbname like 'tag3' ")
|
||||
tdSql.checkCols(3)
|
||||
tdSql.query("select tbname, t1 from stbtag where tbname like 'tag3' ")
|
||||
tdSql.checkData(0, 1, 3)
|
||||
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,73 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.dnodes import *
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug(f"start to execute {__file__}")
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def run(self):
|
||||
tdSql.execute("drop database if exists db")
|
||||
tdSql.execute("create database if not exists db keep 36500")
|
||||
tdSql.execute("use db")
|
||||
|
||||
tdLog.printNoPrefix("==========step1:create table && insert data")
|
||||
# timestamp list:
|
||||
# 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00"
|
||||
# -631180800000 -> "1950-01-01 00:00:00"
|
||||
ts1 = 0
|
||||
ts2 = -28800000
|
||||
ts3 = -946800000000
|
||||
ts4 = "1950-01-01 00:00:00"
|
||||
tdSql.execute(
|
||||
"create table stb2ts (ts timestamp, ts1 timestamp, ts2 timestamp, c1 int, ts3 timestamp) TAGS(t1 int)"
|
||||
)
|
||||
tdSql.execute("create table t2ts1 using stb2ts tags(1)")
|
||||
|
||||
tdSql.execute(f"insert into t2ts1 values ({ts1}, {ts1}, {ts1}, 1, {ts1})")
|
||||
tdSql.execute(f"insert into t2ts1 values ({ts2}, {ts2}, {ts2}, 2, {ts2})")
|
||||
tdSql.execute(f"insert into t2ts1 values ({ts3}, {ts3}, {ts3}, 4, {ts3})")
|
||||
tdSql.execute(f"insert into t2ts1 values ('{ts4}', '{ts4}', '{ts4}', 3, '{ts4}')")
|
||||
|
||||
tdLog.printNoPrefix("==========step2:check inserted data")
|
||||
tdSql.query("select * from stb2ts where ts1=0 and ts2='1970-01-01 08:00:00' ")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 4,'1970-01-01 08:00:00')
|
||||
|
||||
tdSql.query("select * from stb2ts where ts1=-28800000 and ts2='1970-01-01 00:00:00' ")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 4, '1970-01-01 00:00:00')
|
||||
|
||||
tdSql.query("select * from stb2ts where ts1=-946800000000 and ts2='1940-01-01 00:00:00' ")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 4, '1940-01-01 00:00:00')
|
||||
|
||||
tdSql.query("select * from stb2ts where ts1=-631180800000 and ts2='1950-01-01 00:00:00' ")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 4, '1950-01-01 00:00:00')
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,109 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.dnodes import tdDnodes
|
||||
from datetime import datetime
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
tdSql.query('show databases')
|
||||
tdSql.checkData(0,15,0)
|
||||
buildPath = self.getBuildPath()
|
||||
if (buildPath == ""):
|
||||
tdLog.exit("taosd not found!")
|
||||
else:
|
||||
tdLog.info("taosd found in %s" % buildPath)
|
||||
binPath = buildPath + "/build/bin/"
|
||||
|
||||
#write 5M rows into db, then restart taosd to force the data to be flushed to disk.
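#assumption: with cachelast=1 TDengine keeps each table's last row cached in memory,
#so the LAST_ROW(*) loop below should normally run faster than with cachelast=0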
|
||||
#create 500 tables
|
||||
os.system("%staosdemo -f tools/taosdemoAllTest/insert_5M_rows.json -y " % binPath)
|
||||
tdDnodes.stop(1)
|
||||
tdDnodes.start(1)
|
||||
tdSql.execute('use db')
|
||||
|
||||
#prepare to query 500 tables last_row()
|
||||
tableName = []
|
||||
for i in range(500):
|
||||
tableName.append(f"stb_{i}")
|
||||
tdSql.execute('use db')
|
||||
lastRow_Off_start = datetime.now()
|
||||
|
||||
slow = 0 #count the rounds where last_row() with cachelast on is slower
|
||||
for i in range(5):
|
||||
#switch lastRow to off and check
|
||||
tdSql.execute('alter database db cachelast 0')
|
||||
tdSql.query('show databases')
|
||||
tdSql.checkData(0,15,0)
|
||||
|
||||
#run last_row(*) query 500 times
|
||||
for i in range(500):
|
||||
tdSql.execute(f'SELECT LAST_ROW(*) FROM {tableName[i]}')
|
||||
lastRow_Off_end = datetime.now()
|
||||
|
||||
tdLog.debug(f'time used:{lastRow_Off_end-lastRow_Off_start}')
|
||||
|
||||
#switch lastRow to on and check
|
||||
tdSql.execute('alter database db cachelast 1')
|
||||
tdSql.query('show databases')
|
||||
tdSql.checkData(0,15,1)
|
||||
|
||||
#run last_row(*) query 500 times
|
||||
tdSql.execute('use db')
|
||||
lastRow_On_start = datetime.now()
|
||||
for i in range(500):
|
||||
tdSql.execute(f'SELECT LAST_ROW(*) FROM {tableName[i]}')
|
||||
lastRow_On_end = datetime.now()
|
||||
|
||||
tdLog.debug(f'time used:{lastRow_On_end-lastRow_On_start}')
|
||||
|
||||
#check which one used more time
|
||||
if (lastRow_Off_end-lastRow_Off_start > lastRow_On_end-lastRow_On_start):
|
||||
pass
|
||||
else:
|
||||
slow += 1
|
||||
tdLog.debug(slow)
|
||||
if slow > 1: #tolerate one slow round, since the first run may hit a cold cache
|
||||
tdLog.exit('lastRow hot alter failed')
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,91 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
#TODO: after TD-4518 and TD-4510 are resolved, add the exception test cases for these situations
|
||||
|
||||
import sys
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
#checking string input exception for alter
|
||||
tdSql.error("alter database db keep '10'")
|
||||
tdSql.error('alter database db keep "10"')
|
||||
tdSql.error("alter database db keep '\t'")
|
||||
tdSql.error("alter database db keep \'\t\'")
|
||||
tdSql.error('alter database db keep "a"')
|
||||
tdSql.error('alter database db keep "1.4"')
|
||||
tdSql.error("alter database db blocks '10'")
|
||||
tdSql.error('alter database db comp "0"')
|
||||
tdSql.execute('drop database if exists db')
|
||||
|
||||
#checking string input exception for create
|
||||
tdSql.error("create database db comp '0'")
|
||||
tdSql.error('create database db comp "1"')
|
||||
tdSql.error("create database db comp '\t'")
|
||||
tdSql.error("alter database db keep \'\t\'")
|
||||
tdSql.error('create database db comp "a"')
|
||||
tdSql.error('create database db comp "1.4"')
|
||||
tdSql.error("create database db blocks '10'")
|
||||
tdSql.error('create database db keep "3650"')
|
||||
tdSql.error('create database db fsync "3650"')
|
||||
tdSql.execute('create database db precision "us"')
|
||||
tdSql.query('show databases')
|
||||
tdSql.checkData(0,16,'us')
|
||||
tdSql.execute('drop database if exists db')
|
||||
|
||||
#checking float input exception for create
|
||||
tdSql.error("create database db fsync 7.3")
|
||||
tdSql.error("create database db fsync 0.0")
|
||||
tdSql.error("create database db fsync -5.32")
|
||||
tdSql.error('create database db comp 7.2')
|
||||
tdSql.error("create database db blocks 5.87")
|
||||
tdSql.error('create database db keep 15.4')
|
||||
|
||||
#checking float input exception for insert
|
||||
tdSql.execute('create database db')
|
||||
tdSql.error('alter database db blocks 5.9')
|
||||
tdSql.error('alter database db blocks -4.7')
|
||||
tdSql.error('alter database db blocks 0.0')
|
||||
tdSql.error('alter database db keep 15.4')
|
||||
tdSql.error('alter database db comp 2.67')
|
||||
|
||||
#checking additional exception param for alter keep
|
||||
tdSql.error('alter database db keep 365001')
|
||||
tdSql.error('alter database db keep 364999,365000,365001')
|
||||
tdSql.error('alter database db keep -10')
|
||||
tdSql.error('alter database db keep 5')
|
||||
tdSql.error('alter database db keep ')
|
||||
tdSql.error('alter database db keep 40,a,60')
|
||||
tdSql.error('alter database db keep ,,60,')
|
||||
tdSql.error('alter database db keep \t')
|
||||
tdSql.execute('alter database db keep \t50')
|
||||
tdSql.query('show databases')
|
||||
tdSql.checkData(0,7,'50,50,50')
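# note: a single 'keep 50' appears to be expanded into the three retention stages
# (keep0,keep1,keep2), which is why `show databases` reports '50,50,50'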
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,54 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import random
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.dnodes import tdDnodes
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
flagList=["debugflag", "cdebugflag", "tmrDebugFlag", "uDebugFlag", "rpcDebugFlag"]
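# 131 / 135 / 143 are the accepted debug-flag levels (roughly: error+warning, debug,
# and trace output); other random values are expected to be rejected below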
|
||||
|
||||
for flag in flagList:
|
||||
tdSql.execute("alter local %s 131" % flag)
|
||||
tdSql.execute("alter local %s 135" % flag)
|
||||
tdSql.execute("alter local %s 143" % flag)
|
||||
randomFlag = random.randint(100, 250)
|
||||
if randomFlag != 131 and randomFlag != 135 and randomFlag != 143:
|
||||
tdSql.error("alter local %s %d" % (flag, randomFlag))
|
||||
|
||||
tdSql.query("show dnodes")
|
||||
dnodeId = tdSql.getData(0, 0)
|
||||
|
||||
for flag in flagList:
|
||||
tdSql.execute("alter dnode %d %s 131" % (dnodeId, flag))
|
||||
tdSql.execute("alter dnode %d %s 135" % (dnodeId, flag))
|
||||
tdSql.execute("alter dnode %d %s 143" % (dnodeId, flag))
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|