other: merge 3.0

commit ee470a9427
@@ -121,7 +121,7 @@ def pre_test_win(){
        set
        date /t
        time /t
        rd /s /Q C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug
        rd /s /Q C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug || exit 0
        '''
        bat '''
        cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
@@ -46,11 +46,17 @@ ENDIF ()

IF (TD_WINDOWS)
    MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
    SET(COMMON_FLAGS "/W3 /D_WIN32")
    SET(COMMON_FLAGS "/w /D_WIN32")
    SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
    # IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
    #   SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
    # ENDIF ()
    IF (CMAKE_DEPFILE_FLAGS_C)
        SET(CMAKE_DEPFILE_FLAGS_C "")
    ENDIF ()
    IF (CMAKE_DEPFILE_FLAGS_CXX)
        SET(CMAKE_DEPFILE_FLAGS_CXX "")
    ENDIF ()

    SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMMON_FLAGS}")
    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}")
@@ -46,6 +46,18 @@ IF(${TD_WINDOWS})
        ON
    )

    option(
        BUILD_TEST
        "If build unit tests using googletest"
        OFF
    )
ELSE ()

    option(
        BUILD_TEST
        "If build unit tests using googletest"
        ON
    )
ENDIF ()

option(
@@ -54,12 +66,6 @@ option(
    OFF
)

option(
    BUILD_TEST
    "If build unit tests using googletest"
    ON
)

option(
    BUILD_WITH_LEVELDB
    "If build with leveldb"
@@ -1648,6 +1648,15 @@ typedef struct {
int32_t tSerializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
int32_t tDeserializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);

typedef struct {
  char   topic[TSDB_TOPIC_FNAME_LEN];
  char   cgroup[TSDB_CGROUP_LEN];
  int8_t igNotExists;
} SMDropCgroupReq;

int32_t tSerializeSMDropCgroupReq(void* buf, int32_t bufLen, SMDropCgroupReq* pReq);
int32_t tDeserializeSMDropCgroupReq(void* buf, int32_t bufLen, SMDropCgroupReq* pReq);

typedef struct {
  char   name[TSDB_TABLE_FNAME_LEN];
  int8_t alterType;
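The new SMDropCgroupReq pairs a topic name with a consumer group and an ignore-not-exists flag. A minimal sketch of how a caller might fill and serialize it follows; the struct fields and the two serializer prototypes come from this hunk, while the header name, the NULL-buffer sizing convention and taosMemoryMalloc are assumptions added for illustration.

#include <string.h>
#include "tmsg.h"   // assumed location of SMDropCgroupReq and its (de)serializers

// Hedged sketch: build a drop-consumer-group request and serialize it.
static int32_t buildDropCgroupMsg(const char* topic, const char* cgroup,
                                  void** ppBuf, int32_t* pLen) {
  SMDropCgroupReq req = {0};
  strncpy(req.topic, topic, sizeof(req.topic) - 1);    // fully qualified topic name
  strncpy(req.cgroup, cgroup, sizeof(req.cgroup) - 1); // consumer group id
  req.igNotExists = 1;                                 // do not fail if the cgroup is already gone

  // Assumption: calling the serializer with a NULL buffer returns the encoded size.
  int32_t len = tSerializeSMDropCgroupReq(NULL, 0, &req);
  void*   buf = taosMemoryMalloc(len);                 // assumed allocator
  if (buf == NULL) return -1;
  tSerializeSMDropCgroupReq(buf, len, &req);

  *ppBuf = buf;
  *pLen  = len;
  return 0;
}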
@@ -236,6 +236,7 @@ typedef struct SSelectStmt {
  bool isTimeOrderQuery;
  bool hasAggFuncs;
  bool hasRepeatScanFuncs;
  bool hasNonstdSQLFunc;
} SSelectStmt;

typedef enum ESetOperatorType { SET_OP_TYPE_UNION_ALL = 1, SET_OP_TYPE_UNION } ESetOperatorType;
@@ -350,9 +351,6 @@ bool nodesIsComparisonOp(const SOperatorNode* pOp);
bool nodesIsJsonOp(const SOperatorNode* pOp);
bool nodesIsRegularOp(const SOperatorNode* pOp);

bool nodesIsTimeorderQuery(const SNode* pQuery);
bool nodesIsTimelineQuery(const SNode* pQuery);

void*   nodesGetValueFromNode(SValueNode* pNode);
int32_t nodesSetValueNodeValue(SValueNode* pNode, void* value);
char*   nodesGetStrValueFromNode(SValueNode* pNode);
@@ -107,6 +107,7 @@ static FORCE_INLINE void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit)
  if (ref == 0) {
    taosMemoryFree(pDataSubmit->data);
    taosMemoryFree(pDataSubmit->dataRef);
    taosFreeQitem(pDataSubmit);
  }
}

@@ -279,6 +280,12 @@ typedef struct {
  SArray* res;  // SArray<SSDataBlock>
} SStreamSinkReq;

typedef struct {
  SMsgHead head;
  int64_t  streamId;
  int32_t  taskId;
} SStreamTaskRunReq;

int32_t streamEnqueueDataSubmit(SStreamTask* pTask, SStreamDataSubmit* input);
int32_t streamEnqueueDataBlk(SStreamTask* pTask, SStreamDataBlock* input);
int32_t streamDequeueOutput(SStreamTask* pTask, void** output);
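SStreamTaskRunReq is a small trigger message: an SMsgHead plus the stream and task identifiers. Below is a hedged sketch of composing one; the struct itself and the enqueue/dequeue prototypes are from this hunk, while the SMsgHead field names (vgId/contLen), rpcMallocCont() and the header name are assumptions borrowed from the surrounding code base.

#include "tstream.h"   // assumed header for SStreamTaskRunReq

// Hedged sketch: allocate and fill a run-trigger request for a stream task.
SStreamTaskRunReq* buildStreamRunReq(int32_t vgId, int64_t streamId, int32_t taskId) {
  SStreamTaskRunReq* pReq = rpcMallocCont(sizeof(SStreamTaskRunReq));  // assumed RPC allocator
  if (pReq == NULL) return NULL;
  pReq->head.vgId    = vgId;           // assumed SMsgHead field: target vnode
  pReq->head.contLen = sizeof(*pReq);  // assumed SMsgHead field: payload length
  pReq->streamId     = streamId;
  pReq->taskId       = taskId;
  return pReq;  // caller wraps it in an SRpcMsg and enqueues it on the task's node
}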
@@ -20,20 +20,23 @@
extern "C" {
#endif

#include <stdbool.h>
#include <stdint.h>
//#include <tdatablock.h>
#include "cJSON.h"
#include "tdef.h"
//#include "taosdef.h"
//#include "trpc.h"
//#include "wal.h"
#include "tmsgcb.h"

#define SYNC_INDEX_BEGIN 0
#define SYNC_INDEX_INVALID -1

typedef uint64_t SyncNodeId;
typedef int32_t  SyncGroupId;
typedef int64_t  SyncIndex;
typedef uint64_t SyncTerm;

typedef struct SSyncNode      SSyncNode;
typedef struct SSyncBuffer    SSyncBuffer;
typedef struct SWal           SWal;
typedef struct SSyncRaftEntry SSyncRaftEntry;

typedef enum {
  TAOS_SYNC_STATE_FOLLOWER = 100,
  TAOS_SYNC_STATE_CANDIDATE = 101,
@@ -41,6 +44,17 @@ typedef enum {
  TAOS_SYNC_STATE_ERROR = 103,
} ESyncState;

typedef enum {
  TAOS_SYNC_PROPOSE_SUCCESS = 0,
  TAOS_SYNC_PROPOSE_NOT_LEADER = 1,
  TAOS_SYNC_PROPOSE_OTHER_ERROR = 2,
} ESyncProposeCode;

typedef enum {
  TAOS_SYNC_FSM_CB_SUCCESS = 0,
  TAOS_SYNC_FSM_CB_OTHER_ERROR = 1,
} ESyncFsmCbCode;

typedef struct SNodeInfo {
  uint16_t nodePort;
  char     nodeFqdn[TSDB_FQDN_LEN];
@@ -58,11 +72,6 @@ typedef struct SSnapshot {
  SyncTerm lastApplyTerm;
} SSnapshot;

typedef enum {
  TAOS_SYNC_FSM_CB_SUCCESS = 0,
  TAOS_SYNC_FSM_CB_OTHER_ERROR,
} ESyncFsmCbCode;

typedef struct SFsmCbMeta {
  SyncIndex index;
  bool      isWeak;
@@ -71,27 +80,15 @@ typedef struct SFsmCbMeta {
  uint64_t seqNum;
} SFsmCbMeta;

struct SRpcMsg;
typedef struct SRpcMsg SRpcMsg;

typedef struct SSyncFSM {
  void* data;

  void (*FpCommitCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
  void (*FpPreCommitCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
  void (*FpRollBackCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);

  int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
  int32_t (*FpRestoreSnapshot)(struct SSyncFSM* pFsm, const SSnapshot* snapshot);

} SSyncFSM;

struct SSyncRaftEntry;
typedef struct SSyncRaftEntry SSyncRaftEntry;

#define SYNC_INDEX_BEGIN 0
#define SYNC_INDEX_INVALID -1

// abstract definition of log store in raft
// SWal implements it
typedef struct SSyncLogStore {
@@ -120,11 +117,6 @@ typedef struct SSyncLogStore {

} SSyncLogStore;

struct SWal;
typedef struct SWal SWal;

struct SEpSet;
typedef struct SEpSet SEpSet;

typedef struct SSyncInfo {
  SyncGroupId vgId;
@@ -132,12 +124,9 @@ typedef struct SSyncInfo {
  char      path[TSDB_FILENAME_LEN];
  SWal*     pWal;
  SSyncFSM* pFsm;

  void* rpcClient;
  int32_t (*FpSendMsg)(void* rpcClient, const SEpSet* pEpSet, SRpcMsg* pMsg);
  void* queue;
  int32_t (*FpEqMsg)(void* queue, SRpcMsg* pMsg);

  SMsgCb* msgcb;
  int32_t (*FpSendMsg)(const SEpSet* pEpSet, SRpcMsg* pMsg);
  int32_t (*FpEqMsg)(const SMsgCb* msgcb, SRpcMsg* pMsg);
} SSyncInfo;

int32_t syncInit();
@@ -152,27 +141,8 @@ const char* syncGetMyRoleStr(int64_t rid);
SyncTerm syncGetMyTerm(int64_t rid);
void     syncGetEpSet(int64_t rid, SEpSet* pEpSet);
int32_t  syncGetVgId(int64_t rid);

typedef enum {
  TAOS_SYNC_PROPOSE_SUCCESS = 0,
  TAOS_SYNC_PROPOSE_NOT_LEADER,
  TAOS_SYNC_PROPOSE_OTHER_ERROR,
} ESyncProposeCode;

int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak);

bool syncEnvIsStart();

extern int32_t sDebugFlag;

//-----------------------------------------
struct SSyncNode;
typedef struct SSyncNode SSyncNode;

struct SSyncBuffer;
typedef struct SSyncBuffer SSyncBuffer;
//-----------------------------------------

int32_t     syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak);
bool        syncEnvIsStart();
const char* syncStr(ESyncState state);

#ifdef __cplusplus
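This reshuffle consolidates ESyncProposeCode/ESyncFsmCbCode near the top of the header and switches SSyncInfo from raw rpcClient/queue handles to an SMsgCb pointer. A hedged sketch of wiring a state machine to the reworked SSyncInfo follows; the SSyncFSM and SSyncInfo fields come from this hunk, while syncOpen(), the callback body and the vnode naming are assumptions for illustration.

#include <stdio.h>
#include "sync.h"   // assumed header name for the declarations shown above

// Hedged sketch: minimal FSM + SSyncInfo wiring against the reworked header.
static void vnodeCommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) {
  // apply pMsg to the local state machine; cbMeta.index identifies the raft entry
  (void)pFsm; (void)pMsg; (void)cbMeta;
}

int64_t openVnodeSync(SyncGroupId vgId, const char* path, SWal* pWal, SMsgCb* pMsgCb) {
  static SSyncFSM fsm;
  fsm.data          = NULL;
  fsm.FpCommitCb    = vnodeCommitCb;  // applied entries land here
  fsm.FpPreCommitCb = NULL;           // optional hooks left unset in this sketch
  fsm.FpRollBackCb  = NULL;

  SSyncInfo info = {0};
  info.vgId  = vgId;
  snprintf(info.path, sizeof(info.path), "%s", path);
  info.pWal  = pWal;
  info.pFsm  = &fsm;
  info.msgcb = pMsgCb;                // replaces the old rpcClient/queue pair
  // FpSendMsg/FpEqMsg now take const SEpSet* / const SMsgCb* (see the diff) and
  // would be pointed at the node's transport helpers here.

  return syncOpen(&info);             // assumed constructor that returns the rid
}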
@@ -20,13 +20,7 @@
extern "C" {
#endif

#include <stdbool.h>
#include <stdint.h>
//#include <tdatablock.h>
#include "cJSON.h"
//#include "taosdef.h"
#include "trpc.h"
//#include "wal.h"

// ------------------ ds -------------------
typedef struct SRaftId {
@@ -35,16 +29,12 @@ typedef struct SRaftId {
} SRaftId;

// ------------------ control -------------------
struct SSyncNode;
typedef struct SSyncNode SSyncNode;

SSyncNode* syncNodeAcquire(int64_t rid);
void       syncNodeRelease(SSyncNode* pNode);

int32_t syncGetRespRpc(int64_t rid, uint64_t index, SRpcMsg* msg);
int32_t syncGetAndDelRespRpc(int64_t rid, uint64_t index, SRpcMsg* msg);
void    syncSetQ(int64_t rid, void* queueHandle);
void    syncSetRpc(int64_t rid, void* rpcHandle);
void    syncSetMsgCb(int64_t rid, const SMsgCb* msgcb);
char*   sync2SimpleStr(int64_t rid);

// set timer ms
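The tooling header keeps the rid-based handle model, now alongside the SMsgCb setter: a node is looked up with syncNodeAcquire() and must be paired with a release. A small hedged sketch of that pairing is below; the inspection step in the middle and the NULL-on-stale-rid behaviour are illustrative assumptions.

// Hedged sketch: rid-based acquire/release pairing from this header.
void dumpSyncNode(int64_t rid) {
  SSyncNode* pNode = syncNodeAcquire(rid);  // assumed to return NULL for a stale rid
  if (pNode == NULL) return;
  // ... inspect or log the node here for debugging
  syncNodeRelease(pNode);                   // always pair with the acquire
}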
@@ -38,7 +38,7 @@ typedef struct {

typedef struct SRpcHandleInfo {
  // rpc info
  void *handle; // rpc handle returned to app
  void * handle; // rpc handle returned to app
  int64_t refId; // refid, used by server
  int32_t noResp; // has response or not(default 0, 0: resp, 1: no resp);
  int32_t persistHandle; // persist handle or not
@@ -49,13 +49,13 @@ typedef struct SRpcHandleInfo {
  void *node; // node mgmt handle

  // resp info
  void *rsp;
  void * rsp;
  int32_t rspLen;
} SRpcHandleInfo;

typedef struct SRpcMsg {
  tmsg_t msgType;
  void *pCont;
  void * pCont;
  int32_t contLen;
  int32_t code;
  SRpcHandleInfo info;
@@ -63,11 +63,6 @@ typedef struct SRpcMsg {
} SRpcMsg;

typedef void (*RpcCfp)(void *parent, SRpcMsg *, SEpSet *rf);
typedef int (*RpcAfp)(void *parent, char *tableId, char *spi, char *encrypt, char *secret, char *ckey);
///
// // SRpcMsg code
// REDIERE,
// NOT READY, EpSet
typedef bool (*RpcRfp)(int32_t code);

typedef struct SRpcInit {
@@ -80,18 +75,11 @@ typedef struct SRpcInit {
  int idleTime; // milliseconds, 0 means idle timer is disabled

  // the following is for client app ecurity only
  char *user; // user name
  char spi; // security parameter index
  char encrypt; // encrypt algorithm
  char *secret; // key for authentication
  char *ckey; // ciphering key
  char *user; // user name

  // call back to process incoming msg, code shall be ignored by server app
  RpcCfp cfp;

  // call back to retrieve the client auth info, for server app only
  RpcAfp afp;

  // user defined retry func
  RpcRfp rfp;
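This hunk trims SRpcInit's client-security block down to the user name and keeps the three callback slots. A hedged sketch of filling the remaining fields follows; idleTime, user, cfp, afp and rfp are from the diff, while the callback bodies and rpcOpen() are illustrative assumptions.

#include "trpc.h"   // assumed header for SRpcInit, SRpcMsg, SEpSet

// Hedged sketch: incoming-message callback matching the RpcCfp signature.
static void processMsg(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
  // dispatch on pMsg->msgType; pMsg->code is ignored by a server app (per the comment)
  (void)parent; (void)pMsg; (void)pEpSet;
}

// Hedged sketch: user defined retry predicate matching RpcRfp.
static bool retryOnCode(int32_t code) {
  (void)code;
  return false;  // e.g. return true while the peer reports "not ready"
}

void* openClientRpc(void) {
  SRpcInit init = {0};
  init.idleTime = 60 * 1000;   // ms; 0 disables the idle timer
  init.user     = "root";      // client security info reduced to the user name
  init.cfp      = processMsg;  // incoming-message callback
  init.afp      = NULL;        // auth callback, server side only
  init.rfp      = retryOnCode; // user defined retry func
  return rpcOpen(&init);       // assumed constructor from the transport module
}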
@@ -649,6 +649,8 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_TBNAME          TAOS_DEF_ERROR_CODE(0, 0x264C)
#define TSDB_CODE_PAR_INVALID_FUNCTION_NAME   TAOS_DEF_ERROR_CODE(0, 0x264D)
#define TSDB_CODE_PAR_COMMENT_TOO_LONG        TAOS_DEF_ERROR_CODE(0, 0x264E)
#define TSDB_CODE_PAR_NOT_ALLOWED_FUNC        TAOS_DEF_ERROR_CODE(0, 0x264F)
#define TSDB_CODE_PAR_NOT_ALLOWED_WIN_QUERY   TAOS_DEF_ERROR_CODE(0, 0x2650)

//planner
#define TSDB_CODE_PLAN_INTERNAL_ERROR         TAOS_DEF_ERROR_CODE(0, 0x2700)
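TSDB_CODE_PAR_NOT_ALLOWED_FUNC and TSDB_CODE_PAR_NOT_ALLOWED_WIN_QUERY extend the parser error range just ahead of the planner block. A hedged sketch of returning the new window-query code is below; only the macro itself is from the diff, while the helper and the exact check are hypothetical (they tie back to the hasNonstdSQLFunc flag added to SSelectStmt earlier in this commit).

// Hedged sketch: a hypothetical parser check that surfaces the new code.
static int32_t checkWindowClause(bool hasNonstdSQLFunc, bool hasWindowClause) {
  if (hasNonstdSQLFunc && hasWindowClause) {
    // non-standard SQL functions are not allowed together with a window clause
    return TSDB_CODE_PAR_NOT_ALLOWED_WIN_QUERY;
  }
  return 0;  // success
}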
@@ -1,22 +1,22 @@
[Unit]
Description=Nginx For TDengine Service
After=network-online.target
Wants=network-online.target

[Service]
Type=forking
PIDFile=/usr/local/nginxd/logs/nginx.pid
ExecStart=/usr/local/nginxd/sbin/nginx
ExecStop=/usr/local/nginxd/sbin/nginx -s stop
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target
@@ -1,312 +1,312 @@
|
|||
########################################################
|
||||
# #
|
||||
# TDengine Configuration #
|
||||
# Any questions, please email support@taosdata.com #
|
||||
# #
|
||||
########################################################
|
||||
|
||||
# first fully qualified domain name (FQDN) for TDengine system
|
||||
# firstEp hostname:6030
|
||||
|
||||
# local fully qualified domain name (FQDN)
|
||||
# fqdn hostname
|
||||
|
||||
# first port number for the connection (12 continuous UDP/TCP port number are used)
|
||||
# serverPort 6030
|
||||
|
||||
# log file's directory
|
||||
# logDir /var/log/taos
|
||||
|
||||
# data file's directory
|
||||
# dataDir /var/lib/taos
|
||||
|
||||
# temporary file's directory
|
||||
# tempDir /tmp/
|
||||
|
||||
# the arbitrator's fully qualified domain name (FQDN) for TDengine system, for cluster only
|
||||
# arbitrator arbitrator_hostname:6042
|
||||
|
||||
# number of threads per CPU core
|
||||
# numOfThreadsPerCore 1.0
|
||||
|
||||
# number of threads to commit cache data
|
||||
# numOfCommitThreads 4
|
||||
|
||||
# the proportion of total CPU cores available for query processing
|
||||
# 2.0: the query threads will be set to double of the CPU cores.
|
||||
# 1.0: all CPU cores are available for query processing [default].
|
||||
# 0.5: only half of the CPU cores are available for query.
|
||||
# 0.0: only one core available.
|
||||
# ratioOfQueryCores 1.0
|
||||
|
||||
# the last_row/first/last aggregator will not change the original column name in the result fields
|
||||
keepColumnName 1
|
||||
|
||||
# number of management nodes in the system
|
||||
# numOfMnodes 1
|
||||
|
||||
# enable/disable backuping vnode directory when removing vnode
|
||||
# vnodeBak 1
|
||||
|
||||
# enable/disable installation / usage report
|
||||
# telemetryReporting 1
|
||||
|
||||
# enable/disable load balancing
|
||||
# balance 1
|
||||
|
||||
# role for dnode. 0 - any, 1 - mnode, 2 - dnode
|
||||
# role 0
|
||||
|
||||
# max timer control blocks
|
||||
# maxTmrCtrl 512
|
||||
|
||||
# time interval of system monitor, seconds
|
||||
# monitorInterval 30
|
||||
|
||||
# number of seconds allowed for a dnode to be offline, for cluster only
|
||||
# offlineThreshold 864000
|
||||
|
||||
# RPC re-try timer, millisecond
|
||||
# rpcTimer 300
|
||||
|
||||
# RPC maximum time for ack, seconds.
|
||||
# rpcMaxTime 600
|
||||
|
||||
# time interval of dnode status reporting to mnode, seconds, for cluster only
|
||||
# statusInterval 1
|
||||
|
||||
# time interval of heart beat from shell to dnode, seconds
|
||||
# shellActivityTimer 3
|
||||
|
||||
# minimum sliding window time, milli-second
|
||||
# minSlidingTime 10
|
||||
|
||||
# minimum time window, milli-second
|
||||
# minIntervalTime 10
|
||||
|
||||
# maximum delay before launching a stream computation, milli-second
|
||||
# maxStreamCompDelay 20000
|
||||
|
||||
# maximum delay before launching a stream computation for the first time, milli-second
|
||||
# maxFirstStreamCompDelay 10000
|
||||
|
||||
# retry delay when a stream computation fails, milli-second
|
||||
# retryStreamCompDelay 10
|
||||
|
||||
# the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
|
||||
# streamCompDelayRatio 0.1
|
||||
|
||||
# max number of vgroups per db, 0 means configured automatically
|
||||
# maxVgroupsPerDb 0
|
||||
|
||||
# max number of tables per vnode
|
||||
# maxTablesPerVnode 1000000
|
||||
|
||||
# cache block size (Mbyte)
|
||||
# cache 16
|
||||
|
||||
# number of cache blocks per vnode
|
||||
# blocks 6
|
||||
|
||||
# number of days per DB file
|
||||
# days 10
|
||||
|
||||
# number of days to keep DB file
|
||||
# keep 3650
|
||||
|
||||
# minimum rows of records in file block
|
||||
# minRows 100
|
||||
|
||||
# maximum rows of records in file block
|
||||
# maxRows 4096
|
||||
|
||||
# the number of acknowledgments required for successful data writing
|
||||
# quorum 1
|
||||
|
||||
# enable/disable compression
|
||||
# comp 2
|
||||
|
||||
# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fysnc; 2: write wal, and call fsync
|
||||
# walLevel 1
|
||||
|
||||
# if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away
|
||||
# fsync 3000
|
||||
|
||||
# number of replications, for cluster only
|
||||
# replica 1
|
||||
|
||||
# the compressed rpc message, option:
|
||||
# -1 (no compression)
|
||||
# 0 (all message compressed),
|
||||
# > 0 (rpc message body which larger than this value will be compressed)
|
||||
# compressMsgSize -1
|
||||
|
||||
# query retrieved column data compression option:
|
||||
# -1 (no compression)
|
||||
# 0 (all retrieved column data compressed),
|
||||
# > 0 (any retrieved column size greater than this value all data will be compressed.)
|
||||
# compressColData -1
|
||||
|
||||
# max length of an SQL
|
||||
# maxSQLLength 65480
|
||||
|
||||
# max length of WildCards
|
||||
# maxWildCardsLength 100
|
||||
|
||||
# the maximum number of records allowed for super table time sorting
|
||||
# maxNumOfOrderedRes 100000
|
||||
|
||||
# system time zone
|
||||
# timezone Asia/Shanghai (CST, +0800)
|
||||
# system time zone (for windows 10)
|
||||
# timezone UTC-8
|
||||
|
||||
# system locale
|
||||
# locale en_US.UTF-8
|
||||
|
||||
# default system charset
|
||||
# charset UTF-8
|
||||
|
||||
# max number of connections allowed in dnode
|
||||
# maxShellConns 5000
|
||||
|
||||
# max number of connections allowed in client
|
||||
# maxConnections 5000
|
||||
|
||||
# stop writing logs when the disk size of the log folder is less than this value
|
||||
# minimalLogDirGB 1.0
|
||||
|
||||
# stop writing temporary files when the disk size of the tmp folder is less than this value
|
||||
# minimalTmpDirGB 1.0
|
||||
|
||||
# if disk free space is less than this value, taosd service exit directly within startup process
|
||||
# minimalDataDirGB 2.0
|
||||
|
||||
# One mnode is equal to the number of vnode consumed
|
||||
# mnodeEqualVnodeNum 4
|
||||
|
||||
# enbale/disable http service
|
||||
# http 1
|
||||
|
||||
# enable/disable system monitor
|
||||
# monitor 1
|
||||
|
||||
# enable/disable recording the SQL statements via restful interface
|
||||
# httpEnableRecordSql 0
|
||||
|
||||
# number of threads used to process http requests
|
||||
# httpMaxThreads 2
|
||||
|
||||
# maximum number of rows returned by the restful interface
|
||||
# restfulRowLimit 10240
|
||||
|
||||
# database name must be specified in restful interface if the following parameter is set, off by default
|
||||
# httpDbNameMandatory 1
|
||||
|
||||
# http keep alive, default is 30 seconds
|
||||
# httpKeepAlive 30000
|
||||
|
||||
# The following parameter is used to limit the maximum number of lines in log files.
|
||||
# max number of lines per log filters
|
||||
# numOfLogLines 10000000
|
||||
|
||||
# enable/disable async log
|
||||
# asyncLog 1
|
||||
|
||||
# time of keeping log files, days
|
||||
# logKeepDays 0
|
||||
|
||||
|
||||
# The following parameters are used for debug purpose only.
|
||||
# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR
|
||||
# 131: output warning and error
|
||||
# 135: output debug, warning and error
|
||||
# 143: output trace, debug, warning and error to log
|
||||
# 199: output debug, warning and error to both screen and file
|
||||
# 207: output trace, debug, warning and error to both screen and file
|
||||
|
||||
# debug flag for all log type, take effect when non-zero value
|
||||
# debugFlag 0
|
||||
|
||||
# debug flag for meta management messages
|
||||
# mDebugFlag 135
|
||||
|
||||
# debug flag for dnode messages
|
||||
# dDebugFlag 135
|
||||
|
||||
# debug flag for sync module
|
||||
# sDebugFlag 135
|
||||
|
||||
# debug flag for WAL
|
||||
# wDebugFlag 135
|
||||
|
||||
# debug flag for SDB
|
||||
# sdbDebugFlag 135
|
||||
|
||||
# debug flag for RPC
|
||||
# rpcDebugFlag 131
|
||||
|
||||
# debug flag for TAOS TIMER
|
||||
# tmrDebugFlag 131
|
||||
|
||||
# debug flag for TDengine client
|
||||
# cDebugFlag 131
|
||||
|
||||
# debug flag for JNI
|
||||
# jniDebugFlag 131
|
||||
|
||||
# debug flag for storage
|
||||
# uDebugFlag 131
|
||||
|
||||
# debug flag for http server
|
||||
# httpDebugFlag 131
|
||||
|
||||
# debug flag for monitor
|
||||
# monDebugFlag 131
|
||||
|
||||
# debug flag for query
|
||||
# qDebugFlag 131
|
||||
|
||||
# debug flag for vnode
|
||||
# vDebugFlag 131
|
||||
|
||||
# debug flag for TSDB
|
||||
# tsdbDebugFlag 131
|
||||
|
||||
# debug flag for continue query
|
||||
# cqDebugFlag 131
|
||||
|
||||
# enable/disable recording the SQL in taos client
|
||||
# enableRecordSql 0
|
||||
|
||||
# generate core file when service crash
|
||||
# enableCoreFile 1
|
||||
|
||||
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
|
||||
# maxBinaryDisplayWidth 30
|
||||
|
||||
# enable/disable stream (continuous query)
|
||||
# stream 1
|
||||
|
||||
# in retrieve blocking model, only in 50% query threads will be used in query processing in dnode
|
||||
# retrieveBlockingModel 0
|
||||
|
||||
# the maximum allowed query buffer size in MB during query processing for each data node
|
||||
# -1 no limit (default)
|
||||
# 0 no query allowed, queries are disabled
|
||||
# queryBufferSize -1
|
||||
|
||||
# percent of redundant data in tsdb meta will compact meta data,0 means donot compact
|
||||
# tsdbMetaCompactRatio 0
|
||||
|
||||
# default string type used for storing JSON String, options can be binary/nchar, default is nchar
|
||||
# defaultJSONStrType nchar
|
||||
|
||||
# force TCP transmission
|
||||
# rpcForceTcp 0
|
||||
|
||||
# unit MB. Flush vnode wal file if walSize > walFlushSize and walSize > cache*0.5*blocks
|
||||
# walFlushSize 1024
|
||||
|
||||
# unit Hour. Latency of data migration
|
||||
# keepTimeOffset 0
|
||||
@@ -1,21 +1,21 @@
[Unit]
Description=TDengine server service
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
ExecStart=/usr/bin/taosd
ExecStartPre=/usr/local/taos/bin/startPre.sh
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target
@@ -1,20 +1,20 @@
[Unit]
Description=TDengine arbitrator service
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
ExecStart=/usr/bin/tarbitrator
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target
@@ -1,252 +1,252 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# This file is used to install database on linux systems. The operating system
|
||||
# is required to use systemd to manage services at boot
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
verMode=edge
|
||||
pagMode=full
|
||||
|
||||
iplist=""
|
||||
serverFqdn=""
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
script_dir="../release"
|
||||
# Dynamic directory
|
||||
data_dir="/var/lib/taos"
|
||||
log_dir="/var/log/taos"
|
||||
|
||||
data_link_dir="/usr/local/taos/data"
|
||||
log_link_dir="/usr/local/taos/log"
|
||||
|
||||
cfg_install_dir="/etc/taos"
|
||||
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
|
||||
#install main path
|
||||
install_main_dir="/usr/local/taos"
|
||||
|
||||
# old bin dir
|
||||
sbin_dir="/usr/local/taos/bin"
|
||||
|
||||
temp_version=""
|
||||
fin_result=""
|
||||
|
||||
service_config_dir="/etc/systemd/system"
|
||||
nginx_port=6060
|
||||
nginx_dir="/usr/local/nginxd"
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
# ============================= get input parameters =================================================
|
||||
|
||||
# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...]
|
||||
|
||||
# set parameters by default value
|
||||
interactiveFqdn=yes # [yes | no]
|
||||
verType=server # [server | client]
|
||||
initType=systemd # [systemd | service | ...]
|
||||
|
||||
while getopts "hv:d:" arg
|
||||
do
|
||||
case $arg in
|
||||
d)
|
||||
#echo "interactiveFqdn=$OPTARG"
|
||||
script_dir=$( echo $OPTARG )
|
||||
;;
|
||||
h)
|
||||
echo "Usage: `basename $0` -d scripy_path"
|
||||
exit 0
|
||||
;;
|
||||
?) #unknow option
|
||||
echo "unkonw argument"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
#echo "verType=${verType} interactiveFqdn=${interactiveFqdn}"
|
||||
|
||||
function kill_process() {
|
||||
pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function check_file() {
|
||||
#check file whether exists
|
||||
if [ ! -e $1/$2 ];then
|
||||
echo -e "$1/$2 \033[31mnot exists\033[0m!quit"
|
||||
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
|
||||
echo -e $fin_result
|
||||
exit 8
|
||||
fi
|
||||
}
|
||||
|
||||
function get_package_name() {
|
||||
var=$1
|
||||
if [[ $1 =~ 'aarch' ]];then
|
||||
echo ${var::-21}
|
||||
else
|
||||
echo ${var::-17}
|
||||
fi
|
||||
}
|
||||
|
||||
function check_link() {
|
||||
#check Link whether exists or broken
|
||||
if [ -L $1 ] ; then
|
||||
if [ ! -e $1 ] ; then
|
||||
echo -e "$1 \033[31Broken link\033[0m"
|
||||
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
|
||||
echo -e $fin_result
|
||||
exit 8
|
||||
fi
|
||||
else
|
||||
echo -e "$1 \033[31mnot exists\033[0m!quit"
|
||||
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
|
||||
echo -e $fin_result
|
||||
exit 8
|
||||
fi
|
||||
}
|
||||
|
||||
function check_main_path() {
|
||||
#check install main dir and all sub dir
|
||||
main_dir=("" "cfg" "bin" "connector" "driver" "examples" "include" "init.d")
|
||||
for i in "${main_dir[@]}";do
|
||||
check_file ${install_main_dir} $i
|
||||
done
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
nginx_main_dir=("admin" "conf" "html" "sbin" "logs")
|
||||
for i in "${nginx_main_dir[@]}";do
|
||||
check_file ${nginx_dir} $i
|
||||
done
|
||||
fi
|
||||
echo -e "Check main path:\033[32mOK\033[0m!"
|
||||
}
|
||||
|
||||
function check_bin_path() {
|
||||
# check install bin dir and all sub dir
|
||||
bin_dir=("taos" "taosd" "taosadapter" "taosdemo" "remove.sh" "tarbitrator" "set_core.sh")
|
||||
for i in "${bin_dir[@]}";do
|
||||
check_file ${sbin_dir} $i
|
||||
done
|
||||
lbin_dir=("taos" "taosd" "taosadapter" "taosdemo" "rmtaos" "tarbitrator" "set_core")
|
||||
for i in "${lbin_dir[@]}";do
|
||||
check_link ${bin_link_dir}/$i
|
||||
done
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
check_file ${nginx_dir}/sbin nginx
|
||||
fi
|
||||
echo -e "Check bin path:\033[32mOK\033[0m!"
|
||||
}
|
||||
|
||||
function check_lib_path() {
|
||||
# check all links
|
||||
check_link ${lib_link_dir}/libtaos.so
|
||||
check_link ${lib_link_dir}/libtaos.so.1
|
||||
|
||||
if [[ -d ${lib64_link_dir} ]]; then
|
||||
check_link ${lib64_link_dir}/libtaos.so
|
||||
check_link ${lib64_link_dir}/libtaos.so.1
|
||||
fi
|
||||
echo -e "Check lib path:\033[32mOK\033[0m!"
|
||||
}
|
||||
|
||||
function check_header_path() {
|
||||
# check all header
|
||||
header_dir=("taos.h" "taosdef.h" "taoserror.h")
|
||||
for i in "${header_dir[@]}";do
|
||||
check_link ${inc_link_dir}/$i
|
||||
done
|
||||
echo -e "Check bin path:\033[32mOK\033[0m!"
|
||||
}
|
||||
|
||||
function check_taosadapter_config_dir() {
|
||||
# check all config
|
||||
check_file ${cfg_install_dir} taosadapter.toml
|
||||
check_file ${cfg_install_dir} taosadapter.service
|
||||
check_file ${install_main_dir}/cfg taosadapter.toml.org
|
||||
echo -e "Check conf path:\033[32mOK\033[0m!"
|
||||
}
|
||||
|
||||
function check_config_dir() {
|
||||
# check all config
|
||||
check_file ${cfg_install_dir} taos.cfg
|
||||
check_file ${install_main_dir}/cfg taos.cfg.org
|
||||
echo -e "Check conf path:\033[32mOK\033[0m!"
|
||||
}
|
||||
|
||||
function check_log_path() {
|
||||
# check log path
|
||||
check_file ${log_dir}
|
||||
echo -e "Check log path:\033[32mOK\033[0m!"
|
||||
}
|
||||
|
||||
function check_data_path() {
|
||||
# check data path
|
||||
check_file ${data_dir}
|
||||
echo -e "Check data path:\033[32mOK\033[0m!"
|
||||
}
|
||||
|
||||
function install_TDengine() {
|
||||
cd ${script_dir}
|
||||
tar zxf $1
|
||||
temp_version=$(get_package_name $1)
|
||||
cd $(get_package_name $1)
|
||||
echo -e "\033[32muninstall TDengine && install TDengine...\033[0m"
|
||||
rmtaos >/dev/null 2>&1 || echo 'taosd not installed' && echo -e '\n\n' |./install.sh >/dev/null 2>&1
|
||||
echo -e "\033[32mTDengine has been installed!\033[0m"
|
||||
echo -e "\033[32mTDengine is starting...\033[0m"
|
||||
kill_process taos && systemctl start taosd && sleep 10
|
||||
}
|
||||
|
||||
function test_TDengine() {
|
||||
check_main_path
|
||||
check_bin_path
|
||||
check_lib_path
|
||||
check_header_path
|
||||
check_config_dir
|
||||
check_taosadapter_config_dir
|
||||
check_log_path
|
||||
check_data_path
|
||||
result=`taos -s 'create database test ;create table test.tt(ts timestamp ,i int);insert into test.tt values(now,11);select * from test.tt' 2>&1 ||:`
|
||||
if [[ $result =~ "Unable to establish" ]];then
|
||||
echo -e "\033[31mTDengine connect failed\033[0m"
|
||||
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
|
||||
echo -e $fin_result
|
||||
exit 8
|
||||
fi
|
||||
echo -e "Check TDengine connect:\033[32mOK\033[0m!"
|
||||
fin_result=$fin_result"\033[32m$temp_version\033[0m test OK!\n"
|
||||
}
|
||||
# ## ==============================Main program starts from here============================
|
||||
TD_package_name=`ls ${script_dir}/*server*gz |awk -F '/' '{print $NF}' `
|
||||
temp=`pwd`
|
||||
for i in $TD_package_name;do
|
||||
if [[ $i =~ 'enterprise' ]];then
|
||||
verMode="cluster"
|
||||
else
|
||||
verMode=""
|
||||
fi
|
||||
cd $temp
|
||||
install_TDengine $i
|
||||
test_TDengine
|
||||
done
|
||||
echo "============================================================"
|
||||
echo -e $fin_result
|
||||
@@ -1,13 +1,13 @@
Package: tdengine
Version: 1.0.0
Section: utils
Priority: optional
#Essential: no
#Depends: no
#Suggests: no
Architecture: amd64
Installed-Size: 66666
Maintainer: support@taosdata.com
Provides: taosdata
Homepage: http://taosdata.com
Description: Big Data Platform Designed and Optimized for IoT.
@@ -1,13 +1,13 @@
#!/bin/bash
#set -x
#path=`pwd`
insmetaPath="/usr/local/taos/script"

csudo=""
if command -v sudo > /dev/null; then
    csudo="sudo "
fi

${csudo}chmod -R 744 ${insmetaPath}
cd ${insmetaPath}
${csudo}./post.sh
@@ -1,2 +1,2 @@
#!/bin/bash

@@ -1,40 +1,40 @@
#!/bin/bash

csudo=""
if command -v sudo > /dev/null; then
    csudo="sudo "
fi

# Stop the service if running
if pidof taosd &> /dev/null; then
    if pidof systemd &> /dev/null; then
        ${csudo}systemctl stop taosd || :
    elif $(which service &> /dev/null); then
        ${csudo}service taosd stop || :
    else
        pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
        if [ -n "$pid" ]; then
            ${csudo}kill -9 $pid || :
        fi
    fi
    echo "Stop taosd service success!"
    sleep 1
fi

# if taos.cfg already softlink, remove it
cfg_install_dir="/etc/taos"
install_main_dir="/usr/local/taos"
if [ -f "${install_main_dir}/taos.cfg" ]; then
    ${csudo}rm -f ${install_main_dir}/cfg/taos.cfg || :
fi

if [ -f "${install_main_dir}/taosadapter.toml" ]; then
    ${csudo}rm -f ${install_main_dir}/cfg/taosadapter.toml || :
fi

if [ -f "${install_main_dir}/taosadapter.service" ]; then
    ${csudo}rm -f ${install_main_dir}/cfg/taosadapter.service || :
fi

# there can not libtaos.so*, otherwise ln -s error
${csudo}rm -f ${install_main_dir}/driver/libtaos* || :
@@ -1,42 +1,42 @@
#!/bin/bash

insmetaPath="/usr/local/taos/script"

csudo=""
if command -v sudo > /dev/null; then
    csudo="sudo "
fi

${csudo}chmod -R 744 ${insmetaPath} || :
#cd ${insmetaPath}
#${csudo}./preun.sh
if [ -f ${insmetaPath}/preun.sh ]; then
    cd ${insmetaPath}
    ${csudo}./preun.sh
else
    bin_link_dir="/usr/bin"
    lib_link_dir="/usr/lib"
    inc_link_dir="/usr/include"

    data_link_dir="/usr/local/taos/data"
    log_link_dir="/usr/local/taos/log"
    cfg_link_dir="/usr/local/taos/cfg"

    # Remove all links
    ${csudo}rm -f ${bin_link_dir}/taos || :
    ${csudo}rm -f ${bin_link_dir}/taosd || :
    ${csudo}rm -f ${bin_link_dir}/taosadapter || :
    ${csudo}rm -f ${bin_link_dir}/taosdemo || :
    ${csudo}rm -f ${cfg_link_dir}/* || :
    ${csudo}rm -f ${inc_link_dir}/taos.h || :
    ${csudo}rm -f ${lib_link_dir}/libtaos.* || :

    ${csudo}rm -f ${log_link_dir} || :
    ${csudo}rm -f ${data_link_dir} || :

    pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
    if [ -n "$pid" ]; then
        ${csudo}kill -9 $pid || :
    fi
fi
@ -1,147 +1,147 @@
#!/bin/bash
#
# Generate deb package for ubuntu
set -e
# set -x

#curr_dir=$(pwd)
compile_dir=$1
output_dir=$2
tdengine_ver=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
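# Hypothetical invocation of this script, matching the argument order above
# (all values are placeholders):
#   <this-script> <compile_dir> <output_dir> 3.0.0.0 amd64 Linux edge stable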

script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
pkg_dir="${top_dir}/debworkroom"

#echo "curr_dir: ${curr_dir}"
#echo "top_dir: ${top_dir}"
#echo "script_dir: ${script_dir}"
echo "compile_dir: ${compile_dir}"
echo "pkg_dir: ${pkg_dir}"

if [ -d ${pkg_dir} ]; then
    rm -rf ${pkg_dir}
fi
mkdir -p ${pkg_dir}
cd ${pkg_dir}

libfile="libtaos.so.${tdengine_ver}"

# create install dir
install_home_path="/usr/local/taos"
mkdir -p ${pkg_dir}${install_home_path}
mkdir -p ${pkg_dir}${install_home_path}/bin
mkdir -p ${pkg_dir}${install_home_path}/cfg
#mkdir -p ${pkg_dir}${install_home_path}/connector
mkdir -p ${pkg_dir}${install_home_path}/driver
mkdir -p ${pkg_dir}${install_home_path}/examples
mkdir -p ${pkg_dir}${install_home_path}/include
#mkdir -p ${pkg_dir}${install_home_path}/init.d
mkdir -p ${pkg_dir}${install_home_path}/script

cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
    cp ${compile_dir}/test/cfg/taosadapter.toml ${pkg_dir}${install_home_path}/cfg || :
fi
if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
    cp ${compile_dir}/test/cfg/taosadapter.service ${pkg_dir}${install_home_path}/cfg || :
fi

#cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/startPre.sh ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin

cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
#cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin

if [ -f "${compile_dir}/build/bin/taosadapter" ]; then
    cp ${compile_dir}/build/bin/taosadapter ${pkg_dir}${install_home_path}/bin ||:
fi

cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taosdef.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/client/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/common/taosdef.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/util/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/examples/* ${pkg_dir}${install_home_path}/examples
#cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
#cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
#cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
#cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||:

install_user_local_path="/usr/local"

if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
    mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
    cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/
    if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then
        cp ${compile_dir}/build/bin/jemalloc.sh ${pkg_dir}${install_user_local_path}/bin/
    fi
    if [ -f ${compile_dir}/build/bin/jeprof ]; then
        cp ${compile_dir}/build/bin/jeprof ${pkg_dir}${install_user_local_path}/bin/
    fi
    if [ -f ${compile_dir}/build/include/jemalloc/jemalloc.h ]; then
        cp ${compile_dir}/build/include/jemalloc/jemalloc.h ${pkg_dir}${install_user_local_path}/include/jemalloc/
    fi
    if [ -f ${compile_dir}/build/lib/libjemalloc.so.2 ]; then
        cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/
        ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so
    fi
    if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
        cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
    fi
    if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
        cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
    fi
    if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
        cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/
    fi
    if [ -f ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ]; then
        cp ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ${pkg_dir}${install_user_local_path}/share/doc/jemalloc/
    fi
    if [ -f ${compile_dir}/build/share/man/man3/jemalloc.3 ]; then
        cp ${compile_dir}/build/share/man/man3/jemalloc.3 ${pkg_dir}${install_user_local_path}/share/man/man3/
    fi
fi

cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/
chmod 755 ${pkg_dir}/DEBIAN/*

# modify version of control
debver="Version: "$tdengine_ver
sed -i "2c$debver" ${pkg_dir}/DEBIAN/control

# get taos version, then set deb name
if [ "$verMode" == "cluster" ]; then
    debname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
    debname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType}
else
    echo "unknown verMode, neither cluster nor edge"
    exit 1
fi

if [ "$verType" == "beta" ]; then
    debname="TDengine-server-"${tdengine_ver}-${verType}-${osType}-${cpuType}".deb"
elif [ "$verType" == "stable" ]; then
    debname=${debname}".deb"
else
    echo "unknown verType, neither stable nor beta"
    exit 1
fi

# make deb package
dpkg -b ${pkg_dir} $debname
echo "make deb package success!"

cp ${pkg_dir}/*.deb ${output_dir}

# clean temp dir
rm -rf ${pkg_dir}

@ -1,95 +1,95 @@
#!/bin/bash
#
# Modified from original source: Elastic Search
# https://github.com/elasticsearch/elasticsearch
# Thank you to the Elastic Search authors
#
# chkconfig: 2345 99 01
#
### BEGIN INIT INFO
# Provides: TDengine
# Required-Start: $local_fs $network $syslog
# Required-Stop: $local_fs $network $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts TDengine taosd
# Description: Starts TDengine taosd, a time-series database engine
### END INIT INFO

set -e

PATH="/bin:/usr/bin:/sbin:/usr/sbin"
NAME="TDengine"
USER="root"
GROUP="root"
DAEMON="/usr/local/taos/bin/taosd"
DAEMON_OPTS=""

HTTPD_NAME="taosadapter"
DAEMON_HTTPD_NAME=$HTTPD_NAME
DAEMON_HTTPD="/usr/local/taos/bin/$HTTPD_NAME"

PID_FILE="/var/run/$NAME.pid"
APPARGS=""

# Maximum number of open files
MAX_OPEN_FILES=65535

. /lib/lsb/init-functions

case "$1" in
start)

    log_action_begin_msg "Starting TDengine..."
    $DAEMON_HTTPD &
    if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then

        touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE"

        if [ -n "$MAX_OPEN_FILES" ]; then
            ulimit -n $MAX_OPEN_FILES
        fi

        start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS

        log_end_msg $?
    fi
    ;;

stop)
    log_action_begin_msg "Stopping TDengine..."
    pkill -9 $DAEMON_HTTPD_NAME
    set +e
    if [ -f "$PID_FILE" ]; then
        # capture start-stop-daemon's exit status so both branches below test the daemon's result
        ret=0
        start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null || ret=$?
        if [ $ret -eq 1 ]; then
            log_action_cont_msg "TSD is not running but pid file exists, cleaning up"
        elif [ $ret -eq 3 ]; then
            PID="`cat $PID_FILE`"
            log_failure_msg "Failed to stop TDengine (pid $PID)"
            exit 1
        fi
        rm -f "$PID_FILE"
    else
        log_action_cont_msg "TDengine was not running"
    fi
    log_action_end_msg 0
    set -e
    ;;

restart|force-reload)
    if [ -f "$PID_FILE" ]; then
        $0 stop
        sleep 1
    fi
    $0 start
    ;;
status)
    status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME"
    ;;
*)
    exit 1
    ;;
esac

exit 0

@ -1,88 +1,88 @@
#!/bin/bash
#
# Modified from original source: Elastic Search
# https://github.com/elasticsearch/elasticsearch
# Thank you to the Elastic Search authors
#
# chkconfig: 2345 99 01
#
### BEGIN INIT INFO
# Provides: taoscluster
# Required-Start: $local_fs $network $syslog
# Required-Stop: $local_fs $network $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts taoscluster tarbitrator
# Description: Starts taoscluster tarbitrator, an arbitrator
### END INIT INFO

set -e

PATH="/bin:/usr/bin:/sbin:/usr/sbin"
NAME="taoscluster"
USER="root"
GROUP="root"
DAEMON="/usr/local/taos/bin/tarbitrator"
DAEMON_OPTS=""
PID_FILE="/var/run/$NAME.pid"
APPARGS=""

# Maximum number of open files
MAX_OPEN_FILES=65535

. /lib/lsb/init-functions

case "$1" in
start)

    log_action_begin_msg "Starting tarbitrator..."
    if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then

        touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE"

        if [ -n "$MAX_OPEN_FILES" ]; then
            ulimit -n $MAX_OPEN_FILES
        fi

        start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS

        log_end_msg $?
    fi
    ;;

stop)
    log_action_begin_msg "Stopping tarbitrator..."
    set +e
    if [ -f "$PID_FILE" ]; then
        # capture start-stop-daemon's exit status so both branches below test the daemon's result
        ret=0
        start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null || ret=$?
        if [ $ret -eq 1 ]; then
            log_action_cont_msg "TSD is not running but pid file exists, cleaning up"
        elif [ $ret -eq 3 ]; then
            PID="`cat $PID_FILE`"
            log_failure_msg "Failed to stop tarbitrator (pid $PID)"
            exit 1
        fi
        rm -f "$PID_FILE"
    else
        log_action_cont_msg "tarbitrator was not running"
    fi
    log_action_end_msg 0
    set -e
    ;;

restart|force-reload)
    if [ -f "$PID_FILE" ]; then
        $0 stop
        sleep 1
    fi
    $0 start
    ;;
status)
    status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME"
    ;;
*)
    exit 1
    ;;
esac

exit 0

@ -1,32 +1,32 @@
FROM ubuntu:18.04

WORKDIR /root

ARG pkgFile
ARG dirName
ARG cpuType
RUN echo ${pkgFile} && echo ${dirName}

COPY ${pkgFile} /root/
RUN tar -zxf ${pkgFile}
WORKDIR /root/
RUN cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root
RUN rm /root/${pkgFile}
RUN rm -rf /root/${dirName}

ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get clean && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \
    LC_CTYPE=en_US.UTF-8 \
    LANG=en_US.UTF-8 \
    LC_ALL=en_US.UTF-8

COPY ./bin/* /usr/bin/

ENV TINI_VERSION v0.19.0
RUN bash -c 'echo -e "Downloading tini-${cpuType} ..."'
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini
RUN chmod +x /tini
ENTRYPOINT ["/tini", "--", "/usr/bin/entrypoint.sh"]
CMD ["taosd"]
VOLUME [ "/var/lib/taos", "/var/log/taos", "/corefile" ]

File diff suppressed because it is too large
@ -1,83 +1,83 @@
#!/bin/sh
set -e
# for TZ awareness
if [ "$TZ" != "" ]; then
    ln -sf /usr/share/zoneinfo/$TZ /etc/localtime
    echo $TZ >/etc/timezone
fi

# option to disable taosadapter, default is no
DISABLE_ADAPTER=${TAOS_DISABLE_ADAPTER:-0}
unset TAOS_DISABLE_ADAPTER

# to get mnodeEpSet from data dir
DATA_DIR=${TAOS_DATA_DIR:-/var/lib/taos}

# append env to custom taos.cfg
CFG_DIR=/tmp/taos
CFG_FILE=$CFG_DIR/taos.cfg

mkdir -p $CFG_DIR >/dev/null 2>&1

[ -f /etc/taos/taos.cfg ] && cat /etc/taos/taos.cfg | grep -E -v "^#|^\s*$" >$CFG_FILE
env-to-cfg >>$CFG_FILE

FQDN=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep fqdn | tail -n1 | sed -E 's/.*fqdn\s+//')

# ensure the fqdn is resolved as localhost
grep "$FQDN" /etc/hosts >/dev/null || echo "127.0.0.1 $FQDN" >>/etc/hosts

# parse first ep host and port
FIRST_EP_HOST=${TAOS_FIRST_EP%:*}
FIRST_EP_PORT=${TAOS_FIRST_EP#*:}
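# e.g. with a hypothetical TAOS_FIRST_EP="td-1:6030" the expansions above yield
# FIRST_EP_HOST="td-1" and FIRST_EP_PORT="6030"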

# in case of custom server port
SERVER_PORT=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep serverPort | tail -n1 | sed -E 's/.*serverPort\s+//')
SERVER_PORT=${SERVER_PORT:-6030}

# for other binaries like interpreters
if echo $1 | grep -E "taosd$" - >/dev/null; then
    true # will run taosd
else
    cp -f $CFG_FILE /etc/taos/taos.cfg || true
    $@
    exit $?
fi

set +e
ulimit -c unlimited
# set the core file pattern; this may fail, so errors are suppressed
sysctl -w kernel.core_pattern=/corefile/core-$FQDN-%e-%p >/dev/null 2>&1
set -e

if [ "$DISABLE_ADAPTER" = "0" ]; then
    which taosadapter >/dev/null && taosadapter &
    # wait for port 6041 to become ready
    for _ in $(seq 1 20); do
        nc -z localhost 6041 && break
        sleep 0.5
    done
fi

# if the mnode ep set already exists, or this host is the first ep (or we are not clustering), just start.
if [ -f "$DATA_DIR/dnode/mnodeEpSet.json" ] ||
    [ "$TAOS_FQDN" = "$FIRST_EP_HOST" ]; then
    $@ -c $CFG_DIR
# other nodes first wait until the first ep is ready.
else
    if [ "$TAOS_FIRST_EP" = "" ]; then
        echo "run TDengine with single node."
        $@ -c $CFG_DIR
        exit $?
    fi
    while true; do
        es=0
        taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -n startup >/dev/null || es=$?
        if [ "$es" -eq 0 ]; then
            taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -s "create dnode \"$FQDN:$SERVER_PORT\";"
            break
        fi
        sleep 1s
    done
    $@ -c $CFG_DIR
fi

@ -1,13 +1,13 @@
#!/bin/sh
set -e
self=$0

snake_to_camel_case() {
    echo $1 | awk -F _ '{printf "%s", $1; for(i=2; i<=NF; i++) printf "%s", toupper(substr($i,1,1)) substr($i,2); print"";}'
}
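# e.g. snake_to_camel_case "first_ep" prints "firstEp" (hypothetical input)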

if echo $1 | grep -E "^$" - >/dev/null; then
    export |grep -E 'TAOS_.*' -o| sed 's/TAOS_//' |tr A-Z a-z | awk -F"=" '{print "name=$(""'$self' " $1"); echo $name "$2}' |sh
else
    snake_to_camel_case $1
fi

@ -1,77 +1,77 @@
version: "3"

networks:
  inter:
  api:

services:
  arbitrator:
    image: tdengine/tdengine:$VERSION
    command: tarbitrator
    networks:
      - inter
  td-1:
    image: tdengine/tdengine:$VERSION
    networks:
      - inter
    environment:
      TAOS_FQDN: "td-1"
      TAOS_FIRST_EP: "td-1"
      TAOS_NUM_OF_MNODES: "2"
      TAOS_REPLICA: "2"
      TAOS_ARBITRATOR: arbitrator:6042
    volumes:
      - taosdata-td1:/var/lib/taos/
      - taoslog-td1:/var/log/taos/
  td-2:
    image: tdengine/tdengine:$VERSION
    networks:
      - inter
    environment:
      TAOS_FQDN: "td-2"
      TAOS_FIRST_EP: "td-1"
      TAOS_NUM_OF_MNODES: "2"
      TAOS_REPLICA: "2"
      TAOS_ARBITRATOR: arbitrator:6042
    volumes:
      - taosdata-td2:/var/lib/taos/
      - taoslog-td2:/var/log/taos/
  adapter:
    image: tdengine/tdengine:$VERSION
    command: taosadapter
    networks:
      - inter
    environment:
      TAOS_FIRST_EP: "td-1"
      TAOS_SECOND_EP: "td-2"
    deploy:
      replicas: 4
      update_config:
        parallelism: 4
  nginx:
    image: nginx
    depends_on:
      - adapter
    networks:
      - inter
      - api
    ports:
      - 6041:6041
      - 6044:6044/udp
    command: [
        "sh",
        "-c",
        "while true;
        do curl -s http://adapter:6041/-/ping >/dev/null && break;
        done;
        printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
        > /etc/nginx/conf.d/rest.conf;
        printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
        >> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
        nginx -g 'daemon off;'",
      ]
volumes:
  taosdata-td1:
  taoslog-td1:
  taosdata-td2:
  taoslog-td2:

@ -1,82 +1,82 @@
#!/bin/bash
set -e
#set -x

# dockerbuild.sh
# -n [version number]
# -p [xxxx]
# -V [stable | beta]

# set parameters by default value
version=""
passWord=""
verType=""

while getopts "hn:p:V:" arg
do
    case $arg in
        n)
            #echo "version=$OPTARG"
            version=$(echo $OPTARG)
            ;;
        p)
            #echo "passWord=$OPTARG"
            passWord=$(echo $OPTARG)
            ;;
        V)
            #echo "verType=$OPTARG"
            verType=$(echo $OPTARG)
            ;;
        h)
            echo "Usage: `basename $0` -n [version number] "
            echo " -p [password for docker hub] "
            exit 0
            ;;
        ?) #unknown option
            echo "unknown argument"
            exit 1
            ;;
    esac
done

echo "version=${version}"

#docker manifest rm tdengine/tdengine
#docker manifest rm tdengine/tdengine:${version}
if [ "$verType" == "beta" ]; then
    docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
    docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
    docker manifest rm tdengine/tdengine-beta:${version}
    docker manifest rm tdengine/tdengine-beta:latest
    docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
    docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
    docker manifest inspect tdengine/tdengine:latest
    docker manifest inspect tdengine/tdengine:${version}
    docker login -u tdengine -p ${passWord} #replace the docker registry username and password
    docker manifest push tdengine/tdengine-beta:${version}
    docker manifest push tdengine/tdengine-beta:latest
elif [ "$verType" == "stable" ]; then
    docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
    docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
    docker manifest rm tdengine/tdengine:latest
    docker manifest rm tdengine/tdengine:${version}
    docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
    docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
    docker manifest inspect tdengine/tdengine:latest
    docker manifest inspect tdengine/tdengine:${version}
    docker login -u tdengine -p ${passWord} #replace the docker registry username and password
    docker manifest push tdengine/tdengine:${version}
    docker manifest push tdengine/tdengine:latest
else
    echo "unknown verType, neither stable nor beta"
    exit 1
fi

# docker manifest create -a tdengine/${dockername}:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
# docker manifest create -a tdengine/${dockername}:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest

# docker login -u tdengine -p ${passWord} #replace the docker registry username and password

# docker manifest push tdengine/tdengine:latest

# # how set latest version ???

@ -1,174 +1,174 @@
#!/bin/bash
#

set -e
#set -x

# dockerbuild.sh
# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
# -n [version number]
# -p [password for docker hub]
# -V [stable | beta]
# -f [pkg file]
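# Hypothetical example (version number and package name are placeholders):
#   ./dockerbuild.sh -c amd64 -n 3.0.0.0 -V stable -f TDengine-server-3.0.0.0-Linux-amd64.tar.gz -p <docker hub password>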

# set parameters by default value
cpuType=""
cpuTypeAlias=""
version=""
passWord=""
pkgFile=""
verType="stable"
dockerLatest="n"

while getopts "hc:n:p:f:V:a:b:" arg
do
    case $arg in
        c)
            #echo "cpuType=$OPTARG"
            cpuType=$(echo $OPTARG)
            ;;
        n)
            #echo "version=$OPTARG"
            version=$(echo $OPTARG)
            ;;
        p)
            #echo "passWord=$OPTARG"
            passWord=$(echo $OPTARG)
            ;;
        f)
            #echo "pkgFile=$OPTARG"
            pkgFile=$(echo $OPTARG)
            ;;
        b)
            #echo "branchName=$OPTARG"
            branchName=$(echo $OPTARG)
            ;;
        V)
            #echo "verType=$OPTARG"
            verType=$(echo $OPTARG)
            ;;
        a)
            #echo "dockerLatest=$OPTARG"
            dockerLatest=$(echo $OPTARG)
            ;;
        h)
            echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
            echo " -n [version number] "
            echo " -p [password for docker hub] "
            echo " -V [stable | beta] "
            echo " -f [pkg file] "
            echo " -a [y | n ] "
            exit 0
            ;;
        ?) #unknown option
            echo "unknown argument"
            exit 1
            ;;
    esac
done


# Check_verison()
# {
# }


if [ "$verType" == "beta" ]; then
    dockername=${cpuType}-${verType}
    dirName=${pkgFile%-beta*}
elif [ "$verType" == "stable" ]; then
    dockername=${cpuType}
    dirName=${pkgFile%-Linux*}
else
    echo "unknown verType, neither stable nor beta"
    exit 1
fi


echo "cpuType=${cpuType} version=${version} pkgFile=${pkgFile} verType=${verType} "
echo "$(pwd)"
echo "====NOTES: ${pkgFile} must be in the same directory as dockerbuild.sh===="

scriptDir=$(dirname $(readlink -f $0))
comunityArchiveDir=/nas/TDengine/v$version/community # community version's package directory
communityDir=${scriptDir}/../../../community
DockerfilePath=${communityDir}/packaging/docker/
Dockerfile=${communityDir}/packaging/docker/Dockerfile
cd ${scriptDir}
cp -f ${comunityArchiveDir}/${pkgFile} .

echo "dirName=${dirName}"

if [[ "${cpuType}" == "x64" ]] || [[ "${cpuType}" == "amd64" ]]; then
    cpuTypeAlias="amd64"
elif [[ "${cpuType}" == "aarch64" ]]; then
    cpuTypeAlias="arm64"
elif [[ "${cpuType}" == "aarch32" ]]; then
    cpuTypeAlias="armhf"
else
    echo "Unknown cpuType: ${cpuType}"
    exit 1
fi

docker build --rm -f "${Dockerfile}" --network=host -t tdengine/tdengine-${dockername}:${version} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName} --build-arg cpuType=${cpuTypeAlias}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker push tdengine/tdengine-${dockername}:${version}

if [ -n "$(docker ps -aq)" ] ;then
    echo "delete docker process"
    docker stop $(docker ps -aq)
    docker rm $(docker ps -aq)
fi

if [ -n "$(pidof taosd)" ] ;then
    echo "kill taosd "
    kill -9 $(pidof taosd)
fi

if [ -n "$(pidof power)" ] ;then
    echo "kill power "
    kill -9 $(pidof power)
fi


echo ">>>>>>>>>>>>> check whether tdengine/tdengine-${dockername}:${version} has been published"
docker run -d --name doctest -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine-${dockername}:${version}
sleep 2
curl -u root:taosdata -d 'show variables;' 127.0.0.1:6041/rest/sql > temp1.data
data_version=$( cat temp1.data |jq .data| jq '.[]' |grep "version" -A 2 -B 1 | jq ".[1]")
echo "${data_version}"
if [ "${data_version}" == "\"${version}\"" ] ; then
    echo "docker version is right "
else
    echo "docker version is wrong "
    exit 1
fi
rm -rf temp1.data

# set this version to latest version
if [ ${dockerLatest} == 'y' ] ;then
    docker tag tdengine/tdengine-${dockername}:${version} tdengine/tdengine-${dockername}:latest
    docker push tdengine/tdengine-${dockername}:latest
    echo ">>>>>>>>>>>>> check whether tdengine/tdengine-${dockername}:latest has been published correctly"
    docker run -d --name doctestla -p 7030-7049:6030-6049 -p 7030-7049:6030-6049/udp tdengine/tdengine-${dockername}:latest
    sleep 2
    curl -u root:taosdata -d 'show variables;' 127.0.0.1:7041/rest/sql > temp2.data
    version_latest=` cat temp2.data |jq .data| jq '.[]' |grep "version" -A 2 -B 1 | jq ".[1]" `
    echo "${version_latest}"
    if [ "${version_latest}" == "\"${version}\"" ] ; then
        echo "docker version is right "
    else
        echo "docker version is wrong "
        exit 1
    fi
fi
rm -rf temp2.data

if [ -n "$(docker ps -aq)" ] ;then
    echo "delete docker process"
    docker stop $(docker ps -aq)
    docker rm $(docker ps -aq)
fi

cd ${scriptDir}
rm -f ${pkgFile}

@ -1,56 +1,56 @@
|
|||
#!/bin/bash
#

set -e
#set -x

# dockerbuild.sh
#             -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
#             -n [version number]
#             -p [password for docker hub]

# set parameters by default value
cpuType=aarch64
verNumber=""
passWord=""

while getopts "hc:n:p:f:" arg
do
  case $arg in
    c)
      #echo "cpuType=$OPTARG"
      cpuType=$(echo $OPTARG)
      ;;
    n)
      #echo "verNumber=$OPTARG"
      verNumber=$(echo $OPTARG)
      ;;
    p)
      #echo "passWord=$OPTARG"
      passWord=$(echo $OPTARG)
      ;;
    h)
      echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
      echo "                     -n [version number] "
      echo "                     -p [password for docker hub] "
      exit 0
      ;;
    ?) #unknown option
      echo "unknown argument"
      exit 1
      ;;
  esac
done

pkgFile=TDengine-server-${verNumber}-Linux-${cpuType}.tar.gz

echo "cpuType=${cpuType} verNumber=${verNumber} pkgFile=${pkgFile} "

scriptDir=`pwd`
pkgDir=$scriptDir/../../release/

cp -f ${pkgDir}/${pkgFile} .

./dockerbuild.sh -c ${cpuType} -f ${pkgFile} -n ${verNumber} -p ${passWord}

rm -f ${pkgFile}

@@ -1,62 +1,62 @@
@echo off

set internal_dir=%~dp0\..\..\
set community_dir=%~dp0\..
cd %community_dir%
git checkout -- .
cd %community_dir%\packaging

:: %1 name %2 version
if !%1==! GOTO USAGE
if !%2==! GOTO USAGE
if %1 == taos GOTO TAOS
if %1 == power GOTO POWER
if %1 == tq GOTO TQ
if %1 == pro GOTO PRO
if %1 == kh GOTO KH
if %1 == jh GOTO JH
GOTO USAGE

:TAOS
goto RELEASE

:POWER
call sed_power.bat %community_dir%
goto RELEASE

:TQ
call sed_tq.bat %community_dir%
goto RELEASE

:PRO
call sed_pro.bat %community_dir%
goto RELEASE

:KH
call sed_kh.bat %community_dir%
goto RELEASE

:JH
call sed_jh.bat %community_dir%
goto RELEASE

:RELEASE
echo release windows-client-64 for %1, version: %2
if not exist %internal_dir%\debug\ver-%2-64bit-%1 (
    md %internal_dir%\debug\ver-%2-64bit-%1
) else (
    rd /S /Q %internal_dir%\debug\ver-%2-64bit-%1
    md %internal_dir%\debug\ver-%2-64bit-%1
)
cd %internal_dir%\debug\ver-%2-64bit-%1
call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64
cmake ../../ -G "NMake Makefiles" -DVERNUMBER=%2 -DCPUTYPE=x64
set CL=/MP4
nmake install
goto EXIT0

:USAGE
echo Usage: release.bat $productName $version
goto EXIT0

:EXIT0

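:: Illustrative sketch (not part of release.bat): a typical invocation; the product
:: name and version below are placeholders.
::   release.bat taos 3.0.0.0
:: builds the 64-bit Windows client under %internal_dir%\debug\ver-3.0.0.0-64bit-taos.
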
@@ -1,94 +1,315 @@
#!/bin/bash
#
# Generate the tar.gz package for linux os
# Generate the deb package for ubuntu, or rpm package for centos, or tar.gz package for other linux os

set -e
#set -x

# release.sh  -v [cluster | edge]
#             -c [aarch32 | aarch64 | x64 | x86 | mips64 ...]
#             -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
#             -V [stable | beta]
#             -l [full | lite]
#             -s [static | dynamic]
#             -d [taos | ...]
#             -n [2.0.0.3]
#             -m [2.0.0.0]
#             -H [ false | true]

# set parameters by default value
version="3.0.0.0"
verMode=edge       # [cluster, edge]
verType=stable     # [stable, beta]
cpuType=x64        # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
osType=Linux       # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
pagMode=full       # [full | lite]
soMode=dynamic     # [static | dynamic]
dbName=taos        # [taos | ...]
allocator=glibc    # [glibc | jemalloc]
verNumber=""
verNumberComp="2.0.0.0"
httpdBuild=false

while getopts "hv:V:c:o:l:s:d:a:n:m:H:" arg; do
  case $arg in
    v)
      #echo "verMode=$OPTARG"
      verMode=$(echo $OPTARG)
      ;;
    V)
      #echo "verType=$OPTARG"
      verType=$(echo $OPTARG)
      ;;
    c)
      #echo "cpuType=$OPTARG"
      cpuType=$(echo $OPTARG)
      ;;
    l)
      #echo "pagMode=$OPTARG"
      pagMode=$(echo $OPTARG)
      ;;
    s)
      #echo "soMode=$OPTARG"
      soMode=$(echo $OPTARG)
      ;;
    d)
      #echo "dbName=$OPTARG"
      dbName=$(echo $OPTARG)
      ;;
    a)
      #echo "allocator=$OPTARG"
      allocator=$(echo $OPTARG)
      ;;
    n)
      #echo "verNumber=$OPTARG"
      verNumber=$(echo $OPTARG)
      ;;
    m)
      #echo "verNumberComp=$OPTARG"
      verNumberComp=$(echo $OPTARG)
      ;;
    o)
      #echo "osType=$OPTARG"
      osType=$(echo $OPTARG)
      ;;
    H)
      #echo "httpdBuild=$OPTARG"
      httpdBuild=$(echo $OPTARG)
      ;;
    h)
      echo "Usage: $(basename $0) -v [cluster | edge] "
      echo "                      -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] "
      echo "                      -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] "
      echo "                      -V [stable | beta] "
      echo "                      -l [full | lite] "
      echo "                      -a [glibc | jemalloc] "
      echo "                      -s [static | dynamic] "
      echo "                      -d [taos | ...] "
      echo "                      -n [version number] "
      echo "                      -m [compatible version number] "
      echo "                      -H [false | true] "
      exit 0
      ;;
    ?) #unknown option
      echo "unknown argument"
      exit 1
      ;;
  esac
done

echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp} httpdBuild=${httpdBuild}"

curr_dir=$(pwd)

script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/..)"
if [ "$osType" == "Darwin" ]; then
  script_dir=$(dirname $0)
  cd ${script_dir}
  script_dir="$(pwd)"
  top_dir=${script_dir}/..
else
  script_dir="$(dirname $(readlink -f $0))"
  top_dir="$(readlink -f ${script_dir}/..)"
fi

echo "=======================new version number: ${verNumber}======================================"
csudo=""
if command -v sudo > /dev/null; then
  csudo="sudo "
fi

function is_valid_version() {
  [ -z $1 ] && return 1 || :

  rx='^([0-9]+\.){3}(\*|[0-9]+)$'
  if [[ $1 =~ $rx ]]; then
    return 0
  fi
  return 1
}

function vercomp() {
  if [[ $1 == $2 ]]; then
    echo 0
    exit 0
  fi

  local IFS=.
  local i ver1=($1) ver2=($2)

  # fill empty fields in ver1 with zeros
  for ((i = ${#ver1[@]}; i < ${#ver2[@]}; i++)); do
    ver1[i]=0
  done

  for ((i = 0; i < ${#ver1[@]}; i++)); do
    if [[ -z ${ver2[i]} ]]; then
      # fill empty fields in ver2 with zeros
      ver2[i]=0
    fi
    if ((10#${ver1[i]} > 10#${ver2[i]})); then
      echo 1
      exit 0
    fi
    if ((10#${ver1[i]} < 10#${ver2[i]})); then
      echo 2
      exit 0
    fi
  done
  echo 0
}

# 1. check version information
if ( (! is_valid_version $verNumber) || (! is_valid_version $verNumberComp) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]]); then
  echo "please enter correct version"
  exit 0
fi

echo "=======================new version number: ${verNumber}, compatible version: ${verNumberComp}======================================"

build_time=$(date +"%F %R")

echo "script_dir: ${script_dir}"
echo "top_dir: ${top_dir}"
# get commit id from git
gitinfo=$(git rev-parse --verify HEAD)

cd ${top_dir}
# git checkout -- .
# git checkout 3.0
# git pull || :
if [[ "$verMode" == "cluster" ]]; then
  enterprise_dir="${top_dir}/../enterprise"
  cd ${enterprise_dir}
  gitinfoOfInternal=$(git rev-parse --verify HEAD)
else
  gitinfoOfInternal=NULL
fi

echo "curr_dir: ${curr_dir}"
cd "${curr_dir}"

# 2. cmake executable file
compile_dir="${top_dir}/debug"
# if [ -d ${compile_dir} ]; then
#   rm -rf ${compile_dir}
# fi

mkdir -p ${compile_dir}

cd ${compile_dir}

echo "compile_dir: ${compile_dir}"

cmake .. -DBUILD_TOOLS=true
make -j32

release_dir="${top_dir}/release"
if [ -d ${release_dir} ]; then
  rm -rf ${release_dir}
fi
if [ -d ${compile_dir} ]; then
  ${csudo}rm -rf ${compile_dir}
fi

mkdir -p ${release_dir}
cd ${release_dir}
if [ "$osType" != "Darwin" ]; then
  ${csudo}mkdir -p ${compile_dir}
else
  mkdir -p ${compile_dir}
fi
cd ${compile_dir}

install_dir="${release_dir}/TDengine-server-${version}"
mkdir -p ${install_dir}
mkdir -p ${install_dir}/bin
mkdir -p ${install_dir}/lib
mkdir -p ${install_dir}/inc
if [[ "$allocator" == "jemalloc" ]]; then
  allocator_macro="-DJEMALLOC_ENABLED=true"
else
  allocator_macro=""
fi

install_files="${script_dir}/tools/install.sh"
chmod a+x ${script_dir}/tools/install.sh || :
cp ${install_files} ${install_dir}
if [[ "$dbName" != "taos" ]]; then
  source ${enterprise_dir}/packaging/oem/sed_$dbName.sh
  replace_community_$dbName
fi

header_files="${top_dir}/include/client/taos.h ${top_dir}/include/util/taoserror.h"
cp ${header_files} ${install_dir}/inc

bin_files="${compile_dir}/build/bin/taosd ${compile_dir}/build/bin/taos ${compile_dir}/build/bin/create_table ${compile_dir}/build/bin/tmq_sim ${script_dir}/tools/remove.sh ${compile_dir}/build/bin/taosBenchmark ${compile_dir}/build/bin/taosdump"
cp -rf ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
if [[ "$httpdBuild" == "true" ]]; then
  BUILD_HTTP=true
else
  BUILD_HTTP=false
fi

cp ${compile_dir}/build/lib/libtaos.so ${install_dir}/lib/
cp ${compile_dir}/build/lib/libavro* ${install_dir}/lib/ > /dev/null || echo -e "failed to copy avro libraries"
cp -rf ${compile_dir}/build/lib/pkgconfig ${install_dir}/lib/ > /dev/null || echo -e "failed to copy pkgconfig directory"
if [[ "$verMode" == "cluster" ]]; then
  BUILD_HTTP=internal
fi

if [[ "$pagMode" == "full" ]]; then
  BUILD_TOOLS=true
else
  BUILD_TOOLS=false
fi

#cp ${compile_dir}/source/dnode/mnode/impl/libmnode.so ${install_dir}/lib/
#cp ${compile_dir}/source/dnode/qnode/libqnode.so ${install_dir}/lib/
#cp ${compile_dir}/source/dnode/snode/libsnode.so ${install_dir}/lib/
#cp ${compile_dir}/source/dnode/bnode/libbnode.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/wal/libwal.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/scheduler/libscheduler.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/planner/libplanner.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/parser/libparser.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/qcom/libqcom.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/transport/libtransport.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/function/libfunction.so ${install_dir}/lib/
#cp ${compile_dir}/source/common/libcommon.so ${install_dir}/lib/
#cp ${compile_dir}/source/os/libos.so ${install_dir}/lib/
#cp ${compile_dir}/source/dnode/mnode/sdb/libsdb.so ${install_dir}/lib/
#cp ${compile_dir}/source/libs/catalog/libcatalog.so ${install_dir}/lib/
# check supported cpu type
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]]; then
  if [ "$verMode" != "cluster" ]; then
    # community-version compile
    cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
  else
    if [[ "$dbName" != "taos" ]]; then
      replace_enterprise_$dbName
    fi
    cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
  fi
else
  echo "input cpuType=${cpuType} error!!!"
  exit 1
fi

pkg_name=${install_dir}-Linux-x64
CORES=$(grep -c ^processor /proc/cpuinfo)

tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
if [[ "$allocator" == "jemalloc" ]]; then
  # jemalloc need compile first, so disable parallel build
  make -j ${CORES} && ${csudo}make install
else
  make -j ${CORES} && ${csudo}make install
fi

cd ${curr_dir}

# 3. Call the corresponding script for packaging
if [ "$osType" != "Darwin" ]; then
  if [[ "$verMode" != "cluster" ]] && [[ "$pagMode" == "full" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
    ret='0'
    command -v dpkg >/dev/null 2>&1 || { ret='1'; }
    if [ "$ret" -eq 0 ]; then
      echo "====do deb package for the ubuntu system===="
      output_dir="${top_dir}/debs"
      if [ -d ${output_dir} ]; then
        ${csudo}rm -rf ${output_dir}
      fi
      ${csudo}mkdir -p ${output_dir}
      cd ${script_dir}/deb
      ${csudo}./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}

      if [[ "$pagMode" == "full" ]]; then
        if [ -d ${top_dir}/tools/taos-tools/packaging/deb ]; then
          cd ${top_dir}/tools/taos-tools/packaging/deb
          [ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"

          taos_tools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}')
          ${csudo}./make-taos-tools-deb.sh ${top_dir} \
            ${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
        fi
      fi
    else
      echo "==========dpkg command not exist, so not release deb package!!!"
    fi
    ret='0'
    command -v rpmbuild >/dev/null 2>&1 || { ret='1'; }
    if [ "$ret" -eq 0 ]; then
      echo "====do rpm package for the centos system===="
      output_dir="${top_dir}/rpms"
      if [ -d ${output_dir} ]; then
        ${csudo}rm -rf ${output_dir}
      fi
      ${csudo}mkdir -p ${output_dir}
      cd ${script_dir}/rpm
      ${csudo}./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}

      if [[ "$pagMode" == "full" ]]; then
        if [ -d ${top_dir}/tools/taos-tools/packaging/rpm ]; then
          cd ${top_dir}/tools/taos-tools/packaging/rpm
          [ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"

          taos_tools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}' | sed -e 's/-/_/g')
          ${csudo}./make-taos-tools-rpm.sh ${top_dir} \
            ${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
        fi
      fi
    else
      echo "==========rpmbuild command not exist, so not release rpm package!!!"
    fi
  fi

  echo "====do tar.gz package for all systems===="
  cd ${script_dir}/tools

  ${csudo}./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} ${dbName}
  ${csudo}./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
  # ${csudo}./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}

else
  # only make client for Darwin
  cd ${script_dir}/tools
  ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
fi

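# Illustrative sketch (not part of release.sh): how the version helpers above behave
# when the functions are sourced into a shell. vercomp prints 0 for equal versions,
# 1 when the first argument is newer, and 2 when it is older; is_valid_version
# accepts only four dot-separated numeric fields.
is_valid_version "3.0.0.0" && echo "valid"       # matches the x.x.x.x pattern
is_valid_version "3.0.0" || echo "rejected"      # only three fields, rejected
[ "$(vercomp 3.0.0.1 3.0.0.0)" == "1" ] && echo "3.0.0.1 is newer than 3.0.0.0"
[ "$(vercomp 2.0.0.0 3.0.0.0)" == "2" ] && echo "2.0.0.0 is older than 3.0.0.0"
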
@@ -1,87 +1,87 @@
#!/bin/bash
#
# Generate rpm package for centos

set -e
# set -x

#curr_dir=$(pwd)
compile_dir=$1
output_dir=$2
tdengine_ver=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7

script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
pkg_dir="${top_dir}/rpmworkroom"
spec_file="${script_dir}/tdengine.spec"

#echo "curr_dir: ${curr_dir}"
#echo "top_dir: ${top_dir}"
#echo "script_dir: ${script_dir}"
echo "compile_dir: ${compile_dir}"
echo "pkg_dir: ${pkg_dir}"
echo "spec_file: ${spec_file}"

csudo=""
if command -v sudo > /dev/null; then
  csudo="sudo "
fi

function cp_rpm_package() {
  local cur_dir
  cd $1
  cur_dir=$(pwd)

  for dirlist in "$(ls ${cur_dir})"; do
    if test -d ${dirlist}; then
      cd ${dirlist}
      cp_rpm_package ${cur_dir}/${dirlist}
      cd ..
    fi
    if test -e ${dirlist}; then
      cp ${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm
    fi
  done
}

if [ -d ${pkg_dir} ]; then
  ${csudo}rm -rf ${pkg_dir}
fi
${csudo}mkdir -p ${pkg_dir}
cd ${pkg_dir}

${csudo}mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS

${csudo}rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file}

# copy rpm package to output_dir, and modify package name, then clean temp dir
#${csudo}cp -rf RPMS/* ${output_dir}
cp_rpm_package ${pkg_dir}/RPMS

if [ "$verMode" == "cluster" ]; then
  rpmname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
  rpmname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType}
else
  echo "unknown verMode, neither cluster nor edge"
  exit 1
fi

if [ "$verType" == "beta" ]; then
  rpmname="TDengine-server-"${tdengine_ver}-${verType}-${osType}-${cpuType}".rpm"
elif [ "$verType" == "stable" ]; then
  rpmname=${rpmname}".rpm"
else
  echo "unknown verType, neither stable nor beta"
  exit 1
fi

mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/${rpmname}

cd ..
${csudo}rm -rf ${pkg_dir}

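# Illustrative sketch (not part of makerpm.sh): an example invocation with the seven
# positional arguments the script expects; the two paths and the version are
# placeholders. With verMode=edge and verType=stable the result would be renamed to
#   ${output_dir}/TDengine-server-3.0.0.0-Linux-x64.rpm
sudo ./makerpm.sh /path/to/TDengine/debug /path/to/TDengine/rpms 3.0.0.0 x64 Linux edge stable
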
@@ -1,145 +1,145 @@
#!/bin/bash
#
# taosd    This shell script takes care of starting and stopping TDengine.
#
# chkconfig: 2345 99 01
# description: TDengine is a distributed, scalable, high-performance Time Series Database
#              (TSDB). More than just a pure database, TDengine also provides the ability
#              to do stream computing, aggregation etc.
#
#
### BEGIN INIT INFO
# Provides:          taosd
# Required-Start:    $network $local_fs $remote_fs
# Required-Stop:     $network $local_fs $remote_fs
# Short-Description: start and stop taosd
# Description:       TDengine is a distributed, scalable, high-performance Time Series Database
#                    (TSDB). More than just a pure database, TDengine also provides the ability
#                    to do stream computing, aggregation etc.
### END INIT INFO

# Source init functions
. /etc/init.d/functions

# Maximum number of open files
MAX_OPEN_FILES=65535

# Default program options
NAME=taosd
PROG=/usr/local/taos/bin/taosd
USER=root
GROUP=root

# Default directories
LOCK_DIR=/var/lock/subsys
PID_DIR=/var/run/$NAME

# Set file names
LOCK_FILE=$LOCK_DIR/$NAME
PID_FILE=$PID_DIR/$NAME.pid

[ -e $PID_DIR ] || mkdir -p $PID_DIR

PROG_OPTS=""

start() {
  echo -n "Starting ${NAME}: "
  # check identity
  curid="`id -u -n`"
  if [ "$curid" != root ] && [ "$curid" != "$USER" ] ; then
    echo "Must be run as root or $USER, but was run as $curid"
    return 1
  fi
  # Sets the maximum number of open file descriptors allowed.
  ulimit -n $MAX_OPEN_FILES
  curulimit="`ulimit -n`"
  if [ "$curulimit" -lt $MAX_OPEN_FILES ] ; then
    echo "'ulimit -n' must be greater than or equal to $MAX_OPEN_FILES, is $curulimit"
    return 1
  fi

  if [ "`id -u -n`" == root ] ; then
    # Changes the owner of the lock, and the pid files to allow
    # non-root OpenTSDB daemons to run /usr/share/opentsdb/bin/opentsdb_restart.py.
    touch $LOCK_FILE && chown $USER:$GROUP $LOCK_FILE
    touch $PID_FILE && chown $USER:$GROUP $PID_FILE
    daemon --user $USER --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
  else
    # Don't have to change user.
    daemon --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
  fi
  retval=$?
  sleep 2
  echo
  [ $retval -eq 0 ] && (findproc > $PID_FILE && touch $LOCK_FILE)
  return $retval
}

stop() {
  echo -n "Stopping ${NAME}: "
  killproc -p $PID_FILE $NAME
  retval=$?
  echo
  # Non-root users don't have enough permission to remove pid and lock files.
  # So, the opentsdb_restart.py cannot get rid of the files, and the command
  # "service opentsdb status" will complain about the existing pid file.
  # Makes the pid file empty.
  echo > $PID_FILE
  [ $retval -eq 0 ] && (rm -f $PID_FILE && rm -f $LOCK_FILE)
  return $retval
}

restart() {
  stop
  start
}

reload() {
  restart
}

force_reload() {
  restart
}

rh_status() {
  # run checks to determine if the service is running or use generic status
  status -p $PID_FILE -l $LOCK_FILE $NAME
}

rh_status_q() {
  rh_status >/dev/null 2>&1
}

case "$1" in
  start)
    rh_status_q && exit 0
    $1
    ;;
  stop)
    rh_status_q || exit 0
    $1
    ;;
  restart)
    $1
    ;;
  reload)
    rh_status_q || exit 7
    $1
    ;;
  force-reload)
    force_reload
    ;;
  status)
    rh_status
    ;;
  condrestart|try-restart)
    rh_status_q || exit 0
    restart
    ;;
  *)
    echo "Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
    exit 2
esac

exit $?

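# Illustrative sketch (not part of the init script): how this SysV service is
# typically registered and driven on a chkconfig-based system, assuming the script
# is installed as /etc/init.d/taosd.
chkconfig --add taosd
chkconfig taosd on
service taosd start
service taosd status
service taosd stop
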
@@ -1,141 +1,141 @@
#!/bin/bash
#
# tarbitratord    This shell script takes care of starting and stopping tarbitrator.
#
# chkconfig: 2345 99 01
# description: tarbitrator is an arbitrator used in TDengine cluster.
#
#
### BEGIN INIT INFO
# Provides:          taoscluster
# Required-Start:    $network $local_fs $remote_fs
# Required-Stop:     $network $local_fs $remote_fs
# Short-Description: start and stop tarbitrator
# Description:       tarbitrator is an arbitrator used in TDengine cluster.
### END INIT INFO

# Source init functions
. /etc/init.d/functions

# Maximum number of open files
MAX_OPEN_FILES=65535

# Default program options
NAME=tarbitrator
PROG=/usr/local/taos/bin/tarbitrator
USER=root
GROUP=root

# Default directories
LOCK_DIR=/var/lock/subsys
PID_DIR=/var/run/$NAME

# Set file names
LOCK_FILE=$LOCK_DIR/$NAME
PID_FILE=$PID_DIR/$NAME.pid

[ -e $PID_DIR ] || mkdir -p $PID_DIR

PROG_OPTS=""

start() {
  echo -n "Starting ${NAME}: "
  # check identity
  curid="`id -u -n`"
  if [ "$curid" != root ] && [ "$curid" != "$USER" ] ; then
    echo "Must be run as root or $USER, but was run as $curid"
    return 1
  fi
  # Sets the maximum number of open file descriptors allowed.
  ulimit -n $MAX_OPEN_FILES
  curulimit="`ulimit -n`"
  if [ "$curulimit" -lt $MAX_OPEN_FILES ] ; then
    echo "'ulimit -n' must be greater than or equal to $MAX_OPEN_FILES, is $curulimit"
    return 1
  fi

  if [ "`id -u -n`" == root ] ; then
    # Changes the owner of the lock, and the pid files to allow
    # non-root OpenTSDB daemons to run /usr/share/opentsdb/bin/opentsdb_restart.py.
    touch $LOCK_FILE && chown $USER:$GROUP $LOCK_FILE
    touch $PID_FILE && chown $USER:$GROUP $PID_FILE
    daemon --user $USER --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
  else
    # Don't have to change user.
    daemon --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
  fi
  retval=$?
  sleep 2
  echo
  [ $retval -eq 0 ] && (findproc > $PID_FILE && touch $LOCK_FILE)
  return $retval
}

stop() {
  echo -n "Stopping ${NAME}: "
  killproc -p $PID_FILE $NAME
  retval=$?
  echo
  # Non-root users don't have enough permission to remove pid and lock files.
  # So, the opentsdb_restart.py cannot get rid of the files, and the command
  # "service opentsdb status" will complain about the existing pid file.
  # Makes the pid file empty.
  echo > $PID_FILE
  [ $retval -eq 0 ] && (rm -f $PID_FILE && rm -f $LOCK_FILE)
  return $retval
}

restart() {
  stop
  start
}

reload() {
  restart
}

force_reload() {
  restart
}

rh_status() {
  # run checks to determine if the service is running or use generic status
  status -p $PID_FILE -l $LOCK_FILE $NAME
}

rh_status_q() {
  rh_status >/dev/null 2>&1
}

case "$1" in
  start)
    rh_status_q && exit 0
    $1
    ;;
  stop)
    rh_status_q || exit 0
    $1
    ;;
  restart)
    $1
    ;;
  reload)
    rh_status_q || exit 7
    $1
    ;;
  force-reload)
    force_reload
    ;;
  status)
    rh_status
    ;;
  condrestart|try-restart)
    rh_status_q || exit 0
    restart
    ;;
  *)
    echo "Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
    exit 2
esac

exit $?

@@ -1,236 +1,236 @@
%define homepath         /usr/local/taos
%define userlocalpath    /usr/local
%define cfg_install_dir  /etc/taos
%define __strip /bin/true

Name:      tdengine
Version:   %{_version}
Release:   3%{?dist}
Summary:   tdengine from taosdata
Group:     Application/Database
License:   AGPL
URL:       www.taosdata.com
AutoReqProv: no

#BuildRoot:  %_topdir/BUILDROOT
BuildRoot:   %{_tmppath}/%{name}-%{version}-%{release}-root

#Prefix: /usr/local/taos

#BuildRequires:
#Requires:

%description
Big Data Platform Designed and Optimized for IoT

#"prep" Nothing needs to be done
#%prep
#%setup -q
#%setup -T

#"build" Nothing needs to be done
#%build
#%configure
#make %{?_smp_mflags}

%install
#make install DESTDIR=%{buildroot}
rm -rf %{buildroot}

echo topdir: %{_topdir}
echo version: %{_version}
echo buildroot: %{buildroot}

libfile="libtaos.so.%{_version}"

# create install path, and cp file
mkdir -p %{buildroot}%{homepath}/bin
mkdir -p %{buildroot}%{homepath}/cfg
#mkdir -p %{buildroot}%{homepath}/connector
mkdir -p %{buildroot}%{homepath}/driver
mkdir -p %{buildroot}%{homepath}/examples
mkdir -p %{buildroot}%{homepath}/include
#mkdir -p %{buildroot}%{homepath}/init.d
mkdir -p %{buildroot}%{homepath}/script

cp %{_compiledir}/../packaging/cfg/taos.cfg %{buildroot}%{homepath}/cfg
if [ -f %{_compiledir}/test/cfg/taosadapter.toml ]; then
    cp %{_compiledir}/test/cfg/taosadapter.toml %{buildroot}%{homepath}/cfg
fi
if [ -f %{_compiledir}/test/cfg/taosadapter.service ]; then
    cp %{_compiledir}/test/cfg/taosadapter.service %{buildroot}%{homepath}/cfg
fi
#cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d
cp %{_compiledir}/../packaging/tools/post.sh %{buildroot}%{homepath}/script
cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/script
cp %{_compiledir}/../packaging/tools/startPre.sh %{buildroot}%{homepath}/bin
cp %{_compiledir}/../packaging/tools/set_core.sh %{buildroot}%{homepath}/bin
cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
#cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin

if [ -f %{_compiledir}/build/bin/taosadapter ]; then
    cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||:
fi
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taosdef.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
#cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
#cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
#cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
#cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||:
cp -r %{_compiledir}/../examples/* %{buildroot}%{homepath}/examples

if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
    mkdir -p %{buildroot}%{userlocalpath}/bin
    mkdir -p %{buildroot}%{userlocalpath}/lib
    mkdir -p %{buildroot}%{userlocalpath}/lib/pkgconfig
    mkdir -p %{buildroot}%{userlocalpath}/include
    mkdir -p %{buildroot}%{userlocalpath}/include/jemalloc
    mkdir -p %{buildroot}%{userlocalpath}/share
    mkdir -p %{buildroot}%{userlocalpath}/share/doc
    mkdir -p %{buildroot}%{userlocalpath}/share/doc/jemalloc
    mkdir -p %{buildroot}%{userlocalpath}/share/man
    mkdir -p %{buildroot}%{userlocalpath}/share/man/man3

    cp %{_compiledir}/build/bin/jemalloc-config %{buildroot}%{userlocalpath}/bin/
    if [ -f %{_compiledir}/build/bin/jemalloc.sh ]; then
        cp %{_compiledir}/build/bin/jemalloc.sh %{buildroot}%{userlocalpath}/bin/
    fi
    if [ -f %{_compiledir}/build/bin/jeprof ]; then
        cp %{_compiledir}/build/bin/jeprof %{buildroot}%{userlocalpath}/bin/
    fi
    if [ -f %{_compiledir}/build/include/jemalloc/jemalloc.h ]; then
        cp %{_compiledir}/build/include/jemalloc/jemalloc.h %{buildroot}%{userlocalpath}/include/jemalloc/
    fi
    if [ -f %{_compiledir}/build/lib/libjemalloc.so.2 ]; then
        cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/
        ln -sf libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/libjemalloc.so
    fi
    if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
        cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{userlocalpath}/lib/
    fi
    if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
        cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{userlocalpath}/lib/
    fi
    if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then
        cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{userlocalpath}/lib/pkgconfig/
    fi
    if [ -f %{_compiledir}/build/share/doc/jemalloc/jemalloc.html ]; then
        cp %{_compiledir}/build/share/doc/jemalloc/jemalloc.html %{buildroot}%{userlocalpath}/share/doc/jemalloc/
    fi
    if [ -f %{_compiledir}/build/share/man/man3/jemalloc.3 ]; then
        cp %{_compiledir}/build/share/man/man3/jemalloc.3 %{buildroot}%{userlocalpath}/share/man/man3/
    fi
fi

#Scripts executed before installation
%pre
csudo=""
if command -v sudo > /dev/null; then
    csudo="sudo "
fi

# Stop the service if running
if pidof taosd &> /dev/null; then
    if pidof systemd &> /dev/null; then
        ${csudo}systemctl stop taosd || :
    elif $(which service &> /dev/null); then
        ${csudo}service taosd stop || :
    else
        pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
        if [ -n "$pid" ]; then
            ${csudo}kill -9 $pid || :
        fi
    fi
    echo "Stop taosd service success!"
    sleep 1
fi
# if taos.cfg already exists, remove it
if [ -f %{cfg_install_dir}/taos.cfg ]; then
    ${csudo}rm -f %{cfg_install_dir}/cfg/taos.cfg || :
fi

# if taosadapter.toml already exists, remove it
if [ -f %{cfg_install_dir}/taosadapter.toml ]; then
    ${csudo}rm -f %{cfg_install_dir}/cfg/taosadapter.toml || :
fi

# there must be no libtaos.so* left over, otherwise ln -s fails
${csudo}rm -f %{homepath}/driver/libtaos* || :

#Scripts executed after installation
%post
csudo=""
if command -v sudo > /dev/null; then
    csudo="sudo "
fi
cd %{homepath}/script
${csudo}./post.sh

# Scripts executed before uninstall
%preun
csudo=""
if command -v sudo > /dev/null; then
    csudo="sudo "
fi
# only call preun.sh when the package is removed, not on update
if [ $1 -eq 0 ];then
    #cd %{homepath}/script
    #${csudo}./preun.sh

    if [ -f %{homepath}/script/preun.sh ]; then
        cd %{homepath}/script
        ${csudo}./preun.sh
    else
        bin_link_dir="/usr/bin"
        lib_link_dir="/usr/lib"
        inc_link_dir="/usr/include"

        data_link_dir="/usr/local/taos/data"
        log_link_dir="/usr/local/taos/log"
        cfg_link_dir="/usr/local/taos/cfg"

        # Remove all links
        ${csudo}rm -f ${bin_link_dir}/taos || :
        ${csudo}rm -f ${bin_link_dir}/taosd || :
        ${csudo}rm -f ${bin_link_dir}/taosadapter || :
        ${csudo}rm -f ${cfg_link_dir}/* || :
        ${csudo}rm -f ${inc_link_dir}/taos.h || :
        ${csudo}rm -f ${inc_link_dir}/taosdef.h || :
        ${csudo}rm -f ${inc_link_dir}/taoserror.h || :
        ${csudo}rm -f ${lib_link_dir}/libtaos.* || :

        ${csudo}rm -f ${log_link_dir} || :
        ${csudo}rm -f ${data_link_dir} || :

        pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
        if [ -n "$pid" ]; then
            ${csudo}kill -9 $pid || :
        fi
    fi
fi

# Scripts executed after uninstall
%postun

# clean build dir
%clean
csudo=""
if command -v sudo > /dev/null; then
    csudo="sudo "
fi
${csudo}rm -rf %{buildroot}

#Specify the files to be packaged
%files
/*
#%doc

#Setting default permissions
%defattr (-,root,root,0755)
#%{prefix}

#%changelog
%define homepath         /usr/local/taos
%define userlocalpath    /usr/local
%define cfg_install_dir  /etc/taos
%define __strip /bin/true

Name:      tdengine
Version:   %{_version}
Release:   3%{?dist}
Summary:   tdengine from taosdata
Group:     Application/Database
License:   AGPL
URL:       www.taosdata.com
AutoReqProv: no

#BuildRoot:  %_topdir/BUILDROOT
BuildRoot:   %{_tmppath}/%{name}-%{version}-%{release}-root

#Prefix: /usr/local/taos

#BuildRequires:
#Requires:

%description
Big Data Platform Designed and Optimized for IoT

#"prep" Nothing needs to be done
#%prep
#%setup -q
#%setup -T

#"build" Nothing needs to be done
#%build
#%configure
#make %{?_smp_mflags}

%install
#make install DESTDIR=%{buildroot}
rm -rf %{buildroot}

echo topdir: %{_topdir}
echo version: %{_version}
echo buildroot: %{buildroot}

libfile="libtaos.so.%{_version}"

# create install path, and cp file
mkdir -p %{buildroot}%{homepath}/bin
mkdir -p %{buildroot}%{homepath}/cfg
#mkdir -p %{buildroot}%{homepath}/connector
mkdir -p %{buildroot}%{homepath}/driver
mkdir -p %{buildroot}%{homepath}/examples
mkdir -p %{buildroot}%{homepath}/include
#mkdir -p %{buildroot}%{homepath}/init.d
mkdir -p %{buildroot}%{homepath}/script

cp %{_compiledir}/../packaging/cfg/taos.cfg %{buildroot}%{homepath}/cfg
if [ -f %{_compiledir}/test/cfg/taosadapter.toml ]; then
    cp %{_compiledir}/test/cfg/taosadapter.toml %{buildroot}%{homepath}/cfg
fi
if [ -f %{_compiledir}/test/cfg/taosadapter.service ]; then
    cp %{_compiledir}/test/cfg/taosadapter.service %{buildroot}%{homepath}/cfg
fi
#cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d
cp %{_compiledir}/../packaging/tools/post.sh %{buildroot}%{homepath}/script
cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/script
cp %{_compiledir}/../packaging/tools/startPre.sh %{buildroot}%{homepath}/bin
cp %{_compiledir}/../packaging/tools/set_core.sh %{buildroot}%{homepath}/bin
cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
#cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin

if [ -f %{_compiledir}/build/bin/taosadapter ]; then
    cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||:
fi
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../include/client/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../include/common/taosdef.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../include/util/taoserror.h %{buildroot}%{homepath}/include
#cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
#cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
#cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
#cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||:
cp -r %{_compiledir}/../examples/* %{buildroot}%{homepath}/examples

if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
    mkdir -p %{buildroot}%{userlocalpath}/bin
    mkdir -p %{buildroot}%{userlocalpath}/lib
    mkdir -p %{buildroot}%{userlocalpath}/lib/pkgconfig
    mkdir -p %{buildroot}%{userlocalpath}/include
    mkdir -p %{buildroot}%{userlocalpath}/include/jemalloc
    mkdir -p %{buildroot}%{userlocalpath}/share
    mkdir -p %{buildroot}%{userlocalpath}/share/doc
    mkdir -p %{buildroot}%{userlocalpath}/share/doc/jemalloc
    mkdir -p %{buildroot}%{userlocalpath}/share/man
    mkdir -p %{buildroot}%{userlocalpath}/share/man/man3

    cp %{_compiledir}/build/bin/jemalloc-config %{buildroot}%{userlocalpath}/bin/
    if [ -f %{_compiledir}/build/bin/jemalloc.sh ]; then
        cp %{_compiledir}/build/bin/jemalloc.sh %{buildroot}%{userlocalpath}/bin/
    fi
    if [ -f %{_compiledir}/build/bin/jeprof ]; then
        cp %{_compiledir}/build/bin/jeprof %{buildroot}%{userlocalpath}/bin/
    fi
    if [ -f %{_compiledir}/build/include/jemalloc/jemalloc.h ]; then
        cp %{_compiledir}/build/include/jemalloc/jemalloc.h %{buildroot}%{userlocalpath}/include/jemalloc/
    fi
    if [ -f %{_compiledir}/build/lib/libjemalloc.so.2 ]; then
        cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/
        ln -sf libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/libjemalloc.so
    fi
    if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
        cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{userlocalpath}/lib/
    fi
    if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
        cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{userlocalpath}/lib/
    fi
    if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then
        cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{userlocalpath}/lib/pkgconfig/
    fi
    if [ -f %{_compiledir}/build/share/doc/jemalloc/jemalloc.html ]; then
        cp %{_compiledir}/build/share/doc/jemalloc/jemalloc.html %{buildroot}%{userlocalpath}/share/doc/jemalloc/
    fi
    if [ -f %{_compiledir}/build/share/man/man3/jemalloc.3 ]; then
        cp %{_compiledir}/build/share/man/man3/jemalloc.3 %{buildroot}%{userlocalpath}/share/man/man3/
    fi
fi

#Scripts executed before installation
%pre
csudo=""
if command -v sudo > /dev/null; then
    csudo="sudo "
fi

# Stop the service if running
if pidof taosd &> /dev/null; then
    if pidof systemd &> /dev/null; then
        ${csudo}systemctl stop taosd || :
    elif $(which service &> /dev/null); then
        ${csudo}service taosd stop || :
    else
        pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
        if [ -n "$pid" ]; then
            ${csudo}kill -9 $pid || :
        fi
    fi
    echo "Stop taosd service success!"
    sleep 1
fi
# if taos.cfg already exists, remove it
if [ -f %{cfg_install_dir}/taos.cfg ]; then
    ${csudo}rm -f %{cfg_install_dir}/cfg/taos.cfg || :
fi

# if taosadapter.toml already exists, remove it
if [ -f %{cfg_install_dir}/taosadapter.toml ]; then
    ${csudo}rm -f %{cfg_install_dir}/cfg/taosadapter.toml || :
fi

# there must be no libtaos.so* left over, otherwise ln -s fails
${csudo}rm -f %{homepath}/driver/libtaos* || :

#Scripts executed after installation
%post
csudo=""
if command -v sudo > /dev/null; then
    csudo="sudo "
fi
cd %{homepath}/script
${csudo}./post.sh

# Scripts executed before uninstall
%preun
csudo=""
if command -v sudo > /dev/null; then
    csudo="sudo "
fi
# only call preun.sh when the package is removed, not on update
if [ $1 -eq 0 ];then
    #cd %{homepath}/script
    #${csudo}./preun.sh

    if [ -f %{homepath}/script/preun.sh ]; then
        cd %{homepath}/script
        ${csudo}./preun.sh
    else
        bin_link_dir="/usr/bin"
        lib_link_dir="/usr/lib"
        inc_link_dir="/usr/include"

        data_link_dir="/usr/local/taos/data"
        log_link_dir="/usr/local/taos/log"
        cfg_link_dir="/usr/local/taos/cfg"

        # Remove all links
        ${csudo}rm -f ${bin_link_dir}/taos || :
        ${csudo}rm -f ${bin_link_dir}/taosd || :
        ${csudo}rm -f ${bin_link_dir}/taosadapter || :
        ${csudo}rm -f ${cfg_link_dir}/* || :
        ${csudo}rm -f ${inc_link_dir}/taos.h || :
        ${csudo}rm -f ${inc_link_dir}/taosdef.h || :
        ${csudo}rm -f ${inc_link_dir}/taoserror.h || :
        ${csudo}rm -f ${lib_link_dir}/libtaos.* || :

        ${csudo}rm -f ${log_link_dir} || :
        ${csudo}rm -f ${data_link_dir} || :

        pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
        if [ -n "$pid" ]; then
            ${csudo}kill -9 $pid || :
        fi
    fi
fi

# Scripts executed after uninstall
%postun

# clean build dir
%clean
csudo=""
if command -v sudo > /dev/null; then
    csudo="sudo "
fi
${csudo}rm -rf %{buildroot}

#Specify the files to be packaged
%files
/*
#%doc

#Setting default permissions
%defattr (-,root,root,0755)
#%{prefix}

#%changelog
@ -1,52 +1,52 @@
#!/bin/bash
#
CSI=$(echo -e "\033[")
CRED="${CSI}1;31m"
CFAILURE="$CRED"
CEND="${CSI}0m"
if [ -n "$(grep 'Aliyun Linux release' /etc/issue)" -o -e /etc/redhat-release ]; then
  OS=CentOS
  [ -n "$(grep ' 7\.' /etc/redhat-release 2> /dev/null)" ] && CentOS_RHEL_version=7
  [ -n "$(grep ' 6\.' /etc/redhat-release 2> /dev/null)" -o -n "$(grep 'Aliyun Linux release6 15' /etc/issue)" ] && CentOS_RHEL_version=6
  [ -n "$(grep ' 5\.' /etc/redhat-release 2> /dev/null)" -o -n "$(grep 'Aliyun Linux release5' /etc/issue)" ] && CentOS_RHEL_version=5
elif [ -n "$(grep 'Amazon Linux AMI release' /etc/issue)" -o -e /etc/system-release ]; then
  OS=CentOS
  CentOS_RHEL_version=6
elif [ -n "$(grep 'bian' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Debian" ]; then
  OS=Debian
  [ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
  Debian_version=$(lsb_release -sr | awk -F. '{print $1}')
elif [ -n "$(grep 'Deepin' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Deepin" ]; then
  OS=Debian
  [ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
  Debian_version=$(lsb_release -sr | awk -F. '{print $1}')
elif [ -n "$(grep 'Kali GNU/Linux Rolling' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Kali" ]; then
  OS=Debian
  [ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
  if [ -n "$(grep 'VERSION="2016.*"' /etc/os-release)" ]; then
    Debian_version=8
  else
    echo "${CFAILURE}Does not support this OS, Please contact the author! ${CEND}"
    kill -9 $$
  fi
elif [ -n "$(grep 'Ubuntu' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Ubuntu" -o -n "$(grep 'Linux Mint' /etc/issue)" ]; then
  OS=Ubuntu
  [ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
  Ubuntu_version=$(lsb_release -sr | awk -F. '{print $1}')
  [ -n "$(grep 'Linux Mint 18' /etc/issue)" ] && Ubuntu_version=16
elif [ -n "$(grep 'elementary' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == 'elementary' ]; then
  OS=Ubuntu
  [ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
  Ubuntu_version=16
else
  echo "${CFAILURE}Does not support this OS, Please contact the author! ${CEND}"
  kill -9 $$
fi

echo "${CFAILURE}${OS}${CEND}"
if [ "$OS" == 'CentOS' ]; then
  echo ${CentOS_RHEL_version}
else
  echo ${Ubuntu_version}
fi
@ -1,21 +1,21 @@
#!/bin/bash
#

log_dir=$1
result_file=$2

if [ ! -n "$1" ];then
  echo "Please input the directory of taosdlog."
  echo "usage: ./get_client.sh <taosdlog directory> <result file>"
  exit 1
else
  log_dir=$1
fi

if [ ! -n "$2" ];then
  result_file=clientInfo.txt
else
  result_file=$2
fi

grep "new TCP connection" ${log_dir}/taosdlog.* | sed -e "s/0x.* from / /"|sed -e "s/,.*$//"|sed -e "s/:[0-9]*$//"|sort -r|uniq -f 2|sort -k 3 -r|uniq -f 2 > ${result_file}
@ -1,14 +1,14 @@
#!/bin/bash
#
# This file is used to install TAOS time-series database on linux systems. The operating system
# is required to use systemd to manage services at boot

set -e
# set -x

# -----------------------Variables definition---------------------
OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2)
len=$(echo ${#OS})
len=$((len-2))
retval=$(echo -ne ${OS:1:${len}} | cut -d" " -f1)
echo -ne $retval
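# ${OS:1:${len}} drops the surrounding quotes from the NAME= value, and cut keeps
# only the first word of the distribution name (e.g. "Ubuntu", "CentOS").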
@ -1,15 +1,15 @@
#!/bin/bash
#
# This file is used to install TAOS time-series database on linux systems. The operating system
# is required to use systemd to manage services at boot

set -e
# set -x

# -----------------------Variables definition---------------------
verinfo=$(cat $1 | grep " version" | cut -d '"' -f2)
verinfo=$(echo $verinfo | tr "\n" " ")
len=$(echo ${#verinfo})
len=$((len-1))
retval=$(echo -ne ${verinfo:0:${len}})
echo -ne $retval
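# The script prints the quoted version string found in the file passed as $1;
# the final substring expansion trims the trailing space left by tr.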
@ -485,6 +485,17 @@ function install_service() {
  # fi
}

function install_config() {
  if [ ! -f ${cfg_install_dir}/${configFile} ]; then
    ${csudo}mkdir -p ${cfg_install_dir}
    [ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir}
    ${csudo}chmod 644 ${cfg_install_dir}/*
  fi

  ${csudo}cp -f ${script_dir}/cfg/${configFile} ${install_main_dir}/cfg/${configFile}.org
  ${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg
}

function install_TDengine() {
  # Start to install
  echo -e "${GREEN}Start to install TDengine...${NC}"

@ -500,7 +511,7 @@ function install_TDengine() {
  # For installing new
  install_bin
  install_service
  #install_config
  install_config

  # Ask if to start the service
  #echo

@ -539,7 +550,7 @@ function install_TDengine() {
    echo
  else # Only install client
    install_bin
    #install_config
    install_config
    echo
    echo -e "\033[44;32;1mTDengine client is installed successfully!${NC}"
  fi
@ -1,339 +1,339 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# This file is used to install database on linux systems. The operating system
|
||||
# is required to use systemd to manage services at boot
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
|
||||
bin_link_dir="/usr/bin"
|
||||
#inc_link_dir="/usr/include"
|
||||
|
||||
#install main path
|
||||
install_main_dir="/usr/local/tarbitrator"
|
||||
|
||||
# old bin dir
|
||||
bin_dir="/usr/local/tarbitrator/bin"
|
||||
|
||||
service_config_dir="/etc/systemd/system"
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
csudo=""
|
||||
if command -v sudo >/dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
update_flag=0
|
||||
|
||||
initd_mod=0
|
||||
service_mod=2
|
||||
if pidof systemd &>/dev/null; then
|
||||
service_mod=0
|
||||
elif $(which service &>/dev/null); then
|
||||
service_mod=1
|
||||
service_config_dir="/etc/init.d"
|
||||
if $(which chkconfig &>/dev/null); then
|
||||
initd_mod=1
|
||||
elif $(which insserv &>/dev/null); then
|
||||
initd_mod=2
|
||||
elif $(which update-rc.d &>/dev/null); then
|
||||
initd_mod=3
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
|
||||
# get the operating system type for using the corresponding init file
|
||||
# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
|
||||
#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
|
||||
if [[ -e /etc/os-release ]]; then
|
||||
osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) || :
|
||||
else
|
||||
osinfo=""
|
||||
fi
|
||||
#echo "osinfo: ${osinfo}"
|
||||
os_type=0
|
||||
if echo $osinfo | grep -qwi "ubuntu"; then
|
||||
# echo "This is ubuntu system"
|
||||
os_type=1
|
||||
elif echo $osinfo | grep -qwi "debian"; then
|
||||
# echo "This is debian system"
|
||||
os_type=1
|
||||
elif echo $osinfo | grep -qwi "Kylin"; then
|
||||
# echo "This is Kylin system"
|
||||
os_type=1
|
||||
elif echo $osinfo | grep -qwi "centos"; then
|
||||
# echo "This is centos system"
|
||||
os_type=2
|
||||
elif echo $osinfo | grep -qwi "fedora"; then
|
||||
# echo "This is fedora system"
|
||||
os_type=2
|
||||
else
|
||||
echo " osinfo: ${osinfo}"
|
||||
echo " This is an officially unverified linux system,"
|
||||
echo " if there are any problems with the installation and operation, "
|
||||
echo " please feel free to contact taosdata.com for support."
|
||||
os_type=1
|
||||
fi
|
||||
|
||||
function kill_tarbitrator() {
|
||||
pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_main_path() {
|
||||
#create install main dir and all sub dir
|
||||
${csudo}rm -rf ${install_main_dir} || :
|
||||
${csudo}mkdir -p ${install_main_dir}
|
||||
${csudo}mkdir -p ${install_main_dir}/bin
|
||||
#${csudo}mkdir -p ${install_main_dir}/include
|
||||
${csudo}mkdir -p ${install_main_dir}/init.d
|
||||
}
|
||||
|
||||
function install_bin() {
|
||||
# Remove links
|
||||
${csudo}rm -f ${bin_link_dir}/rmtarbitrator || :
|
||||
${csudo}rm -f ${bin_link_dir}/tarbitrator || :
|
||||
${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*
|
||||
|
||||
#Make link
|
||||
[ -x ${install_main_dir}/bin/remove_arbi.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_arbi.sh ${bin_link_dir}/rmtarbitrator || :
|
||||
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
|
||||
}
|
||||
|
||||
function install_header() {
|
||||
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
|
||||
${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
|
||||
${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
|
||||
${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
|
||||
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
|
||||
}
|
||||
|
||||
function install_jemalloc() {
|
||||
jemalloc_dir=${script_dir}/jemalloc
|
||||
|
||||
if [ -d ${jemalloc_dir} ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/bin
|
||||
|
||||
if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/bin/jeprof ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
|
||||
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
|
||||
fi
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/share/man/man3
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
|
||||
fi
|
||||
|
||||
if [ -d /etc/ld.so.conf.d ]; then
|
||||
echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf >/dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
|
||||
${csudo}ldconfig
|
||||
else
|
||||
echo "/etc/ld.so.conf.d not found!"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function clean_service_on_sysvinit() {
|
||||
if pidof tarbitrator &>/dev/null; then
|
||||
${csudo}service tarbitratord stop || :
|
||||
fi
|
||||
|
||||
if ((${initd_mod} == 1)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo}chkconfig --del tarbitratord || :
|
||||
fi
|
||||
elif ((${initd_mod} == 2)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo}insserv -r tarbitratord || :
|
||||
fi
|
||||
elif ((${initd_mod} == 3)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo}update-rc.d -f tarbitratord remove || :
|
||||
fi
|
||||
fi
|
||||
|
||||
${csudo}rm -f ${service_config_dir}/tarbitratord || :
|
||||
|
||||
if $(which init &>/dev/null); then
|
||||
${csudo}init q || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_service_on_sysvinit() {
|
||||
clean_service_on_sysvinit
|
||||
sleep 1
|
||||
|
||||
if ((${os_type} == 1)); then
|
||||
${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
|
||||
${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord
|
||||
elif ((${os_type} == 2)); then
|
||||
${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
|
||||
${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord
|
||||
fi
|
||||
|
||||
if ((${initd_mod} == 1)); then
|
||||
${csudo}chkconfig --add tarbitratord || :
|
||||
${csudo}chkconfig --level 2345 tarbitratord on || :
|
||||
elif ((${initd_mod} == 2)); then
|
||||
${csudo}insserv tarbitratord || :
|
||||
${csudo}insserv -d tarbitratord || :
|
||||
elif ((${initd_mod} == 3)); then
|
||||
${csudo}update-rc.d tarbitratord defaults || :
|
||||
fi
|
||||
}
|
||||
|
||||
function clean_service_on_systemd() {
|
||||
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
|
||||
if systemctl is-active --quiet tarbitratord; then
|
||||
echo "tarbitrator is running, stopping it..."
|
||||
${csudo}systemctl stop tarbitratord &>/dev/null || echo &>/dev/null
|
||||
fi
|
||||
${csudo}systemctl disable tarbitratord &>/dev/null || echo &>/dev/null
|
||||
|
||||
${csudo}rm -f ${tarbitratord_service_config}
|
||||
}
|
||||
|
||||
function install_service_on_systemd() {
|
||||
clean_service_on_systemd
|
||||
|
||||
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
|
||||
|
||||
${csudo}bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
|
||||
${csudo}systemctl enable tarbitratord
|
||||
}
|
||||
|
||||
function install_service() {
|
||||
if ((${service_mod} == 0)); then
|
||||
install_service_on_systemd
|
||||
elif ((${service_mod} == 1)); then
|
||||
install_service_on_sysvinit
|
||||
else
|
||||
kill_tarbitrator
|
||||
fi
|
||||
}
|
||||
|
||||
function update_TDengine() {
|
||||
# Start to update
|
||||
echo -e "${GREEN}Start to update TDengine's arbitrator ...${NC}"
|
||||
# Stop the service if running
|
||||
if pidof tarbitrator &>/dev/null; then
|
||||
if ((${service_mod} == 0)); then
|
||||
${csudo}systemctl stop tarbitratord || :
|
||||
elif ((${service_mod} == 1)); then
|
||||
${csudo}service tarbitratord stop || :
|
||||
else
|
||||
kill_tarbitrator
|
||||
fi
|
||||
sleep 1
|
||||
fi
|
||||
|
||||
install_main_path
|
||||
#install_header
|
||||
install_bin
|
||||
install_service
|
||||
install_jemalloc
|
||||
|
||||
echo
|
||||
if ((${service_mod} == 0)); then
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}"
|
||||
elif ((${service_mod} == 1)); then
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}"
|
||||
fi
|
||||
echo
|
||||
echo -e "\033[44;32;1mTDengine's arbitrator is updated successfully!${NC}"
|
||||
}
|
||||
|
||||
function install_TDengine() {
|
||||
# Start to install
|
||||
echo -e "${GREEN}Start to install TDengine's arbitrator ...${NC}"
|
||||
|
||||
install_main_path
|
||||
#install_header
|
||||
install_bin
|
||||
install_service
|
||||
install_jemalloc
|
||||
|
||||
echo
|
||||
if ((${service_mod} == 0)); then
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}"
|
||||
elif ((${service_mod} == 1)); then
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}"
|
||||
fi
|
||||
|
||||
echo -e "\033[44;32;1mTDengine's arbitrator is installed successfully!${NC}"
|
||||
echo
|
||||
}
|
||||
|
||||
## ==============================Main program starts from here============================
|
||||
# Install server and client
|
||||
if [ -x ${bin_dir}/tarbitrator ]; then
|
||||
update_flag=1
|
||||
update_TDengine
|
||||
else
|
||||
install_TDengine
|
||||
fi
@ -1,320 +1,305 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# This file is used to install TDengine client on linux systems. The operating system
|
||||
# is required to use systemd to manage services at boot
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
|
||||
dataDir="/var/lib/taos"
|
||||
logDir="/var/log/taos"
|
||||
productName="TDengine"
|
||||
installDir="/usr/local/taos"
|
||||
configDir="/etc/taos"
|
||||
serverName="taosd"
|
||||
clientName="taos"
|
||||
uninstallScript="rmtaos"
|
||||
configFile="taos.cfg"
|
||||
tarName="taos.tar.gz"
|
||||
|
||||
osType=Linux
|
||||
pagMode=full
|
||||
verMode=edge
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
# Dynamic directory
|
||||
data_dir=${dataDir}
|
||||
log_dir=${logDir}
|
||||
else
|
||||
script_dir=`dirname $0`
|
||||
cd ${script_dir}
|
||||
script_dir="$(pwd)"
|
||||
data_dir=${dataDir}
|
||||
log_dir=~/${productName}/log
|
||||
fi
|
||||
|
||||
log_link_dir="${installDir}/log"
|
||||
|
||||
cfg_install_dir=${configDir}
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
else
|
||||
bin_link_dir="/usr/local/bin"
|
||||
lib_link_dir="/usr/local/lib"
|
||||
inc_link_dir="/usr/local/include"
|
||||
fi
|
||||
|
||||
#install main path
|
||||
install_main_dir="${installDir}"
|
||||
|
||||
# old bin dir
|
||||
bin_dir="${installDir}/bin"
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
update_flag=0
|
||||
|
||||
function kill_client() {
|
||||
pid=$(ps -ef | grep "${clientName}" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_main_path() {
|
||||
#create install main dir and all sub dir
|
||||
${csudo}rm -rf ${install_main_dir} || :
|
||||
${csudo}mkdir -p ${install_main_dir}
|
||||
${csudo}mkdir -p ${install_main_dir}/cfg
|
||||
${csudo}mkdir -p ${install_main_dir}/bin
|
||||
${csudo}mkdir -p ${install_main_dir}/driver
|
||||
if [ $productName == "TDengine" ]; then
|
||||
${csudo}mkdir -p ${install_main_dir}/examples
|
||||
fi
|
||||
${csudo}mkdir -p ${install_main_dir}/include
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
${csudo}mkdir -p ${install_main_dir}/connector
|
||||
fi
|
||||
}
|
||||
|
||||
function install_bin() {
|
||||
# Remove links
|
||||
${csudo}rm -f ${bin_link_dir}/${clientName} || :
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
||||
fi
|
||||
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
|
||||
${csudo}rm -f ${bin_link_dir}/set_core || :
|
||||
|
||||
${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*
|
||||
|
||||
#Make link
|
||||
[ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || :
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
|
||||
fi
|
||||
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/${uninstallScript} || :
|
||||
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
|
||||
}
|
||||
|
||||
function clean_lib() {
|
||||
sudo rm -f /usr/lib/libtaos.* || :
|
||||
sudo rm -rf ${lib_dir} || :
|
||||
}
|
||||
|
||||
function install_lib() {
|
||||
# Remove links
|
||||
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
||||
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
|
||||
#${csudo}rm -rf ${v15_java_app_dir} || :
|
||||
|
||||
${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/*
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
|
||||
${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
|
||||
|
||||
if [ -d "${lib64_link_dir}" ]; then
|
||||
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
|
||||
${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
|
||||
fi
|
||||
else
|
||||
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
|
||||
${csudo}ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
|
||||
fi
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}ldconfig
|
||||
else
|
||||
${csudo}update_dyld_shared_cache
|
||||
fi
|
||||
}
|
||||
|
||||
function install_header() {
|
||||
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
|
||||
${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
|
||||
${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
|
||||
${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
|
||||
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
|
||||
}
|
||||
|
||||
function install_jemalloc() {
|
||||
jemalloc_dir=${script_dir}/jemalloc
|
||||
|
||||
if [ -d ${jemalloc_dir} ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/bin
|
||||
|
||||
if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/bin/jeprof ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
|
||||
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
|
||||
fi
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/share/man/man3
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
|
||||
fi
|
||||
|
||||
if [ -d /etc/ld.so.conf.d ]; then
|
||||
echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
|
||||
${csudo}ldconfig
|
||||
else
|
||||
echo "/etc/ld.so.conf.d not found!"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function install_config() {
|
||||
if [ ! -f ${cfg_install_dir}/${configFile} ]; then
|
||||
${csudo}mkdir -p ${cfg_install_dir}
|
||||
[ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir}
|
||||
${csudo}chmod 644 ${cfg_install_dir}/*
|
||||
fi
|
||||
|
||||
${csudo}cp -f ${script_dir}/cfg/${configFile} ${install_main_dir}/cfg/${configFile}.org
|
||||
${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg
|
||||
}
|
||||
|
||||
|
||||
function install_log() {
|
||||
${csudo}rm -rf ${log_dir} || :
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
|
||||
else
|
||||
mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
|
||||
fi
|
||||
${csudo}ln -s ${log_dir} ${install_main_dir}/log
|
||||
}
|
||||
|
||||
function install_connector() {
|
||||
${csudo}cp -rf ${script_dir}/connector/ ${install_main_dir}/
|
||||
}
|
||||
|
||||
function install_examples() {
|
||||
if [ -d ${script_dir}/examples ]; then
|
||||
${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
|
||||
fi
|
||||
}
|
||||
|
||||
function update_TDengine() {
|
||||
# Start to update
|
||||
if [ ! -e ${tarName} ]; then
|
||||
echo "File ${tarName} does not exist"
|
||||
exit 1
|
||||
fi
|
||||
tar -zxf ${tarName}
|
||||
|
||||
echo -e "${GREEN}Start to update ${productName} client...${NC}"
|
||||
# Stop the client shell if running
|
||||
if pidof ${clientName} &> /dev/null; then
|
||||
kill_client
|
||||
sleep 1
|
||||
fi
|
||||
|
||||
install_main_path
|
||||
|
||||
install_log
|
||||
install_header
|
||||
install_lib
|
||||
install_jemalloc
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_connector
|
||||
fi
|
||||
install_examples
|
||||
install_bin
|
||||
install_config
|
||||
|
||||
echo
|
||||
echo -e "\033[44;32;1m${productName} client is updated successfully!${NC}"
|
||||
|
||||
rm -rf $(tar -tf ${tarName})
|
||||
}
|
||||
|
||||
function install_TDengine() {
|
||||
# Start to install
|
||||
if [ ! -e ${tarName} ]; then
|
||||
echo "File ${tarName} does not exist"
|
||||
exit 1
|
||||
fi
|
||||
tar -zxf ${tarName}
|
||||
|
||||
echo -e "${GREEN}Start to install ${productName} client...${NC}"
|
||||
|
||||
install_main_path
|
||||
install_log
|
||||
install_header
|
||||
install_lib
|
||||
install_jemalloc
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_connector
|
||||
fi
|
||||
install_examples
|
||||
install_bin
|
||||
install_config
|
||||
|
||||
echo
|
||||
echo -e "\033[44;32;1m${productName} client is installed successfully!${NC}"
|
||||
|
||||
rm -rf $(tar -tf ${tarName})
|
||||
}
|
||||
|
||||
|
||||
## ==============================Main program starts from here============================
|
||||
# Install or update the client
# if the server is already installed, don't install the client
if [ -e ${bin_dir}/${serverName} ]; then
  echo -e "\033[44;32;1m${productName} server is already installed, no need to install the client!${NC}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ -x ${bin_dir}/${clientName} ]; then
|
||||
update_flag=1
|
||||
update_TDengine
|
||||
else
|
||||
install_TDengine
|
||||
fi
|
||||
#!/bin/bash
|
||||
#
|
||||
# This file is used to install TDengine client on linux systems. The operating system
|
||||
# is required to use systemd to manage services at boot
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
|
||||
dataDir="/var/lib/taos"
|
||||
logDir="/var/log/taos"
|
||||
productName="TDengine"
|
||||
installDir="/usr/local/taos"
|
||||
configDir="/etc/taos"
|
||||
serverName="taosd"
|
||||
clientName="taos"
|
||||
uninstallScript="rmtaos"
|
||||
configFile="taos.cfg"
|
||||
|
||||
osType=Linux
|
||||
pagMode=full
|
||||
verMode=edge
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
# Dynamic directory
|
||||
data_dir=${dataDir}
|
||||
log_dir=${logDir}
|
||||
else
|
||||
script_dir=`dirname $0`
|
||||
cd ${script_dir}
|
||||
script_dir="$(pwd)"
|
||||
data_dir=${dataDir}
|
||||
log_dir=~/${productName}/log
|
||||
fi
|
||||
|
||||
log_link_dir="${installDir}/log"
|
||||
|
||||
cfg_install_dir=${configDir}
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
else
|
||||
bin_link_dir="/usr/local/bin"
|
||||
lib_link_dir="/usr/local/lib"
|
||||
inc_link_dir="/usr/local/include"
|
||||
fi
|
||||
|
||||
#install main path
|
||||
install_main_dir="${installDir}"
|
||||
|
||||
# old bin dir
|
||||
bin_dir="${installDir}/bin"
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
update_flag=0
|
||||
|
||||
function kill_client() {
|
||||
pid=$(ps -ef | grep "${clientName}" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_main_path() {
|
||||
#create install main dir and all sub dir
|
||||
${csudo}rm -rf ${install_main_dir} || :
|
||||
${csudo}mkdir -p ${install_main_dir}
|
||||
${csudo}mkdir -p ${install_main_dir}/cfg
|
||||
${csudo}mkdir -p ${install_main_dir}/bin
|
||||
${csudo}mkdir -p ${install_main_dir}/driver
|
||||
if [ $productName == "TDengine" ]; then
|
||||
${csudo}mkdir -p ${install_main_dir}/examples
|
||||
fi
|
||||
${csudo}mkdir -p ${install_main_dir}/include
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
${csudo}mkdir -p ${install_main_dir}/connector
|
||||
fi
|
||||
}
|
||||
|
||||
function install_bin() {
|
||||
# Remove links
|
||||
${csudo}rm -f ${bin_link_dir}/${clientName} || :
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
||||
fi
|
||||
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
|
||||
${csudo}rm -f ${bin_link_dir}/set_core || :
|
||||
|
||||
${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*

# Make links
[ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || :
if [ "$osType" != "Darwin" ]; then
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
fi
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/${uninstallScript} || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
}

function clean_lib() {
sudo rm -f /usr/lib/libtaos.* || :
sudo rm -rf ${lib_dir} || :
}

function install_lib() {
# Remove links
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
#${csudo}rm -rf ${v15_java_app_dir} || :

${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/*

if [ "$osType" != "Darwin" ]; then
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so

if [ -d "${lib64_link_dir}" ]; then
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
else
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
${csudo}ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi

if [ "$osType" != "Darwin" ]; then
${csudo}ldconfig
else
${csudo}update_dyld_shared_cache
fi
}

function install_header() {
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}

function install_jemalloc() {
jemalloc_dir=${script_dir}/jemalloc

if [ -d ${jemalloc_dir} ]; then
${csudo}/usr/bin/install -c -d /usr/local/bin

if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
fi
if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
fi
if [ -f ${jemalloc_dir}/bin/jeprof ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
fi
if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
${csudo}/usr/bin/install -c -d /usr/local/lib
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
${csudo}/usr/bin/install -c -d /usr/local/lib
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
fi
fi
if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
fi
if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
${csudo}/usr/bin/install -c -d /usr/local/share/man/man3
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
fi

if [ -d /etc/ld.so.conf.d ]; then
echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
${csudo}ldconfig
else
echo "/etc/ld.so.conf.d not found!"
fi
fi
}
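
# A quick sanity check for the jemalloc installation above (illustrative sketch only,
# assuming the shared library and jemalloc-config were actually copied to /usr/local):
#   ldconfig -p | grep libjemalloc            # the /usr/local/lib entry should be listed
#   /usr/local/bin/jemalloc-config --version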

function install_config() {
if [ ! -f ${cfg_install_dir}/${configFile} ]; then
${csudo}mkdir -p ${cfg_install_dir}
[ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir}
${csudo}chmod 644 ${cfg_install_dir}/*
fi

${csudo}cp -f ${script_dir}/cfg/${configFile} ${install_main_dir}/cfg/${configFile}.org
${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg
}

function install_log() {
${csudo}rm -rf ${log_dir} || :

if [ "$osType" != "Darwin" ]; then
${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
else
mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
fi
${csudo}ln -s ${log_dir} ${install_main_dir}/log
}

function install_connector() {
${csudo}cp -rf ${script_dir}/connector/ ${install_main_dir}/
}

function install_examples() {
if [ -d ${script_dir}/examples ]; then
${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
fi
}

function update_TDengine() {
# Start to update
echo -e "${GREEN}Start to update ${productName} client...${NC}"
# Stop the client shell if it is running
if pidof ${clientName} &> /dev/null; then
kill_client
sleep 1
fi

install_main_path

install_log
install_header
install_lib
install_jemalloc
if [ "$verMode" == "cluster" ]; then
install_connector
fi
install_examples
install_bin
install_config

echo
echo -e "\033[44;32;1m${productName} client is updated successfully!${NC}"
}

function install_TDengine() {
# Start to install
echo -e "${GREEN}Start to install ${productName} client...${NC}"

install_main_path
install_log
install_header
install_lib
install_jemalloc
if [ "$verMode" == "cluster" ]; then
install_connector
fi
install_examples
install_bin
install_config

echo
echo -e "\033[44;32;1m${productName} client is installed successfully!${NC}"

rm -rf $(tar -tf ${tarName})
}

## ==============================Main program starts from here============================
# Install or update the client
# if the server is already installed, there is no need to install the client
if [ -e ${bin_dir}/${serverName} ]; then
echo -e "\033[44;32;1m${productName} server is already installed, no need to install the client!${NC}"
exit 0
fi

if [ -x ${bin_dir}/${clientName} ]; then
update_flag=1
update_TDengine
else
install_TDengine
fi
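
# Typical use of this installer (an illustrative sketch; the package name below is only
# an example of what makeclient.sh would produce for an assumed version and platform):
#   tar -xzf TDengine-client-3.0.0.0-Linux-x64.tar.gz
#   cd TDengine-client-3.0.0.0
#   ./install_client.sh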
@@ -1,71 +1,71 @@
#!/bin/bash
#
# Generate the arbitrator's tar.gz setup package for all OS systems

set -e
#set -x

curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8

script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"

productName="TDengine"

# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
release_dir="${top_dir}/release"

#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/${productName}-enterprise-arbitrator-${version}"
else
install_dir="${release_dir}/${productName}-arbitrator-${version}"
fi

# Directories and files.
bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi.sh"
install_files="${script_dir}/install_arbi.sh"

#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord

# make directories.
mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi.sh || :
#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || :
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :

cd ${release_dir}

# install_dir already distinguishes cluster from edge, so that code is commented out
pkg_name=${install_dir}-${osType}-${cpuType}

if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknown verType, neither stable nor beta"
exit 1
fi

tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${pkg_name}.tar.gz error !!!"
exit $exitcode
fi

cd ${curr_dir}
#!/bin/bash
#
# Generate the arbitrator's tar.gz setup package for all OS systems

set -e
#set -x

curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8

script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"

productName="TDengine"

# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}"
release_dir="${top_dir}/release"

#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/${productName}-enterprise-arbitrator-${version}"
else
install_dir="${release_dir}/${productName}-arbitrator-${version}"
fi

# Directories and files.
bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi.sh"
install_files="${script_dir}/install_arbi.sh"

#header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h"
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord

# make directories.
mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi.sh || :
#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || :
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :

cd ${release_dir}

# install_dir already distinguishes cluster from edge, so that code is commented out
pkg_name=${install_dir}-${osType}-${cpuType}

if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknown verType, neither stable nor beta"
exit 1
fi

tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${pkg_name}.tar.gz error !!!"
exit $exitcode
fi

cd ${curr_dir}
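
# Illustrative invocation of this script (the argument values below are assumptions,
# not taken from the build system):
#   ./makearbi.sh /path/to/debug 3.0.0.0 $(date +%s) x64 Linux edge stable full
# With those inputs the resulting package would be named
#   TDengine-arbitrator-3.0.0.0-Linux-x64.tar.gz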
@@ -1,246 +1,246 @@
#!/bin/bash
#
# Generate tar.gz package for the Linux client on all OS systems
set -e
#set -x

curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8
dbName=$9

productName="TDengine"
clientName="taos"
configFile="taos.cfg"
tarName="taos.tar.gz"

if [ "$osType" != "Darwin" ]; then
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
else
script_dir=$(dirname $0)
cd ${script_dir}
script_dir="$(pwd)"
top_dir=${script_dir}/../..
fi

# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
release_dir="${top_dir}/release"

#package_name='linux'

if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/${productName}-enterprise-client-${version}"
else
install_dir="${release_dir}/${productName}-client-${version}"
fi

# Directories and files.

if [ "$osType" != "Darwin" ]; then
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/${clientName}
bin_files="${build_dir}/bin/${clientName} \
${script_dir}/remove_client.sh"
else
bin_files="${build_dir}/bin/${clientName} \
${script_dir}/remove_client.sh \
${script_dir}/set_core.sh \
${script_dir}/get_client.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
else
bin_files="${build_dir}/bin/${clientName} ${script_dir}/remove_client.sh"
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
fi

header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
if [ "$dbName" != "taos" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
cfg_dir="${top_dir}/packaging/cfg"
fi

install_files="${script_dir}/install_client.sh"

# make directories.
mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile}
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/*

if [ -f ${build_dir}/bin/jemalloc-config ]; then
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/bin/jeprof ]; then
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
fi
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
fi
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
fi
fi

cd ${install_dir}

if [ "$osType" != "Darwin" ]; then
tar -zcv -f ${tarName} * --remove-files || :
else
tar -zcv -f ${tarName} * || :
mv ${tarName} ..
rm -rf ./*
mv ../${tarName} .
fi

cd ${curr_dir}
cp ${install_files} ${install_dir}
if [ "$osType" == "Darwin" ]; then
sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client.sh >>install_client_temp.sh
mv install_client_temp.sh ${install_dir}/install_client.sh
fi

if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_client.sh >>install_client_temp.sh
mv install_client_temp.sh ${install_dir}/install_client.sh
fi

if [ "$pagMode" == "lite" ]; then
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client.sh >>install_client_temp.sh
mv install_client_temp.sh ${install_dir}/install_client.sh
fi
chmod a+x ${install_dir}/install_client.sh

if [[ $productName == "TDengine" ]]; then
# Copy example code
mkdir -p ${install_dir}/examples
examples_dir="${top_dir}/examples"
cp -r ${examples_dir}/c ${install_dir}/examples
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
cp -r ${examples_dir}/python ${install_dir}/examples
cp -r ${examples_dir}/R ${install_dir}/examples
cp -r ${examples_dir}/go ${install_dir}/examples
cp -r ${examples_dir}/nodejs ${install_dir}/examples
cp -r ${examples_dir}/C# ${install_dir}/examples
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../src/kit/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
fi

if [ "$verMode" == "cluster" ]; then
# Copy connector
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if you want to use it!"
fi
git clone --depth 1 https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
rm -rf ${install_dir}/connector/python/.git ||:
# cp -r ${connector_dir}/python ${install_dir}/connector
git clone --depth 1 https://github.com/taosdata/taos-connector-node ${install_dir}/connector/nodejs
rm -rf ${install_dir}/connector/nodejs/.git ||:

git clone --depth 1 https://github.com/taosdata/taos-connector-dotnet ${install_dir}/connector/dotnet
rm -rf ${install_dir}/connector/dotnet/.git ||:
# cp -r ${connector_dir}/nodejs ${install_dir}/connector
git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
rm -rf ${install_dir}/connector/rust/.git ||:
fi
fi
fi
# Copy driver
mkdir -p ${install_dir}/driver
cp ${lib_files} ${install_dir}/driver

# Copy connector
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector

if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if you want to use it!"
fi
cp -r ${connector_dir}/python ${install_dir}/connector
cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}

# exit 1

cd ${release_dir}

# install_dir already distinguishes cluster from edge, so that code is commented out
pkg_name=${install_dir}-${osType}-${cpuType}

# if [ "$verMode" == "cluster" ]; then
# pkg_name=${install_dir}-${osType}-${cpuType}
# elif [ "$verMode" == "edge" ]; then
# pkg_name=${install_dir}-${osType}-${cpuType}
# else
# echo "unknown verMode, neither cluster nor edge"
# exit 1
# fi

if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknown verType, neither stable nor beta"
exit 1
fi

if [ "$pagMode" == "lite" ]; then
pkg_name=${pkg_name}-Lite
fi

if [ "$osType" != "Darwin" ]; then
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
else
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || :
mv "$(basename ${pkg_name}).tar.gz" ..
rm -rf ./*
mv ../"$(basename ${pkg_name}).tar.gz" .
fi

cd ${curr_dir}
#!/bin/bash
#
# Generate tar.gz package for the Linux client on all OS systems
set -e
#set -x

curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8
dbName=$9

productName="TDengine"
clientName="taos"
configFile="taos.cfg"
tarName="taos.tar.gz"

if [ "$osType" != "Darwin" ]; then
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
else
script_dir=$(dirname $0)
cd ${script_dir}
script_dir="$(pwd)"
top_dir=${script_dir}/../..
fi

# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}"
release_dir="${top_dir}/release"

#package_name='linux'

if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/${productName}-enterprise-client-${version}"
else
install_dir="${release_dir}/${productName}-client-${version}"
fi

# Directories and files.

if [ "$osType" != "Darwin" ]; then
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/${clientName}
bin_files="${build_dir}/bin/${clientName} \
${script_dir}/remove_client.sh"
else
bin_files="${build_dir}/bin/${clientName} \
${script_dir}/remove_client.sh \
${script_dir}/set_core.sh \
${script_dir}/get_client.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
else
bin_files="${build_dir}/bin/${clientName} ${script_dir}/remove_client.sh"
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
fi

header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h"
if [ "$dbName" != "taos" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
cfg_dir="${top_dir}/packaging/cfg"
fi

install_files="${script_dir}/install_client.sh"

# make directories.
mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile}
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/*

if [ -f ${build_dir}/bin/jemalloc-config ]; then
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/bin/jeprof ]; then
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
fi
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
fi
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
fi
fi

cd ${install_dir}

if [ "$osType" != "Darwin" ]; then
tar -zcv -f ${tarName} * --remove-files || :
else
tar -zcv -f ${tarName} * || :
mv ${tarName} ..
rm -rf ./*
mv ../${tarName} .
fi

cd ${curr_dir}
cp ${install_files} ${install_dir}
if [ "$osType" == "Darwin" ]; then
sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client.sh >>install_client_temp.sh
mv install_client_temp.sh ${install_dir}/install_client.sh
fi

if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_client.sh >>install_client_temp.sh
mv install_client_temp.sh ${install_dir}/install_client.sh
fi

if [ "$pagMode" == "lite" ]; then
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client.sh >>install_client_temp.sh
mv install_client_temp.sh ${install_dir}/install_client.sh
fi
chmod a+x ${install_dir}/install_client.sh

if [[ $productName == "TDengine" ]]; then
# Copy example code
mkdir -p ${install_dir}/examples
examples_dir="${top_dir}/examples"
cp -r ${examples_dir}/c ${install_dir}/examples
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
cp -r ${examples_dir}/python ${install_dir}/examples
cp -r ${examples_dir}/R ${install_dir}/examples
cp -r ${examples_dir}/go ${install_dir}/examples
cp -r ${examples_dir}/nodejs ${install_dir}/examples
cp -r ${examples_dir}/C# ${install_dir}/examples
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../tools/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
fi

if [ "$verMode" == "cluster" ]; then
# Copy connector
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if you want to use it!"
fi
git clone --depth 1 https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
rm -rf ${install_dir}/connector/python/.git ||:
# cp -r ${connector_dir}/python ${install_dir}/connector
git clone --depth 1 https://github.com/taosdata/taos-connector-node ${install_dir}/connector/nodejs
rm -rf ${install_dir}/connector/nodejs/.git ||:

git clone --depth 1 https://github.com/taosdata/taos-connector-dotnet ${install_dir}/connector/dotnet
rm -rf ${install_dir}/connector/dotnet/.git ||:
# cp -r ${connector_dir}/nodejs ${install_dir}/connector
git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
rm -rf ${install_dir}/connector/rust/.git ||:
fi
fi
fi
# Copy driver
mkdir -p ${install_dir}/driver
cp ${lib_files} ${install_dir}/driver

# Copy connector
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector

if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if you want to use it!"
fi
cp -r ${connector_dir}/python ${install_dir}/connector || :
cp -r ${connector_dir}/nodejs ${install_dir}/connector || :
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}

# exit 1

cd ${release_dir}

# install_dir already distinguishes cluster from edge, so that code is commented out
pkg_name=${install_dir}-${osType}-${cpuType}

# if [ "$verMode" == "cluster" ]; then
# pkg_name=${install_dir}-${osType}-${cpuType}
# elif [ "$verMode" == "edge" ]; then
# pkg_name=${install_dir}-${osType}-${cpuType}
# else
# echo "unknown verMode, neither cluster nor edge"
# exit 1
# fi

if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknown verType, neither stable nor beta"
exit 1
fi

if [ "$pagMode" == "lite" ]; then
pkg_name=${pkg_name}-Lite
fi

if [ "$osType" != "Darwin" ]; then
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
else
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || :
mv "$(basename ${pkg_name}).tar.gz" ..
rm -rf ./*
mv ../"$(basename ${pkg_name}).tar.gz" .
fi

cd ${curr_dir}
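
# A quick way to inspect what makeclient.sh produced (illustrative only; the file name
# assumes version 3.0.0.0 built for Linux x64 in stable/full mode):
#   tar -tzf TDengine-client-3.0.0.0-Linux-x64.tar.gz | head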
@@ -1,378 +1,378 @@
#!/bin/bash
#
# Generate tar.gz package for all OS systems

set -e
#set -x

curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8
versionComp=$9
dbName=${10}

script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"

productName="TDengine"
serverName="taosd"
clientName="taos"
configFile="taos.cfg"
tarName="taos.tar.gz"
dumpName="taosdump"
benchmarkName="taosBenchmark"
toolsName="taostools"
adapterName="taosadapter"
defaultPasswd="taosdata"

# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
release_dir="${top_dir}/release"

#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/${productName}-enterprise-server-${version}"
else
install_dir="${release_dir}/${productName}-server-${version}"
fi

if [ -d ${top_dir}/src/kit/taos-tools/packaging/deb ]; then
cd ${top_dir}/src/kit/taos-tools/packaging/deb
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"

taostools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}')
taostools_install_dir="${release_dir}/${clientName}Tools-${taostools_ver}"

cd ${curr_dir}
else
taostools_install_dir="${release_dir}/${clientName}Tools-${version}"
fi

# Directories and files
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/${serverName}
strip ${build_dir}/bin/${clientName}
# the lite version doesn't include taosadapter, so there is no RESTful interface
bin_files="${build_dir}/bin/${serverName} ${build_dir}/bin/${clientName} ${script_dir}/remove.sh ${script_dir}/startPre.sh ${build_dir}/bin/taosBenchmark"
taostools_bin_files=""
else

wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${build_dir}/bin/TDinsight.sh \
&& echo "TDinsight.sh downloaded!" \
|| echo "failed to download TDinsight.sh"
# download TDinsight caches
orig_pwd=$(pwd)
tdinsight_caches=""
cd ${build_dir}/bin/ && \
chmod +x TDinsight.sh
tdinsight_caches=$(./TDinsight.sh --download-only | xargs -i printf "${build_dir}/bin/{} ")
cd $orig_pwd
echo "TDinsight caches: $tdinsight_caches"

taostools_bin_files=" ${build_dir}/bin/taosdump \
${build_dir}/bin/taosBenchmark \
${build_dir}/bin/TDinsight.sh \
$tdinsight_caches"

bin_files="${build_dir}/bin/${serverName} \
${build_dir}/bin/${clientName} \
${taostools_bin_files} \
${build_dir}/bin/taosadapter \
${build_dir}/bin/tarbitrator \
${script_dir}/remove.sh \
${script_dir}/set_core.sh \
${script_dir}/run_taosd_and_taosadapter.sh \
${script_dir}/startPre.sh \
${script_dir}/taosd-dump-cfg.gdb"
fi

lib_files="${build_dir}/lib/libtaos.so.${version}"
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"

if [ "$dbName" != "taos" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
cfg_dir="${top_dir}/packaging/cfg"
fi

install_files="${script_dir}/install.sh"
nginx_dir="${code_dir}/../../enterprise/src/plugins/web"

init_file_deb=${script_dir}/../deb/taosd
init_file_rpm=${script_dir}/../rpm/taosd
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord

# make directories.
mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile}

if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
cp ${compile_dir}/test/cfg/taosadapter.toml ${install_dir}/cfg || :
fi

if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
cp ${compile_dir}/test/cfg/taosadapter.service ${install_dir}/cfg || :
fi

if [ -f "${cfg_dir}/${serverName}.service" ]; then
cp ${cfg_dir}/${serverName}.service ${install_dir}/cfg || :
fi

if [ -f "${top_dir}/packaging/cfg/tarbitratord.service" ]; then
cp ${top_dir}/packaging/cfg/tarbitratord.service ${install_dir}/cfg || :
fi

if [ -f "${top_dir}/packaging/cfg/nginxd.service" ]; then
cp ${top_dir}/packaging/cfg/nginxd.service ${install_dir}/cfg || :
fi

mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/${serverName}.deb
mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/${serverName}.rpm
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :

if [ $adapterName != "taosadapter" ]; then
mv ${install_dir}/cfg/taosadapter.toml ${install_dir}/cfg/$adapterName.toml
sed -i "s/path = \"\/var\/log\/taos\"/path = \"\/var\/log\/${productName}\"/g" ${install_dir}/cfg/$adapterName.toml
sed -i "s/password = \"taosdata\"/password = \"${defaultPasswd}\"/g" ${install_dir}/cfg/$adapterName.toml

mv ${install_dir}/cfg/taosadapter.service ${install_dir}/cfg/$adapterName.service
sed -i "s/TDengine/${productName}/g" ${install_dir}/cfg/$adapterName.service
sed -i "s/taosAdapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service
sed -i "s/taosadapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service

mv ${install_dir}/bin/taosadapter ${install_dir}/bin/${adapterName}
mv ${install_dir}/bin/run_taosd_and_taosadapter.sh ${install_dir}/bin/run_${serverName}_and_${adapterName}.sh
mv ${install_dir}/bin/taosd-dump-cfg.gdb ${install_dir}/bin/${serverName}-dump-cfg.gdb
fi

if [ -n "${taostools_bin_files}" ]; then
mkdir -p ${taostools_install_dir} || echo -e "failed to create ${taostools_install_dir}"
mkdir -p ${taostools_install_dir}/bin \
&& cp ${taostools_bin_files} ${taostools_install_dir}/bin \
&& chmod a+x ${taostools_install_dir}/bin/* || :

if [ -f ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh ]; then
cp ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh \
${taostools_install_dir}/ > /dev/null \
&& chmod a+x ${taostools_install_dir}/install-taostools.sh \
|| echo -e "failed to copy install-taostools.sh"
else
echo -e "install-taostools.sh not found"
fi

if [ -f ${top_dir}/src/kit/taos-tools/packaging/tools/uninstall-taostools.sh ]; then
cp ${top_dir}/src/kit/taos-tools/packaging/tools/uninstall-taostools.sh \
${taostools_install_dir}/ > /dev/null \
&& chmod a+x ${taostools_install_dir}/uninstall-taostools.sh \
|| echo -e "failed to copy uninstall-taostools.sh"
else
echo -e "uninstall-taostools.sh not found"
fi

if [ -f ${build_dir}/lib/libavro.so.23.0.0 ]; then
mkdir -p ${taostools_install_dir}/avro/{lib,lib/pkgconfig} || echo -e "failed to create ${taostools_install_dir}/avro"
cp ${build_dir}/lib/libavro.* ${taostools_install_dir}/avro/lib
cp ${build_dir}/lib/pkgconfig/avro-c.pc ${taostools_install_dir}/avro/lib/pkgconfig
fi
fi

if [ -f ${build_dir}/bin/jemalloc-config ]; then
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/bin/jeprof ]; then
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
fi
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
fi
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
fi
fi

if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >>remove_temp.sh
mv remove_temp.sh ${install_dir}/bin/remove.sh

mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
rm -rf ${install_dir}/nginxd/png

if [ "$cpuType" == "aarch64" ]; then
cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
elif [ "$cpuType" == "aarch32" ]; then
cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
fi
rm -rf ${install_dir}/nginxd/sbin/arm
fi

cd ${install_dir}
tar -zcv -f ${tarName} * --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${tarName} error !!!"
exit $exitcode
fi

cd ${curr_dir}
cp ${install_files} ${install_dir}
if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install.sh >>install_temp.sh
mv install_temp.sh ${install_dir}/install.sh
fi
if [ "$pagMode" == "lite" ]; then
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install.sh >>install_temp.sh
mv install_temp.sh ${install_dir}/install.sh
fi
chmod a+x ${install_dir}/install.sh

if [[ $dbName == "taos" ]]; then
# Copy example code
mkdir -p ${install_dir}/examples
examples_dir="${top_dir}/examples"
cp -r ${examples_dir}/c ${install_dir}/examples
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ -d ${examples_dir}/JDBC/connectionPools/target ]; then
rm -rf ${examples_dir}/JDBC/connectionPools/target
fi
if [ -d ${examples_dir}/JDBC/JDBCDemo/target ]; then
rm -rf ${examples_dir}/JDBC/JDBCDemo/target
fi
if [ -d ${examples_dir}/JDBC/mybatisplus-demo/target ]; then
rm -rf ${examples_dir}/JDBC/mybatisplus-demo/target
fi
if [ -d ${examples_dir}/JDBC/springbootdemo/target ]; then
rm -rf ${examples_dir}/JDBC/springbootdemo/target
fi
if [ -d ${examples_dir}/JDBC/SpringJdbcTemplate/target ]; then
rm -rf ${examples_dir}/JDBC/SpringJdbcTemplate/target
fi
if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then
rm -rf ${examples_dir}/JDBC/taosdemo/target
fi

cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
cp -r ${examples_dir}/python ${install_dir}/examples
cp -r ${examples_dir}/R ${install_dir}/examples
cp -r ${examples_dir}/go ${install_dir}/examples
cp -r ${examples_dir}/nodejs ${install_dir}/examples
cp -r ${examples_dir}/C# ${install_dir}/examples
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../src/kit/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
fi
fi

# Copy driver
mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt

# Copy connector
if [ "$verMode" == "cluster" ]; then
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if you want to use it!"
fi
git clone --depth 1 https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
rm -rf ${install_dir}/connector/python/.git ||:

git clone --depth 1 https://github.com/taosdata/taos-connector-node ${install_dir}/connector/nodejs
rm -rf ${install_dir}/connector/nodejs/.git ||:

git clone --depth 1 https://github.com/taosdata/taos-connector-dotnet ${install_dir}/connector/dotnet
rm -rf ${install_dir}/connector/dotnet/.git ||:

git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
rm -rf ${install_dir}/connector/rust/.git ||:
# cp -r ${connector_dir}/python ${install_dir}/connector
# cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi
fi

# Copy release note
cp ${script_dir}/release_note ${install_dir}

# exit 1

cd ${release_dir}

# install_dir already distinguishes cluster from edge, so that code is commented out
pkg_name=${install_dir}-${osType}-${cpuType}

taostools_pkg_name=${taostools_install_dir}-${osType}-${cpuType}

# if [ "$verMode" == "cluster" ]; then
# pkg_name=${install_dir}-${osType}-${cpuType}
# elif [ "$verMode" == "edge" ]; then
# pkg_name=${install_dir}-${osType}-${cpuType}
# else
# echo "unknown verMode, neither cluster nor edge"
# exit 1
# fi

if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
taostools_pkg_name=${taostools_install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
taostools_pkg_name=${taostools_pkg_name}
else
echo "unknown verType, neither stable nor beta"
exit 1
fi

if [ "$pagMode" == "lite" ]; then
pkg_name=${pkg_name}-Lite
fi

tar -zcv -f "$(basename ${pkg_name}).tar.gz" "$(basename ${install_dir})" --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${pkg_name}.tar.gz error !!!"
exit $exitcode
fi

if [ -n "${taostools_bin_files}" ]; then
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${taostools_install_dir}/bin/TDinsight.sh && echo "TDinsight.sh downloaded!" || echo "failed to download TDinsight.sh"
tar -zcv -f "$(basename ${taostools_pkg_name}).tar.gz" "$(basename ${taostools_install_dir})" --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${taostools_pkg_name}.tar.gz error !!!"
exit $exitcode
fi
fi

cd ${curr_dir}
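
# For reference, an illustrative set of artifacts this script leaves in ${release_dir}
# (the names assume version 3.0.0.0, a taos-tools tag of 2.1.2, Linux x64, stable, full;
# these values are assumptions, not taken from the build system):
#   TDengine-server-3.0.0.0-Linux-x64.tar.gz
#   taosTools-2.1.2-Linux-x64.tar.gz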
#!/bin/bash
#
# Generate tar.gz package for all OS systems

set -e
#set -x

curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8
versionComp=$9
dbName=${10}

script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"

productName="TDengine"
serverName="taosd"
clientName="taos"
configFile="taos.cfg"
tarName="taos.tar.gz"
dumpName="taosdump"
benchmarkName="taosBenchmark"
toolsName="taostools"
adapterName="taosadapter"
defaultPasswd="taosdata"

# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}"
release_dir="${top_dir}/release"

#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/${productName}-enterprise-server-${version}"
else
install_dir="${release_dir}/${productName}-server-${version}"
fi

if [ -d ${top_dir}/tools/taos-tools/packaging/deb ]; then
cd ${top_dir}/tools/taos-tools/packaging/deb
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"

taostools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}')
taostools_install_dir="${release_dir}/${clientName}Tools-${taostools_ver}"

cd ${curr_dir}
else
taostools_install_dir="${release_dir}/${clientName}Tools-${version}"
fi

# Directories and files
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/${serverName}
strip ${build_dir}/bin/${clientName}
# the lite version doesn't include taosadapter, so there is no RESTful interface
bin_files="${build_dir}/bin/${serverName} ${build_dir}/bin/${clientName} ${script_dir}/remove.sh ${script_dir}/startPre.sh ${build_dir}/bin/taosBenchmark"
taostools_bin_files=""
else

wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${build_dir}/bin/TDinsight.sh \
&& echo "TDinsight.sh downloaded!" \
|| echo "failed to download TDinsight.sh"
# download TDinsight caches
orig_pwd=$(pwd)
tdinsight_caches=""
cd ${build_dir}/bin/ && \
chmod +x TDinsight.sh
tdinsight_caches=$(./TDinsight.sh --download-only | xargs -i printf "${build_dir}/bin/{} ")
cd $orig_pwd
echo "TDinsight caches: $tdinsight_caches"

taostools_bin_files=" ${build_dir}/bin/taosdump \
${build_dir}/bin/taosBenchmark \
${build_dir}/bin/TDinsight.sh \
$tdinsight_caches"

bin_files="${build_dir}/bin/${serverName} \
${build_dir}/bin/${clientName} \
${taostools_bin_files} \
${build_dir}/bin/taosadapter \
${build_dir}/bin/tarbitrator \
${script_dir}/remove.sh \
${script_dir}/set_core.sh \
${script_dir}/run_taosd_and_taosadapter.sh \
${script_dir}/startPre.sh \
${script_dir}/taosd-dump-cfg.gdb"
fi

lib_files="${build_dir}/lib/libtaos.so.${version}"
header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h"

if [ "$dbName" != "taos" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
cfg_dir="${top_dir}/packaging/cfg"
fi

install_files="${script_dir}/install.sh"
nginx_dir="${top_dir}/../enterprise/src/plugins/web"

init_file_deb=${script_dir}/../deb/taosd
init_file_rpm=${script_dir}/../rpm/taosd
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord

# make directories.
mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile}

if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
cp ${compile_dir}/test/cfg/taosadapter.toml ${install_dir}/cfg || :
fi

if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
cp ${compile_dir}/test/cfg/taosadapter.service ${install_dir}/cfg || :
fi

if [ -f "${cfg_dir}/${serverName}.service" ]; then
cp ${cfg_dir}/${serverName}.service ${install_dir}/cfg || :
fi

if [ -f "${top_dir}/packaging/cfg/tarbitratord.service" ]; then
cp ${top_dir}/packaging/cfg/tarbitratord.service ${install_dir}/cfg || :
fi

if [ -f "${top_dir}/packaging/cfg/nginxd.service" ]; then
cp ${top_dir}/packaging/cfg/nginxd.service ${install_dir}/cfg || :
fi

mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/${serverName}.deb
mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/${serverName}.rpm
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :

if [ $adapterName != "taosadapter" ]; then
mv ${install_dir}/cfg/taosadapter.toml ${install_dir}/cfg/$adapterName.toml
sed -i "s/path = \"\/var\/log\/taos\"/path = \"\/var\/log\/${productName}\"/g" ${install_dir}/cfg/$adapterName.toml
sed -i "s/password = \"taosdata\"/password = \"${defaultPasswd}\"/g" ${install_dir}/cfg/$adapterName.toml

mv ${install_dir}/cfg/taosadapter.service ${install_dir}/cfg/$adapterName.service
sed -i "s/TDengine/${productName}/g" ${install_dir}/cfg/$adapterName.service
sed -i "s/taosAdapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service
sed -i "s/taosadapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service

mv ${install_dir}/bin/taosadapter ${install_dir}/bin/${adapterName}
mv ${install_dir}/bin/run_taosd_and_taosadapter.sh ${install_dir}/bin/run_${serverName}_and_${adapterName}.sh
mv ${install_dir}/bin/taosd-dump-cfg.gdb ${install_dir}/bin/${serverName}-dump-cfg.gdb
fi

if [ -n "${taostools_bin_files}" ]; then
mkdir -p ${taostools_install_dir} || echo -e "failed to create ${taostools_install_dir}"
mkdir -p ${taostools_install_dir}/bin \
&& cp ${taostools_bin_files} ${taostools_install_dir}/bin \
&& chmod a+x ${taostools_install_dir}/bin/* || :

if [ -f ${top_dir}/tools/taos-tools/packaging/tools/install-taostools.sh ]; then
cp ${top_dir}/tools/taos-tools/packaging/tools/install-taostools.sh \
${taostools_install_dir}/ > /dev/null \
&& chmod a+x ${taostools_install_dir}/install-taostools.sh \
|| echo -e "failed to copy install-taostools.sh"
else
echo -e "install-taostools.sh not found"
fi

if [ -f ${top_dir}/tools/taos-tools/packaging/tools/uninstall-taostools.sh ]; then
cp ${top_dir}/tools/taos-tools/packaging/tools/uninstall-taostools.sh \
${taostools_install_dir}/ > /dev/null \
&& chmod a+x ${taostools_install_dir}/uninstall-taostools.sh \
|| echo -e "failed to copy uninstall-taostools.sh"
else
echo -e "uninstall-taostools.sh not found"
fi

if [ -f ${build_dir}/lib/libavro.so.23.0.0 ]; then
mkdir -p ${taostools_install_dir}/avro/{lib,lib/pkgconfig} || echo -e "failed to create ${taostools_install_dir}/avro"
cp ${build_dir}/lib/libavro.* ${taostools_install_dir}/avro/lib
cp ${build_dir}/lib/pkgconfig/avro-c.pc ${taostools_install_dir}/avro/lib/pkgconfig
fi
fi

if [ -f ${build_dir}/bin/jemalloc-config ]; then
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/bin/jeprof ]; then
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
fi
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
fi
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
fi
fi

if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >>remove_temp.sh
mv remove_temp.sh ${install_dir}/bin/remove.sh

mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
rm -rf ${install_dir}/nginxd/png

if [ "$cpuType" == "aarch64" ]; then
cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
elif [ "$cpuType" == "aarch32" ]; then
cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
fi
rm -rf ${install_dir}/nginxd/sbin/arm
fi

cd ${install_dir}
tar -zcv -f ${tarName} * --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${tarName} error !!!"
exit $exitcode
fi

cd ${curr_dir}
cp ${install_files} ${install_dir}
if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install.sh >>install_temp.sh
mv install_temp.sh ${install_dir}/install.sh
fi
if [ "$pagMode" == "lite" ]; then
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install.sh >>install_temp.sh
mv install_temp.sh ${install_dir}/install.sh
fi
chmod a+x ${install_dir}/install.sh

if [[ $dbName == "taos" ]]; then
# Copy example code
mkdir -p ${install_dir}/examples
examples_dir="${top_dir}/examples"
cp -r ${examples_dir}/c ${install_dir}/examples
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ -d ${examples_dir}/JDBC/connectionPools/target ]; then
rm -rf ${examples_dir}/JDBC/connectionPools/target
fi
if [ -d ${examples_dir}/JDBC/JDBCDemo/target ]; then
rm -rf ${examples_dir}/JDBC/JDBCDemo/target
fi
if [ -d ${examples_dir}/JDBC/mybatisplus-demo/target ]; then
rm -rf ${examples_dir}/JDBC/mybatisplus-demo/target
fi
if [ -d ${examples_dir}/JDBC/springbootdemo/target ]; then
rm -rf ${examples_dir}/JDBC/springbootdemo/target
fi
if [ -d ${examples_dir}/JDBC/SpringJdbcTemplate/target ]; then
rm -rf ${examples_dir}/JDBC/SpringJdbcTemplate/target
fi
if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then
rm -rf ${examples_dir}/JDBC/taosdemo/target
fi

cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
cp -r ${examples_dir}/python ${install_dir}/examples
cp -r ${examples_dir}/R ${install_dir}/examples
cp -r ${examples_dir}/go ${install_dir}/examples
cp -r ${examples_dir}/nodejs ${install_dir}/examples
cp -r ${examples_dir}/C# ${install_dir}/examples
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../tools/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
fi
fi

# Copy driver
mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt

# Copy connector
if [ "$verMode" == "cluster" ]; then
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if you want to use it!"
fi
git clone --depth 1 https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
rm -rf ${install_dir}/connector/python/.git ||:

git clone --depth 1 https://github.com/taosdata/taos-connector-node ${install_dir}/connector/nodejs
rm -rf ${install_dir}/connector/nodejs/.git ||:

git clone --depth 1 https://github.com/taosdata/taos-connector-dotnet ${install_dir}/connector/dotnet
rm -rf ${install_dir}/connector/dotnet/.git ||:

git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
rm -rf ${install_dir}/connector/rust/.git ||:
# cp -r ${connector_dir}/python ${install_dir}/connector
# cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi
fi

# Copy release note
cp ${script_dir}/release_note ${install_dir}

# exit 1

cd ${release_dir}

# install_dir already distinguishes cluster from edge, so that code is commented out
pkg_name=${install_dir}-${osType}-${cpuType}

taostools_pkg_name=${taostools_install_dir}-${osType}-${cpuType}

# if [ "$verMode" == "cluster" ]; then
# pkg_name=${install_dir}-${osType}-${cpuType}
# elif [ "$verMode" == "edge" ]; then
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
# else
|
||||
# echo "unknow verMode, nor cluster or edge"
|
||||
# exit 1
|
||||
# fi
|
||||
|
||||
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
|
||||
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
|
||||
taostools_pkg_name=${taostools_install_dir}-${verType}-${osType}-${cpuType}
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
pkg_name=${pkg_name}
|
||||
taostools_pkg_name=${taostools_pkg_name}
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
pkg_name=${pkg_name}-Lite
|
||||
fi
|
||||
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" "$(basename ${install_dir})" --remove-files || :
|
||||
exitcode=$?
|
||||
if [ "$exitcode" != "0" ]; then
|
||||
echo "tar ${pkg_name}.tar.gz error !!!"
|
||||
exit $exitcode
|
||||
fi
|
||||
|
||||
if [ -n "${taostools_bin_files}" ]; then
|
||||
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${taostools_install_dir}/bin/TDinsight.sh && echo "TDinsight.sh downloaded!"|| echo "failed to download TDinsight.sh"
|
||||
tar -zcv -f "$(basename ${taostools_pkg_name}).tar.gz" "$(basename ${taostools_install_dir})" --remove-files || :
|
||||
exitcode=$?
|
||||
if [ "$exitcode" != "0" ]; then
|
||||
echo "tar ${taostools_pkg_name}.tar.gz error !!!"
|
||||
exit $exitcode
|
||||
fi
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
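
For reference, a minimal sketch of how the package-naming logic above resolves; the variable values here are hypothetical and only illustrate the flow, they are not taken from this script:

# Illustrative inputs only
install_dir="TDengine-server-3.0.0.0"   # hypothetical value
verType=beta; pagMode=lite; osType=Linux; cpuType=x64
pkg_name=${install_dir}-${osType}-${cpuType}
if [[ "$verType" == "beta" ]]; then pkg_name=${install_dir}-${verType}-${osType}-${cpuType}; fi
if [ "$pagMode" == "lite" ]; then pkg_name=${pkg_name}-Lite; fi
echo "$pkg_name"   # TDengine-server-3.0.0.0-beta-Linux-x64-Lite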
@ -1,142 +1,142 @@
#!/bin/bash
#
# Script to stop the service and uninstall TSDB

RED='\033[0;31m'
GREEN='\033[1;32m'
NC='\033[0m'

bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"

data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
cfg_link_dir="/usr/local/taos/cfg"

service_config_dir="/etc/systemd/system"
taos_service_name="taosd"

csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi

initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
service_mod=0
elif $(which service &> /dev/null); then
service_mod=1
service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then
initd_mod=1
elif $(which insserv &> /dev/null); then
initd_mod=2
elif $(which update-rc.d &> /dev/null); then
initd_mod=3
else
service_mod=2
fi
else
service_mod=2
fi

function kill_taosadapter() {
pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}

function kill_taosd() {
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}

function clean_service_on_systemd() {
taosadapter_service_config="${service_config_dir}/taosadapter.service"
if systemctl is-active --quiet taosadapter; then
echo "taosadapter is running, stopping it..."
${csudo}systemctl stop taosadapter &> /dev/null || echo &> /dev/null
fi

taosd_service_config="${service_config_dir}/${taos_service_name}.service"

if systemctl is-active --quiet ${taos_service_name}; then
echo "TDengine taosd is running, stopping it..."
${csudo}systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo}systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null

${csudo}rm -f ${taosd_service_config}

[ -f ${taosadapter_service_config} ] && ${csudo}rm -f ${taosadapter_service_config}

}

function clean_service_on_sysvinit() {
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
#${csudo}sed -i "\|${restart_config_str}|d" /etc/inittab || :

if pidof taosd &> /dev/null; then
echo "TDengine taosd is running, stopping it..."
${csudo}service taosd stop || :
fi

if ((${initd_mod}==1)); then
${csudo}chkconfig --del taosd || :
elif ((${initd_mod}==2)); then
${csudo}insserv -r taosd || :
elif ((${initd_mod}==3)); then
${csudo}update-rc.d -f taosd remove || :
fi

${csudo}rm -f ${service_config_dir}/taosd || :

if $(which init &> /dev/null); then
${csudo}init q || :
fi
}

function clean_service() {
if ((${service_mod}==0)); then
clean_service_on_systemd
elif ((${service_mod}==1)); then
clean_service_on_sysvinit
else
# must manually stop taosd
kill_taosadapter
kill_taosd
fi
}

# Stop service and disable booting start.
clean_service

# Remove all links
${csudo}rm -f ${bin_link_dir}/taos || :
${csudo}rm -f ${bin_link_dir}/taosd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taosBenchmark || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${cfg_link_dir}/*.new || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :

${csudo}rm -f ${log_link_dir} || :
${csudo}rm -f ${data_link_dir} || :

if ((${service_mod}==2)); then
kill_taosadapter
kill_taosd
fi

echo -e "${GREEN}TDengine is removed successfully!${NC}"
@ -1,136 +1,136 @@
taos-1.6.4.0 (Release on 2019-12-01)
Bug fixed:
1. Look for possible causes of file corruption and fix them
2. Encapsulate memory allocation functions to reduce the possibility of crashes
3. Increase Arm64 compilation options
4. Remove most of the warnings in the code
5. Provide a variety of connector usage documents
6. Network connection can be selected in udp and tcp
7. Allow the maximum number of Tags to be 32
8. Bugs reported by the user

taos-1.5.2.6 (Release on 2019-05-13)
Bug fixed:
- Nchar strings sometimes were wrongly truncated on Windows
- Importing data from file throws an error of "invalid SQL"

taos-1.5.2.5 (Release on 2019-05-13)
Bug fixed:
- Long timespan data import sometimes affects query result
- Synchronization of cluster dnodes worked incorrectly when importing

taos-1.5.2.4 (Release on 2019-05-10)
New Features:
- Optimized Windows client installation: now users don't need to copy taos.dll manually
- Changed the priority of taos.cfg and JDBC URL: parameters in the JDBC URL now have a higher priority than parameters in taos.cfg
Bug fixed:
- Expired data files were not deleted correctly
- Occasionally importing returned "affected rows" larger than 0, but 0 rows were actually written into the db
- Commit log is occupied by too many import-to-file requests, which blocked further data importing
- Cloud service shows a wrong number of available days with current balance
- Other minor issues

taos-1.5.1 (Release on 2019-04-09)
New Features:
- Maximum number of rows returned by "top/bottom" methods increased from 20 to 100
- Improved the performance of "first/last" methods
- Increased system stability
Bug fixed:
- Connection failure when querying huge STables through TCP
- The primary timestamp is occasionally returned as NULL in some queries
- Operation failure when updating a tag value to NULL
- Stream calculation couldn't start on certain occasions

taos-1.5.0 (Release on 2019-03-11)
New Features:
- New syntax to automatically create tables when inserting values into non-existing tables
- New syntax "slimit/soffset" to paginate groups in a query result set
- Support "top/bottom" queries on a supertable
- High performance statistic aggregation function "apercentile"
- Remove "first_t/last_t" functions; improve the performance of "first/last" function
- Add pre-aggregation for bool type values
- Supports fixed-length streaming computation, i.e. users may define an end time for a stream
- New JAVA API for SQL subscription, supports table/supertable/SQL query subscription
Bug fixed:
- Data file broken issue when frequently using "import"
- Using "spread" on a super table may return negative values
- RPC bug that random network packets might cause the RPC module to crash

taos-1.4.15 (Released on 2019-01-23)
New Features:
- JDBC Driver now supports configuring timezone, locale, cfgdir in JDBC url
- A new API is added to validate if a table creation sql statement is correct in syntax without actually creating that table
Bugs Fixed:
- "select last(*) from STable" sometimes returned an incorrect number of rows
- JDBC driver method ResultSetMetaData.getColumnClassName() returned wrong values.
- Web shell automatically changed query string to lower case

taos-1.4.14 (Released on 2018-12-22)
New Features:
- C Driver support for integration with Python
- JDBC Driver support for integration with R and MATLAB

taos-1.4.13 (Released on 2018-12-14)
Bugs Fixed:
- Clients failed to connect to server due to unexpected and invalid packets received by the server.
Features Added:
- Add support for HikariCP in TSDB JDBC driver.

taos-1.4.12 (Released on 2018-12-08)
Bugs Fixed:
- Querying data while inserting into the database might return incomplete resultsets.
Features Added:
- A new python driver is added.
- Increased system stability.
- Changed meaning of database configuration parameter 'ablocks'. 'ablocks' used to refer to the number of total cache blocks in memory, now it refers to the average number of cache blocks for each table in memory.

taos-1.4.11 (Released on 2018-11-23)
Bugs Fixed:
- Thread memory leaking during high-frequency committing.
- Master dnode selection failure caused by accidental network issues.
Features Added:
- Change keyword "metrics" to "stables", i.e. supertables; the previous query "show metrics" is now changed to "show stables".
- Add an error message mechanism in the C# driver. An error with message "Failed to connect to server" is thrown when fetching data experiences a network connection interruption during data transmission.

taos-1.4.10 (Released on 2018-11-13)
Bugs Fixed:
- Taosdump failed while exporting extremely large datasets to a .sql file.
- Commit status did not change correctly if the last commit was triggered by commit threshold time (ctime) and no more new data was written to DB during the next ctime period.
Features Added:
- Support importing historical data from the Telegraf interface.
- Support MyBatis framework in TSDB JDBC Driver.
- Change result set row indexing in JDBC Driver. Result set row indexes now start from 1 instead of 0.

taos-1.4.9 (Released on 2018-11-02)
Bugs Fixed:
- Dumping data using UTF-8 format in client shell failed.
- Tag query failed using C# Driver.
- Committing data to disk failed if DB files were corrupted.
- Continuously pressing Ctrl+c in client shell multiple times produced a segmentation fault.
Features Added:
- Changed the display pattern in shell for taosdump.
- Add a check of the status of an existing resultset before firing a new query in a single JDBC connection. A connection can only have a single open resultset, and the resultset must be closed before one can execute new queries.


taos-1.4.7 (Released on 2018-10-25)
Bug Fixed:
- UTF-8 encoding in JDBC Driver did not give the correct Chinese characters.
- Fix crash error when the where clause is too long.
Features Added:
- Add check on database properties, force ablocks to be at least (4 * tables) in a vnode.
- Check if pVgroup is empty in sdb.

taos-1.4.6 (Released on 2018-10-21)
Bug Fixed:
- Fix wrong symbol addition while exporting csv files.
Features Added:
- Update grafana plugins.
- Update python drivers.
- Add error code explanation in JDBC Driver.
- Prohibit login when the server and client versions do not match.

taos-1.4.5 (Released on 2018-10-17)
Bug Fixed:
- Fix HTTP request truncation bug in Telegraf interface.
Features Added:
- Support nchar and null object in JDBC Driver.
@ -1,130 +1,130 @@
#!/bin/bash
#
# Script to stop the service and uninstall TDengine's arbitrator

set -e
#set -x

verMode=edge

RED='\033[0;31m'
GREEN='\033[1;32m'
NC='\033[0m'

#install main path
install_main_dir="/usr/local/tarbitrator"
bin_link_dir="/usr/bin"
#inc_link_dir="/usr/include"

service_config_dir="/etc/systemd/system"
tarbitrator_service_name="tarbitratord"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi

initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
service_mod=0
elif $(which service &> /dev/null); then
service_mod=1
service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then
initd_mod=1
elif $(which insserv &> /dev/null); then
initd_mod=2
elif $(which update-rc.d &> /dev/null); then
initd_mod=3
else
service_mod=2
fi
else
service_mod=2
fi

function kill_tarbitrator() {
pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}
function clean_bin() {
# Remove link
${csudo}rm -f ${bin_link_dir}/tarbitrator || :
}

function clean_header() {
# Remove link
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
}

function clean_log() {
# Remove link
${csudo}rm -rf /arbitrator.log || :
}

function clean_service_on_systemd() {
tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"

if systemctl is-active --quiet ${tarbitrator_service_name}; then
echo "TDengine tarbitrator is running, stopping it..."
${csudo}systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo}systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null

${csudo}rm -f ${tarbitratord_service_config}
}

function clean_service_on_sysvinit() {
if pidof tarbitrator &> /dev/null; then
echo "TDengine's tarbitrator is running, stopping it..."
${csudo}service tarbitratord stop || :
fi

if ((${initd_mod}==1)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo}chkconfig --del tarbitratord || :
fi
elif ((${initd_mod}==2)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo}insserv -r tarbitratord || :
fi
elif ((${initd_mod}==3)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo}update-rc.d -f tarbitratord remove || :
fi
fi

${csudo}rm -f ${service_config_dir}/tarbitratord || :

if $(which init &> /dev/null); then
${csudo}init q || :
fi
}

function clean_service() {
if ((${service_mod}==0)); then
clean_service_on_systemd
elif ((${service_mod}==1)); then
clean_service_on_sysvinit
else
# must manually stop
kill_tarbitrator
fi
}

# Stop service and disable booting start.
clean_service
# Remove binary file and links
clean_bin
# Remove header file.
##clean_header
# Remove log file
clean_log

${csudo}rm -rf ${install_main_dir}

echo -e "${GREEN}TDengine's arbitrator is removed successfully!${NC}"
@ -1,85 +1,85 @@
#!/bin/bash
#
# Script to stop the client and uninstall database, but retain the config and log files.
set -e
# set -x

RED='\033[0;31m'
GREEN='\033[1;32m'
NC='\033[0m'

installDir="/usr/local/taos"
clientName="taos"
uninstallScript="rmtaos"

#install main path
install_main_dir=${installDir}

log_link_dir=${installDir}/log
cfg_link_dir=${installDir}/cfg
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"

csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi

function kill_client() {
if [ -n "$(pidof ${clientName})" ]; then
# kill the running client processes by name
${csudo}kill -9 $(pidof ${clientName}) || :
fi
}

function clean_bin() {
# Remove link
${csudo}rm -f ${bin_link_dir}/${clientName} || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${bin_link_dir}/taosdump || :
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}rm -f ${bin_link_dir}/set_core || :
}

function clean_lib() {
# Remove link
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
#${csudo}rm -rf ${v15_java_app_dir} || :
}

function clean_header() {
# Remove link
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
}

function clean_config() {
# Remove link
${csudo}rm -f ${cfg_link_dir}/* || :
}

function clean_log() {
# Remove link
${csudo}rm -rf ${log_link_dir} || :
}

# Stop client.
kill_client
# Remove binary file and links
clean_bin
# Remove header file.
clean_header
# Remove lib file
clean_lib
# Remove link log directory
clean_log
# Remove link configuration file
clean_config

${csudo}rm -rf ${install_main_dir}

echo -e "${GREEN}TDengine client is removed successfully!${NC}"
echo
@ -1,39 +1,39 @@
#!/bin/bash

# This script is used to repair links when you want to move TDengine
# data to other places and to access data.

# Read link path
read -p "Please enter link directory such as /var/lib/taos/tsdb: " linkDir

while true; do
if [ ! -d $linkDir ]; then
read -p "Path does not exist, please enter the correct link path:" linkDir
continue
fi
break
done

declare -A dirHash

for linkFile in $(find -L $linkDir -xtype l); do
targetFile=$(readlink -f $linkFile)
echo "targetFile: ${targetFile}"
# TODO : Extract directory part and basename part
dirName=$(dirname $(dirname ${targetFile}))
baseName=$(basename $(dirname ${targetFile}))/$(basename ${targetFile})

# TODO :
newDir="${dirHash["$dirName"]}"
if [ -z "${dirHash["$dirName"]}" ]; then
read -p "Please enter the directory to replace ${dirName}:" newDir

read -p "Do you want to replace all[y/N]?" replcaceAll
if [[ ( "${replcaceAll}" == "y") || ( "${replcaceAll}" == "Y") ]]; then
dirHash["$dirName"]="$newDir"
fi
fi

# Replace the file
ln -sf "${newDir}/${baseName}" "${linkFile}"
done
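
As a sketch of what one iteration of the loop above does, assuming a hypothetical dangling link /var/lib/taos/tsdb/vnode1/data whose old target lived under /old/disk and should now point under /new/disk:

# Illustrative only: rewrite one dangling link by hand
linkFile=/var/lib/taos/tsdb/vnode1/data                                      # hypothetical link
targetFile=$(readlink -f "$linkFile")                                        # e.g. /old/disk/vnode1/data
baseName=$(basename "$(dirname "$targetFile")")/$(basename "$targetFile")    # vnode1/data
ln -sf "/new/disk/${baseName}" "$linkFile"                                   # relink to the new location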
@ -1,3 +1,3 @@
#!/bin/bash
[[ -x /usr/bin/taosadapter ]] && /usr/bin/taosadapter &
taosd
@ -1,40 +1,40 @@
#!/bin/bash
#
# This file is used to set the core dump config for when taosd crashes

# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'

# set -e
# set -x
corePath=$1

csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi

if [[ ! -n ${corePath} ]]; then
echo -e -n "${GREEN}Please enter a file directory to save the coredump file${NC}:"
read corePath
while true; do
if [[ ! -z "$corePath" ]]; then
break
else
read -p "Please enter a file directory to save the coredump file:" corePath
fi
done
fi

ulimit -c unlimited
${csudo}sed -i '/ulimit -c unlimited/d' /etc/profile ||:
${csudo}sed -i '$a\ulimit -c unlimited' /etc/profile ||:
source /etc/profile

${csudo}mkdir -p ${corePath} ||:
${csudo}sysctl -w kernel.core_pattern=${corePath}/core-%e-%p ||:
${csudo}echo "${corePath}/core-%e-%p" | ${csudo}tee /proc/sys/kernel/core_pattern ||:
@ -1,144 +1,144 @@
# Usage:
# sudo gdb -x ./taosd-dump-cfg.gdb

define attach_pidof
if $argc != 1
help attach_pidof
else
shell echo -e "\
set \$PID = "$(echo $(pidof $arg0) 0 | cut -d " " -f 1)"\n\
if \$PID > 0\n\
attach "$(pidof -s $arg0)"\n\
else\n\
print \"Process '"$arg0"' not found\"\n\
end" > /tmp/gdb.pidof
source /tmp/gdb.pidof
end
end

document attach_pidof
Attach to process by name
Usage: attach_pidof PROG_NAME
end

set $TAOS_CFG_VTYPE_INT8 = 0
set $TAOS_CFG_VTYPE_INT16 = 1
set $TAOS_CFG_VTYPE_INT32 = 2
set $TAOS_CFG_VTYPE_FLOAT = 3
set $TAOS_CFG_VTYPE_STRING = 4
set $TAOS_CFG_VTYPE_IPSTR = 5
set $TAOS_CFG_VTYPE_DIRECTORY = 6

set $TSDB_CFG_CTYPE_B_CONFIG = 1U
set $TSDB_CFG_CTYPE_B_SHOW = 2U
set $TSDB_CFG_CTYPE_B_LOG = 4U
set $TSDB_CFG_CTYPE_B_CLIENT = 8U
set $TSDB_CFG_CTYPE_B_OPTION = 16U
set $TSDB_CFG_CTYPE_B_NOT_PRINT = 32U

set $TSDB_CFG_PRINT_LEN = 53

define print_blank
if $argc == 1
set $blank_len = $arg0
while $blank_len > 0
printf "%s", " "
set $blank_len = $blank_len - 1
end
end
end

define dump_cfg
if $argc != 1
help dump_cfg
else
set $blen = $TSDB_CFG_PRINT_LEN - (int)strlen($arg0.option)
if $blen < 0
set $blen = 0
end
#printf "%s: %d\n", "******blen: ", $blen
printf "%s: ", $arg0.option
print_blank $blen

if $arg0.valType == $TAOS_CFG_VTYPE_INT8
printf "%d\n", *((int8_t *) $arg0.ptr)
else
if $arg0.valType == $TAOS_CFG_VTYPE_INT16
printf "%d\n", *((int16_t *) $arg0.ptr)
else
if $arg0.valType == $TAOS_CFG_VTYPE_INT32
printf "%d\n", *((int32_t *) $arg0.ptr)
else
if $arg0.valType == $TAOS_CFG_VTYPE_FLOAT
printf "%f\n", *((float *) $arg0.ptr)
else
printf "%s\n", $arg0.ptr
end
end
end
end
end
end

document dump_cfg
Dump a cfg entry
Usage: dump_cfg cfg
end

set pagination off

attach_pidof taosd

set $idx=0
#print tsGlobalConfigNum
#set $end=$1
set $end=tsGlobalConfigNum

p "*=*=*=*=*=*=*=*=*= taos global config:"
#while ($idx .lt. $end)
while ($idx < $end)
# print tsGlobalConfig[$idx].option
set $cfg = tsGlobalConfig[$idx]
set $tsce = tscEmbedded
# p "1"
if ($tsce == 0)
if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_CLIENT)
end
else
if $cfg.cfgType & $TSDB_CFG_CTYPE_B_NOT_PRINT
else
if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_SHOW)
else
dump_cfg $cfg
end
end
end

set $idx=$idx+1
end

set $idx=0

p "*=*=*=*=*=*=*=*=*= taos local config:"
while ($idx < $end)
set $cfg = tsGlobalConfig[$idx]
set $tsce = tscEmbedded
if ($tsce == 0)
if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_CLIENT)
end
else
if $cfg.cfgType & $TSDB_CFG_CTYPE_B_NOT_PRINT
else
if ($cfg.cfgType & $TSDB_CFG_CTYPE_B_SHOW)
else
dump_cfg $cfg
end
end
end

set $idx=$idx+1
end

detach

quit
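
The header comment already shows the intended invocation; one hedged variation is to run it non-interactively and capture the dumped config to a file (this relies only on plain gdb batch mode, nothing specific to this script is assumed):

sudo gdb -batch -x ./taosd-dump-cfg.gdb > taosd-config-dump.txt 2>&1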
@ -60,7 +60,7 @@ static void registerRequest(SRequestObj *pRequest) {
static void deregisterRequest(SRequestObj *pRequest) {
assert(pRequest != NULL);

STscObj *pTscObj = pRequest->pTscObj;
STscObj * pTscObj = pRequest->pTscObj;
SInstanceSummary *pActivity = &pTscObj->pAppInfo->summary;

int32_t currentInst = atomic_sub_fetch_64((int64_t *)&pActivity->currentRequests, 1);

@ -91,7 +91,6 @@ static bool clientRpcRfp(int32_t code) {
}
}


// TODO refactor
void *openTransporter(const char *user, const char *auth, int32_t numOfThread) {
SRpcInit rpcInit;

@ -105,10 +104,6 @@ void *openTransporter(const char *user, const char *auth, int32_t numOfThread) {
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.user = (char *)user;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.ckey = "key";
rpcInit.spi = 1;
rpcInit.secret = (char *)auth;

void *pDnodeConn = rpcOpen(&rpcInit);
if (pDnodeConn == NULL) {
tscError("failed to init connection to server");

@ -318,7 +313,7 @@ int taos_options_imp(TSDB_OPTION option, const char *str) {
return 0;
}

SConfig *pCfg = taosGetCfg();
SConfig * pCfg = taosGetCfg();
SConfigItem *pItem = NULL;

switch (option) {

@ -23,6 +23,8 @@
#include "tmsgtype.h"
#include "tpagedbuf.h"
#include "tref.h"
#include "cJSON.h"
#include "tdataformat.h"

static int32_t initEpSetFromCfg(const char* firstEp, const char* secondEp, SCorEpSet* pEpSet);
static SMsgSendInfo* buildConnectMsg(SRequestObj* pRequest);

@ -268,7 +270,7 @@ void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t

if (pSchema[i].type == TSDB_DATA_TYPE_VARCHAR) {
pResInfo->userFields[i].bytes -= VARSTR_HEADER_SIZE;
} else if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
} else if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR || pSchema[i].type == TSDB_DATA_TYPE_JSON) {
pResInfo->userFields[i].bytes = (pResInfo->userFields[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
}

@ -291,7 +293,7 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList

SQueryResult res = {.code = 0, .numOfRows = 0, .msgSize = ERROR_MSG_BUF_DEFAULT_SIZE, .msg = pRequest->msgBuf};
int32_t code = schedulerExecJob(pTransporter, pNodeList, pDag, &pRequest->body.queryJob, pRequest->sqlstr,
pRequest->metric.start, &res);
pRequest->metric.start, &res);
if (code != TSDB_CODE_SUCCESS) {
if (pRequest->body.queryJob != 0) {
schedulerFreeJob(pRequest->body.queryJob);

@ -325,7 +327,7 @@ int32_t getQueryPlan(SRequestObj* pRequest, SQuery* pQuery, SArray** pNodeList)
int32_t validateSversion(SRequestObj* pRequest, void* res) {
SArray* pArray = NULL;
int32_t code = 0;


if (TDMT_VND_SUBMIT == pRequest->type) {
SSubmitRsp* pRsp = (SSubmitRsp*)res;
if (pRsp->nBlocks <= 0) {

@ -337,14 +339,13 @@ int32_t validateSversion(SRequestObj* pRequest, void* res) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_OUT_OF_MEMORY;
}


for (int32_t i = 0; i < pRsp->nBlocks; ++i) {
SSubmitBlkRsp *blk = pRsp->pBlocks + i;
STbSVersion tbSver = {.tbFName = blk->tblFName, .sver = blk->sver};
SSubmitBlkRsp* blk = pRsp->pBlocks + i;
STbSVersion tbSver = {.tbFName = blk->tblFName, .sver = blk->sver};
taosArrayPush(pArray, &tbSver);
}
} else if (TDMT_VND_QUERY == pRequest->type) {

}

SCatalog* pCatalog = NULL;

@ -365,11 +366,10 @@ void freeRequestRes(SRequestObj* pRequest, void* res) {
if (NULL == res) {
return;
}


if (TDMT_VND_SUBMIT == pRequest->type) {
tFreeSSubmitRsp((SSubmitRsp*)res);
} else if (TDMT_VND_QUERY == pRequest->type) {

}
}

@ -805,6 +805,101 @@ static int32_t doPrepareResPtr(SReqResultInfo* pResInfo) {
return TSDB_CODE_SUCCESS;
}

static char* parseTagDatatoJson(void *p){
char* string = NULL;
cJSON *json = cJSON_CreateObject();
if (json == NULL)
{
goto end;
}

int16_t nCols = kvRowNCols(p);
char tagJsonKey[256] = {0};
for (int j = 0; j < nCols; ++j) {
SColIdx * pColIdx = kvRowColIdxAt(p, j);
char* val = (char*)(kvRowColVal(p, pColIdx));
if (j == 0){
if(*val == TSDB_DATA_TYPE_NULL){
string = taosMemoryCalloc(1, 8);
sprintf(varDataVal(string), "%s", TSDB_DATA_NULL_STR_L);
varDataSetLen(string, strlen(varDataVal(string)));
goto end;
}
continue;
}

// json key encode by binary
memset(tagJsonKey, 0, sizeof(tagJsonKey));
memcpy(tagJsonKey, varDataVal(val), varDataLen(val));
// json value
val += varDataTLen(val);
char* realData = POINTER_SHIFT(val, CHAR_BYTES);
char type = *val;
if(type == TSDB_DATA_TYPE_NULL) {
cJSON* value = cJSON_CreateNull();
if (value == NULL)
{
goto end;
}
cJSON_AddItemToObject(json, tagJsonKey, value);
}else if(type == TSDB_DATA_TYPE_NCHAR) {
cJSON* value = NULL;
if (varDataLen(realData) > 0){
char *tagJsonValue = taosMemoryCalloc(varDataLen(realData), 1);
int32_t length = taosUcs4ToMbs((TdUcs4 *)varDataVal(realData), varDataLen(realData), tagJsonValue);
if (length < 0) {
tscError("charset:%s to %s. val:%s convert json value failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, val);
taosMemoryFree(tagJsonValue);
goto end;
}
value = cJSON_CreateString(tagJsonValue);
taosMemoryFree(tagJsonValue);
if (value == NULL)
{
goto end;
}
}else if(varDataLen(realData) == 0){
value = cJSON_CreateString("");
}else{
ASSERT(0);
}

cJSON_AddItemToObject(json, tagJsonKey, value);
}else if(type == TSDB_DATA_TYPE_DOUBLE){
double jsonVd = *(double*)(realData);
cJSON* value = cJSON_CreateNumber(jsonVd);
if (value == NULL)
{
goto end;
}
cJSON_AddItemToObject(json, tagJsonKey, value);
// }else if(type == TSDB_DATA_TYPE_BIGINT){
// int64_t jsonVd = *(int64_t*)(realData);
// cJSON* value = cJSON_CreateNumber((double)jsonVd);
// if (value == NULL)
// {
// goto end;
// }
// cJSON_AddItemToObject(json, tagJsonKey, value);
}else if (type == TSDB_DATA_TYPE_BOOL) {
char jsonVd = *(char*)(realData);
cJSON* value = cJSON_CreateBool(jsonVd);
if (value == NULL)
{
goto end;
}
|
||||
cJSON_AddItemToObject(json, tagJsonKey, value);
|
||||
}else{
|
||||
ASSERT(0);
|
||||
}
|
||||
|
||||
}
|
||||
string = cJSON_PrintUnformatted(json);
|
||||
end:
|
||||
cJSON_Delete(json);
|
||||
return string;
|
||||
}
|
||||
|
||||
static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int32_t numOfCols, int32_t* colLength) {
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
int32_t type = pResultInfo->fields[i].type;
|
||||
|
@ -835,9 +930,7 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
|
|||
|
||||
pResultInfo->pCol[i].pData = pResultInfo->convertBuf[i];
|
||||
pResultInfo->row[i] = pResultInfo->pCol[i].pData;
|
||||
}
|
||||
|
||||
if (type == TSDB_DATA_TYPE_JSON) {
|
||||
}else if (type == TSDB_DATA_TYPE_JSON && colLength[i] > 0) {
|
||||
char* p = taosMemoryRealloc(pResultInfo->convertBuf[i], colLength[i]);
|
||||
if (p == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
|
@ -850,6 +943,7 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
|
|||
if (pCol->offset[j] != -1) {
|
||||
char* pStart = pCol->offset[j] + pCol->pData;
|
||||
|
||||
|
||||
int32_t jsonInnerType = *pStart;
|
||||
char* jsonInnerData = pStart + CHAR_BYTES;
|
||||
char dst[TSDB_MAX_JSON_TAG_LEN] = {0};
|
||||
|
@ -857,15 +951,9 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
|
|||
sprintf(varDataVal(dst), "%s", TSDB_DATA_NULL_STR_L);
|
||||
varDataSetLen(dst, strlen(varDataVal(dst)));
|
||||
} else if (jsonInnerType == TSDB_DATA_TYPE_JSON) {
|
||||
int32_t length =
|
||||
taosUcs4ToMbs((TdUcs4*)varDataVal(jsonInnerData), varDataLen(jsonInnerData), varDataVal(dst));
|
||||
|
||||
if (length <= 0) {
|
||||
tscError("charset:%s to %s. val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset,
|
||||
varDataVal(jsonInnerData));
|
||||
length = 0;
|
||||
}
|
||||
varDataSetLen(dst, length);
|
||||
char *jsonString = parseTagDatatoJson(jsonInnerData);
|
||||
STR_TO_VARSTR(dst, jsonString);
|
||||
taosMemoryFree(jsonString);
|
||||
} else if (jsonInnerType == TSDB_DATA_TYPE_NCHAR) { // value -> "value"
|
||||
*(char*)varDataVal(dst) = '\"';
|
||||
int32_t length = taosUcs4ToMbs((TdUcs4*)varDataVal(jsonInnerData), varDataLen(jsonInnerData),
|
||||
|
@ -1022,7 +1110,6 @@ TSDB_SERVER_STATUS taos_check_server_status(const char* fqdn, int port, char* de
|
|||
SRpcInit rpcInit = {0};
|
||||
char pass[TSDB_PASSWORD_LEN + 1] = {0};
|
||||
|
||||
taosEncryptPass_c((uint8_t*)("_pwd"), strlen("_pwd"), pass);
|
||||
rpcInit.label = "CHK";
|
||||
rpcInit.numOfThreads = 1;
|
||||
rpcInit.cfp = NULL;
|
||||
|
@ -1030,9 +1117,6 @@ TSDB_SERVER_STATUS taos_check_server_status(const char* fqdn, int port, char* de
|
|||
rpcInit.connType = TAOS_CONN_CLIENT;
|
||||
rpcInit.idleTime = tsShellActivityTimer * 1000;
|
||||
rpcInit.user = "_dnd";
|
||||
rpcInit.ckey = "_key";
|
||||
rpcInit.spi = 1;
|
||||
rpcInit.secret = pass;
|
||||
|
||||
clientRpc = rpcOpen(&rpcInit);
|
||||
if (clientRpc == NULL) {
|
||||
@ -65,12 +65,14 @@ for (int i = 1; i < keyLen; ++i) { \
|
|||
#define OTD_TIMESTAMP_COLUMN_NAME "ts"
|
||||
#define OTD_METRIC_VALUE_COLUMN_NAME "value"
|
||||
|
||||
#define TS "_ts"
|
||||
#define TS_LEN 3
|
||||
#define TAG "_tagNone"
|
||||
#define TAG_LEN 8
|
||||
#define VALUE "value"
|
||||
#define VALUE_LEN 5
|
||||
#define TS "_ts"
|
||||
#define TS_LEN 3
|
||||
#define TAG "_tag"
|
||||
#define TAG_LEN 4
|
||||
#define TAG_VALUE "NULL"
|
||||
#define TAG_VALUE_LEN 4
|
||||
#define VALUE "value"
|
||||
#define VALUE_LEN 5
|
||||
|
||||
#define BINARY_ADD_LEN 2 // "binary" 2 means " "
|
||||
#define NCHAR_ADD_LEN 3 // L"nchar" 3 means L" "
|
||||
|
@ -598,25 +600,33 @@ static bool smlParseNumber(SSmlKv *kvVal, SSmlMsgBuf *msg){
|
|||
kvVal->type = TSDB_DATA_TYPE_FLOAT;
|
||||
kvVal->f = (float)result;
|
||||
}else if ((left == 1 && *endptr == 'i') || (left == 3 && strncasecmp(endptr, "i64", left) == 0)){
|
||||
if(result >= (double)INT64_MAX){
|
||||
kvVal->i = INT64_MAX;
|
||||
}else if(result <= (double)INT64_MIN){
|
||||
kvVal->i = INT64_MIN;
|
||||
}else{
|
||||
kvVal->i = result;
|
||||
if(smlDoubleToInt64OverFlow(result)){
|
||||
errno = 0;
|
||||
int64_t tmp = taosStr2Int64(pVal, &endptr, 10);
|
||||
if(errno == ERANGE){
|
||||
smlBuildInvalidDataMsg(msg, "big int out of range[-9223372036854775808,9223372036854775807]", pVal);
|
||||
return false;
|
||||
}
|
||||
kvVal->type = TSDB_DATA_TYPE_BIGINT;
|
||||
kvVal->i = tmp;
|
||||
return true;
|
||||
}
|
||||
kvVal->type = TSDB_DATA_TYPE_BIGINT;
|
||||
kvVal->i = (int64_t)result;
|
||||
}else if ((left == 3 && strncasecmp(endptr, "u64", left) == 0)){
|
||||
if(result < 0){
|
||||
smlBuildInvalidDataMsg(msg, "unsigned big int is too large, out of precision", pVal);
|
||||
return false;
|
||||
}
|
||||
if(result >= (double)UINT64_MAX){
|
||||
kvVal->u = UINT64_MAX;
|
||||
}else{
|
||||
kvVal->u = result;
|
||||
if(result >= (double)UINT64_MAX || result < 0){
|
||||
errno = 0;
|
||||
uint64_t tmp = taosStr2UInt64(pVal, &endptr, 10);
|
||||
if(errno == ERANGE || result < 0){
|
||||
smlBuildInvalidDataMsg(msg, "unsigned big int out of range[0,18446744073709551615]", pVal);
|
||||
return false;
|
||||
}
|
||||
kvVal->type = TSDB_DATA_TYPE_UBIGINT;
|
||||
kvVal->u = tmp;
|
||||
return true;
|
||||
}
|
||||
kvVal->type = TSDB_DATA_TYPE_UBIGINT;
|
||||
kvVal->u = result;
|
||||
}else if (left == 3 && strncasecmp(endptr, "i32", left) == 0){
|
||||
if(!IS_VALID_INT(result)){
|
||||
smlBuildInvalidDataMsg(msg, "int out of range[-2147483648,2147483647]", pVal);
|
||||
|
@ -1103,8 +1113,7 @@ static int32_t smlParseTelnetString(SSmlHandle *info, const char* sql, SSmlTable
|
|||
kv->keyLen = VALUE_LEN;
|
||||
kv->value = value;
|
||||
kv->length = valueLen;
|
||||
if(!smlParseValue(kv, &info->msgBuf) || kv->type == TSDB_DATA_TYPE_BINARY
|
||||
|| kv->type == TSDB_DATA_TYPE_NCHAR || kv->type == TSDB_DATA_TYPE_BOOL){
|
||||
if(!smlParseValue(kv, &info->msgBuf)){
|
||||
return TSDB_CODE_SML_INVALID_DATA;
|
||||
}
|
||||
|
||||
|
@ -1124,8 +1133,8 @@ static int32_t smlParseCols(const char* data, int32_t len, SArray *cols, char *c
|
|||
if(!kv) return TSDB_CODE_OUT_OF_MEMORY;
|
||||
kv->key = TAG;
|
||||
kv->keyLen = TAG_LEN;
|
||||
kv->value = TAG;
|
||||
kv->length = TAG_LEN;
|
||||
kv->value = TAG_VALUE;
|
||||
kv->length = TAG_VALUE_LEN;
|
||||
kv->type = TSDB_DATA_TYPE_NCHAR;
|
||||
if(cols) taosArrayPush(cols, &kv);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -2264,6 +2273,7 @@ static int32_t smlParseLine(SSmlHandle *info, char* lines[], int numLines){
|
|||
uError("SML:0x%" PRIx64 " smlParseJSON failed:%s", info->id, *lines);
|
||||
return code;
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < numLines; ++i) {
|
||||
|
|
|
@@ -1435,7 +1435,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t wait_time) {
while (1) {
tmqHandleAllDelayedTask(tmq);
tmqPollImpl(tmq, wait_time);
if (tmqPollImpl(tmq, wait_time) < 0) return NULL;
rspObj = tmqHandleAllRsp(tmq, wait_time, false);
if (rspObj) {
@ -567,6 +567,7 @@ TEST(testCase, insert_test) {
|
|||
taos_free_result(pRes);
|
||||
taos_close(pConn);
|
||||
}
|
||||
#endif
|
||||
|
||||
TEST(testCase, projection_query_tables) {
|
||||
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
|
@ -605,7 +606,7 @@ TEST(testCase, projection_query_tables) {
|
|||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
for(int32_t i = 0; i < 10000000; i += 20) {
|
||||
for(int32_t i = 0; i < 100000; i += 20) {
|
||||
char sql[1024] = {0};
|
||||
sprintf(sql,
|
||||
"insert into tu values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
|
||||
|
@ -625,7 +626,7 @@ TEST(testCase, projection_query_tables) {
|
|||
|
||||
printf("start to insert next table\n");
|
||||
|
||||
for(int32_t i = 0; i < 10000000; i += 20) {
|
||||
for(int32_t i = 0; i < 100000; i += 20) {
|
||||
char sql[1024] = {0};
|
||||
sprintf(sql,
|
||||
"insert into tu2 values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
|
||||
|
@ -692,8 +693,6 @@ TEST(testCase, projection_query_stables) {
|
|||
taos_close(pConn);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
TEST(testCase, agg_query_tables) {
|
||||
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
ASSERT_NE(pConn, nullptr);
|
||||
|
|
|
@ -208,6 +208,7 @@ TEST(testCase, smlParseCols_Error_Test) {
|
|||
memcpy(sql, data[i], len + 1);
|
||||
SArray *cols = taosArrayInit(8, POINTER_BYTES);
|
||||
int32_t ret = smlParseCols(sql, len, cols, NULL, false, dumplicateKey, &msgBuf);
|
||||
printf("i:%d\n",i);
|
||||
ASSERT_NE(ret, TSDB_CODE_SUCCESS);
|
||||
taosHashClear(dumplicateKey);
|
||||
taosMemoryFree(sql);
|
||||
|
@ -272,11 +273,11 @@ TEST(testCase, smlParseCols_tag_Test) {
|
|||
|
||||
// nchar
|
||||
kv = (SSmlKv *)taosArrayGetP(cols, 0);
|
||||
ASSERT_EQ(strncasecmp(kv->key, TAG, strlen(TAG)), 0);
|
||||
ASSERT_EQ(kv->keyLen, strlen(TAG));
|
||||
ASSERT_EQ(strncasecmp(kv->key, TAG, TAG_LEN), 0);
|
||||
ASSERT_EQ(kv->keyLen, TAG_LEN);
|
||||
ASSERT_EQ(kv->type, TSDB_DATA_TYPE_NCHAR);
|
||||
ASSERT_EQ(kv->length, strlen(TAG));
|
||||
ASSERT_EQ(strncasecmp(kv->value, TAG, strlen(TAG)), 0);
|
||||
ASSERT_EQ(kv->length, TAG_LEN);
|
||||
ASSERT_EQ(strncasecmp(kv->value, TAG_VALUE, TAG_VALUE_LEN), 0);
|
||||
taosMemoryFree(kv);
|
||||
|
||||
taosArrayDestroy(cols);
|
||||
|
@ -506,7 +507,7 @@ TEST(testCase, smlProcess_influx_Test) {
|
|||
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,heading=221,grade=0,fuel_consumption=25 1451608403000000000",
|
||||
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,heading=221,grade=0,fuel_consumption=25 1451609404000000000",
|
||||
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 fuel_consumption=25,grade=0 1451619405000000000",
|
||||
"readings,name=truck_1,fleet=South,driver=Albert,model=F-150,device_version=v1.5 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=72.45258,longitude=68.83761,elevation=255,velocity=0,heading=181,grade=0,fuel_consumption=25 145160640600000000",
|
||||
"readings,name=truck_1,fleet=South,driver=Albert,model=F-150,device_version=v1.5 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=72.45258,longitude=68.83761,elevation=255,velocity=0,heading=181,grade=0,fuel_consumption=25 1451606406000000000",
|
||||
"readings,name=truck_2,driver=Derek,model=F-150,device_version=v1.5 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation=428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451606407000000000",
|
||||
"readings,name=truck_2,fleet=North,driver=Derek,model=F-150 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation=428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451609408000000000",
|
||||
"readings,fleet=South,name=truck_0,driver=Trish,model=H-2,device_version=v2.3 fuel_consumption=25,grade=0 1451629409000000000",
|
||||
|
@ -745,7 +746,7 @@ TEST(testCase, smlProcess_json1_Test) {
|
|||
" }\n"
|
||||
" }\n"
|
||||
"]";
|
||||
int ret = smlProcess(info, (char **)(&sql), -1);
|
||||
int ret = smlProcess(info, (char **)(&sql), 1);
|
||||
ASSERT_EQ(ret, 0);
|
||||
|
||||
// case 1
|
||||
|
@ -1220,4 +1221,56 @@ TEST(testCase, sml_TD15662_Test) {
|
|||
ASSERT_EQ(ts, 1626006833639000000);
|
||||
|
||||
taos_free_result(res);
|
||||
}
|
||||
|
||||
TEST(testCase, sml_TD15735_Test) {
|
||||
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
ASSERT_NE(taos, nullptr);
|
||||
|
||||
TAOS_RES* pRes = taos_query(taos, "create database if not exists sml_db");
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(taos, "use sml_db");
|
||||
taos_free_result(pRes);
|
||||
|
||||
SRequestObj *request = (SRequestObj *)createRequest((STscObj*)taos, NULL, NULL, TSDB_SQL_INSERT);
|
||||
ASSERT_NE(request, nullptr);
|
||||
|
||||
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
|
||||
ASSERT_NE(info, nullptr);
|
||||
|
||||
const char *sql[1] = {
|
||||
"{'metric': 'pekoiw', 'timestamp': {'value': 1626006833639000000, 'type': 'ns'}, 'value': {'value': False, 'type': 'bool'}, 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {'value': 9223372036854775807, 'type': 'bigint'}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'binaryTagValue', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}}}",
|
||||
};
|
||||
int32_t ret = smlProcess(info, (char**)sql, sizeof(sql)/sizeof(sql[0]));
|
||||
ASSERT_NE(ret, 0);
|
||||
|
||||
destroyRequest(request);
|
||||
smlDestroyInfo(info);
|
||||
}
|
||||
|
||||
TEST(testCase, sml_TD15742_Test) {
|
||||
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
ASSERT_NE(taos, nullptr);
|
||||
|
||||
TAOS_RES* pRes = taos_query(taos, "create database if not exists TD15742");
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(taos, "use TD15742");
|
||||
taos_free_result(pRes);
|
||||
|
||||
SRequestObj *request = (SRequestObj *)createRequest((STscObj*)taos, NULL, NULL, TSDB_SQL_INSERT);
|
||||
ASSERT_NE(request, nullptr);
|
||||
|
||||
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
|
||||
ASSERT_NE(info, nullptr);
|
||||
|
||||
const char *sql[] = {
|
||||
"zgzbix 1626006833641 False id=zgzbix_992_38861 t0=t t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7=\"binaryTagValue\" t8=L\"ncharTagValue\"",
|
||||
};
|
||||
int ret = smlProcess(info, (char**)sql, sizeof(sql)/sizeof(sql[0]));
|
||||
ASSERT_EQ(ret, 0);
|
||||
|
||||
destroyRequest(request);
|
||||
smlDestroyInfo(info);
|
||||
}
@@ -122,10 +122,14 @@ int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, con
dataLen = 0;
} else if (*pData == TSDB_DATA_TYPE_NCHAR) {
dataLen = varDataTLen(pData + CHAR_BYTES);
} else if (*pData == TSDB_DATA_TYPE_BIGINT || *pData == TSDB_DATA_TYPE_DOUBLE) {
dataLen = LONG_BYTES;
} else if (*pData == TSDB_DATA_TYPE_DOUBLE) {
dataLen = DOUBLE_BYTES;
} else if (*pData == TSDB_DATA_TYPE_BOOL) {
dataLen = CHAR_BYTES;
} else if (*pData == TSDB_DATA_TYPE_JSON) {
dataLen = kvRowLen(pData + CHAR_BYTES);
} else {
ASSERT(0);
}
dataLen += CHAR_BYTES;
}
@@ -40,11 +40,11 @@ bool tsPrintAuth = false;
// multi process
int32_t tsMultiProcess = 0;
int32_t tsMnodeShmSize = TSDB_MAX_WAL_SIZE * 2 + 128;
int32_t tsVnodeShmSize = TSDB_MAX_WAL_SIZE * 10 + 128;
int32_t tsQnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 128;
int32_t tsSnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 128;
int32_t tsBnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 128;
int32_t tsMnodeShmSize = TSDB_MAX_WAL_SIZE * 2 + 1024;
int32_t tsVnodeShmSize = TSDB_MAX_WAL_SIZE * 10 + 1024;
int32_t tsQnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 1024;
int32_t tsSnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 1024;
int32_t tsBnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 1024;
int32_t tsNumOfShmThreads = 1;
// queue & threads

@@ -380,11 +380,11 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddBool(pCfg, "deadLockKillQuery", tsDeadLockKillQuery, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "multiProcess", tsMultiProcess, 0, 2, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "mnodeShmSize", tsMnodeShmSize, TSDB_MAX_WAL_SIZE + 128, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "vnodeShmSize", tsVnodeShmSize, TSDB_MAX_WAL_SIZE + 128, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "qnodeShmSize", tsQnodeShmSize, TSDB_MAX_WAL_SIZE + 128, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "snodeShmSize", tsSnodeShmSize, TSDB_MAX_WAL_SIZE + 128, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "bnodeShmSize", tsBnodeShmSize, TSDB_MAX_WAL_SIZE + 128, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "mnodeShmSize", tsMnodeShmSize, TSDB_MAX_WAL_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "vnodeShmSize", tsVnodeShmSize, TSDB_MAX_WAL_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "qnodeShmSize", tsQnodeShmSize, TSDB_MAX_WAL_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "snodeShmSize", tsSnodeShmSize, TSDB_MAX_WAL_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "bnodeShmSize", tsBnodeShmSize, TSDB_MAX_WAL_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "mumOfShmThreads", tsNumOfShmThreads, 1, 1024, 0) != 0) return -1;
tsNumOfRpcThreads = tsNumOfCores / 2;
@@ -2627,6 +2627,35 @@ int32_t tDeserializeSMDropTopicReq(void *buf, int32_t bufLen, SMDropTopicReq *pR
return 0;
}

int32_t tSerializeSMDropCgroupReq(void *buf, int32_t bufLen, SMDropCgroupReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->topic) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->cgroup) < 0) return -1;
if (tEncodeI8(&encoder, pReq->igNotExists) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}

int32_t tDeserializeSMDropCgroupReq(void *buf, int32_t bufLen, SMDropCgroupReq *pReq) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->topic) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->cgroup) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->igNotExists) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}

int32_t tSerializeSCMCreateTopicReq(void *buf, int32_t bufLen, const SCMCreateTopicReq *pReq) {
int32_t sqlLen = 0;
int32_t astLen = 0;
@@ -36,9 +36,9 @@ typedef struct SBnodeMgmt {
// bmHandle.c
SArray *bmGetMsgHandles();
int32_t bmProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pReq);
int32_t bmProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pReq);
int32_t bmProcessGetMonBmInfoReq(SBnodeMgmt *pMgmt, SRpcMsg *pReq);
int32_t bmProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg);
int32_t bmProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg);
int32_t bmProcessGetMonBmInfoReq(SBnodeMgmt *pMgmt, SRpcMsg *pMsg);
// bmWorker.c
int32_t bmStartWorker(SBnodeMgmt *pMgmt);
@ -18,7 +18,7 @@
|
|||
|
||||
void bmGetMonitorInfo(SBnodeMgmt *pMgmt, SMonBmInfo *bmInfo) {}
|
||||
|
||||
int32_t bmProcessGetMonBmInfoReq(SBnodeMgmt *pMgmt, SRpcMsg *pReq) {
|
||||
int32_t bmProcessGetMonBmInfoReq(SBnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
||||
SMonBmInfo bmInfo = {0};
|
||||
bmGetMonitorInfo(pMgmt, &bmInfo);
|
||||
dmGetMonitorSystemInfo(&bmInfo.sys);
|
||||
|
@ -37,17 +37,15 @@ int32_t bmProcessGetMonBmInfoReq(SBnodeMgmt *pMgmt, SRpcMsg *pReq) {
|
|||
}
|
||||
|
||||
tSerializeSMonBmInfo(pRsp, rspLen, &bmInfo);
|
||||
pReq->info.rsp = pRsp;
|
||||
pReq->info.rspLen = rspLen;
|
||||
pMsg->info.rsp = pRsp;
|
||||
pMsg->info.rspLen = rspLen;
|
||||
tFreeSMonBmInfo(&bmInfo);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t bmProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
|
||||
SRpcMsg *pReq = pMsg;
|
||||
|
||||
SDCreateBnodeReq createReq = {0};
|
||||
if (tDeserializeSCreateDropMQSBNodeReq(pReq->pCont, pReq->contLen, &createReq) != 0) {
|
||||
if (tDeserializeSCreateDropMQSBNodeReq(pMsg->pCont, pMsg->contLen, &createReq) != 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
return -1;
|
||||
}
|
||||
|
@ -68,10 +66,8 @@ int32_t bmProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
|
|||
}
|
||||
|
||||
int32_t bmProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
|
||||
SRpcMsg *pReq = pMsg;
|
||||
|
||||
SDDropBnodeReq dropReq = {0};
|
||||
if (tDeserializeSCreateDropMQSBNodeReq(pReq->pCont, pReq->contLen, &dropReq) != 0) {
|
||||
if (tDeserializeSCreateDropMQSBNodeReq(pMsg->pCont, pMsg->contLen, &dropReq) != 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
return -1;
|
||||
}
|
||||
|
|
|
@ -41,18 +41,18 @@ typedef struct SMnodeMgmt {
|
|||
|
||||
// mmFile.c
|
||||
int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed);
|
||||
int32_t mmWriteFile(SMnodeMgmt *pMgmt, SDCreateMnodeReq *pReq, bool deployed);
|
||||
int32_t mmWriteFile(SMnodeMgmt *pMgmt, SDCreateMnodeReq *pMsg, bool deployed);
|
||||
|
||||
// mmInt.c
|
||||
int32_t mmAlter(SMnodeMgmt *pMgmt, SDAlterMnodeReq *pReq);
|
||||
int32_t mmAlter(SMnodeMgmt *pMgmt, SDAlterMnodeReq *pMsg);
|
||||
|
||||
// mmHandle.c
|
||||
SArray *mmGetMsgHandles();
|
||||
int32_t mmProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg);
|
||||
int32_t mmProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg);
|
||||
int32_t mmProcessAlterReq(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
|
||||
int32_t mmProcessGetMonitorInfoReq(SMnodeMgmt *pMgmt, SRpcMsg *pReq);
|
||||
int32_t mmProcessGetLoadsReq(SMnodeMgmt *pMgmt, SRpcMsg *pReq);
|
||||
int32_t mmProcessGetMonitorInfoReq(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
|
||||
int32_t mmProcessGetLoadsReq(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
|
||||
|
||||
// mmWorker.c
|
||||
int32_t mmStartWorker(SMnodeMgmt *pMgmt);
|
||||
|
@ -62,10 +62,10 @@ int32_t mmPutNodeMsgToSyncQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
|
|||
int32_t mmPutNodeMsgToReadQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
|
||||
int32_t mmPutNodeMsgToQueryQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
|
||||
int32_t mmPutNodeMsgToMonitorQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
|
||||
int32_t mmPutRpcMsgToQueryQueue(SMnodeMgmt *pMgmt, SRpcMsg *pRpc);
|
||||
int32_t mmPutRpcMsgToReadQueue(SMnodeMgmt *pMgmt, SRpcMsg *pRpc);
|
||||
int32_t mmPutRpcMsgToWriteQueue(SMnodeMgmt *pMgmt, SRpcMsg *pRpc);
|
||||
int32_t mmPutRpcMsgToSyncQueue(SMnodeMgmt *pMgmt, SRpcMsg *pRpc);
|
||||
int32_t mmPutRpcMsgToQueryQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
|
||||
int32_t mmPutRpcMsgToReadQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
|
||||
int32_t mmPutRpcMsgToWriteQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
|
||||
int32_t mmPutRpcMsgToSyncQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -104,7 +104,7 @@ _OVER:
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t mmWriteFile(SMnodeMgmt *pMgmt, SDCreateMnodeReq *pReq, bool deployed) {
|
||||
int32_t mmWriteFile(SMnodeMgmt *pMgmt, SDCreateMnodeReq *pMsg, bool deployed) {
|
||||
char file[PATH_MAX] = {0};
|
||||
char realfile[PATH_MAX] = {0};
|
||||
snprintf(file, sizeof(file), "%s%smnode.json.bak", pMgmt->path, TD_DIRSEP);
|
||||
|
@ -124,11 +124,11 @@ int32_t mmWriteFile(SMnodeMgmt *pMgmt, SDCreateMnodeReq *pReq, bool deployed) {
|
|||
len += snprintf(content + len, maxLen - len, "{\n");
|
||||
len += snprintf(content + len, maxLen - len, " \"mnodes\": [{\n");
|
||||
|
||||
int8_t replica = (pReq != NULL ? pReq->replica : pMgmt->replica);
|
||||
int8_t replica = (pMsg != NULL ? pMsg->replica : pMgmt->replica);
|
||||
for (int32_t i = 0; i < replica; ++i) {
|
||||
SReplica *pReplica = &pMgmt->replicas[i];
|
||||
if (pReq != NULL) {
|
||||
pReplica = &pReq->replicas[i];
|
||||
if (pMsg != NULL) {
|
||||
pReplica = &pMsg->replicas[i];
|
||||
}
|
||||
len += snprintf(content + len, maxLen - len, " \"id\": %d,\n", pReplica->id);
|
||||
len += snprintf(content + len, maxLen - len, " \"fqdn\": \"%s\",\n", pReplica->fqdn);
|
||||
|
|
|
@ -25,7 +25,7 @@ void mmGetMnodeLoads(SMnodeMgmt *pMgmt, SMonMloadInfo *pInfo) {
|
|||
mndGetLoad(pMgmt->pMnode, &pInfo->load);
|
||||
}
|
||||
|
||||
int32_t mmProcessGetMonitorInfoReq(SMnodeMgmt *pMgmt, SRpcMsg *pReq) {
|
||||
int32_t mmProcessGetMonitorInfoReq(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
||||
SMonMmInfo mmInfo = {0};
|
||||
mmGetMonitorInfo(pMgmt, &mmInfo);
|
||||
dmGetMonitorSystemInfo(&mmInfo.sys);
|
||||
|
@ -44,13 +44,13 @@ int32_t mmProcessGetMonitorInfoReq(SMnodeMgmt *pMgmt, SRpcMsg *pReq) {
|
|||
}
|
||||
|
||||
tSerializeSMonMmInfo(pRsp, rspLen, &mmInfo);
|
||||
pReq->info.rsp = pRsp;
|
||||
pReq->info.rspLen = rspLen;
|
||||
pMsg->info.rsp = pRsp;
|
||||
pMsg->info.rspLen = rspLen;
|
||||
tFreeSMonMmInfo(&mmInfo);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t mmProcessGetLoadsReq(SMnodeMgmt *pMgmt, SRpcMsg *pReq) {
|
||||
int32_t mmProcessGetLoadsReq(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
||||
SMonMloadInfo mloads = {0};
|
||||
mmGetMnodeLoads(pMgmt, &mloads);
|
||||
|
||||
|
@ -67,16 +67,14 @@ int32_t mmProcessGetLoadsReq(SMnodeMgmt *pMgmt, SRpcMsg *pReq) {
|
|||
}
|
||||
|
||||
tSerializeSMonMloadInfo(pRsp, rspLen, &mloads);
|
||||
pReq->info.rsp = pRsp;
|
||||
pReq->info.rspLen = rspLen;
|
||||
pMsg->info.rsp = pRsp;
|
||||
pMsg->info.rspLen = rspLen;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t mmProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
|
||||
SRpcMsg *pReq = pMsg;
|
||||
|
||||
SDCreateMnodeReq createReq = {0};
|
||||
if (tDeserializeSDCreateMnodeReq(pReq->pCont, pReq->contLen, &createReq) != 0) {
|
||||
if (tDeserializeSDCreateMnodeReq(pMsg->pCont, pMsg->contLen, &createReq) != 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
return -1;
|
||||
}
|
||||
|
@ -101,10 +99,8 @@ int32_t mmProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
|
|||
}
|
||||
|
||||
int32_t mmProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
|
||||
SRpcMsg *pReq = pMsg;
|
||||
|
||||
SDDropMnodeReq dropReq = {0};
|
||||
if (tDeserializeSCreateDropMQSBNodeReq(pReq->pCont, pReq->contLen, &dropReq) != 0) {
|
||||
if (tDeserializeSCreateDropMQSBNodeReq(pMsg->pCont, pMsg->contLen, &dropReq) != 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
return -1;
|
||||
}
|
||||
|
@ -129,10 +125,8 @@ int32_t mmProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
|
|||
}
|
||||
|
||||
int32_t mmProcessAlterReq(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
||||
SRpcMsg *pReq = pMsg;
|
||||
|
||||
SDAlterMnodeReq alterReq = {0};
|
||||
if (tDeserializeSDCreateMnodeReq(pReq->pCont, pReq->contLen, &alterReq) != 0) {
|
||||
if (tDeserializeSDCreateMnodeReq(pMsg->pCont, pMsg->contLen, &alterReq) != 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
return -1;
|
||||
}
|
||||
|
|
|
@ -87,9 +87,9 @@ static int32_t mmBuildOptionFromReq(SMnodeMgmt *pMgmt, SMnodeOpt *pOption, SDCre
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t mmAlter(SMnodeMgmt *pMgmt, SDAlterMnodeReq *pReq) {
|
||||
int32_t mmAlter(SMnodeMgmt *pMgmt, SDAlterMnodeReq *pMsg) {
|
||||
SMnodeOpt option = {0};
|
||||
if (mmBuildOptionFromReq(pMgmt, &option, pReq) != 0) {
|
||||
if (mmBuildOptionFromReq(pMgmt, &option, pMsg) != 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -98,7 +98,7 @@ int32_t mmAlter(SMnodeMgmt *pMgmt, SDAlterMnodeReq *pReq) {
|
|||
}
|
||||
|
||||
bool deployed = true;
|
||||
if (mmWriteFile(pMgmt, pReq, deployed) != 0) {
|
||||
if (mmWriteFile(pMgmt, pMsg, deployed) != 0) {
|
||||
dError("failed to write mnode file since %s", terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
@ -135,7 +135,7 @@ static int32_t mmOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
|
|||
pMgmt->msgCb.queueFps[QUERY_QUEUE] = (PutToQueueFp)mmPutRpcMsgToQueryQueue;
|
||||
pMgmt->msgCb.queueFps[READ_QUEUE] = (PutToQueueFp)mmPutRpcMsgToReadQueue;
|
||||
pMgmt->msgCb.queueFps[WRITE_QUEUE] = (PutToQueueFp)mmPutRpcMsgToWriteQueue;
|
||||
pMgmt->msgCb.queueFps[SYNC_QUEUE] = (PutToQueueFp)mmPutRpcMsgToWriteQueue;
|
||||
pMgmt->msgCb.queueFps[SYNC_QUEUE] = (PutToQueueFp)mmPutRpcMsgToSyncQueue;
|
||||
pMgmt->msgCb.mgmt = pMgmt;
|
||||
|
||||
bool deployed = false;
|
||||
|
|
|
@@ -126,7 +126,7 @@ int32_t mmPutRpcMsgToReadQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) {
return mmPutRpcMsgToWorker(&pMgmt->readWorker, pMsg);
}
int32_t mmPutMsgToSyncQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) { return mmPutRpcMsgToWorker(&pMgmt->syncWorker, pMsg); }
int32_t mmPutRpcMsgToSyncQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) { return mmPutRpcMsgToWorker(&pMgmt->syncWorker, pMsg); }
int32_t mmStartWorker(SMnodeMgmt *pMgmt) {
SSingleWorkerCfg qCfg = {
@@ -39,7 +39,7 @@ typedef struct SQnodeMgmt {
SArray *qmGetMsgHandles();
int32_t qmProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg);
int32_t qmProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg);
int32_t qmProcessGetMonitorInfoReq(SQnodeMgmt *pMgmt, SRpcMsg *pReq);
int32_t qmProcessGetMonitorInfoReq(SQnodeMgmt *pMgmt, SRpcMsg *pMsg);
// qmWorker.c
int32_t qmPutRpcMsgToQueryQueue(SQnodeMgmt *pMgmt, SRpcMsg *pMsg);
@ -18,7 +18,7 @@
|
|||
|
||||
void qmGetMonitorInfo(SQnodeMgmt *pMgmt, SMonQmInfo *qmInfo) {}
|
||||
|
||||
int32_t qmProcessGetMonitorInfoReq(SQnodeMgmt *pMgmt, SRpcMsg *pReq) {
|
||||
int32_t qmProcessGetMonitorInfoReq(SQnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
||||
SMonQmInfo qmInfo = {0};
|
||||
qmGetMonitorInfo(pMgmt, &qmInfo);
|
||||
dmGetMonitorSystemInfo(&qmInfo.sys);
|
||||
|
@ -37,17 +37,15 @@ int32_t qmProcessGetMonitorInfoReq(SQnodeMgmt *pMgmt, SRpcMsg *pReq) {
|
|||
}
|
||||
|
||||
tSerializeSMonQmInfo(pRsp, rspLen, &qmInfo);
|
||||
pReq->info.rsp = pRsp;
|
||||
pReq->info.rspLen = rspLen;
|
||||
pMsg->info.rsp = pRsp;
|
||||
pMsg->info.rspLen = rspLen;
|
||||
tFreeSMonQmInfo(&qmInfo);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t qmProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
|
||||
SRpcMsg *pReq = pMsg;
|
||||
|
||||
SDCreateQnodeReq createReq = {0};
|
||||
if (tDeserializeSCreateDropMQSBNodeReq(pReq->pCont, pReq->contLen, &createReq) != 0) {
|
||||
if (tDeserializeSCreateDropMQSBNodeReq(pMsg->pCont, pMsg->contLen, &createReq) != 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
return -1;
|
||||
}
|
||||
|
@ -68,10 +66,8 @@ int32_t qmProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
|
|||
}
|
||||
|
||||
int32_t qmProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
|
||||
SRpcMsg *pReq = pMsg;
|
||||
|
||||
SDDropQnodeReq dropReq = {0};
|
||||
if (tDeserializeSCreateDropMQSBNodeReq(pReq->pCont, pReq->contLen, &dropReq) != 0) {
|
||||
if (tDeserializeSCreateDropMQSBNodeReq(pMsg->pCont, pMsg->contLen, &dropReq) != 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
return -1;
|
||||
}
|
||||
|
|
|
@@ -40,7 +40,7 @@ typedef struct SSnodeMgmt {
SArray *smGetMsgHandles();
int32_t smProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg);
int32_t smProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg);
int32_t smProcessGetMonitorInfoReq(SSnodeMgmt *pMgmt, SRpcMsg *pReq);
int32_t smProcessGetMonitorInfoReq(SSnodeMgmt *pMgmt, SRpcMsg *pMsg);
// smWorker.c
int32_t smStartWorker(SSnodeMgmt *pMgmt);
@ -18,7 +18,7 @@
|
|||
|
||||
void smGetMonitorInfo(SSnodeMgmt *pMgmt, SMonSmInfo *smInfo) {}
|
||||
|
||||
int32_t smProcessGetMonitorInfoReq(SSnodeMgmt *pMgmt, SRpcMsg *pReq) {
|
||||
int32_t smProcessGetMonitorInfoReq(SSnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
||||
SMonSmInfo smInfo = {0};
|
||||
smGetMonitorInfo(pMgmt, &smInfo);
|
||||
dmGetMonitorSystemInfo(&smInfo.sys);
|
||||
|
@ -37,17 +37,15 @@ int32_t smProcessGetMonitorInfoReq(SSnodeMgmt *pMgmt, SRpcMsg *pReq) {
|
|||
}
|
||||
|
||||
tSerializeSMonSmInfo(pRsp, rspLen, &smInfo);
|
||||
pReq->info.rsp = pRsp;
|
||||
pReq->info.rspLen = rspLen;
|
||||
pMsg->info.rsp = pRsp;
|
||||
pMsg->info.rspLen = rspLen;
|
||||
tFreeSMonSmInfo(&smInfo);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t smProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
|
||||
SRpcMsg *pReq = pMsg;
|
||||
|
||||
SDCreateSnodeReq createReq = {0};
|
||||
if (tDeserializeSCreateDropMQSBNodeReq(pReq->pCont, pReq->contLen, &createReq) != 0) {
|
||||
if (tDeserializeSCreateDropMQSBNodeReq(pMsg->pCont, pMsg->contLen, &createReq) != 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
return -1;
|
||||
}
|
||||
|
@ -68,10 +66,8 @@ int32_t smProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
|
|||
}
|
||||
|
||||
int32_t smProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
|
||||
SRpcMsg *pReq = pMsg;
|
||||
|
||||
SDDropSnodeReq dropReq = {0};
|
||||
if (tDeserializeSCreateDropMQSBNodeReq(pReq->pCont, pReq->contLen, &dropReq) != 0) {
|
||||
if (tDeserializeSCreateDropMQSBNodeReq(pMsg->pCont, pMsg->contLen, &dropReq) != 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
return -1;
|
||||
}
|
||||
|
|
|
@@ -84,10 +84,10 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode);
// vmHandle.c
SArray *vmGetMsgHandles();
int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pReq);
int32_t vmProcessDropVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pReq);
int32_t vmProcessGetMonitorInfoReq(SVnodeMgmt *pMgmt, SRpcMsg *pReq);
int32_t vmProcessGetLoadsReq(SVnodeMgmt *pMgmt, SRpcMsg *pReq);
int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmProcessDropVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmProcessGetMonitorInfoReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmProcessGetLoadsReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
// vmFile.c
int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes);
@ -82,7 +82,7 @@ void vmGetMonitorInfo(SVnodeMgmt *pMgmt, SMonVmInfo *pInfo) {
|
|||
taosArrayDestroy(pVloads);
|
||||
}
|
||||
|
||||
int32_t vmProcessGetMonitorInfoReq(SVnodeMgmt *pMgmt, SRpcMsg *pReq) {
|
||||
int32_t vmProcessGetMonitorInfoReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
||||
SMonVmInfo vmInfo = {0};
|
||||
vmGetMonitorInfo(pMgmt, &vmInfo);
|
||||
dmGetMonitorSystemInfo(&vmInfo.sys);
|
||||
|
@ -101,13 +101,13 @@ int32_t vmProcessGetMonitorInfoReq(SVnodeMgmt *pMgmt, SRpcMsg *pReq) {
|
|||
}
|
||||
|
||||
tSerializeSMonVmInfo(pRsp, rspLen, &vmInfo);
|
||||
pReq->info.rsp = pRsp;
|
||||
pReq->info.rspLen = rspLen;
|
||||
pMsg->info.rsp = pRsp;
|
||||
pMsg->info.rspLen = rspLen;
|
||||
tFreeSMonVmInfo(&vmInfo);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t vmProcessGetLoadsReq(SVnodeMgmt *pMgmt, SRpcMsg *pReq) {
|
||||
int32_t vmProcessGetLoadsReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
||||
SMonVloadInfo vloads = {0};
|
||||
vmGetVnodeLoads(pMgmt, &vloads);
|
||||
|
||||
|
@ -124,8 +124,8 @@ int32_t vmProcessGetLoadsReq(SVnodeMgmt *pMgmt, SRpcMsg *pReq) {
|
|||
}
|
||||
|
||||
tSerializeSMonVloadInfo(pRsp, rspLen, &vloads);
|
||||
pReq->info.rsp = pRsp;
|
||||
pReq->info.rspLen = rspLen;
|
||||
pMsg->info.rsp = pRsp;
|
||||
pMsg->info.rspLen = rspLen;
|
||||
tFreeSMonVloadInfo(&vloads);
|
||||
return 0;
|
||||
}
|
||||
|
@ -174,12 +174,11 @@ static void vmGenerateWrapperCfg(SVnodeMgmt *pMgmt, SCreateVnodeReq *pCreate, SW
|
|||
}
|
||||
|
||||
int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
||||
SRpcMsg *pReq = pMsg;
|
||||
SCreateVnodeReq createReq = {0};
|
||||
int32_t code = -1;
|
||||
char path[TSDB_FILENAME_LEN] = {0};
|
||||
|
||||
if (tDeserializeSCreateVnodeReq(pReq->pCont, pReq->contLen, &createReq) != 0) {
|
||||
if (tDeserializeSCreateVnodeReq(pMsg->pCont, pMsg->contLen, &createReq) != 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
return -1;
|
||||
}
|
||||
|
@ -242,9 +241,8 @@ _OVER:
|
|||
}
|
||||
|
||||
int32_t vmProcessDropVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
||||
SRpcMsg *pReq = pMsg;
|
||||
SDropVnodeReq dropReq = {0};
|
||||
if (tDeserializeSDropVnodeReq(pReq->pCont, pReq->contLen, &dropReq) != 0) {
|
||||
if (tDeserializeSDropVnodeReq(pMsg->pCont, pMsg->contLen, &dropReq) != 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
return -1;
|
||||
}
|
||||
|
|
|
@ -22,21 +22,19 @@
|
|||
static inline void vmSendRsp(SRpcMsg *pMsg, int32_t code) {
|
||||
SRpcMsg rsp = {
|
||||
.code = code,
|
||||
.info = pMsg->info,
|
||||
.pCont = pMsg->info.rsp,
|
||||
.contLen = pMsg->info.rspLen,
|
||||
.info = pMsg->info,
|
||||
};
|
||||
tmsgSendRsp(&rsp);
|
||||
}
|
||||
|
||||
static void vmProcessMgmtMonitorQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
|
||||
SVnodeMgmt *pMgmt = pInfo->ahandle;
|
||||
int32_t code = -1;
|
||||
dTrace("msg:%p, get from vnode queue, type:%s", pMsg, TMSG_INFO(pMsg->msgType));
|
||||
|
||||
int32_t code = -1;
|
||||
tmsg_t msgType = pMsg->msgType;
|
||||
dTrace("msg:%p, get from vnode queue, type:%s", pMsg, TMSG_INFO(msgType));
|
||||
|
||||
switch (msgType) {
|
||||
switch (pMsg->msgType) {
|
||||
case TDMT_MON_VM_INFO:
|
||||
code = vmProcessGetMonitorInfoReq(pMgmt, pMsg);
|
||||
break;
|
||||
|
@ -54,7 +52,7 @@ static void vmProcessMgmtMonitorQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
|
|||
dError("msg:%p, not processed in vnode queue", pMsg);
|
||||
}
|
||||
|
||||
if (msgType & 1u) {
|
||||
if (IsReq(pMsg)) {
|
||||
if (code != 0 && terrno != 0) code = terrno;
|
||||
vmSendRsp(pMsg, code);
|
||||
}
|
||||
|
@ -96,7 +94,6 @@ static void vmProcessFetchQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
|
|||
|
||||
static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
|
||||
SVnodeObj *pVnode = pInfo->ahandle;
|
||||
|
||||
SArray *pArray = taosArrayInit(numOfMsgs, sizeof(SRpcMsg *));
|
||||
if (pArray == NULL) {
|
||||
dError("failed to process %d msgs in write-queue since %s", numOfMsgs, terrstr());
|
||||
|
@ -116,7 +113,7 @@ static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
|
|||
|
||||
for (int i = 0; i < taosArrayGetSize(pArray); i++) {
|
||||
SRpcMsg *pMsg = *(SRpcMsg **)taosArrayGet(pArray, i);
|
||||
SRpcMsg rsp = {.info = pMsg->info, .pCont = NULL, .contLen = 0};
|
||||
SRpcMsg rsp = {.info = pMsg->info};
|
||||
|
||||
int32_t ret = syncPropose(vnodeGetSyncHandle(pVnode->pImpl), pMsg, false);
|
||||
if (ret == TAOS_SYNC_PROPOSE_NOT_LEADER) {
|
||||
|
@ -126,12 +123,10 @@ static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
|
|||
syncGetEpSet(vnodeGetSyncHandle(pVnode->pImpl), &newEpSet);
|
||||
newEpSet.inUse = (newEpSet.inUse + 1) % newEpSet.numOfEps;
|
||||
tmsgSendRedirectRsp(&rsp, &newEpSet);
|
||||
|
||||
} else if (ret == TAOS_SYNC_PROPOSE_OTHER_ERROR) {
|
||||
rsp.code = TSDB_CODE_SYN_INTERNAL_ERROR;
|
||||
tmsgSendRsp(&rsp);
|
||||
} else if (ret == TAOS_SYNC_PROPOSE_SUCCESS) {
|
||||
// ok
|
||||
// send response in applyQ
|
||||
} else {
|
||||
assert(0);
|
||||
|
@ -150,16 +145,13 @@ static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
|
|||
|
||||
static void vmProcessApplyQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
|
||||
SVnodeObj *pVnode = pInfo->ahandle;
|
||||
SRpcMsg *pMsg = NULL;
|
||||
SRpcMsg rsp;
|
||||
|
||||
for (int32_t i = 0; i < numOfMsgs; ++i) {
|
||||
SRpcMsg *pMsg = NULL;
|
||||
taosGetQitem(qall, (void **)&pMsg);
|
||||
|
||||
// init response rpc msg
|
||||
rsp.code = 0;
|
||||
rsp.pCont = NULL;
|
||||
rsp.contLen = 0;
|
||||
SRpcMsg rsp = {0};
|
||||
|
||||
// get original rpc msg
|
||||
assert(pMsg->msgType == TDMT_VND_SYNC_APPLY_MSG);
|
||||
|
@ -178,7 +170,6 @@ static void vmProcessApplyQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
|
|||
rpcFreeCont(originalRpcMsg.pCont);
|
||||
|
||||
// if leader, send response
|
||||
// if (pMsg->rpcMsg.handle != NULL && pMsg->rpcMsg.ahandle != NULL) {
|
||||
if (pMsg->info.handle != NULL) {
|
||||
rsp.info = pMsg->info;
|
||||
tmsgSendRsp(&rsp);
|
||||
|
@ -191,21 +182,19 @@ static void vmProcessApplyQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
|
|||
|
||||
static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
|
||||
SVnodeObj *pVnode = pInfo->ahandle;
|
||||
SRpcMsg *pMsg = NULL;
|
||||
|
||||
for (int32_t i = 0; i < numOfMsgs; ++i) {
|
||||
SRpcMsg *pMsg = NULL;
|
||||
taosGetQitem(qall, (void **)&pMsg);
|
||||
|
||||
// todo
|
||||
SRpcMsg *pRsp = NULL;
|
||||
int32_t ret = vnodeProcessSyncReq(pVnode->pImpl, pMsg, &pRsp);
|
||||
if (ret != 0) {
|
||||
// if leader, send response
|
||||
int32_t code = vnodeProcessSyncReq(pVnode->pImpl, pMsg, NULL);
|
||||
if (code != 0) {
|
||||
if (pMsg->info.handle != NULL) {
|
||||
SRpcMsg rsp = {0};
|
||||
rsp.code = terrno;
|
||||
rsp.info = pMsg->info;
|
||||
dTrace("msg:%p, process sync queue error since code:%s", pMsg, terrstr());
|
||||
SRpcMsg rsp = {
|
||||
.code = (terrno < 0) ? terrno : code,
|
||||
.info = pMsg->info,
|
||||
};
|
||||
dTrace("msg:%p, failed to process sync queue since %s", pMsg, terrstr());
|
||||
tmsgSendRsp(&rsp);
|
||||
}
|
||||
}
|
||||
|
@ -217,9 +206,9 @@ static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOf
|
|||
|
||||
static void vmProcessMergeQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
|
||||
SVnodeObj *pVnode = pInfo->ahandle;
|
||||
SRpcMsg *pMsg = NULL;
|
||||
|
||||
for (int32_t i = 0; i < numOfMsgs; ++i) {
|
||||
SRpcMsg *pMsg = NULL;
|
||||
taosGetQitem(qall, (void **)&pMsg);
|
||||
|
||||
dTrace("msg:%p, get from vnode-merge queue", pMsg);
|
||||
|
@ -309,22 +298,24 @@ int32_t vmPutNodeMsgToMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
|||
|
||||
int32_t vmPutNodeMsgToMonitorQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
||||
SSingleWorker *pWorker = &pMgmt->monitorWorker;
|
||||
|
||||
dTrace("msg:%p, put into vnode-monitor worker, type:%s", pMsg, TMSG_INFO(pMsg->msgType));
|
||||
taosWriteQitem(pWorker->queue, pMsg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pRpc, EQueueType qtype) {
|
||||
SMsgHead *pHead = pRpc->pCont;
|
||||
|
||||
SMsgHead *pHead = pRpc->pCont;
|
||||
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, pHead->vgId);
|
||||
if (pVnode == NULL) return -1;
|
||||
|
||||
SRpcMsg *pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM);
|
||||
int32_t code = 0;
|
||||
|
||||
if (pMsg != NULL) {
|
||||
if (pMsg == NULL) {
|
||||
rpcFreeCont(pRpc->pCont);
|
||||
pRpc->pCont = NULL;
|
||||
code = -1;
|
||||
} else {
|
||||
memcpy(pMsg, pRpc, sizeof(SRpcMsg));
|
||||
switch (qtype) {
|
||||
case WRITE_QUEUE:
|
||||
|
@ -429,7 +420,7 @@ int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
dDebug("vgId:%d, vnode queue is alloced", pVnode->vgId);
|
||||
dDebug("vgId:%d, queue is alloced", pVnode->vgId);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -446,7 +437,7 @@ void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
|
|||
pVnode->pQueryQ = NULL;
|
||||
pVnode->pFetchQ = NULL;
|
||||
pVnode->pMergeQ = NULL;
|
||||
dDebug("vgId:%d, vnode queue is freed", pVnode->vgId);
|
||||
dDebug("vgId:%d, queue is freed", pVnode->vgId);
|
||||
}
|
||||
|
||||
int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
|
||||
|
@ -497,7 +488,7 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
|
|||
.param = pMgmt,
|
||||
};
|
||||
if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) {
|
||||
dError("failed to start mnode vnode-monitor worker since %s", terrstr());
|
||||
dError("failed to start vnode-monitor worker since %s", terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
|
|
@@ -161,10 +161,12 @@ void dmGetVnodeLoads(SMonVloadInfo *pInfo) {
void dmGetMnodeLoads(SMonMloadInfo *pInfo) {
SDnode *pDnode = dmInstance();
SMgmtWrapper *pWrapper = &pDnode->wrappers[MNODE];
if (tsMultiProcess) {
dmSendLocalRecv(pDnode, TDMT_MON_MM_LOAD, tDeserializeSMonMloadInfo, pInfo);
} else if (pWrapper->pMgmt != NULL) {
mmGetMnodeLoads(pWrapper->pMgmt, pInfo);
if (dmMarkWrapper(pWrapper) == 0) {
if (tsMultiProcess) {
dmSendLocalRecv(pDnode, TDMT_MON_MM_LOAD, tDeserializeSMonMloadInfo, pInfo);
} else if (pWrapper->pMgmt != NULL) {
mmGetMnodeLoads(pWrapper->pMgmt, pInfo);
}
dmReleaseWrapper(pWrapper);
}
dmReleaseWrapper(pWrapper);
}
@@ -103,7 +103,7 @@ static inline int32_t dmPushToProcQueue(SProc *proc, SProcQueue *queue, SRpcMsg
return -1;
}
if (ftype == DND_FUNC_REQ && IsReq(pMsg) && pMsg->code == 0 && handle != 0) {
if (ftype == DND_FUNC_REQ && IsReq(pMsg) && pMsg->code == 0 && handle != 0 && pMsg->info.noResp == 0) {
if (taosHashPut(proc->hash, &handle, sizeof(int64_t), &pMsg->info, sizeof(SRpcConnInfo)) != 0) {
taosThreadMutexUnlock(&queue->mutex);
return -1;
@ -49,9 +49,9 @@ int32_t dmProcessNodeMsg(SMgmtWrapper *pWrapper, SRpcMsg *pMsg) {
|
|||
}
|
||||
|
||||
static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
|
||||
SDnodeTrans *pTrans = &pDnode->trans;
|
||||
SDnodeTrans * pTrans = &pDnode->trans;
|
||||
int32_t code = -1;
|
||||
SRpcMsg *pMsg = NULL;
|
||||
SRpcMsg * pMsg = NULL;
|
||||
bool needRelease = false;
|
||||
SDnodeHandle *pHandle = &pTrans->msgHandles[TMSG_INDEX(pRpc->msgType)];
|
||||
SMgmtWrapper *pWrapper = NULL;
|
||||
|
@ -179,11 +179,11 @@ int32_t dmInitMsgHandle(SDnode *pDnode) {
|
|||
|
||||
for (EDndNodeType ntype = DNODE; ntype < NODE_END; ++ntype) {
|
||||
SMgmtWrapper *pWrapper = &pDnode->wrappers[ntype];
|
||||
SArray *pArray = (*pWrapper->func.getHandlesFp)();
|
||||
SArray * pArray = (*pWrapper->func.getHandlesFp)();
|
||||
if (pArray == NULL) return -1;
|
||||
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) {
|
||||
SMgmtHandle *pMgmt = taosArrayGet(pArray, i);
|
||||
SMgmtHandle * pMgmt = taosArrayGet(pArray, i);
|
||||
SDnodeHandle *pHandle = &pTrans->msgHandles[TMSG_INDEX(pMgmt->msgType)];
|
||||
if (pMgmt->needCheckVgId) {
|
||||
pHandle->needCheckVgId = pMgmt->needCheckVgId;
|
||||
|
@ -200,94 +200,54 @@ int32_t dmInitMsgHandle(SDnode *pDnode) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void dmSendRpcRedirectRsp(const SRpcMsg *pReq) {
|
||||
SDnode *pDnode = dmInstance();
|
||||
SEpSet epSet = {0};
|
||||
dmGetMnodeEpSet(&pDnode->data, &epSet);
|
||||
|
||||
dDebug("RPC %p, req is redirected, num:%d use:%d", pReq->info.handle, epSet.numOfEps, epSet.inUse);
|
||||
for (int32_t i = 0; i < epSet.numOfEps; ++i) {
|
||||
dDebug("mnode index:%d %s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port);
|
||||
if (strcmp(epSet.eps[i].fqdn, tsLocalFqdn) == 0 && epSet.eps[i].port == tsServerPort) {
|
||||
epSet.inUse = (i + 1) % epSet.numOfEps;
|
||||
}
|
||||
|
||||
epSet.eps[i].port = htons(epSet.eps[i].port);
|
||||
}
|
||||
|
||||
SMEpSet msg = {.epSet = epSet};
|
||||
int32_t len = tSerializeSMEpSet(NULL, 0, &msg);
|
||||
|
||||
SRpcMsg rsp = {
|
||||
.code = TSDB_CODE_RPC_REDIRECT,
|
||||
.info = pReq->info,
|
||||
.contLen = len,
|
||||
};
|
||||
rsp.pCont = rpcMallocCont(len);
|
||||
tSerializeSMEpSet(rsp.pCont, len, &msg);
|
||||
rpcSendResponse(&rsp);
|
||||
}
|
||||
|
||||
static inline void dmSendRecv(SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp) {
|
||||
static inline int32_t dmSendReq(const SEpSet *pEpSet, SRpcMsg *pMsg) {
|
||||
SDnode *pDnode = dmInstance();
|
||||
if (pDnode->status != DND_STAT_RUNNING) {
|
||||
pRsp->code = TSDB_CODE_NODE_OFFLINE;
|
||||
rpcFreeCont(pReq->pCont);
|
||||
pReq->pCont = NULL;
|
||||
} else {
|
||||
rpcSendRecv(pDnode->trans.clientRpc, pEpSet, pReq, pRsp);
|
||||
}
|
||||
}
|
||||
|
||||
static inline int32_t dmSendReq(const SEpSet *pEpSet, SRpcMsg *pReq) {
|
||||
SDnode *pDnode = dmInstance();
|
||||
if (pDnode->status != DND_STAT_RUNNING) {
|
||||
rpcFreeCont(pReq->pCont);
|
||||
pReq->pCont = NULL;
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
pMsg->pCont = NULL;
|
||||
terrno = TSDB_CODE_NODE_OFFLINE;
|
||||
dError("failed to send rpc msg since %s, handle:%p", terrstr(), pReq->info.handle);
|
||||
dError("failed to send rpc msg since %s, handle:%p", terrstr(), pMsg->info.handle);
|
||||
return -1;
|
||||
} else {
|
||||
rpcSendRequest(pDnode->trans.clientRpc, pEpSet, pReq, NULL);
|
||||
rpcSendRequest(pDnode->trans.clientRpc, pEpSet, pMsg, NULL);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void dmSendRsp(SRpcMsg *pMsg) {
|
||||
SMgmtWrapper *pWrapper = pMsg->info.wrapper;
|
||||
if (pMsg->code == TSDB_CODE_NODE_REDIRECT) {
|
||||
dmSendRpcRedirectRsp(pMsg);
|
||||
if (InChildProc(pWrapper)) {
|
||||
dmPutToProcPQueue(&pWrapper->proc, pMsg, DND_FUNC_RSP);
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
pMsg->pCont = NULL;
|
||||
} else {
|
||||
if (InChildProc(pWrapper)) {
|
||||
dmPutToProcPQueue(&pWrapper->proc, pMsg, DND_FUNC_RSP);
|
||||
} else {
|
||||
rpcSendResponse(pMsg);
|
||||
}
|
||||
rpcSendResponse(pMsg);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void dmSendRedirectRsp(SRpcMsg *pMsg, const SEpSet *pNewEpSet) {
|
||||
SMgmtWrapper *pWrapper = pMsg->info.wrapper;
|
||||
if (InChildProc(pWrapper)) {
|
||||
dmPutToProcPQueue(&pWrapper->proc, pMsg, DND_FUNC_RSP);
|
||||
} else {
|
||||
SRpcMsg rsp = {0};
|
||||
SMEpSet msg = {.epSet = *pNewEpSet};
|
||||
int32_t len = tSerializeSMEpSet(NULL, 0, &msg);
|
||||
rsp.pCont = rpcMallocCont(len);
|
||||
rsp.contLen = len;
|
||||
tSerializeSMEpSet(rsp.pCont, len, &msg);
|
||||
SRpcMsg rsp = {.code = TSDB_CODE_RPC_REDIRECT, .info = pMsg->info};
|
||||
SMEpSet msg = {.epSet = *pNewEpSet};
|
||||
int32_t contLen = tSerializeSMEpSet(NULL, 0, &msg);
|
||||
|
||||
rsp.code = TSDB_CODE_RPC_REDIRECT;
|
||||
rsp.info = pMsg->info;
|
||||
rpcSendResponse(&rsp);
|
||||
rsp.pCont = rpcMallocCont(contLen);
|
||||
if (rsp.pCont == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
} else {
|
||||
tSerializeSMEpSet(rsp.pCont, contLen, &msg);
|
||||
rsp.contLen = contLen;
|
||||
}
|
||||
dmSendRsp(&rsp);
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
pMsg->pCont = NULL;
|
||||
}
|
||||
|
||||
static inline void dmRegisterBrokenLinkArg(SRpcMsg *pMsg) {
|
||||
SMgmtWrapper *pWrapper = pMsg->info.wrapper;
|
||||
if (InChildProc(pWrapper)) {
|
||||
dmPutToProcPQueue(&pWrapper->proc, pMsg, DND_FUNC_REGIST);
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
pMsg->pCont = NULL;
|
||||
} else {
|
||||
rpcRegisterBrokenLinkArg(pMsg);
|
||||
}
|
||||
|
@ -316,15 +276,9 @@ int32_t dmInitClient(SDnode *pDnode) {
|
|||
rpcInit.connType = TAOS_CONN_CLIENT;
|
||||
rpcInit.idleTime = tsShellActivityTimer * 1000;
|
||||
rpcInit.user = INTERNAL_USER;
|
||||
rpcInit.ckey = INTERNAL_CKEY;
|
||||
rpcInit.spi = 1;
|
||||
rpcInit.parent = pDnode;
|
||||
rpcInit.rfp = rpcRfp;
|
||||
|
||||
char pass[TSDB_PASSWORD_LEN + 1] = {0};
|
||||
taosEncryptPass_c((uint8_t *)(INTERNAL_SECRET), strlen(INTERNAL_SECRET), pass);
|
||||
rpcInit.secret = pass;
|
||||
|
||||
pTrans->clientRpc = rpcOpen(&rpcInit);
|
||||
if (pTrans->clientRpc == NULL) {
|
||||
dError("failed to init dnode rpc client");
|
||||
|
@ -389,3 +343,34 @@ SMsgCb dmGetMsgcb(SDnode *pDnode) {
|
|||
};
|
||||
return msgCb;
|
||||
}
|
||||
|
||||
static void dmSendMnodeRedirectRsp(SRpcMsg *pMsg) {
|
||||
SDnode *pDnode = dmInstance();
|
||||
SEpSet epSet = {0};
|
||||
dmGetMnodeEpSet(&pDnode->data, &epSet);
|
||||
|
||||
dDebug("msg:%p, is redirected, num:%d use:%d", pMsg, epSet.numOfEps, epSet.inUse);
|
||||
for (int32_t i = 0; i < epSet.numOfEps; ++i) {
|
||||
dDebug("mnode index:%d %s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port);
|
||||
if (strcmp(epSet.eps[i].fqdn, tsLocalFqdn) == 0 && epSet.eps[i].port == tsServerPort) {
|
||||
epSet.inUse = (i + 1) % epSet.numOfEps;
|
||||
}
|
||||
|
||||
epSet.eps[i].port = htons(epSet.eps[i].port);
|
||||
}
|
||||
|
||||
SRpcMsg rsp = {.code = TSDB_CODE_RPC_REDIRECT, .info = pMsg->info};
|
||||
SMEpSet msg = {.epSet = epSet};
|
||||
int32_t contLen = tSerializeSMEpSet(NULL, 0, &msg);
|
||||
rsp.pCont = rpcMallocCont(contLen);
|
||||
if (rsp.pCont == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
} else {
|
||||
tSerializeSMEpSet(rsp.pCont, contLen, &msg);
|
||||
rsp.contLen = contLen;
|
||||
}
|
||||
|
||||
dmSendRsp(&rsp);
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
pMsg->pCont = NULL;
|
||||
}
|
||||
|
|
|
@@ -48,10 +48,10 @@ void TestClient::DoInit() {
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.idleTime = 30 * 1000;
rpcInit.user = (char*)this->user;
rpcInit.ckey = (char*)"key";
// rpcInit.ckey = (char*)"key";
rpcInit.parent = this;
rpcInit.secret = (char*)secretEncrypt;
rpcInit.spi = 1;
// rpcInit.secret = (char*)secretEncrypt;
// rpcInit.spi = 1;
clientRpc = rpcOpen(&rpcInit);
ASSERT(clientRpc);
@@ -848,7 +848,7 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)cgroup, false);

// app id
// client id
char clientId[TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE] = {0};
tstrncpy(varDataVal(clientId), pConsumer->clientId, TSDB_CGROUP_LEN);
varDataSetLen(clientId, strlen(varDataVal(clientId)));
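The clientId handling above follows the usual VARSTR layout: the payload is copied into varDataVal() and the length header is written with varDataSetLen() before the buffer is appended to the column. A small sketch of that pattern, assuming the varData macros, tstrncpy and colDataAppend behave as used in this hunk (appendVarString is a hypothetical helper):

```c
// Sketch: append a C string to a varchar column as a VARSTR (length header + bytes).
static void appendVarString(SColumnInfoData *pColInfo, int32_t row, const char *src, int32_t maxLen) {
  char buf[TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE] = {0};  // header + payload, sized as above
  tstrncpy(varDataVal(buf), src, maxLen);                // copy the payload after the header
  varDataSetLen(buf, strlen(varDataVal(buf)));           // record the payload length
  colDataAppend(pColInfo, row, buf, false);              // false: the value is not NULL
}
```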
@@ -624,14 +624,12 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {

SRpcMsg rpcMsg = {.msgType = TDMT_DND_CONFIG_DNODE, .pCont = pBuf, .contLen = bufLen, .info = pReq->info};

mInfo("dnode:%d, app:%p config:%s req send to dnode", cfgReq.dnodeId, rpcMsg.info.ahandle, cfgReq.config);
tmsgSendReq(&epSet, &rpcMsg);

return 0;
mDebug("dnode:%d, send config req to dnode, app:%p", cfgReq.dnodeId, rpcMsg.info.ahandle);
return tmsgSendReq(&epSet, &rpcMsg);
}

static int32_t mndProcessConfigDnodeRsp(SRpcMsg *pRsp) {
mInfo("app:%p config rsp from dnode", pRsp->info.ahandle);
mDebug("config rsp from dnode, app:%p", pRsp->info.ahandle);
return TSDB_CODE_SUCCESS;
}
@@ -45,20 +45,20 @@ typedef struct SVnodeCfg SVnodeCfg;

extern const SVnodeCfg vnodeCfgDefault;

int vnodeInit(int nthreads);
int32_t vnodeInit(int32_t nthreads);
void vnodeCleanup();
int vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs);
int32_t vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs);
void vnodeDestroy(const char *path, STfs *pTfs);
SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb);
void vnodeClose(SVnode *pVnode);
int vnodePreprocessWriteReqs(SVnode *pVnode, SArray *pMsgs, int64_t *version);
int vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg *pRsp);
int vnodeProcessCMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp);
int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp);
int vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg);
int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
int32_t vnodePreprocessWriteReqs(SVnode *pVnode, SArray *pMsgs, int64_t *version);
int32_t vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg *pRsp);
int32_t vnodeProcessCMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp);
int32_t vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp);
int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg);
int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad);
int vnodeValidateTableHash(SVnode *pVnode, char *tableFName);
int32_t vnodeValidateTableHash(SVnode *pVnode, char *tableFName);

int32_t vnodeStart(SVnode *pVnode);
void vnodeStop(SVnode *pVnode);

@@ -74,8 +74,8 @@ typedef struct SMetaEntry SMetaEntry;

void metaReaderInit(SMetaReader *pReader, SMeta *pMeta, int32_t flags);
void metaReaderClear(SMetaReader *pReader);
int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid);
int metaReadNext(SMetaReader *pReader);
int32_t metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid);
int32_t metaReadNext(SMetaReader *pReader);
const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t cid);

#if 1 // refact APIs below (TODO)

@@ -86,7 +86,7 @@ typedef struct SMTbCursor SMTbCursor;

SMTbCursor *metaOpenTbCursor(SMeta *pMeta);
void metaCloseTbCursor(SMTbCursor *pTbCur);
int metaTbCursorNext(SMTbCursor *pTbCur);
int32_t metaTbCursorNext(SMTbCursor *pTbCur);
#endif

// tsdb

@@ -124,12 +124,13 @@ typedef struct STqReadHandle STqReadHandle;
STqReadHandle *tqInitSubmitMsgScanner(SMeta *pMeta);

void tqReadHandleSetColIdList(STqReadHandle *pReadHandle, SArray *pColIdList);
int tqReadHandleSetTbUidList(STqReadHandle *pHandle, const SArray *tbUidList);
int tqReadHandleAddTbUidList(STqReadHandle *pHandle, const SArray *tbUidList);
int tqReadHandleRemoveTbUidList(STqReadHandle* pHandle, const SArray* tbUidList);
int32_t tqReadHandleSetTbUidList(STqReadHandle *pHandle, const SArray *tbUidList);
int32_t tqReadHandleAddTbUidList(STqReadHandle *pHandle, const SArray *tbUidList);
int32_t tqReadHandleRemoveTbUidList(STqReadHandle* pHandle, const SArray* tbUidList);

int32_t tqReadHandleSetMsg(STqReadHandle *pHandle, SSubmitReq *pMsg, int64_t ver);
bool tqNextDataBlock(STqReadHandle *pHandle);
bool tqNextDataBlockFilterOut(STqReadHandle *pHandle, SHashObj *filterOutUids);
int32_t tqRetrieveDataBlock(SArray **ppCols, STqReadHandle *pHandle, uint64_t *pGroupId, uint64_t *pUid,
int32_t *pNumOfRows, int16_t *pNumOfCols);

@@ -208,15 +209,15 @@ struct SMetaReader {
SDecoder coder;
SMetaEntry me;
void *pBuf;
int szBuf;
int32_t szBuf;
};

struct SMTbCursor {
TBC *pDbc;
void *pKey;
void *pVal;
int kLen;
int vLen;
int32_t kLen;
int32_t vLen;
SMetaReader mr;
};
@@ -163,6 +163,7 @@ typedef struct {
int8_t withSchema;
int8_t withTag;
char* qmsg;
SHashObj* pDropTbUid;
STqPushHandle pushHandle;
// SRWLatch lock;
SWalReadHandle* pWalReader;
@@ -24,7 +24,6 @@
extern "C" {
#endif

// vnodeDebug ====================
// clang-format off
#define vFatal(...) do { if (vDebugFlag & DEBUG_FATAL) { taosPrintLog("VND FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
#define vError(...) do { if (vDebugFlag & DEBUG_ERROR) { taosPrintLog("VND ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)

@@ -34,17 +33,17 @@ extern "C" {
#define vTrace(...) do { if (vDebugFlag & DEBUG_TRACE) { taosPrintLog("VND ", DEBUG_TRACE, vDebugFlag, __VA_ARGS__); }} while(0)
// clang-format on

// vnodeCfg ====================
// vnodeCfg.c
extern const SVnodeCfg vnodeCfgDefault;

int vnodeCheckCfg(const SVnodeCfg*);
int vnodeEncodeConfig(const void* pObj, SJson* pJson);
int vnodeDecodeConfig(const SJson* pJson, void* pObj);
int32_t vnodeCheckCfg(const SVnodeCfg*);
int32_t vnodeEncodeConfig(const void* pObj, SJson* pJson);
int32_t vnodeDecodeConfig(const SJson* pJson, void* pObj);

// vnodeModule ====================
int vnodeScheduleTask(int (*execute)(void*), void* arg);
// vnodeModule.c
int32_t vnodeScheduleTask(int32_t (*execute)(void*), void* arg);

// vnodeBufPool ====================
// vnodeBufPool.c
typedef struct SVBufPoolNode SVBufPoolNode;
struct SVBufPoolNode {
SVBufPoolNode* prev;

@@ -62,38 +61,29 @@ struct SVBufPool {
SVBufPoolNode node;
};

int vnodeOpenBufPool(SVnode* pVnode, int64_t size);
int vnodeCloseBufPool(SVnode* pVnode);
void vnodeBufPoolReset(SVBufPool* pPool);
int32_t vnodeOpenBufPool(SVnode* pVnode, int64_t size);
int32_t vnodeCloseBufPool(SVnode* pVnode);
void vnodeBufPoolReset(SVBufPool* pPool);

// vnodeQuery ====================
int vnodeQueryOpen(SVnode* pVnode);
void vnodeQueryClose(SVnode* pVnode);
int vnodeGetTableMeta(SVnode* pVnode, SRpcMsg* pMsg);
// vnodeQuery.c
int32_t vnodeQueryOpen(SVnode* pVnode);
void vnodeQueryClose(SVnode* pVnode);
int32_t vnodeGetTableMeta(SVnode* pVnode, SRpcMsg* pMsg);

// vnodeCommit ====================
int vnodeBegin(SVnode* pVnode);
int vnodeShouldCommit(SVnode* pVnode);
int vnodeCommit(SVnode* pVnode);
int vnodeSaveInfo(const char* dir, const SVnodeInfo* pCfg);
int vnodeCommitInfo(const char* dir, const SVnodeInfo* pInfo);
int vnodeLoadInfo(const char* dir, SVnodeInfo* pInfo);
int vnodeSyncCommit(SVnode* pVnode);
int vnodeAsyncCommit(SVnode* pVnode);
// vnodeCommit.c
int32_t vnodeBegin(SVnode* pVnode);
int32_t vnodeShouldCommit(SVnode* pVnode);
int32_t vnodeCommit(SVnode* pVnode);
int32_t vnodeSaveInfo(const char* dir, const SVnodeInfo* pCfg);
int32_t vnodeCommitInfo(const char* dir, const SVnodeInfo* pInfo);
int32_t vnodeLoadInfo(const char* dir, SVnodeInfo* pInfo);
int32_t vnodeSyncCommit(SVnode* pVnode);
int32_t vnodeAsyncCommit(SVnode* pVnode);

// vnodeCommit ====================
// vnodeSync.c
int32_t vnodeSyncOpen(SVnode* pVnode, char* path);
int32_t vnodeSyncStart(SVnode* pVnode);
void vnodeSyncStart(SVnode* pVnode);
void vnodeSyncClose(SVnode* pVnode);
void vnodeSyncSetQ(SVnode* pVnode, void* qHandle);
void vnodeSyncSetRpc(SVnode* pVnode, void* rpcHandle);
int32_t vnodeSyncEqMsg(void* qHandle, SRpcMsg* pMsg);
int32_t vnodeSendMsg(void* rpcHandle, const SEpSet* pEpSet, SRpcMsg* pMsg);
void vnodeSyncCommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
void vnodeSyncPreCommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
void vnodeSyncRollBackCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
int32_t vnodeSyncGetSnapshotCb(struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
SSyncFSM* syncVnodeMakeFsm();

#ifdef __cplusplus
}
@@ -82,7 +82,7 @@ int metaCreateSTable(SMeta* pMeta, int64_t version, SVCreateStbReq*
int metaAlterSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
int metaDropSTable(SMeta* pMeta, int64_t verison, SVDropStbReq* pReq);
int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq);
int metaDropTable(SMeta* pMeta, int64_t version, SVDropTbReq* pReq);
int metaDropTable(SMeta* pMeta, int64_t version, SVDropTbReq* pReq, SArray* tbUids);
int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq);
SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, bool isinline);
STSchema* metaGetTbTSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver);

@@ -104,7 +104,7 @@ int tsdbOpen(SVnode* pVnode, STsdb** ppTsdb, const char* dir, STsdbKeep
int tsdbClose(STsdb** pTsdb);
int tsdbBegin(STsdb* pTsdb);
int tsdbCommit(STsdb* pTsdb);
int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, const SSubmitReq *pMsg);
int tsdbScanAndConvertSubmitMsg(STsdb* pTsdb, const SSubmitReq* pMsg);
int tsdbInsertData(STsdb* pTsdb, int64_t version, SSubmitReq* pMsg, SSubmitRsp* pRsp);
int tsdbInsertTableData(STsdb* pTsdb, SSubmitMsgIter* pMsgIter, SSubmitBlk* pBlock, SSubmitBlkRsp* pRsp);
tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId,

@@ -118,7 +118,7 @@ STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal);
void tqClose(STQ*);
int tqPushMsg(STQ*, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver);
int tqCommit(STQ*);
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList);
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd);
int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessTaskExec(STQ* pTq, char* msg, int32_t msgLen, int32_t workerId);
@@ -56,8 +56,8 @@ int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) {
if (tDecodeCStr(pCoder, &pME->name) < 0) return -1;

if (pME->type == TSDB_SUPER_TABLE) {
if (tDecodeSSchemaWrapperEx(pCoder, &pME->stbEntry.schema) < 0) return -1;
if (tDecodeSSchemaWrapperEx(pCoder, &pME->stbEntry.schemaTag) < 0) return -1;
if (tDecodeSSchemaWrapper(pCoder, &pME->stbEntry.schema) < 0) return -1;
if (tDecodeSSchemaWrapper(pCoder, &pME->stbEntry.schemaTag) < 0) return -1;
} else if (pME->type == TSDB_CHILD_TABLE) {
if (tDecodeI64(pCoder, &pME->ctbEntry.ctime) < 0) return -1;
if (tDecodeI32(pCoder, &pME->ctbEntry.ttlDays) < 0) return -1;

@@ -67,10 +67,10 @@ int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) {
if (tDecodeI64(pCoder, &pME->ntbEntry.ctime) < 0) return -1;
if (tDecodeI32(pCoder, &pME->ntbEntry.ttlDays) < 0) return -1;
if (tDecodeI32v(pCoder, &pME->ntbEntry.ncid) < 0) return -1;
if (tDecodeSSchemaWrapperEx(pCoder, &pME->ntbEntry.schema) < 0) return -1;
if (tDecodeSSchemaWrapper(pCoder, &pME->ntbEntry.schema) < 0) return -1;
} else if (pME->type == TSDB_TSMA_TABLE) {
pME->smaEntry.tsma = tDecoderMalloc(pCoder, sizeof(STSma));
if(!pME->smaEntry.tsma) {
if (!pME->smaEntry.tsma) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
@@ -255,7 +255,7 @@ _err:
return -1;
}

int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq) {
int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUids) {
TBC *pTbDbc = NULL;
TBC *pUidIdxc = NULL;
TBC *pNameIdxc = NULL;

@@ -336,6 +336,7 @@ int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq) {
if (type == TSDB_CHILD_TABLE) {
ctime = me.ctbEntry.ctime;
suid = me.ctbEntry.suid;
taosArrayPush(tbUids, &me.uid);
} else if (type == TSDB_NORMAL_TABLE) {
ctime = me.ntbEntry.ctime;
suid = 0;

@@ -906,4 +907,4 @@ static int metaHandleEntry(SMeta *pMeta, const SMetaEntry *pME) {
_err:
metaULock(pMeta);
return -1;
}
}
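metaDropTable now reports the uids of removed child tables through the new tbUids out-parameter instead of keeping that knowledge inside the meta module. A sketch of the resulting calling convention, mirroring the vnode drop-table path that appears later in this diff (dropOneTable is a hypothetical wrapper, not code from this commit):

```c
// Sketch: collect the uids dropped by meta and hand them to the TQ layer in one call.
static int32_t dropOneTable(SVnode *pVnode, int64_t version, SVDropTbReq *pReq) {
  SArray *tbUids = taosArrayInit(1, sizeof(int64_t));
  if (tbUids == NULL) return -1;

  int32_t ret = metaDropTable(pVnode->pMeta, version, pReq, tbUids);  // fills tbUids
  if (ret == 0) {
    tqUpdateTbUidList(pVnode->pTq, tbUids, false);  // false: these uids are going away
  }

  taosArrayDestroy(tbUids);
  return ret;
}
```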
@@ -104,17 +104,26 @@ static void tdSRowDemo() {
taosMemoryFree(pTSChema);
}

int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList) {
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
void* pIter = NULL;
STqExec* pExec = NULL;
while (1) {
pIter = taosHashIterate(pTq->execs, pIter);
if (pIter == NULL) break;
pExec = (STqExec*)pIter;
if (pExec->subType == TOPIC_SUB_TYPE__DB) continue;
for (int32_t i = 0; i < 5; i++) {
int32_t code = qUpdateQualifiedTableId(pExec->task[i], tbUidList, true);
ASSERT(code == 0);
if (pExec->subType == TOPIC_SUB_TYPE__DB) {
if (!isAdd) {
int32_t sz = taosArrayGetSize(tbUidList);
for (int32_t i = 0; i < sz; i++) {
int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
taosHashPut(pExec->pDropTbUid, &tbUid, sizeof(int64_t), NULL, 0);
}
}
} else {
for (int32_t i = 0; i < 5; i++) {
int32_t code = qUpdateQualifiedTableId(pExec->task[i], tbUidList, isAdd);
ASSERT(code == 0);
}
}
}
return 0;
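With the new isAdd flag, a DB-type subscription no longer re-qualifies tables on every change; when tables are removed it simply records their uids in pExec->pDropTbUid so the poll path can skip their blocks later. A sketch of that record/lookup pair, assuming the taosHash and taosArray calls keep the signatures used in this hunk (the helper names are illustrative):

```c
// Sketch: remember dropped table uids, then test a uid against the set.
static void rememberDroppedUids(SHashObj *pDropTbUid, const SArray *tbUidList) {
  int32_t sz = taosArrayGetSize(tbUidList);
  for (int32_t i = 0; i < sz; i++) {
    int64_t tbUid = *(int64_t *)taosArrayGet(tbUidList, i);
    taosHashPut(pDropTbUid, &tbUid, sizeof(int64_t), NULL, 0);  // key only, no value
  }
}

static bool isDroppedUid(SHashObj *pDropTbUid, int64_t uid) {
  return taosHashGet(pDropTbUid, &uid, sizeof(int64_t)) != NULL;
}
```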
@@ -582,7 +591,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
rsp.withSchema = 1;
STqReadHandle* pReader = pExec->pExecReader[workerId];
tqReadHandleSetMsg(pReader, pCont, 0);
while (tqNextDataBlock(pReader)) {
while (tqNextDataBlockFilterOut(pReader, pExec->pDropTbUid)) {
SSDataBlock block = {0};
if (tqRetrieveDataBlock(&block.pDataBlock, pReader, &block.info.groupId, &block.info.uid, &block.info.rows,
&block.info.numOfCols) < 0) {
@@ -915,9 +924,10 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
req.qmsg = NULL;

pExec->pWalReader = walOpenReadHandle(pTq->pVnode->pWal);
for (int32_t i = 0; i < 5; i++) {
pExec->pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta);
if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
for (int32_t i = 0; i < 5; i++) {
pExec->pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta);

SReadHandle handle = {
.reader = pExec->pExecReader[i],
.meta = pTq->pVnode->pMeta,

@@ -925,9 +935,12 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
};
pExec->task[i] = qCreateStreamExecTaskInfo(pExec->qmsg, &handle);
ASSERT(pExec->task[i]);
} else {
pExec->task[i] = NULL;
}
} else {
for (int32_t i = 0; i < 5; i++) {
pExec->pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta);
}
pExec->pDropTbUid = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
}
taosHashPut(pTq->execs, req.subKey, strlen(req.subKey), pExec, sizeof(STqExec));
return 0;
@@ -1045,6 +1058,57 @@ int32_t tqProcessStreamTrigger(STQ* pTq, void* data, int32_t dataLen, int32_t wo
return 0;
}

int32_t tqProcessStreamTriggerNew(STQ* pTq, SSubmitReq* data) {
SStreamDataSubmit* pSubmit = NULL;

// build data
pSubmit = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM);
if (pSubmit == NULL) return -1;
pSubmit->dataRef = taosMemoryMalloc(sizeof(int32_t));
if (pSubmit->dataRef == NULL) goto FAIL;
*pSubmit->dataRef = 1;
pSubmit->data = data;
pSubmit->type = STREAM_INPUT__DATA_BLOCK;

void* pIter = NULL;
while (1) {
pIter = taosHashIterate(pTq->pStreamTasks, pIter);
if (pIter == NULL) break;
SStreamTask* pTask = (SStreamTask*)pIter;
if (pTask->inputType == TASK_INPUT_TYPE__SUMBIT_BLOCK) {
streamEnqueueDataSubmit(pTask, pSubmit);
// TODO cal back pressure
}
// check run
int8_t execStatus = atomic_load_8(&pTask->status);
if (execStatus == TASK_STATUS__IDLE || execStatus == TASK_STATUS__CLOSING) {
SStreamTaskRunReq* pReq = taosMemoryMalloc(sizeof(SStreamTaskRunReq));
if (pReq == NULL) continue;
// TODO: do we need htonl?
pReq->head.vgId = pTq->pVnode->config.vgId;
pReq->streamId = pTask->streamId;
pReq->taskId = pTask->taskId;
SRpcMsg msg = {
.msgType = 0,
.pCont = pReq,
.contLen = sizeof(SStreamTaskRunReq),
};
tmsgPutToQueue(&pTq->pVnode->msgCb, FETCH_QUEUE, &msg);
}
}
streamDataSubmitRefDec(pSubmit);

return 0;
FAIL:
if (pSubmit) {
if (pSubmit->dataRef) {
taosMemoryFree(pSubmit->dataRef);
}
taosFreeQitem(pSubmit);
}
return -1;
}
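tqProcessStreamTriggerNew shares a single submit payload with every interested stream task: the producer creates it with a reference count of 1, each enqueue is expected to take its own reference, and the trailing streamDataSubmitRefDec drops the producer's reference so the last holder frees the data. A self-contained sketch of that ownership pattern in plain C; the names below are illustrative and not the stream API from this commit:

```c
#include <stdatomic.h>
#include <stdlib.h>

typedef struct {
  atomic_int refCount;  // starts at 1, owned by the producer
  void      *payload;
} SharedSubmit;

static SharedSubmit *sharedSubmitCreate(void *payload) {
  SharedSubmit *p = malloc(sizeof(*p));
  if (p == NULL) return NULL;
  atomic_init(&p->refCount, 1);
  p->payload = payload;
  return p;
}

static void sharedSubmitRef(SharedSubmit *p) { atomic_fetch_add(&p->refCount, 1); }

static void sharedSubmitUnref(SharedSubmit *p) {
  if (atomic_fetch_sub(&p->refCount, 1) == 1) {  // this was the last reference
    free(p->payload);
    free(p);
  }
}
```

Each consumer would call sharedSubmitRef() when it enqueues the pointer and sharedSubmitUnref() when it is done; the producer's final sharedSubmitUnref() plays the role of the streamDataSubmitRefDec(pSubmit) call above.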
int32_t tqProcessTaskExec(STQ* pTq, char* msg, int32_t msgLen, int32_t workerId) {
SStreamTaskExecReq req;
tDecodeSStreamTaskExecReq(msg, &req);
@@ -34,21 +34,11 @@ STqReadHandle* tqInitSubmitMsgScanner(SMeta* pMeta) {

int32_t tqReadHandleSetMsg(STqReadHandle* pReadHandle, SSubmitReq* pMsg, int64_t ver) {
pReadHandle->pMsg = pMsg;
// pMsg->length = htonl(pMsg->length);
// pMsg->numOfBlocks = htonl(pMsg->numOfBlocks);

// iterate and convert
if (tInitSubmitMsgIter(pMsg, &pReadHandle->msgIter) < 0) return -1;
while (true) {
if (tGetSubmitMsgNext(&pReadHandle->msgIter, &pReadHandle->pBlock) < 0) return -1;
if (pReadHandle->pBlock == NULL) break;

// pReadHandle->pBlock->uid = htobe64(pReadHandle->pBlock->uid);
// pReadHandle->pBlock->suid = htobe64(pReadHandle->pBlock->suid);
// pReadHandle->pBlock->sversion = htonl(pReadHandle->pBlock->sversion);
// pReadHandle->pBlock->dataLen = htonl(pReadHandle->pBlock->dataLen);
// pReadHandle->pBlock->schemaLen = htonl(pReadHandle->pBlock->schemaLen);
// pReadHandle->pBlock->numOfRows = htons(pReadHandle->pBlock->numOfRows);
}

if (tInitSubmitMsgIter(pMsg, &pReadHandle->msgIter) < 0) return -1;

@@ -64,22 +54,28 @@ bool tqNextDataBlock(STqReadHandle* pHandle) {
}
if (pHandle->pBlock == NULL) return false;

/*pHandle->pBlock->uid = htobe64(pHandle->pBlock->uid);*/
/*if (pHandle->tbUid == pHandle->pBlock->uid) {*/
if (pHandle->tbIdHash == NULL) {
return true;
}
void* ret = taosHashGet(pHandle->tbIdHash, &pHandle->msgIter.uid, sizeof(int64_t));
if (ret != NULL) {
/*printf("retrieve one tb %ld\n", pHandle->pBlock->uid);*/
/*pHandle->pBlock->tid = htonl(pHandle->pBlock->tid);*/
/*pHandle->pBlock->sversion = htonl(pHandle->pBlock->sversion);*/
/*pHandle->pBlock->dataLen = htonl(pHandle->pBlock->dataLen);*/
/*pHandle->pBlock->schemaLen = htonl(pHandle->pBlock->schemaLen);*/
/*pHandle->pBlock->numOfRows = htons(pHandle->pBlock->numOfRows);*/
return true;
/*} else {*/
/*printf("skip one tb %ld\n", pHandle->pBlock->uid);*/
}
}
return false;
}

bool tqNextDataBlockFilterOut(STqReadHandle* pHandle, SHashObj* filterOutUids) {
while (1) {
if (tGetSubmitMsgNext(&pHandle->msgIter, &pHandle->pBlock) < 0) {
return false;
}
if (pHandle->pBlock == NULL) return false;

ASSERT(pHandle->tbIdHash == NULL);
void* ret = taosHashGet(filterOutUids, &pHandle->msgIter.uid, sizeof(int64_t));
if (ret == NULL) {
return true;
}
}
return false;
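tqNextDataBlockFilterOut walks the same submit-message iterator as tqNextDataBlock but yields only blocks whose table uid is absent from the supplied hash, which is how DB subscriptions skip tables that were dropped mid-stream. A minimal consumer loop, assuming the reader API exactly as declared in this diff (scanSubmit is a hypothetical wrapper and error handling is trimmed):

```c
// Sketch: drain one submit message, skipping blocks that belong to dropped tables.
static int32_t scanSubmit(STqReadHandle *pReader, SSubmitReq *pMsg, SHashObj *pDropTbUid) {
  if (tqReadHandleSetMsg(pReader, pMsg, 0) < 0) return -1;

  while (tqNextDataBlockFilterOut(pReader, pDropTbUid)) {
    SSDataBlock block = {0};
    if (tqRetrieveDataBlock(&block.pDataBlock, pReader, &block.info.groupId, &block.info.uid,
                            &block.info.rows, &block.info.numOfCols) < 0) {
      continue;  // skip blocks that fail to decode, as the poll path above does
    }
    // ... hand the block to the consumer / response builder ...
  }
  return 0;
}
```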
@@ -180,8 +180,6 @@ void vnodeClose(SVnode *pVnode) {

// start the sync timer after the queue is ready
int32_t vnodeStart(SVnode *pVnode) {
vnodeSyncSetQ(pVnode, NULL);
vnodeSyncSetRpc(pVnode, NULL);
vnodeSyncStart(pVnode);
return 0;
}
@@ -118,13 +118,13 @@ int vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg
break;
}

vDebug("vgId:%d process %s request success, version: %" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), version);

if (tqPushMsg(pVnode->pTq, pMsg->pCont, pMsg->contLen, pMsg->msgType, version) < 0) {
vError("vgId:%d failed to push msg to TQ since %s", TD_VID(pVnode), tstrerror(terrno));
return -1;
}

vDebug("vgId:%d process %s request success, version: %" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), version);

// commit if need
if (vnodeShouldCommit(pVnode)) {
vInfo("vgId:%d commit at version %" PRId64, TD_VID(pVnode), version);

@@ -386,7 +386,7 @@ static int vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pReq,

tDecoderClear(&decoder);

tqUpdateTbUidList(pVnode->pTq, tbUids);
tqUpdateTbUidList(pVnode->pTq, tbUids, true);
tdUpdateTbUidList(pVnode->pSma, pStore);
tdUidStoreFree(pStore);

@@ -517,6 +517,7 @@ static int vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq, in
SDecoder decoder = {0};
SEncoder encoder = {0};
int ret;
SArray *tbUids = NULL;

pRsp->msgType = TDMT_VND_DROP_TABLE_RSP;
pRsp->pCont = NULL;

@@ -533,13 +534,16 @@ static int vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq, in
}

// process req
tbUids = taosArrayInit(req.nReqs, sizeof(int64_t));
rsp.pArray = taosArrayInit(req.nReqs, sizeof(SVDropTbRsp));
if (tbUids == NULL || rsp.pArray == NULL) goto _exit;

for (int iReq = 0; iReq < req.nReqs; iReq++) {
SVDropTbReq *pDropTbReq = req.pReqs + iReq;
SVDropTbRsp dropTbRsp = {0};

/* code */
ret = metaDropTable(pVnode->pMeta, version, pDropTbReq);
ret = metaDropTable(pVnode->pMeta, version, pDropTbReq, tbUids);
if (ret < 0) {
if (pDropTbReq->igNotExists && terrno == TSDB_CODE_VND_TABLE_NOT_EXIST) {
dropTbRsp.code = TSDB_CODE_SUCCESS;

@@ -553,7 +557,10 @@ static int vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq, in
taosArrayPush(rsp.pArray, &dropTbRsp);
}

tqUpdateTbUidList(pVnode->pTq, tbUids, false);

_exit:
taosArrayDestroy(tbUids);
tDecoderClear(&decoder);
tEncodeSize(tEncodeSVDropTbBatchRsp, &rsp, pRsp->contLen, ret);
pRsp->pCont = rpcMallocCont(pRsp->contLen);

@@ -675,7 +682,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in

submitBlkRsp.uid = createTbReq.uid;
submitBlkRsp.tblFName = taosMemoryMalloc(strlen(pVnode->config.dbname) + strlen(createTbReq.name) + 2);
sprintf(submitBlkRsp.tblFName, "%s.%s", pVnode->config.dbname, createTbReq.name);
sprintf(submitBlkRsp.tblFName, "%s.", pVnode->config.dbname);

msgIter.uid = createTbReq.uid;
if (createTbReq.type == TSDB_CHILD_TABLE) {
@@ -13,90 +13,62 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#define _DEFAULT_SOURCE
#include "vnd.h"

static int32_t vnodeSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg);
static int32_t vnodeSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg);
static SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode);
static void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta);
static void vnodeSyncPreCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta);
static void vnodeSyncRollBackMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta);
static int32_t vnodeSyncGetSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot);

int32_t vnodeSyncOpen(SVnode *pVnode, char *path) {
SSyncInfo syncInfo;
syncInfo.vgId = pVnode->config.vgId;
SSyncCfg *pCfg = &(syncInfo.syncCfg);
pCfg->replicaNum = pVnode->config.syncCfg.replicaNum;
pCfg->myIndex = pVnode->config.syncCfg.myIndex;
memcpy(pCfg->nodeInfo, pVnode->config.syncCfg.nodeInfo, sizeof(pCfg->nodeInfo));
SSyncInfo syncInfo = {
.vgId = pVnode->config.vgId,
.syncCfg = pVnode->config.syncCfg,
.pWal = pVnode->pWal,
.msgcb = NULL,
.FpSendMsg = vnodeSyncSendMsg,
.FpEqMsg = vnodeSyncEqMsg,
};

snprintf(syncInfo.path, sizeof(syncInfo.path), "%s/sync", path);
syncInfo.pWal = pVnode->pWal;

syncInfo.pFsm = syncVnodeMakeFsm(pVnode);
syncInfo.rpcClient = NULL;
syncInfo.FpSendMsg = vnodeSendMsg;
syncInfo.queue = NULL;
syncInfo.FpEqMsg = vnodeSyncEqMsg;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s%ssync", path, TD_DIRSEP);
syncInfo.pFsm = vnodeSyncMakeFsm(pVnode);

pVnode->sync = syncOpen(&syncInfo);
assert(pVnode->sync > 0);
if (pVnode->sync <= 0) {
vError("vgId:%d, failed to open sync since %s", pVnode->config.vgId, terrstr());
return -1;
}

// for test
setPingTimerMS(pVnode->sync, 3000);
setElectTimerMS(pVnode->sync, 500);
setHeartbeatTimerMS(pVnode->sync, 100);

return 0;
}

int32_t vnodeSyncStart(SVnode *pVnode) {
void vnodeSyncStart(SVnode *pVnode) {
syncSetMsgCb(pVnode->sync, &pVnode->msgCb);
syncStart(pVnode->sync);
}

void vnodeSyncClose(SVnode *pVnode) { syncStop(pVnode->sync); }

int32_t vnodeSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) { return tmsgPutToQueue(msgcb, SYNC_QUEUE, pMsg); }

int32_t vnodeSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) { return tmsgSendReq(pEpSet, pMsg); }

int32_t vnodeSyncGetSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot) {
vnodeGetSnapshot(pFsm->data, pSnapshot);
return 0;
}

void vnodeSyncClose(SVnode *pVnode) {
// stop by ref id
syncStop(pVnode->sync);
}

void vnodeSyncSetQ(SVnode *pVnode, void *qHandle) { syncSetQ(pVnode->sync, (void *)(&(pVnode->msgCb))); }

void vnodeSyncSetRpc(SVnode *pVnode, void *rpcHandle) { syncSetRpc(pVnode->sync, (void *)(&(pVnode->msgCb))); }

int32_t vnodeSyncEqMsg(void *qHandle, SRpcMsg *pMsg) {
int32_t ret = 0;
SMsgCb *pMsgCb = qHandle;
if (pMsgCb->queueFps[SYNC_QUEUE] != NULL) {
tmsgPutToQueue(qHandle, SYNC_QUEUE, pMsg);
} else {
vError("vnodeSyncEqMsg queue is NULL, SYNC_QUEUE:%d", SYNC_QUEUE);
}
return ret;
}

int32_t vnodeSendMsg(void *rpcHandle, const SEpSet *pEpSet, SRpcMsg *pMsg) {
int32_t ret = 0;
SMsgCb *pMsgCb = rpcHandle;
if (pMsgCb->queueFps[SYNC_QUEUE] != NULL) {
pMsg->info.noResp = 1;
tmsgSendReq(pEpSet, pMsg);
} else {
vError("vnodeSendMsg queue is NULL, SYNC_QUEUE:%d", SYNC_QUEUE);
}
return ret;
}

int32_t vnodeSyncGetSnapshotCb(struct SSyncFSM *pFsm, SSnapshot *pSnapshot) {
SVnode *pVnode = (SVnode *)(pFsm->data);
vnodeGetSnapshot(pVnode, pSnapshot);

/*
pSnapshot->data = NULL;
pSnapshot->lastApplyIndex = 0;
pSnapshot->lastApplyTerm = 0;
*/

return 0;
}

void vnodeSyncCommitCb(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
SyncIndex beginIndex = SYNC_INDEX_INVALID;
if (pFsm->FpGetSnapshot != NULL) {
SSnapshot snapshot;
SSnapshot snapshot = {0};
pFsm->FpGetSnapshot(pFsm, &snapshot);
beginIndex = snapshot.lastApplyIndex;
}

@@ -147,7 +119,7 @@ void vnodeSyncCommitCb(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cb
}
}

void vnodeSyncPreCommitCb(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
void vnodeSyncPreCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
char logBuf[256];
snprintf(logBuf, sizeof(logBuf),
"==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", pFsm, cbMeta.index,

@@ -155,19 +127,19 @@ void vnodeSyncPreCommitCb(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta
syncRpcMsgLog2(logBuf, (SRpcMsg *)pMsg);
}

void vnodeSyncRollBackCb(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
void vnodeSyncRollBackMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
char logBuf[256];
snprintf(logBuf, sizeof(logBuf), "==callback== ==RollBackCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n",
pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state));
syncRpcMsgLog2(logBuf, (SRpcMsg *)pMsg);
}

SSyncFSM *syncVnodeMakeFsm(SVnode *pVnode) {
SSyncFSM *pFsm = (SSyncFSM *)taosMemoryMalloc(sizeof(SSyncFSM));
SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) {
SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM));
pFsm->data = pVnode;
pFsm->FpCommitCb = vnodeSyncCommitCb;
pFsm->FpPreCommitCb = vnodeSyncPreCommitCb;
pFsm->FpRollBackCb = vnodeSyncRollBackCb;
pFsm->FpGetSnapshot = vnodeSyncGetSnapshotCb;
pFsm->FpCommitCb = vnodeSyncCommitMsg;
pFsm->FpPreCommitCb = vnodeSyncPreCommitMsg;
pFsm->FpRollBackCb = vnodeSyncRollBackMsg;
pFsm->FpGetSnapshot = vnodeSyncGetSnapshot;
return pFsm;
}
}
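The rewritten vnodeSyncOpen builds SSyncInfo with designated initializers and wires in three callbacks: FpEqMsg routes raft messages into the vnode's SYNC_QUEUE, FpSendMsg forwards them over the transport, and vnodeSyncMakeFsm supplies the commit/pre-commit/rollback/snapshot hooks. A condensed open-then-start sequence, assuming the signatures shown in this file (startVnodeReplication is a hypothetical caller, not code from this commit):

```c
// Sketch: bring up replication for one vnode using the API as changed in this diff.
static int32_t startVnodeReplication(SVnode *pVnode, char *path) {
  if (vnodeSyncOpen(pVnode, path) < 0) {  // builds SSyncInfo and opens the raft instance
    return -1;                            // vnodeSyncOpen already logged the failure
  }
  vnodeSyncStart(pVnode);                 // now void: binds msgCb and starts the instance
  return 0;
}
```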
@@ -300,10 +300,26 @@ void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock)
if (fmIsScanPseudoColumnFunc(functionId)) {
setTbNameColData(pTableScanInfo->readHandle.meta, pBlock, pColInfoData, functionId);
} else { // these are tags
const char* p = metaGetTableTagVal(&mr.me, pExpr->base.pParam[0].pCol->colId);
const char* p = NULL;
if(pColInfoData->info.type == TSDB_DATA_TYPE_JSON){
const uint8_t *tmp = mr.me.ctbEntry.pTags;
char *data = taosMemoryCalloc(kvRowLen(tmp) + 1, 1);
if(data == NULL){
qError("doTagScan calloc error:%d", kvRowLen(tmp) + 1);
return;
}
*data = TSDB_DATA_TYPE_JSON;
memcpy(data+1, tmp, kvRowLen(tmp));
p = data;
}else{
p = metaGetTableTagVal(&mr.me, pExpr->base.pParam[0].pCol->colId);
}
for (int32_t i = 0; i < pBlock->info.rows; ++i) {
colDataAppend(pColInfoData, i, p, (p == NULL));
}
if(pColInfoData->info.type == TSDB_DATA_TYPE_JSON){
taosMemoryFree((void*)p);
}
}
}

@@ -1587,8 +1603,21 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
STR_TO_VARSTR(str, mr.me.name);
colDataAppend(pDst, count, str, false);
} else { // it is a tag value
const char* p = metaGetTableTagVal(&mr.me, pExprInfo[j].base.pParam[0].pCol->colId);
colDataAppend(pDst, count, p, (p == NULL));
if(pDst->info.type == TSDB_DATA_TYPE_JSON){
const uint8_t *tmp = mr.me.ctbEntry.pTags;
char *data = taosMemoryCalloc(kvRowLen(tmp) + 1, 1);
if(data == NULL){
qError("doTagScan calloc error:%d", kvRowLen(tmp) + 1);
return NULL;
}
*data = TSDB_DATA_TYPE_JSON;
memcpy(data+1, tmp, kvRowLen(tmp));
colDataAppend(pDst, count, data, false);
taosMemoryFree(data);
}else{
const char* p = metaGetTableTagVal(&mr.me, pExprInfo[j].base.pParam[0].pCol->colId);
colDataAppend(pDst, count, p, (p == NULL));
}
}
}
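Both scan paths above build the same temporary buffer for a JSON tag: one leading byte holding TSDB_DATA_TYPE_JSON followed by the raw kvRow bytes, which is appended to the column and then freed. A sketch of that step as a helper, assuming kvRowLen, taosMemoryCalloc and colDataAppend as used in the hunks (makeJsonTagBuf is a hypothetical name):

```c
// Sketch: wrap a kvRow of JSON tags in a type-prefixed buffer for colDataAppend().
static char *makeJsonTagBuf(const uint8_t *pTags) {
  char *data = taosMemoryCalloc(kvRowLen(pTags) + 1, 1);  // 1 extra byte for the type tag
  if (data == NULL) {
    return NULL;                  // caller logs and bails out, as above
  }
  *data = TSDB_DATA_TYPE_JSON;    // tells colDataAppend this is a JSON payload
  memcpy(data + 1, pTags, kvRowLen(pTags));
  return data;                    // caller appends it, then taosMemoryFree()s it
}
```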
@@ -120,7 +120,13 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx);
bool getTailFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
bool tailFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
int32_t tailFunction(SqlFunctionCtx* pCtx);
int32_t tailFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
//int32_t tailFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);

bool getUniqueFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
bool uniqueFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
int32_t uniqueFunction(SqlFunctionCtx *pCtx);
//int32_t uniqueFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);

bool getSelectivityFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv);