Merge remote-tracking branch 'origin/3.0' into feature/qnode

commit 54a1ecf932
@@ -121,7 +121,7 @@ def pre_test_win(){
set
date /t
time /t
rd /s /Q C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug
rd /s /Q C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug || exit 0
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal

@@ -236,6 +236,7 @@ typedef struct SSelectStmt {
bool isTimeOrderQuery;
bool hasAggFuncs;
bool hasRepeatScanFuncs;
bool hasNonstdSQLFunc;
} SSelectStmt;

typedef enum ESetOperatorType { SET_OP_TYPE_UNION_ALL = 1, SET_OP_TYPE_UNION } ESetOperatorType;

@@ -107,6 +107,7 @@ static FORCE_INLINE void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit)
if (ref == 0) {
taosMemoryFree(pDataSubmit->data);
taosMemoryFree(pDataSubmit->dataRef);
taosFreeQitem(pDataSubmit);
}
}

@@ -279,6 +280,12 @@ typedef struct {
SArray* res; // SArray<SSDataBlock>
} SStreamSinkReq;

typedef struct {
SMsgHead head;
int64_t streamId;
int32_t taskId;
} SStreamTaskRunReq;

int32_t streamEnqueueDataSubmit(SStreamTask* pTask, SStreamDataSubmit* input);
int32_t streamEnqueueDataBlk(SStreamTask* pTask, SStreamDataBlock* input);
int32_t streamDequeueOutput(SStreamTask* pTask, void** output);

@@ -20,14 +20,11 @@
extern "C" {
#endif

#include <stdbool.h>
#include <stdint.h>
//#include <tdatablock.h>
#include "os.h"

#include "cJSON.h"
#include "tdef.h"
//#include "taosdef.h"
//#include "trpc.h"
//#include "wal.h"
#include "tmsgcb.h"

typedef uint64_t SyncNodeId;
typedef int32_t SyncGroupId;

@@ -132,11 +129,10 @@ typedef struct SSyncInfo {
char path[TSDB_FILENAME_LEN];
SWal* pWal;
SSyncFSM* pFsm;
SMsgCb* msgcb;

void* rpcClient;
int32_t (*FpSendMsg)(void* rpcClient, const SEpSet* pEpSet, SRpcMsg* pMsg);
void* queue;
int32_t (*FpEqMsg)(void* queue, SRpcMsg* pMsg);
int32_t (*FpSendMsg)(const SEpSet* pEpSet, SRpcMsg* pMsg);
int32_t (*FpEqMsg)(const SMsgCb* msgcb, SRpcMsg* pMsg);

} SSyncInfo;

@@ -20,13 +20,10 @@
extern "C" {
#endif

#include <stdbool.h>
#include <stdint.h>
//#include <tdatablock.h>
#include "os.h"

#include "cJSON.h"
//#include "taosdef.h"
#include "trpc.h"
//#include "wal.h"

// ------------------ ds -------------------
typedef struct SRaftId {

@@ -43,8 +40,7 @@ void syncNodeRelease(SSyncNode* pNode);

int32_t syncGetRespRpc(int64_t rid, uint64_t index, SRpcMsg* msg);
int32_t syncGetAndDelRespRpc(int64_t rid, uint64_t index, SRpcMsg* msg);
void syncSetQ(int64_t rid, void* queueHandle);
void syncSetRpc(int64_t rid, void* rpcHandle);
void syncSetMsgCb(int64_t rid, const SMsgCb* msgcb);
char* sync2SimpleStr(int64_t rid);

// set timer ms

@@ -38,7 +38,7 @@ typedef struct {

typedef struct SRpcHandleInfo {
// rpc info
void *handle; // rpc handle returned to app
void * handle; // rpc handle returned to app
int64_t refId; // refid, used by server
int32_t noResp; // has response or not(default 0, 0: resp, 1: no resp);
int32_t persistHandle; // persist handle or not

@@ -49,13 +49,13 @@ typedef struct SRpcHandleInfo {
void *node; // node mgmt handle

// resp info
void *rsp;
void * rsp;
int32_t rspLen;
} SRpcHandleInfo;

typedef struct SRpcMsg {
tmsg_t msgType;
void *pCont;
void * pCont;
int32_t contLen;
int32_t code;
SRpcHandleInfo info;

@@ -63,11 +63,6 @@ typedef struct SRpcMsg {
} SRpcMsg;

typedef void (*RpcCfp)(void *parent, SRpcMsg *, SEpSet *rf);
typedef int (*RpcAfp)(void *parent, char *tableId, char *spi, char *encrypt, char *secret, char *ckey);
///
// // SRpcMsg code
// REDIERE,
// NOT READY, EpSet
typedef bool (*RpcRfp)(int32_t code);

typedef struct SRpcInit {

@@ -80,18 +75,11 @@ typedef struct SRpcInit {
int idleTime; // milliseconds, 0 means idle timer is disabled

// the following is for client app ecurity only
char *user; // user name
char spi; // security parameter index
char encrypt; // encrypt algorithm
char *secret; // key for authentication
char *ckey; // ciphering key
char *user; // user name

// call back to process incoming msg, code shall be ignored by server app
RpcCfp cfp;

// call back to retrieve the client auth info, for server app only
RpcAfp afp;

// user defined retry func
RpcRfp rfp;

@@ -649,6 +649,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_TBNAME TAOS_DEF_ERROR_CODE(0, 0x264C)
#define TSDB_CODE_PAR_INVALID_FUNCTION_NAME TAOS_DEF_ERROR_CODE(0, 0x264D)
#define TSDB_CODE_PAR_COMMENT_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x264E)
#define TSDB_CODE_PAR_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x264F)

//planner
#define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700)

@@ -1,22 +1,22 @@
[Unit]
Description=Nginx For TDengine Service
After=network-online.target
Wants=network-online.target

[Service]
Type=forking
PIDFile=/usr/local/nginxd/logs/nginx.pid
ExecStart=/usr/local/nginxd/sbin/nginx
ExecStop=/usr/local/nginxd/sbin/nginx -s stop
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target

@@ -1,312 +1,312 @@
########################################################
# #
# TDengine Configuration #
# Any questions, please email support@taosdata.com #
# #
########################################################

# first fully qualified domain name (FQDN) for TDengine system
# firstEp hostname:6030

# local fully qualified domain name (FQDN)
# fqdn hostname

# first port number for the connection (12 continuous UDP/TCP port number are used)
# serverPort 6030

# log file's directory
# logDir /var/log/taos

# data file's directory
# dataDir /var/lib/taos

# temporary file's directory
# tempDir /tmp/

# the arbitrator's fully qualified domain name (FQDN) for TDengine system, for cluster only
# arbitrator arbitrator_hostname:6042

# number of threads per CPU core
# numOfThreadsPerCore 1.0

# number of threads to commit cache data
# numOfCommitThreads 4

# the proportion of total CPU cores available for query processing
# 2.0: the query threads will be set to double of the CPU cores.
# 1.0: all CPU cores are available for query processing [default].
# 0.5: only half of the CPU cores are available for query.
# 0.0: only one core available.
# ratioOfQueryCores 1.0

# the last_row/first/last aggregator will not change the original column name in the result fields
keepColumnName 1

# number of management nodes in the system
# numOfMnodes 1

# enable/disable backuping vnode directory when removing vnode
# vnodeBak 1

# enable/disable installation / usage report
# telemetryReporting 1

# enable/disable load balancing
# balance 1

# role for dnode. 0 - any, 1 - mnode, 2 - dnode
# role 0

# max timer control blocks
# maxTmrCtrl 512

# time interval of system monitor, seconds
# monitorInterval 30

# number of seconds allowed for a dnode to be offline, for cluster only
# offlineThreshold 864000

# RPC re-try timer, millisecond
# rpcTimer 300

# RPC maximum time for ack, seconds.
# rpcMaxTime 600

# time interval of dnode status reporting to mnode, seconds, for cluster only
# statusInterval 1

# time interval of heart beat from shell to dnode, seconds
# shellActivityTimer 3

# minimum sliding window time, milli-second
# minSlidingTime 10

# minimum time window, milli-second
# minIntervalTime 10

# maximum delay before launching a stream computation, milli-second
# maxStreamCompDelay 20000

# maximum delay before launching a stream computation for the first time, milli-second
# maxFirstStreamCompDelay 10000

# retry delay when a stream computation fails, milli-second
# retryStreamCompDelay 10

# the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
# streamCompDelayRatio 0.1

# max number of vgroups per db, 0 means configured automatically
# maxVgroupsPerDb 0

# max number of tables per vnode
# maxTablesPerVnode 1000000

# cache block size (Mbyte)
# cache 16

# number of cache blocks per vnode
# blocks 6

# number of days per DB file
# days 10

# number of days to keep DB file
# keep 3650

# minimum rows of records in file block
# minRows 100

# maximum rows of records in file block
# maxRows 4096

# the number of acknowledgments required for successful data writing
# quorum 1

# enable/disable compression
# comp 2

# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fysnc; 2: write wal, and call fsync
# walLevel 1

# if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away
# fsync 3000

# number of replications, for cluster only
# replica 1

# the compressed rpc message, option:
# -1 (no compression)
# 0 (all message compressed),
# > 0 (rpc message body which larger than this value will be compressed)
# compressMsgSize -1

# query retrieved column data compression option:
# -1 (no compression)
# 0 (all retrieved column data compressed),
# > 0 (any retrieved column size greater than this value all data will be compressed.)
# compressColData -1

# max length of an SQL
# maxSQLLength 65480

# max length of WildCards
# maxWildCardsLength 100

# the maximum number of records allowed for super table time sorting
# maxNumOfOrderedRes 100000

# system time zone
# timezone Asia/Shanghai (CST, +0800)
# system time zone (for windows 10)
# timezone UTC-8

# system locale
# locale en_US.UTF-8

# default system charset
# charset UTF-8

# max number of connections allowed in dnode
# maxShellConns 5000

# max number of connections allowed in client
# maxConnections 5000

# stop writing logs when the disk size of the log folder is less than this value
# minimalLogDirGB 1.0

# stop writing temporary files when the disk size of the tmp folder is less than this value
# minimalTmpDirGB 1.0

# if disk free space is less than this value, taosd service exit directly within startup process
# minimalDataDirGB 2.0

# One mnode is equal to the number of vnode consumed
# mnodeEqualVnodeNum 4

# enbale/disable http service
# http 1

# enable/disable system monitor
# monitor 1

# enable/disable recording the SQL statements via restful interface
# httpEnableRecordSql 0

# number of threads used to process http requests
# httpMaxThreads 2

# maximum number of rows returned by the restful interface
# restfulRowLimit 10240

# database name must be specified in restful interface if the following parameter is set, off by default
# httpDbNameMandatory 1

# http keep alive, default is 30 seconds
# httpKeepAlive 30000

# The following parameter is used to limit the maximum number of lines in log files.
# max number of lines per log filters
# numOfLogLines 10000000

# enable/disable async log
# asyncLog 1

# time of keeping log files, days
# logKeepDays 0


# The following parameters are used for debug purpose only.
# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR
# 131: output warning and error
# 135: output debug, warning and error
# 143: output trace, debug, warning and error to log
# 199: output debug, warning and error to both screen and file
# 207: output trace, debug, warning and error to both screen and file

# debug flag for all log type, take effect when non-zero value
# debugFlag 0

# debug flag for meta management messages
# mDebugFlag 135

# debug flag for dnode messages
# dDebugFlag 135

# debug flag for sync module
# sDebugFlag 135

# debug flag for WAL
# wDebugFlag 135

# debug flag for SDB
# sdbDebugFlag 135

# debug flag for RPC
# rpcDebugFlag 131

# debug flag for TAOS TIMER
# tmrDebugFlag 131

# debug flag for TDengine client
# cDebugFlag 131

# debug flag for JNI
# jniDebugFlag 131

# debug flag for storage
# uDebugFlag 131

# debug flag for http server
# httpDebugFlag 131

# debug flag for monitor
# monDebugFlag 131

# debug flag for query
# qDebugFlag 131

# debug flag for vnode
# vDebugFlag 131

# debug flag for TSDB
# tsdbDebugFlag 131

# debug flag for continue query
# cqDebugFlag 131

# enable/disable recording the SQL in taos client
# enableRecordSql 0

# generate core file when service crash
# enableCoreFile 1

# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30

# enable/disable stream (continuous query)
# stream 1

# in retrieve blocking model, only in 50% query threads will be used in query processing in dnode
# retrieveBlockingModel 0

# the maximum allowed query buffer size in MB during query processing for each data node
# -1 no limit (default)
# 0 no query allowed, queries are disabled
# queryBufferSize -1

# percent of redundant data in tsdb meta will compact meta data,0 means donot compact
# tsdbMetaCompactRatio 0

# default string type used for storing JSON String, options can be binary/nchar, default is nchar
# defaultJSONStrType nchar

# force TCP transmission
# rpcForceTcp 0

# unit MB. Flush vnode wal file if walSize > walFlushSize and walSize > cache*0.5*blocks
# walFlushSize 1024

# unit Hour. Latency of data migration
# keepTimeOffset 0

@@ -1,21 +1,21 @@
[Unit]
Description=TDengine server service
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
ExecStart=/usr/bin/taosd
ExecStartPre=/usr/local/taos/bin/startPre.sh
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target

@@ -1,20 +1,20 @@
[Unit]
Description=TDengine arbitrator service
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
ExecStart=/usr/bin/tarbitrator
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target

@@ -1,252 +1,252 @@
#!/bin/bash
#
# This file is used to install database on linux systems. The operating system
# is required to use systemd to manage services at boot

set -e
#set -x

verMode=edge
pagMode=full

iplist=""
serverFqdn=""

# -----------------------Variables definition---------------------
script_dir="../release"
# Dynamic directory
data_dir="/var/lib/taos"
log_dir="/var/log/taos"

data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"

cfg_install_dir="/etc/taos"

bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"

#install main path
install_main_dir="/usr/local/taos"

# old bin dir
sbin_dir="/usr/local/taos/bin"

temp_version=""
fin_result=""

service_config_dir="/etc/systemd/system"
nginx_port=6060
nginx_dir="/usr/local/nginxd"

# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'

csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi

# ============================= get input parameters =================================================

# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...]

# set parameters by default value
interactiveFqdn=yes # [yes | no]
verType=server # [server | client]
initType=systemd # [systemd | service | ...]

while getopts "hv:d:" arg
do
case $arg in
d)
#echo "interactiveFqdn=$OPTARG"
script_dir=$( echo $OPTARG )
;;
h)
echo "Usage: `basename $0` -d scripy_path"
exit 0
;;
?) #unknow option
echo "unkonw argument"
exit 1
;;
esac
done

#echo "verType=${verType} interactiveFqdn=${interactiveFqdn}"

function kill_process() {
pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}

function check_file() {
#check file whether exists
if [ ! -e $1/$2 ];then
echo -e "$1/$2 \033[31mnot exists\033[0m!quit"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
}

function get_package_name() {
var=$1
if [[ $1 =~ 'aarch' ]];then
echo ${var::-21}
else
echo ${var::-17}
fi
}

function check_link() {
#check Link whether exists or broken
if [ -L $1 ] ; then
if [ ! -e $1 ] ; then
echo -e "$1 \033[31Broken link\033[0m"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
else
echo -e "$1 \033[31mnot exists\033[0m!quit"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
}

function check_main_path() {
#check install main dir and all sub dir
main_dir=("" "cfg" "bin" "connector" "driver" "examples" "include" "init.d")
for i in "${main_dir[@]}";do
check_file ${install_main_dir} $i
done
if [ "$verMode" == "cluster" ]; then
nginx_main_dir=("admin" "conf" "html" "sbin" "logs")
for i in "${nginx_main_dir[@]}";do
check_file ${nginx_dir} $i
done
fi
echo -e "Check main path:\033[32mOK\033[0m!"
}

function check_bin_path() {
# check install bin dir and all sub dir
bin_dir=("taos" "taosd" "taosadapter" "taosdemo" "remove.sh" "tarbitrator" "set_core.sh")
for i in "${bin_dir[@]}";do
check_file ${sbin_dir} $i
done
lbin_dir=("taos" "taosd" "taosadapter" "taosdemo" "rmtaos" "tarbitrator" "set_core")
for i in "${lbin_dir[@]}";do
check_link ${bin_link_dir}/$i
done
if [ "$verMode" == "cluster" ]; then
check_file ${nginx_dir}/sbin nginx
fi
echo -e "Check bin path:\033[32mOK\033[0m!"
}

function check_lib_path() {
# check all links
check_link ${lib_link_dir}/libtaos.so
check_link ${lib_link_dir}/libtaos.so.1

if [[ -d ${lib64_link_dir} ]]; then
check_link ${lib64_link_dir}/libtaos.so
check_link ${lib64_link_dir}/libtaos.so.1
fi
echo -e "Check lib path:\033[32mOK\033[0m!"
}

function check_header_path() {
# check all header
header_dir=("taos.h" "taosdef.h" "taoserror.h")
for i in "${header_dir[@]}";do
check_link ${inc_link_dir}/$i
done
echo -e "Check bin path:\033[32mOK\033[0m!"
}

function check_taosadapter_config_dir() {
# check all config
check_file ${cfg_install_dir} taosadapter.toml
check_file ${cfg_install_dir} taosadapter.service
check_file ${install_main_dir}/cfg taosadapter.toml.org
echo -e "Check conf path:\033[32mOK\033[0m!"
}

function check_config_dir() {
# check all config
check_file ${cfg_install_dir} taos.cfg
check_file ${install_main_dir}/cfg taos.cfg.org
echo -e "Check conf path:\033[32mOK\033[0m!"
}

function check_log_path() {
# check log path
check_file ${log_dir}
echo -e "Check log path:\033[32mOK\033[0m!"
}

function check_data_path() {
# check data path
check_file ${data_dir}
echo -e "Check data path:\033[32mOK\033[0m!"
}

function install_TDengine() {
cd ${script_dir}
tar zxf $1
temp_version=$(get_package_name $1)
cd $(get_package_name $1)
echo -e "\033[32muninstall TDengine && install TDengine...\033[0m"
rmtaos >/dev/null 2>&1 || echo 'taosd not installed' && echo -e '\n\n' |./install.sh >/dev/null 2>&1
echo -e "\033[32mTDengine has been installed!\033[0m"
echo -e "\033[32mTDengine is starting...\033[0m"
kill_process taos && systemctl start taosd && sleep 10
}

function test_TDengine() {
check_main_path
check_bin_path
check_lib_path
check_header_path
check_config_dir
check_taosadapter_config_dir
check_log_path
check_data_path
result=`taos -s 'create database test ;create table test.tt(ts timestamp ,i int);insert into test.tt values(now,11);select * from test.tt' 2>&1 ||:`
if [[ $result =~ "Unable to establish" ]];then
echo -e "\033[31mTDengine connect failed\033[0m"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
echo -e "Check TDengine connect:\033[32mOK\033[0m!"
fin_result=$fin_result"\033[32m$temp_version\033[0m test OK!\n"
}
# ## ==============================Main program starts from here============================
TD_package_name=`ls ${script_dir}/*server*gz |awk -F '/' '{print $NF}' `
temp=`pwd`
for i in $TD_package_name;do
if [[ $i =~ 'enterprise' ]];then
verMode="cluster"
else
verMode=""
fi
cd $temp
install_TDengine $i
test_TDengine
done
echo "============================================================"
echo -e $fin_result

@@ -1,13 +1,13 @@
Package: tdengine
Version: 1.0.0
Section: utils
Priority: optional
#Essential: no
#Depends: no
#Suggests: no
Architecture: amd64
Installed-Size: 66666
Maintainer: support@taosdata.com
Provides: taosdata
Homepage: http://taosdata.com
Description: Big Data Platform Designed and Optimized for IoT.

@@ -1,13 +1,13 @@
#!/bin/bash
#set -x
#path=`pwd`
insmetaPath="/usr/local/taos/script"

csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi

${csudo}chmod -R 744 ${insmetaPath}
cd ${insmetaPath}
${csudo}./post.sh

@@ -1,2 +1,2 @@
#!/bin/bash

@@ -1,40 +1,40 @@
#!/bin/bash

csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi

# Stop the service if running
if pidof taosd &> /dev/null; then
if pidof systemd &> /dev/null; then
${csudo}systemctl stop taosd || :
elif $(which service &> /dev/null); then
${csudo}service taosd stop || :
else
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
fi
echo "Stop taosd service success!"
sleep 1
fi

# if taos.cfg already softlink, remove it
cfg_install_dir="/etc/taos"
install_main_dir="/usr/local/taos"
if [ -f "${install_main_dir}/taos.cfg" ]; then
${csudo}rm -f ${install_main_dir}/cfg/taos.cfg || :
fi

if [ -f "${install_main_dir}/taosadapter.toml" ]; then
${csudo}rm -f ${install_main_dir}/cfg/taosadapter.toml || :
fi

if [ -f "${install_main_dir}/taosadapter.service" ]; then
${csudo}rm -f ${install_main_dir}/cfg/taosadapter.service || :
fi

# there can not libtaos.so*, otherwise ln -s error
${csudo}rm -f ${install_main_dir}/driver/libtaos* || :

@@ -1,42 +1,42 @@
#!/bin/bash

insmetaPath="/usr/local/taos/script"

csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi

${csudo}chmod -R 744 ${insmetaPath} || :
#cd ${insmetaPath}
#${csudo}./preun.sh
if [ -f ${insmetaPath}/preun.sh ]; then
cd ${insmetaPath}
${csudo}./preun.sh
else
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
inc_link_dir="/usr/include"

data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
cfg_link_dir="/usr/local/taos/cfg"

# Remove all links
${csudo}rm -f ${bin_link_dir}/taos || :
${csudo}rm -f ${bin_link_dir}/taosd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${cfg_link_dir}/* || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || :

${csudo}rm -f ${log_link_dir} || :
${csudo}rm -f ${data_link_dir} || :

pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
fi

@ -1,147 +1,147 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Generate deb package for ubuntu
|
||||
set -e
|
||||
# set -x
|
||||
|
||||
#curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
output_dir=$2
|
||||
tdengine_ver=$3
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
pkg_dir="${top_dir}/debworkroom"
|
||||
|
||||
#echo "curr_dir: ${curr_dir}"
|
||||
#echo "top_dir: ${top_dir}"
|
||||
#echo "script_dir: ${script_dir}"
|
||||
echo "compile_dir: ${compile_dir}"
|
||||
echo "pkg_dir: ${pkg_dir}"
|
||||
|
||||
if [ -d ${pkg_dir} ]; then
|
||||
rm -rf ${pkg_dir}
|
||||
fi
|
||||
mkdir -p ${pkg_dir}
|
||||
cd ${pkg_dir}
|
||||
|
||||
libfile="libtaos.so.${tdengine_ver}"
|
||||
|
||||
# create install dir
|
||||
install_home_path="/usr/local/taos"
|
||||
mkdir -p ${pkg_dir}${install_home_path}
|
||||
mkdir -p ${pkg_dir}${install_home_path}/bin
|
||||
mkdir -p ${pkg_dir}${install_home_path}/cfg
|
||||
#mkdir -p ${pkg_dir}${install_home_path}/connector
|
||||
mkdir -p ${pkg_dir}${install_home_path}/driver
|
||||
mkdir -p ${pkg_dir}${install_home_path}/examples
|
||||
mkdir -p ${pkg_dir}${install_home_path}/include
|
||||
#mkdir -p ${pkg_dir}${install_home_path}/init.d
|
||||
mkdir -p ${pkg_dir}${install_home_path}/script
|
||||
|
||||
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
|
||||
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
|
||||
cp ${compile_dir}/test/cfg/taosadapter.toml ${pkg_dir}${install_home_path}/cfg || :
|
||||
fi
|
||||
if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
|
||||
cp ${compile_dir}/test/cfg/taosadapter.service ${pkg_dir}${install_home_path}/cfg || :
|
||||
fi
|
||||
|
||||
#cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
|
||||
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
|
||||
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/startPre.sh ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin

cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
#cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin

if [ -f "${compile_dir}/build/bin/taosadapter" ]; then
cp ${compile_dir}/build/bin/taosadapter ${pkg_dir}${install_home_path}/bin ||:
fi

cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/client/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taosdef.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/common/taosdef.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/util/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/examples/* ${pkg_dir}${install_home_path}/examples
#cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
#cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
#cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
#cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||:

install_user_local_path="/usr/local"

if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/
if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then
cp ${compile_dir}/build/bin/jemalloc.sh ${pkg_dir}${install_user_local_path}/bin/
fi
if [ -f ${compile_dir}/build/bin/jeprof ]; then
cp ${compile_dir}/build/bin/jeprof ${pkg_dir}${install_user_local_path}/bin/
fi
if [ -f ${compile_dir}/build/include/jemalloc/jemalloc.h ]; then
cp ${compile_dir}/build/include/jemalloc/jemalloc.h ${pkg_dir}${install_user_local_path}/include/jemalloc/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.so.2 ]; then
cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/
ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/
fi
if [ -f ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ]; then
cp ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ${pkg_dir}${install_user_local_path}/share/doc/jemalloc/
fi
if [ -f ${compile_dir}/build/share/man/man3/jemalloc.3 ]; then
cp ${compile_dir}/build/share/man/man3/jemalloc.3 ${pkg_dir}${install_user_local_path}/share/man/man3/
fi
fi

cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/
chmod 755 ${pkg_dir}/DEBIAN/*

# modify the version field of DEBIAN/control
debver="Version: "$tdengine_ver
sed -i "2c$debver" ${pkg_dir}/DEBIAN/control

# get taos version, then set deb name
if [ "$verMode" == "cluster" ]; then
debname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
debname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType}
else
echo "unknown verMode, neither cluster nor edge"
exit 1
fi

if [ "$verType" == "beta" ]; then
debname="TDengine-server-"${tdengine_ver}-${verType}-${osType}-${cpuType}".deb"
elif [ "$verType" == "stable" ]; then
debname=${debname}".deb"
else
echo "unknown verType, neither stable nor beta"
exit 1
fi

# make deb package
dpkg -b ${pkg_dir} $debname
echo "make deb package success!"

cp ${pkg_dir}/*.deb ${output_dir}

# clean temp dir
rm -rf ${pkg_dir}
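# Editorial note, not part of the original script: makedeb.sh takes the positional
# arguments compile_dir, output_dir, version, cpuType, osType, verMode and verType,
# so an illustrative invocation (paths and values are examples only) would be:
#   ./makedeb.sh /path/to/TDengine/debug /path/to/release 3.0.0.0 x64 Linux edge stable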
@@ -1,95 +1,95 @@
#!/bin/bash
#
# Modified from original source: Elastic Search
# https://github.com/elasticsearch/elasticsearch
# Thank you to the Elastic Search authors
#
# chkconfig: 2345 99 01
#
### BEGIN INIT INFO
# Provides:          TDengine
# Required-Start:    $local_fs $network $syslog
# Required-Stop:     $local_fs $network $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Starts TDengine taosd
# Description:       Starts TDengine taosd, a time-series database engine
### END INIT INFO

set -e

PATH="/bin:/usr/bin:/sbin:/usr/sbin"
NAME="TDengine"
USER="root"
GROUP="root"
DAEMON="/usr/local/taos/bin/taosd"
DAEMON_OPTS=""

HTTPD_NAME="taosadapter"
DAEMON_HTTPD_NAME=$HTTPD_NAME
DAEMON_HTTPD="/usr/local/taos/bin/$HTTPD_NAME"

PID_FILE="/var/run/$NAME.pid"
APPARGS=""

# Maximum number of open files
MAX_OPEN_FILES=65535

. /lib/lsb/init-functions

case "$1" in
start)

log_action_begin_msg "Starting TDengine..."
$DAEMON_HTTPD &
if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then

touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE"

if [ -n "$MAX_OPEN_FILES" ]; then
ulimit -n $MAX_OPEN_FILES
fi

start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS

log_end_msg $?
fi
;;

stop)
log_action_begin_msg "Stopping TDengine..."
pkill -9 $DAEMON_HTTPD_NAME
set +e
if [ -f "$PID_FILE" ]; then
start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null
if [ $? -eq 1 ]; then
log_action_cont_msg "TSD is not running but pid file exists, cleaning up"
elif [ $? -eq 3 ]; then
PID="`cat $PID_FILE`"
log_failure_msg "Failed to stop TDengine (pid $PID)"
exit 1
fi
rm -f "$PID_FILE"
else
log_action_cont_msg "TDengine was not running"
fi
log_action_end_msg 0
set -e
;;

restart|force-reload)
if [ -f "$PID_FILE" ]; then
$0 stop
sleep 1
fi
$0 start
;;
status)
status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME"
;;
*)
exit 1
;;
esac

exit 0
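# Editorial note, not part of the original script: once this file is installed as an
# init script (the /etc/init.d/taosd name below is an assumption, not taken from this
# commit), it is driven through the usual SysV verbs, for example:
#   /etc/init.d/taosd start    # also launches taosadapter before taosd
#   /etc/init.d/taosd status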
@@ -1,88 +1,88 @@
#!/bin/bash
#
# Modified from original source: Elastic Search
# https://github.com/elasticsearch/elasticsearch
# Thank you to the Elastic Search authors
#
# chkconfig: 2345 99 01
#
### BEGIN INIT INFO
# Provides:          taoscluster
# Required-Start:    $local_fs $network $syslog
# Required-Stop:     $local_fs $network $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Starts taoscluster tarbitrator
# Description:       Starts taoscluster tarbitrator, an arbitrator
### END INIT INFO

set -e

PATH="/bin:/usr/bin:/sbin:/usr/sbin"
NAME="taoscluster"
USER="root"
GROUP="root"
DAEMON="/usr/local/taos/bin/tarbitrator"
DAEMON_OPTS=""
PID_FILE="/var/run/$NAME.pid"
APPARGS=""

# Maximum number of open files
MAX_OPEN_FILES=65535

. /lib/lsb/init-functions

case "$1" in
start)

log_action_begin_msg "Starting tarbitrator..."
if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then

touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE"

if [ -n "$MAX_OPEN_FILES" ]; then
ulimit -n $MAX_OPEN_FILES
fi

start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS

log_end_msg $?
fi
;;

stop)
log_action_begin_msg "Stopping tarbitrator..."
set +e
if [ -f "$PID_FILE" ]; then
start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null
if [ $? -eq 1 ]; then
log_action_cont_msg "TSD is not running but pid file exists, cleaning up"
elif [ $? -eq 3 ]; then
PID="`cat $PID_FILE`"
log_failure_msg "Failed to stop tarbitrator (pid $PID)"
exit 1
fi
rm -f "$PID_FILE"
else
log_action_cont_msg "tarbitrator was not running"
fi
log_action_end_msg 0
set -e
;;

restart|force-reload)
if [ -f "$PID_FILE" ]; then
$0 stop
sleep 1
fi
$0 start
;;
status)
status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME"
;;
*)
exit 1
;;
esac

exit 0
@@ -1,32 +1,32 @@
FROM ubuntu:18.04

WORKDIR /root

ARG pkgFile
ARG dirName
ARG cpuType
RUN echo ${pkgFile} && echo ${dirName}

COPY ${pkgFile} /root/
RUN tar -zxf ${pkgFile}
WORKDIR /root/
RUN cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root
RUN rm /root/${pkgFile}
RUN rm -rf /root/${dirName}

ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get clean && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \
    LC_CTYPE=en_US.UTF-8 \
    LANG=en_US.UTF-8 \
    LC_ALL=en_US.UTF-8

COPY ./bin/* /usr/bin/

ENV TINI_VERSION v0.19.0
RUN bash -c 'echo -e "Downloading tini-${cpuType} ..."'
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini
RUN chmod +x /tini
ENTRYPOINT ["/tini", "--", "/usr/bin/entrypoint.sh"]
CMD ["taosd"]
VOLUME [ "/var/lib/taos", "/var/log/taos", "/corefile" ]
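# Editorial note, not part of the original Dockerfile: dockerbuild.sh further below
# invokes this file with the pkgFile, dirName and cpuType build arguments, roughly like
# the following (file names and tag are examples only):
#   docker build --build-arg pkgFile=TDengine-server-3.0.0.0-Linux-amd64.tar.gz \
#     --build-arg dirName=TDengine-server-3.0.0.0 --build-arg cpuType=amd64 \
#     -t tdengine/tdengine:3.0.0.0 .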
File diff suppressed because it is too large
@@ -1,83 +1,83 @@
#!/bin/sh
set -e
# for TZ awareness
if [ "$TZ" != "" ]; then
ln -sf /usr/share/zoneinfo/$TZ /etc/localtime
echo $TZ >/etc/timezone
fi

# option to disable taosadapter, default is no
DISABLE_ADAPTER=${TAOS_DISABLE_ADAPTER:-0}
unset TAOS_DISABLE_ADAPTER

# to get mnodeEpSet from data dir
DATA_DIR=${TAOS_DATA_DIR:-/var/lib/taos}

# append env to custom taos.cfg
CFG_DIR=/tmp/taos
CFG_FILE=$CFG_DIR/taos.cfg

mkdir -p $CFG_DIR >/dev/null 2>&1

[ -f /etc/taos/taos.cfg ] && cat /etc/taos/taos.cfg | grep -E -v "^#|^\s*$" >$CFG_FILE
env-to-cfg >>$CFG_FILE

FQDN=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep fqdn | tail -n1 | sed -E 's/.*fqdn\s+//')

# ensure the fqdn is resolved as localhost
grep "$FQDN" /etc/hosts >/dev/null || echo "127.0.0.1 $FQDN" >>/etc/hosts

# parse first ep host and port
FIRST_EP_HOST=${TAOS_FIRST_EP%:*}
FIRST_EP_PORT=${TAOS_FIRST_EP#*:}

# in case of custom server port
SERVER_PORT=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep serverPort | tail -n1 | sed -E 's/.*serverPort\s+//')
SERVER_PORT=${SERVER_PORT:-6030}

# for other binaries like interpreters
if echo $1 | grep -E "taosd$" - >/dev/null; then
true # will run taosd
else
cp -f $CFG_FILE /etc/taos/taos.cfg || true
$@
exit $?
fi

set +e
ulimit -c unlimited
# set core file pattern; this may fail in an unprivileged container
sysctl -w kernel.core_pattern=/corefile/core-$FQDN-%e-%p >/dev/null 2>&1
set -e

if [ "$DISABLE_ADAPTER" = "0" ]; then
which taosadapter >/dev/null && taosadapter &
# wait for 6041 port ready
for _ in $(seq 1 20); do
nc -z localhost 6041 && break
sleep 0.5
done
fi

# if the mnode ep set already exists, or this host is the first ep, start directly.
if [ -f "$DATA_DIR/dnode/mnodeEpSet.json" ] ||
   [ "$TAOS_FQDN" = "$FIRST_EP_HOST" ]; then
$@ -c $CFG_DIR
# other nodes first wait for the first ep to become ready.
else
if [ "$TAOS_FIRST_EP" = "" ]; then
echo "run TDengine with single node."
$@ -c $CFG_DIR
exit $?
fi
while true; do
es=0
taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -n startup >/dev/null || es=$?
if [ "$es" -eq 0 ]; then
taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -s "create dnode \"$FQDN:$SERVER_PORT\";"
break
fi
sleep 1s
done
$@ -c $CFG_DIR
fi
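# Editorial note, not part of the original script: an illustrative single-node container
# start (image tag and FQDN are examples only) that exercises the branches above:
#   docker run -d -e TAOS_FQDN=td-1 -e TAOS_FIRST_EP=td-1:6030 tdengine/tdengine:3.0.0.0
# When TAOS_FQDN matches the first-ep host, taosd starts immediately; otherwise the loop
# above waits for the first ep and issues "create dnode" before starting.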
@@ -1,13 +1,13 @@
#!/bin/sh
set -e
self=$0

snake_to_camel_case() {
echo $1 | awk -F _ '{printf "%s", $1; for(i=2; i<=NF; i++) printf "%s", toupper(substr($i,1,1)) substr($i,2); print"";}'
}

if echo $1 | grep -E "^$" - >/dev/null; then
export |grep -E 'TAOS_.*' -o| sed 's/TAOS_//' |tr A-Z a-z | awk -F"=" '{print "name=$(""'$self' " $1"); echo $name "$2}' |sh
else
snake_to_camel_case $1
fi
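# Editorial note, not part of the original script: with, for example, TAOS_FIRST_EP=td-1:6030
# exported, running this script with no argument is expected to emit a line such as
#   firstEp td-1:6030
# which entrypoint.sh appends to the generated taos.cfg (the variable name and value here
# are illustrative only).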
@@ -1,77 +1,77 @@
version: "3"
|
||||
|
||||
networks:
|
||||
inter:
|
||||
api:
|
||||
|
||||
services:
|
||||
arbitrator:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
command: tarbitrator
|
||||
networks:
|
||||
- inter
|
||||
td-1:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
networks:
|
||||
- inter
|
||||
environment:
|
||||
TAOS_FQDN: "td-1"
|
||||
TAOS_FIRST_EP: "td-1"
|
||||
TAOS_NUM_OF_MNODES: "2"
|
||||
TAOS_REPLICA: "2"
|
||||
TAOS_ARBITRATOR: arbitrator:6042
|
||||
volumes:
|
||||
- taosdata-td1:/var/lib/taos/
|
||||
- taoslog-td1:/var/log/taos/
|
||||
td-2:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
networks:
|
||||
- inter
|
||||
environment:
|
||||
TAOS_FQDN: "td-2"
|
||||
TAOS_FIRST_EP: "td-1"
|
||||
TAOS_NUM_OF_MNODES: "2"
|
||||
TAOS_REPLICA: "2"
|
||||
TAOS_ARBITRATOR: arbitrator:6042
|
||||
volumes:
|
||||
- taosdata-td2:/var/lib/taos/
|
||||
- taoslog-td2:/var/log/taos/
|
||||
adapter:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
command: taosadapter
|
||||
networks:
|
||||
- inter
|
||||
environment:
|
||||
TAOS_FIRST_EP: "td-1"
|
||||
TOAS_SECOND_EP: "td-2"
|
||||
deploy:
|
||||
replicas: 4
|
||||
update_config:
|
||||
parallelism: 4
|
||||
nginx:
|
||||
image: nginx
|
||||
depends_on:
|
||||
- adapter
|
||||
networks:
|
||||
- inter
|
||||
- api
|
||||
ports:
|
||||
- 6041:6041
|
||||
- 6044:6044/udp
|
||||
command: [
|
||||
"sh",
|
||||
"-c",
|
||||
"while true;
|
||||
do curl -s http://adapter:6041/-/ping >/dev/null && break;
|
||||
done;
|
||||
printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
|
||||
> /etc/nginx/conf.d/rest.conf;
|
||||
printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
|
||||
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
|
||||
nginx -g 'daemon off;'",
|
||||
]
|
||||
volumes:
|
||||
taosdata-td1:
|
||||
taoslog-td1:
|
||||
taosdata-td2:
|
||||
taoslog-td2:
|
||||
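# Editorial note, not part of the original compose file: VERSION must be set in the
# environment, and the deploy.replicas setting is only honored in swarm mode, so an
# illustrative deployment (stack name is an example only) is:
#   VERSION=3.0.0.0 docker stack deploy -c docker-compose.yml tdengine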
version: "3"
|
||||
|
||||
networks:
|
||||
inter:
|
||||
api:
|
||||
|
||||
services:
|
||||
arbitrator:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
command: tarbitrator
|
||||
networks:
|
||||
- inter
|
||||
td-1:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
networks:
|
||||
- inter
|
||||
environment:
|
||||
TAOS_FQDN: "td-1"
|
||||
TAOS_FIRST_EP: "td-1"
|
||||
TAOS_NUM_OF_MNODES: "2"
|
||||
TAOS_REPLICA: "2"
|
||||
TAOS_ARBITRATOR: arbitrator:6042
|
||||
volumes:
|
||||
- taosdata-td1:/var/lib/taos/
|
||||
- taoslog-td1:/var/log/taos/
|
||||
td-2:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
networks:
|
||||
- inter
|
||||
environment:
|
||||
TAOS_FQDN: "td-2"
|
||||
TAOS_FIRST_EP: "td-1"
|
||||
TAOS_NUM_OF_MNODES: "2"
|
||||
TAOS_REPLICA: "2"
|
||||
TAOS_ARBITRATOR: arbitrator:6042
|
||||
volumes:
|
||||
- taosdata-td2:/var/lib/taos/
|
||||
- taoslog-td2:/var/log/taos/
|
||||
adapter:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
command: taosadapter
|
||||
networks:
|
||||
- inter
|
||||
environment:
|
||||
TAOS_FIRST_EP: "td-1"
|
||||
TOAS_SECOND_EP: "td-2"
|
||||
deploy:
|
||||
replicas: 4
|
||||
update_config:
|
||||
parallelism: 4
|
||||
nginx:
|
||||
image: nginx
|
||||
depends_on:
|
||||
- adapter
|
||||
networks:
|
||||
- inter
|
||||
- api
|
||||
ports:
|
||||
- 6041:6041
|
||||
- 6044:6044/udp
|
||||
command: [
|
||||
"sh",
|
||||
"-c",
|
||||
"while true;
|
||||
do curl -s http://adapter:6041/-/ping >/dev/null && break;
|
||||
done;
|
||||
printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
|
||||
> /etc/nginx/conf.d/rest.conf;
|
||||
printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
|
||||
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
|
||||
nginx -g 'daemon off;'",
|
||||
]
|
||||
volumes:
|
||||
taosdata-td1:
|
||||
taoslog-td1:
|
||||
taosdata-td2:
|
||||
taoslog-td2:
|
||||
|
|
|
@@ -1,82 +1,82 @@
#!/bin/bash
set -e
#set -x

# dockerbuild.sh
#             -n [version number]
#             -p [xxxx]
#             -V [stable | beta]

# set parameters by default value
version=""
passWord=""
verType=""

while getopts "hn:p:V:" arg
do
case $arg in
n)
#echo "version=$OPTARG"
version=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
V)
#echo "verType=$OPTARG"
verType=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -n [version number] "
echo "                      -p [password for docker hub] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done

echo "version=${version}"

#docker manifest rm tdengine/tdengine
#docker manifest rm tdengine/tdengine:${version}
if [ "$verType" == "beta" ]; then
docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
docker manifest rm tdengine/tdengine-beta:${version}
docker manifest rm tdengine/tdengine-beta:latest
docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
docker manifest inspect tdengine/tdengine:latest
docker manifest inspect tdengine/tdengine:${version}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker manifest push tdengine/tdengine-beta:${version}
docker manifest push tdengine/tdengine-beta:latest
elif [ "$verType" == "stable" ]; then
docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
docker manifest rm tdengine/tdengine:latest
docker manifest rm tdengine/tdengine:${version}
docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
docker manifest inspect tdengine/tdengine:latest
docker manifest inspect tdengine/tdengine:${version}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker manifest push tdengine/tdengine:${version}
docker manifest push tdengine/tdengine:latest
else
echo "unknown verType, neither stable nor beta"
exit 1
fi

# docker manifest create -a tdengine/${dockername}:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
# docker manifest create -a tdengine/${dockername}:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest

# docker login -u tdengine -p ${passWord} #replace the docker registry username and password

# docker manifest push tdengine/tdengine:latest

# # how to set the latest version ???
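# Editorial note, not part of the original script: an illustrative invocation (the script
# file name and the password are placeholders) that assembles and pushes the stable
# multi-arch manifest list defined above:
#   ./dockerManifest.sh -n 3.0.0.0 -V stable -p <docker-hub-password>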
@@ -1,174 +1,174 @@
#!/bin/bash
#

set -e
#set -x

# dockerbuild.sh
#             -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
#             -n [version number]
#             -p [password for docker hub]
#             -V [stable | beta]
#             -f [pkg file]

# set parameters by default value
cpuType=""
cpuTypeAlias=""
version=""
passWord=""
pkgFile=""
verType="stable"
dockerLatest="n"

while getopts "hc:n:p:f:V:a:b:" arg
do
case $arg in
c)
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
n)
#echo "version=$OPTARG"
version=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
f)
#echo "pkgFile=$OPTARG"
pkgFile=$(echo $OPTARG)
;;
b)
#echo "branchName=$OPTARG"
branchName=$(echo $OPTARG)
;;
V)
#echo "verType=$OPTARG"
verType=$(echo $OPTARG)
;;
a)
#echo "dockerLatest=$OPTARG"
dockerLatest=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
echo "                      -n [version number] "
echo "                      -p [password for docker hub] "
echo "                      -V [stable | beta] "
echo "                      -f [pkg file] "
echo "                      -a [y | n ] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done


# Check_version()
# {
# }


if [ "$verType" == "beta" ]; then
dockername=${cpuType}-${verType}
dirName=${pkgFile%-beta*}
elif [ "$verType" == "stable" ]; then
dockername=${cpuType}
dirName=${pkgFile%-Linux*}
else
echo "unknown verType, neither stable nor beta"
exit 1
fi


echo "cpuType=${cpuType} version=${version} pkgFile=${pkgFile} verType=${verType} "
echo "$(pwd)"
echo "====NOTES: ${pkgFile} must be in the same directory as dockerbuild.sh===="

scriptDir=$(dirname $(readlink -f $0))
comunityArchiveDir=/nas/TDengine/v$version/community # community version package directory
communityDir=${scriptDir}/../../../community
DockerfilePath=${communityDir}/packaging/docker/
Dockerfile=${communityDir}/packaging/docker/Dockerfile
cd ${scriptDir}
cp -f ${comunityArchiveDir}/${pkgFile} .

echo "dirName=${dirName}"

if [[ "${cpuType}" == "x64" ]] || [[ "${cpuType}" == "amd64" ]]; then
cpuTypeAlias="amd64"
elif [[ "${cpuType}" == "aarch64" ]]; then
cpuTypeAlias="arm64"
elif [[ "${cpuType}" == "aarch32" ]]; then
cpuTypeAlias="armhf"
else
echo "Unknown cpuType: ${cpuType}"
exit 1
fi

docker build --rm -f "${Dockerfile}" --network=host -t tdengine/tdengine-${dockername}:${version} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName} --build-arg cpuType=${cpuTypeAlias}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker push tdengine/tdengine-${dockername}:${version}

if [ -n "$(docker ps -aq)" ] ;then
echo "delete docker process"
docker stop $(docker ps -aq)
docker rm $(docker ps -aq)
fi

if [ -n "$(pidof taosd)" ] ;then
echo "kill taosd "
kill -9 $(pidof taosd)
fi

if [ -n "$(pidof power)" ] ;then
echo "kill power "
kill -9 $(pidof power)
fi


echo ">>>>>>>>>>>>> check whether tdengine/tdengine-${dockername}:${version} has been published"
docker run -d --name doctest -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine-${dockername}:${version}
sleep 2
curl -u root:taosdata -d 'show variables;' 127.0.0.1:6041/rest/sql > temp1.data
data_version=$( cat temp1.data |jq .data| jq '.[]' |grep "version" -A 2 -B 1 | jq ".[1]")
echo "${data_version}"
if [ "${data_version}" == "\"${version}\"" ] ; then
echo "docker version is right "
else
echo "docker version is wrong "
exit 1
fi
rm -rf temp1.data

# set this version to latest version
if [ ${dockerLatest} == 'y' ] ;then
docker tag tdengine/tdengine-${dockername}:${version} tdengine/tdengine-${dockername}:latest
docker push tdengine/tdengine-${dockername}:latest
echo ">>>>>>>>>>>>> check whether tdengine/tdengine-${dockername}:latest has been published correctly"
docker run -d --name doctestla -p 7030-7049:6030-6049 -p 7030-7049:6030-6049/udp tdengine/tdengine-${dockername}:latest
sleep 2
curl -u root:taosdata -d 'show variables;' 127.0.0.1:7041/rest/sql > temp2.data
version_latest=` cat temp2.data |jq .data| jq '.[]' |grep "version" -A 2 -B 1 | jq ".[1]" `
echo "${version_latest}"
if [ "${version_latest}" == "\"${version}\"" ] ; then
echo "docker version is right "
else
echo "docker version is wrong "
exit 1
fi
fi
rm -rf temp2.data

if [ -n "$(docker ps -aq)" ] ;then
echo "delete docker process"
docker stop $(docker ps -aq)
docker rm $(docker ps -aq)
fi

cd ${scriptDir}
rm -f ${pkgFile}
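# Editorial note, not part of the original script: an illustrative invocation (all values
# are placeholders) matching the options parsed above:
#   ./dockerbuild.sh -c amd64 -n 3.0.0.0 -f TDengine-server-3.0.0.0-Linux-amd64.tar.gz \
#     -V stable -a n -p <docker-hub-password>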
@@ -1,56 +1,56 @@
#!/bin/bash
#

set -e
#set -x

# dockerbuild.sh
#             -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
#             -n [version number]
#             -p [password for docker hub]

# set parameters by default value
cpuType=aarch64
verNumber=""
passWord=""

while getopts "hc:n:p:f:" arg
do
case $arg in
c)
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
echo "                      -n [version number] "
echo "                      -p [password for docker hub] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done

pkgFile=TDengine-server-${verNumber}-Linux-${cpuType}.tar.gz

echo "cpuType=${cpuType} verNumber=${verNumber} pkgFile=${pkgFile} "

scriptDir=`pwd`
pkgDir=$scriptDir/../../release/

cp -f ${pkgDir}/${pkgFile} .

./dockerbuild.sh -c ${cpuType} -f ${pkgFile} -n ${verNumber} -p ${passWord}

rm -f ${pkgFile}
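# Editorial note, not part of the original script: this wrapper only stages the release
# tarball and delegates to dockerbuild.sh, so an illustrative call (the wrapper's file
# name and all values are placeholders) is:
#   ./dockerbuildi.sh -c aarch64 -n 3.0.0.0 -p <docker-hub-password>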
@@ -1,62 +1,62 @@
@echo off

set internal_dir=%~dp0\..\..\
set community_dir=%~dp0\..
cd %community_dir%
git checkout -- .
cd %community_dir%\packaging

:: %1 name %2 version
if !%1==! GOTO USAGE
if !%2==! GOTO USAGE
if %1 == taos GOTO TAOS
if %1 == power GOTO POWER
if %1 == tq GOTO TQ
if %1 == pro GOTO PRO
if %1 == kh GOTO KH
if %1 == jh GOTO JH
GOTO USAGE

:TAOS
goto RELEASE

:POWER
call sed_power.bat %community_dir%
goto RELEASE

:TQ
call sed_tq.bat %community_dir%
goto RELEASE

:PRO
call sed_pro.bat %community_dir%
goto RELEASE

:KH
call sed_kh.bat %community_dir%
goto RELEASE

:JH
call sed_jh.bat %community_dir%
goto RELEASE

:RELEASE
echo release windows-client-64 for %1, version: %2
if not exist %internal_dir%\debug\ver-%2-64bit-%1 (
md %internal_dir%\debug\ver-%2-64bit-%1
) else (
rd /S /Q %internal_dir%\debug\ver-%2-64bit-%1
md %internal_dir%\debug\ver-%2-64bit-%1
)
cd %internal_dir%\debug\ver-%2-64bit-%1
call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64
cmake ../../ -G "NMake Makefiles" -DVERNUMBER=%2 -DCPUTYPE=x64
set CL=/MP4
nmake install
goto EXIT0

:USAGE
echo Usage: release.bat $productName $version
goto EXIT0
:EXIT0
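:: Editorial note, not part of the original script: an illustrative invocation (values
:: are examples only) that builds the 64-bit Windows client for the community product:
::   release.bat taos 3.0.0.0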
@@ -1,94 +1,315 @@
#!/bin/bash
|
||||
#
|
||||
# Generate the tar.gz package for linux os
|
||||
# Generate the deb package for ubuntu, or rpm package for centos, or tar.gz package for other linux os
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# release.sh -v [cluster | edge]
|
||||
# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...]
|
||||
# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
|
||||
# -V [stable | beta]
|
||||
# -l [full | lite]
|
||||
# -s [static | dynamic]
|
||||
# -d [taos | ...]
|
||||
# -n [2.0.0.3]
|
||||
# -m [2.0.0.0]
|
||||
# -H [ false | true]
|
||||
|
||||
# set parameters by default value
|
||||
version="3.0.0.0"
|
||||
verMode=edge # [cluster, edge]
|
||||
verType=stable # [stable, beta]
|
||||
cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
|
||||
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
|
||||
pagMode=full # [full | lite]
|
||||
soMode=dynamic # [static | dynamic]
|
||||
dbName=taos # [taos | ...]
|
||||
allocator=glibc # [glibc | jemalloc]
|
||||
verNumber=""
|
||||
verNumberComp="2.0.0.0"
|
||||
httpdBuild=false
|
||||
|
||||
while getopts "hv:V:c:o:l:s:d:a:n:m:H:" arg; do
|
||||
case $arg in
|
||||
v)
|
||||
#echo "verMode=$OPTARG"
|
||||
verMode=$(echo $OPTARG)
|
||||
;;
|
||||
V)
|
||||
#echo "verType=$OPTARG"
|
||||
verType=$(echo $OPTARG)
|
||||
;;
|
||||
c)
|
||||
#echo "cpuType=$OPTARG"
|
||||
cpuType=$(echo $OPTARG)
|
||||
;;
|
||||
l)
|
||||
#echo "pagMode=$OPTARG"
|
||||
pagMode=$(echo $OPTARG)
|
||||
;;
|
||||
s)
|
||||
#echo "soMode=$OPTARG"
|
||||
soMode=$(echo $OPTARG)
|
||||
;;
|
||||
d)
|
||||
#echo "dbName=$OPTARG"
|
||||
dbName=$(echo $OPTARG)
|
||||
;;
|
||||
a)
|
||||
#echo "allocator=$OPTARG"
|
||||
allocator=$(echo $OPTARG)
|
||||
;;
|
||||
n)
|
||||
#echo "verNumber=$OPTARG"
|
||||
verNumber=$(echo $OPTARG)
|
||||
;;
|
||||
m)
|
||||
#echo "verNumberComp=$OPTARG"
|
||||
verNumberComp=$(echo $OPTARG)
|
||||
;;
|
||||
o)
|
||||
#echo "osType=$OPTARG"
|
||||
osType=$(echo $OPTARG)
|
||||
;;
|
||||
H)
|
||||
#echo "httpdBuild=$OPTARG"
|
||||
httpdBuild=$(echo $OPTARG)
|
||||
;;
|
||||
h)
|
||||
echo "Usage: $(basename $0) -v [cluster | edge] "
|
||||
echo " -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] "
|
||||
echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] "
|
||||
echo " -V [stable | beta] "
|
||||
echo " -l [full | lite] "
|
||||
echo " -a [glibc | jemalloc] "
|
||||
echo " -s [static | dynamic] "
|
||||
echo " -d [taos | ...] "
|
||||
echo " -n [version number] "
|
||||
echo " -m [compatible version number] "
|
||||
echo " -H [false | true] "
|
||||
exit 0
|
||||
;;
|
||||
?) #unknow option
|
||||
echo "unkonw argument"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
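# A minimal invocation sketch for this script; every value below is an example,
# and any omitted option keeps the default assigned at the top of the file:
#   ./release.sh -v edge -c x64 -o Linux -V stable -l full -s dynamic -n 3.0.0.0 -m 2.0.0.0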
|
||||
|
||||
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp} httpdBuild=${httpdBuild}"
|
||||
|
||||
curr_dir=$(pwd)
|
||||
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/..)"
|
||||
if [ "$osType" == "Darwin" ]; then
|
||||
script_dir=$(dirname $0)
|
||||
cd ${script_dir}
|
||||
script_dir="$(pwd)"
|
||||
top_dir=${script_dir}/..
|
||||
else
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/..)"
|
||||
fi
|
||||
|
||||
echo "=======================new version number: ${verNumber}======================================"
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
function is_valid_version() {
|
||||
[ -z $1 ] && return 1 || :
|
||||
|
||||
rx='^([0-9]+\.){3}(\*|[0-9]+)$'
|
||||
if [[ $1 =~ $rx ]]; then
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
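# Illustrative behaviour of is_valid_version against the regex above:
#   is_valid_version 3.0.0.0    # returns 0: four dot-separated numeric fields
#   is_valid_version 3.0.0.*    # returns 0: the last field may be "*"
#   is_valid_version 3.0.0      # returns 1: only three fields, no match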
|
||||
|
||||
function vercomp() {
|
||||
if [[ $1 == $2 ]]; then
|
||||
echo 0
|
||||
exit 0
|
||||
fi
|
||||
|
||||
local IFS=.
|
||||
local i ver1=($1) ver2=($2)
|
||||
|
||||
# fill empty fields in ver1 with zeros
|
||||
for ((i = ${#ver1[@]}; i < ${#ver2[@]}; i++)); do
|
||||
ver1[i]=0
|
||||
done
|
||||
|
||||
for ((i = 0; i < ${#ver1[@]}; i++)); do
|
||||
if [[ -z ${ver2[i]} ]]; then
|
||||
# fill empty fields in ver2 with zeros
|
||||
ver2[i]=0
|
||||
fi
|
||||
if ((10#${ver1[i]} > 10#${ver2[i]})); then
|
||||
echo 1
|
||||
exit 0
|
||||
fi
|
||||
if ((10#${ver1[i]} < 10#${ver2[i]})); then
|
||||
echo 2
|
||||
exit 0
|
||||
fi
|
||||
done
|
||||
echo 0
|
||||
}
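# Illustrative use of vercomp: it echoes 0 for equal versions, 1 when the first
# argument is newer and 2 when it is older (version numbers are examples):
#   [ "$(vercomp 3.0.0.1 3.0.0.0)" == "1" ]   # first version is newer
#   [ "$(vercomp 2.0.0.0 3.0.0.0)" == "2" ]   # first version is older
# The "exit 0" calls only leave the $(...) subshell, not the calling script.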
|
||||
|
||||
# 1. check version information
|
||||
if ( (! is_valid_version $verNumber) || (! is_valid_version $verNumberComp) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]]); then
|
||||
echo "please enter correct version"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "=======================new version number: ${verNumber}, compatible version: ${verNumberComp}======================================"
|
||||
|
||||
build_time=$(date +"%F %R")
|
||||
|
||||
echo "script_dir: ${script_dir}"
|
||||
echo "top_dir: ${top_dir}"
|
||||
# get commit id from git
|
||||
gitinfo=$(git rev-parse --verify HEAD)
|
||||
|
||||
cd ${top_dir}
|
||||
# git checkout -- .
|
||||
# git checkout 3.0
|
||||
# git pull || :
|
||||
if [[ "$verMode" == "cluster" ]]; then
|
||||
enterprise_dir="${top_dir}/../enterprise"
|
||||
cd ${enterprise_dir}
|
||||
gitinfoOfInternal=$(git rev-parse --verify HEAD)
|
||||
else
|
||||
gitinfoOfInternal=NULL
|
||||
fi
|
||||
|
||||
echo "curr_dir: ${curr_dir}"
|
||||
cd "${curr_dir}"
|
||||
|
||||
# 2. cmake executable file
|
||||
compile_dir="${top_dir}/debug"
|
||||
# if [ -d ${compile_dir} ]; then
|
||||
# rm -rf ${compile_dir}
|
||||
# fi
|
||||
|
||||
mkdir -p ${compile_dir}
|
||||
|
||||
cd ${compile_dir}
|
||||
|
||||
echo "compile_dir: ${compile_dir}"
|
||||
|
||||
cmake .. -DBUILD_TOOLS=true
|
||||
make -j32
|
||||
|
||||
release_dir="${top_dir}/release"
|
||||
if [ -d ${release_dir} ]; then
|
||||
rm -rf ${release_dir}
fi
|
||||
if [ -d ${compile_dir} ]; then
|
||||
${csudo}rm -rf ${compile_dir}
|
||||
fi
|
||||
|
||||
mkdir -p ${release_dir}
|
||||
cd ${release_dir}
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}mkdir -p ${compile_dir}
|
||||
else
|
||||
mkdir -p ${compile_dir}
|
||||
fi
|
||||
cd ${compile_dir}
|
||||
|
||||
install_dir="${release_dir}/TDengine-server-${version}"
|
||||
mkdir -p ${install_dir}
|
||||
mkdir -p ${install_dir}/bin
|
||||
mkdir -p ${install_dir}/lib
|
||||
mkdir -p ${install_dir}/inc
|
||||
if [[ "$allocator" == "jemalloc" ]]; then
|
||||
allocator_macro="-DJEMALLOC_ENABLED=true"
|
||||
else
|
||||
allocator_macro=""
|
||||
fi
|
||||
|
||||
install_files="${script_dir}/tools/install.sh"
|
||||
chmod a+x ${script_dir}/tools/install.sh || :
|
||||
cp ${install_files} ${install_dir}
|
||||
if [[ "$dbName" != "taos" ]]; then
|
||||
source ${enterprise_dir}/packaging/oem/sed_$dbName.sh
|
||||
replace_community_$dbName
|
||||
fi
|
||||
|
||||
header_files="${top_dir}/include/client/taos.h ${top_dir}/include/util/taoserror.h"
|
||||
cp ${header_files} ${install_dir}/inc
|
||||
|
||||
bin_files="${compile_dir}/build/bin/taosd ${compile_dir}/build/bin/taos ${compile_dir}/build/bin/create_table ${compile_dir}/build/bin/tmq_sim ${script_dir}/tools/remove.sh ${compile_dir}/build/bin/taosBenchmark ${compile_dir}/build/bin/taosdump"
|
||||
cp -rf ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
|
||||
if [[ "$httpdBuild" == "true" ]]; then
|
||||
BUILD_HTTP=true
|
||||
else
|
||||
BUILD_HTTP=false
|
||||
fi
|
||||
|
||||
cp ${compile_dir}/build/lib/libtaos.so ${install_dir}/lib/
|
||||
cp ${compile_dir}/build/lib/libavro* ${install_dir}/lib/ > /dev/null || echo -e "failed to copy avro libraries"
|
||||
cp -rf ${compile_dir}/build/lib/pkgconfig ${install_dir}/lib/ > /dev/null || echo -e "failed to copy pkgconfig directory"
|
||||
if [[ "$verMode" == "cluster" ]]; then
|
||||
BUILD_HTTP=internal
|
||||
fi
|
||||
|
||||
if [[ "$pagMode" == "full" ]]; then
|
||||
BUILD_TOOLS=true
|
||||
else
|
||||
BUILD_TOOLS=false
|
||||
fi
|
||||
|
||||
#cp ${compile_dir}/source/dnode/mnode/impl/libmnode.so ${install_dir}/lib/
|
||||
#cp ${compile_dir}/source/dnode/qnode/libqnode.so ${install_dir}/lib/
|
||||
#cp ${compile_dir}/source/dnode/snode/libsnode.so ${install_dir}/lib/
|
||||
#cp ${compile_dir}/source/dnode/bnode/libbnode.so ${install_dir}/lib/
|
||||
#cp ${compile_dir}/source/libs/wal/libwal.so ${install_dir}/lib/
|
||||
#cp ${compile_dir}/source/libs/scheduler/libscheduler.so ${install_dir}/lib/
|
||||
#cp ${compile_dir}/source/libs/planner/libplanner.so ${install_dir}/lib/
|
||||
#cp ${compile_dir}/source/libs/parser/libparser.so ${install_dir}/lib/
|
||||
#cp ${compile_dir}/source/libs/qcom/libqcom.so ${install_dir}/lib/
|
||||
#cp ${compile_dir}/source/libs/transport/libtransport.so ${install_dir}/lib/
|
||||
#cp ${compile_dir}/source/libs/function/libfunction.so ${install_dir}/lib/
|
||||
#cp ${compile_dir}/source/common/libcommon.so ${install_dir}/lib/
|
||||
#cp ${compile_dir}/source/os/libos.so ${install_dir}/lib/
|
||||
#cp ${compile_dir}/source/dnode/mnode/sdb/libsdb.so ${install_dir}/lib/
|
||||
#cp ${compile_dir}/source/libs/catalog/libcatalog.so ${install_dir}/lib/
|
||||
# check support cpu type
|
||||
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]]; then
|
||||
if [ "$verMode" != "cluster" ]; then
|
||||
# community-version compile
|
||||
cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
|
||||
else
|
||||
if [[ "$dbName" != "taos" ]]; then
|
||||
replace_enterprise_$dbName
|
||||
fi
|
||||
cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
|
||||
fi
|
||||
else
|
||||
echo "input cpuType=${cpuType} error!!!"
|
||||
exit 1
|
||||
fi
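# For reference, a typical community (edge) build resolves the branch above into
# a cmake call roughly like the following; all concrete values are examples:
#   cmake ../ -DCPUTYPE=x64 -DOSTYPE=Linux -DSOMODE=dynamic -DDBNAME=taos \
#     -DVERTYPE=stable -DVERNUMBER=3.0.0.0 -DVERCOMPATIBLE=2.0.0.0 \
#     -DPAGMODE=full -DBUILD_HTTP=false -DBUILD_TOOLS=true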
|
||||
|
||||
pkg_name=${install_dir}-Linux-x64
|
||||
CORES=$(grep -c ^processor /proc/cpuinfo)
|
||||
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
|
||||
if [[ "$allocator" == "jemalloc" ]]; then
|
||||
# jemalloc need compile first, so disable parallel build
|
||||
make -j ${CORES} && ${csudo}make install
|
||||
else
|
||||
make -j ${CORES} && ${csudo}make install
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
||||
|
||||
# 3. Call the corresponding script for packaging
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
if [[ "$verMode" != "cluster" ]] && [[ "$pagMode" == "full" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
|
||||
ret='0'
|
||||
command -v dpkg >/dev/null 2>&1 || { ret='1'; }
|
||||
if [ "$ret" -eq 0 ]; then
|
||||
echo "====do deb package for the ubuntu system===="
|
||||
output_dir="${top_dir}/debs"
|
||||
if [ -d ${output_dir} ]; then
|
||||
${csudo}rm -rf ${output_dir}
|
||||
fi
|
||||
${csudo}mkdir -p ${output_dir}
|
||||
cd ${script_dir}/deb
|
||||
${csudo}./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
|
||||
|
||||
if [[ "$pagMode" == "full" ]]; then
|
||||
if [ -d ${top_dir}/tools/taos-tools/packaging/deb ]; then
|
||||
cd ${top_dir}/tools/taos-tools/packaging/deb
|
||||
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
|
||||
|
||||
taos_tools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}')
|
||||
${csudo}./make-taos-tools-deb.sh ${top_dir} \
|
||||
${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
|
||||
fi
|
||||
fi
|
||||
else
|
||||
echo "==========dpkg command not exist, so not release deb package!!!"
|
||||
fi
|
||||
ret='0'
|
||||
command -v rpmbuild >/dev/null 2>&1 || { ret='1'; }
|
||||
if [ "$ret" -eq 0 ]; then
|
||||
echo "====do rpm package for the centos system===="
|
||||
output_dir="${top_dir}/rpms"
|
||||
if [ -d ${output_dir} ]; then
|
||||
${csudo}rm -rf ${output_dir}
|
||||
fi
|
||||
${csudo}mkdir -p ${output_dir}
|
||||
cd ${script_dir}/rpm
|
||||
${csudo}./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
|
||||
|
||||
if [[ "$pagMode" == "full" ]]; then
|
||||
if [ -d ${top_dir}/tools/taos-tools/packaging/rpm ]; then
|
||||
cd ${top_dir}/tools/taos-tools/packaging/rpm
|
||||
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
|
||||
|
||||
taos_tools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}' | sed -e 's/-/_/g')
|
||||
${csudo}./make-taos-tools-rpm.sh ${top_dir} \
|
||||
${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
|
||||
fi
|
||||
fi
|
||||
else
|
||||
echo "==========rpmbuild command not exist, so not release rpm package!!!"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "====do tar.gz package for all systems===="
|
||||
cd ${script_dir}/tools
|
||||
|
||||
${csudo}./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} ${dbName}
|
||||
${csudo}./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
|
||||
# ${csudo}./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
|
||||
|
||||
else
|
||||
# only make client for Darwin
|
||||
cd ${script_dir}/tools
|
||||
./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
|
||||
fi
|
||||
|
|
|
@@ -1,87 +1,87 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Generate rpm package for centos
|
||||
|
||||
set -e
|
||||
# set -x
|
||||
|
||||
#curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
output_dir=$2
|
||||
tdengine_ver=$3
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
pkg_dir="${top_dir}/rpmworkroom"
|
||||
spec_file="${script_dir}/tdengine.spec"
|
||||
|
||||
#echo "curr_dir: ${curr_dir}"
|
||||
#echo "top_dir: ${top_dir}"
|
||||
#echo "script_dir: ${script_dir}"
|
||||
echo "compile_dir: ${compile_dir}"
|
||||
echo "pkg_dir: ${pkg_dir}"
|
||||
echo "spec_file: ${spec_file}"
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
function cp_rpm_package() {
|
||||
local cur_dir
|
||||
cd $1
|
||||
cur_dir=$(pwd)
|
||||
|
||||
for dirlist in "$(ls ${cur_dir})"; do
|
||||
if test -d ${dirlist}; then
|
||||
cd ${dirlist}
|
||||
cp_rpm_package ${cur_dir}/${dirlist}
|
||||
cd ..
|
||||
fi
|
||||
if test -e ${dirlist}; then
|
||||
cp ${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
if [ -d ${pkg_dir} ]; then
|
||||
${csudo}rm -rf ${pkg_dir}
|
||||
fi
|
||||
${csudo}mkdir -p ${pkg_dir}
|
||||
cd ${pkg_dir}
|
||||
|
||||
${csudo}mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS
|
||||
|
||||
${csudo}rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file}
|
||||
|
||||
# copy rpm package to output_dir, and modify package name, then clean temp dir
|
||||
#${csudo}cp -rf RPMS/* ${output_dir}
|
||||
cp_rpm_package ${pkg_dir}/RPMS
|
||||
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
rpmname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
|
||||
elif [ "$verMode" == "edge" ]; then
|
||||
rpmname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType}
|
||||
else
|
||||
echo "unknow verMode, nor cluster or edge"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$verType" == "beta" ]; then
|
||||
rpmname="TDengine-server-"${tdengine_ver}-${verType}-${osType}-${cpuType}".rpm"
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
rpmname=${rpmname}".rpm"
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
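# Illustrative results of the naming logic above, assuming osType=Linux and
# cpuType=x64 (example values only):
#   verMode=edge,    verType=stable -> TDengine-server-3.0.0.0-Linux-x64.rpm
#   verMode=cluster, verType=beta   -> TDengine-server-3.0.0.0-beta-Linux-x64.rpm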
|
||||
|
||||
mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/${rpmname}
|
||||
|
||||
cd ..
|
||||
${csudo}rm -rf ${pkg_dir}
|
||||
#!/bin/bash
|
||||
#
|
||||
# Generate rpm package for centos
|
||||
|
||||
set -e
|
||||
# set -x
|
||||
|
||||
#curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
output_dir=$2
|
||||
tdengine_ver=$3
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
pkg_dir="${top_dir}/rpmworkroom"
|
||||
spec_file="${script_dir}/tdengine.spec"
|
||||
|
||||
#echo "curr_dir: ${curr_dir}"
|
||||
#echo "top_dir: ${top_dir}"
|
||||
#echo "script_dir: ${script_dir}"
|
||||
echo "compile_dir: ${compile_dir}"
|
||||
echo "pkg_dir: ${pkg_dir}"
|
||||
echo "spec_file: ${spec_file}"
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
function cp_rpm_package() {
|
||||
local cur_dir
|
||||
cd $1
|
||||
cur_dir=$(pwd)
|
||||
|
||||
for dirlist in "$(ls ${cur_dir})"; do
|
||||
if test -d ${dirlist}; then
|
||||
cd ${dirlist}
|
||||
cp_rpm_package ${cur_dir}/${dirlist}
|
||||
cd ..
|
||||
fi
|
||||
if test -e ${dirlist}; then
|
||||
cp ${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
if [ -d ${pkg_dir} ]; then
|
||||
${csudo}rm -rf ${pkg_dir}
|
||||
fi
|
||||
${csudo}mkdir -p ${pkg_dir}
|
||||
cd ${pkg_dir}
|
||||
|
||||
${csudo}mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS
|
||||
|
||||
${csudo}rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file}
|
||||
|
||||
# copy rpm package to output_dir, and modify package name, then clean temp dir
|
||||
#${csudo}cp -rf RPMS/* ${output_dir}
|
||||
cp_rpm_package ${pkg_dir}/RPMS
|
||||
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
rpmname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
|
||||
elif [ "$verMode" == "edge" ]; then
|
||||
rpmname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType}
|
||||
else
|
||||
echo "unknow verMode, nor cluster or edge"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$verType" == "beta" ]; then
|
||||
rpmname="TDengine-server-"${tdengine_ver}-${verType}-${osType}-${cpuType}".rpm"
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
rpmname=${rpmname}".rpm"
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/${rpmname}
|
||||
|
||||
cd ..
|
||||
${csudo}rm -rf ${pkg_dir}
|
||||
|
|
|
@@ -1,145 +1,145 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# taosd This shell script takes care of starting and stopping TDengine.
|
||||
#
|
||||
# chkconfig: 2345 99 01
|
||||
# description: TDengine is a distributed, scalable, high-performance Time Series Database
|
||||
# (TSDB). More than just a pure database, TDengine also provides the ability
|
||||
# to do stream computing, aggregation etc.
|
||||
#
|
||||
#
|
||||
### BEGIN INIT INFO
|
||||
# Provides: taosd
|
||||
# Required-Start: $network $local_fs $remote_fs
|
||||
# Required-Stop: $network $local_fs $remote_fs
|
||||
# Short-Description: start and stop taosd
|
||||
# Description: TDengine is a distributed, scalable, high-performance Time Series Database
|
||||
# (TSDB). More than just a pure database, TDengine also provides the ability
|
||||
# to do stream computing, aggregation etc.
|
||||
### END INIT INFO
|
||||
|
||||
# Source init functions
|
||||
. /etc/init.d/functions
|
||||
|
||||
# Maximum number of open files
|
||||
MAX_OPEN_FILES=65535
|
||||
|
||||
# Default program options
|
||||
NAME=taosd
|
||||
PROG=/usr/local/taos/bin/taosd
|
||||
USER=root
|
||||
GROUP=root
|
||||
|
||||
# Default directories
|
||||
LOCK_DIR=/var/lock/subsys
|
||||
PID_DIR=/var/run/$NAME
|
||||
|
||||
# Set file names
|
||||
LOCK_FILE=$LOCK_DIR/$NAME
|
||||
PID_FILE=$PID_DIR/$NAME.pid
|
||||
|
||||
[ -e $PID_DIR ] || mkdir -p $PID_DIR
|
||||
|
||||
PROG_OPTS=""
|
||||
|
||||
start() {
|
||||
echo -n "Starting ${NAME}: "
|
||||
# check identity
|
||||
curid="`id -u -n`"
|
||||
if [ "$curid" != root ] && [ "$curid" != "$USER" ] ; then
|
||||
echo "Must be run as root or $USER, but was run as $curid"
|
||||
return 1
|
||||
fi
|
||||
# Sets the maximum number of open file descriptors allowed.
|
||||
ulimit -n $MAX_OPEN_FILES
|
||||
curulimit="`ulimit -n`"
|
||||
if [ "$curulimit" -lt $MAX_OPEN_FILES ] ; then
|
||||
echo "'ulimit -n' must be greater than or equal to $MAX_OPEN_FILES, is $curulimit"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ "`id -u -n`" == root ] ; then
|
||||
# Changes the owner of the lock, and the pid files to allow
|
||||
# non-root OpenTSDB daemons to run /usr/share/opentsdb/bin/opentsdb_restart.py.
|
||||
touch $LOCK_FILE && chown $USER:$GROUP $LOCK_FILE
|
||||
touch $PID_FILE && chown $USER:$GROUP $PID_FILE
|
||||
daemon --user $USER --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
|
||||
else
|
||||
# Don't have to change user.
|
||||
daemon --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
|
||||
fi
|
||||
retval=$?
|
||||
sleep 2
|
||||
echo
|
||||
[ $retval -eq 0 ] && (findproc > $PID_FILE && touch $LOCK_FILE)
|
||||
return $retval
|
||||
}
|
||||
|
||||
stop() {
|
||||
echo -n "Stopping ${NAME}: "
|
||||
killproc -p $PID_FILE $NAME
|
||||
retval=$?
|
||||
echo
|
||||
# Non-root users don't have enough permission to remove pid and lock files.
|
||||
# So, the opentsdb_restart.py cannot get rid of the files, and the command
|
||||
# "service opentsdb status" will complain about the existing pid file.
|
||||
# Makes the pid file empty.
|
||||
echo > $PID_FILE
|
||||
[ $retval -eq 0 ] && (rm -f $PID_FILE && rm -f $LOCK_FILE)
|
||||
return $retval
|
||||
}
|
||||
|
||||
restart() {
|
||||
stop
|
||||
start
|
||||
}
|
||||
|
||||
reload() {
|
||||
restart
|
||||
}
|
||||
|
||||
force_reload() {
|
||||
restart
|
||||
}
|
||||
|
||||
rh_status() {
|
||||
# run checks to determine if the service is running or use generic status
|
||||
status -p $PID_FILE -l $LOCK_FILE $NAME
|
||||
}
|
||||
|
||||
rh_status_q() {
|
||||
rh_status >/dev/null 2>&1
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
rh_status_q && exit 0
|
||||
$1
|
||||
;;
|
||||
stop)
|
||||
rh_status_q || exit 0
|
||||
$1
|
||||
;;
|
||||
restart)
|
||||
$1
|
||||
;;
|
||||
reload)
|
||||
rh_status_q || exit 7
|
||||
$1
|
||||
;;
|
||||
force-reload)
|
||||
force_reload
|
||||
;;
|
||||
status)
|
||||
rh_status
|
||||
;;
|
||||
condrestart|try-restart)
|
||||
rh_status_q || exit 0
|
||||
restart
|
||||
;;
|
||||
*)
|
||||
echo "Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
|
||||
exit 2
|
||||
esac
|
||||
|
||||
exit $?
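# Assuming this init script is installed as /etc/init.d/taosd, it can be driven
# with the usual SysV tooling; the commands below are illustrative:
#   chkconfig --add taosd    # picks up the "chkconfig: 2345 99 01" header above
#   service taosd start
#   service taosd status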
|
||||
#!/bin/bash
|
||||
#
|
||||
# taosd This shell script takes care of starting and stopping TDengine.
|
||||
#
|
||||
# chkconfig: 2345 99 01
|
||||
# description: TDengine is a distributed, scalable, high-performance Time Series Database
|
||||
# (TSDB). More than just a pure database, TDengine also provides the ability
|
||||
# to do stream computing, aggregation etc.
|
||||
#
|
||||
#
|
||||
### BEGIN INIT INFO
|
||||
# Provides: taosd
|
||||
# Required-Start: $network $local_fs $remote_fs
|
||||
# Required-Stop: $network $local_fs $remote_fs
|
||||
# Short-Description: start and stop taosd
|
||||
# Description: TDengine is a distributed, scalable, high-performance Time Series Database
|
||||
# (TSDB). More than just a pure database, TDengine also provides the ability
|
||||
# to do stream computing, aggregation etc.
|
||||
### END INIT INFO
|
||||
|
||||
# Source init functions
|
||||
. /etc/init.d/functions
|
||||
|
||||
# Maximum number of open files
|
||||
MAX_OPEN_FILES=65535
|
||||
|
||||
# Default program options
|
||||
NAME=taosd
|
||||
PROG=/usr/local/taos/bin/taosd
|
||||
USER=root
|
||||
GROUP=root
|
||||
|
||||
# Default directories
|
||||
LOCK_DIR=/var/lock/subsys
|
||||
PID_DIR=/var/run/$NAME
|
||||
|
||||
# Set file names
|
||||
LOCK_FILE=$LOCK_DIR/$NAME
|
||||
PID_FILE=$PID_DIR/$NAME.pid
|
||||
|
||||
[ -e $PID_DIR ] || mkdir -p $PID_DIR
|
||||
|
||||
PROG_OPTS=""
|
||||
|
||||
start() {
|
||||
echo -n "Starting ${NAME}: "
|
||||
# check identity
|
||||
curid="`id -u -n`"
|
||||
if [ "$curid" != root ] && [ "$curid" != "$USER" ] ; then
|
||||
echo "Must be run as root or $USER, but was run as $curid"
|
||||
return 1
|
||||
fi
|
||||
# Sets the maximum number of open file descriptors allowed.
|
||||
ulimit -n $MAX_OPEN_FILES
|
||||
curulimit="`ulimit -n`"
|
||||
if [ "$curulimit" -lt $MAX_OPEN_FILES ] ; then
|
||||
echo "'ulimit -n' must be greater than or equal to $MAX_OPEN_FILES, is $curulimit"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ "`id -u -n`" == root ] ; then
|
||||
# Changes the owner of the lock, and the pid files to allow
|
||||
# non-root OpenTSDB daemons to run /usr/share/opentsdb/bin/opentsdb_restart.py.
|
||||
touch $LOCK_FILE && chown $USER:$GROUP $LOCK_FILE
|
||||
touch $PID_FILE && chown $USER:$GROUP $PID_FILE
|
||||
daemon --user $USER --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
|
||||
else
|
||||
# Don't have to change user.
|
||||
daemon --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
|
||||
fi
|
||||
retval=$?
|
||||
sleep 2
|
||||
echo
|
||||
[ $retval -eq 0 ] && (findproc > $PID_FILE && touch $LOCK_FILE)
|
||||
return $retval
|
||||
}
|
||||
|
||||
stop() {
|
||||
echo -n "Stopping ${NAME}: "
|
||||
killproc -p $PID_FILE $NAME
|
||||
retval=$?
|
||||
echo
|
||||
# Non-root users don't have enough permission to remove pid and lock files.
|
||||
# So, the opentsdb_restart.py cannot get rid of the files, and the command
|
||||
# "service opentsdb status" will complain about the existing pid file.
|
||||
# Makes the pid file empty.
|
||||
echo > $PID_FILE
|
||||
[ $retval -eq 0 ] && (rm -f $PID_FILE && rm -f $LOCK_FILE)
|
||||
return $retval
|
||||
}
|
||||
|
||||
restart() {
|
||||
stop
|
||||
start
|
||||
}
|
||||
|
||||
reload() {
|
||||
restart
|
||||
}
|
||||
|
||||
force_reload() {
|
||||
restart
|
||||
}
|
||||
|
||||
rh_status() {
|
||||
# run checks to determine if the service is running or use generic status
|
||||
status -p $PID_FILE -l $LOCK_FILE $NAME
|
||||
}
|
||||
|
||||
rh_status_q() {
|
||||
rh_status >/dev/null 2>&1
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
rh_status_q && exit 0
|
||||
$1
|
||||
;;
|
||||
stop)
|
||||
rh_status_q || exit 0
|
||||
$1
|
||||
;;
|
||||
restart)
|
||||
$1
|
||||
;;
|
||||
reload)
|
||||
rh_status_q || exit 7
|
||||
$1
|
||||
;;
|
||||
force-reload)
|
||||
force_reload
|
||||
;;
|
||||
status)
|
||||
rh_status
|
||||
;;
|
||||
condrestart|try-restart)
|
||||
rh_status_q || exit 0
|
||||
restart
|
||||
;;
|
||||
*)
|
||||
echo "Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
|
||||
exit 2
|
||||
esac
|
||||
|
||||
exit $?
|
||||
|
|
|
@@ -1,141 +1,141 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# tarbitratord This shell script takes care of starting and stopping tarbitrator.
|
||||
#
|
||||
# chkconfig: 2345 99 01
|
||||
# description: tarbitrator is an arbitrator used in TDengine cluster.
|
||||
#
|
||||
#
|
||||
### BEGIN INIT INFO
|
||||
# Provides: taoscluster
|
||||
# Required-Start: $network $local_fs $remote_fs
|
||||
# Required-Stop: $network $local_fs $remote_fs
|
||||
# Short-Description: start and stop tarbitrator
|
||||
# Description: tarbitrator is an arbitrator used in TDengine cluster.
|
||||
### END INIT INFO
|
||||
|
||||
# Source init functions
|
||||
. /etc/init.d/functions
|
||||
|
||||
# Maximum number of open files
|
||||
MAX_OPEN_FILES=65535
|
||||
|
||||
# Default program options
|
||||
NAME=tarbitrator
|
||||
PROG=/usr/local/taos/bin/tarbitrator
|
||||
USER=root
|
||||
GROUP=root
|
||||
|
||||
# Default directories
|
||||
LOCK_DIR=/var/lock/subsys
|
||||
PID_DIR=/var/run/$NAME
|
||||
|
||||
# Set file names
|
||||
LOCK_FILE=$LOCK_DIR/$NAME
|
||||
PID_FILE=$PID_DIR/$NAME.pid
|
||||
|
||||
[ -e $PID_DIR ] || mkdir -p $PID_DIR
|
||||
|
||||
PROG_OPTS=""
|
||||
|
||||
start() {
|
||||
echo -n "Starting ${NAME}: "
|
||||
# check identity
|
||||
curid="`id -u -n`"
|
||||
if [ "$curid" != root ] && [ "$curid" != "$USER" ] ; then
|
||||
echo "Must be run as root or $USER, but was run as $curid"
|
||||
return 1
|
||||
fi
|
||||
# Sets the maximum number of open file descriptors allowed.
|
||||
ulimit -n $MAX_OPEN_FILES
|
||||
curulimit="`ulimit -n`"
|
||||
if [ "$curulimit" -lt $MAX_OPEN_FILES ] ; then
|
||||
echo "'ulimit -n' must be greater than or equal to $MAX_OPEN_FILES, is $curulimit"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ "`id -u -n`" == root ] ; then
|
||||
# Changes the owner of the lock, and the pid files to allow
|
||||
# non-root OpenTSDB daemons to run /usr/share/opentsdb/bin/opentsdb_restart.py.
|
||||
touch $LOCK_FILE && chown $USER:$GROUP $LOCK_FILE
|
||||
touch $PID_FILE && chown $USER:$GROUP $PID_FILE
|
||||
daemon --user $USER --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
|
||||
else
|
||||
# Don't have to change user.
|
||||
daemon --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
|
||||
fi
|
||||
retval=$?
|
||||
sleep 2
|
||||
echo
|
||||
[ $retval -eq 0 ] && (findproc > $PID_FILE && touch $LOCK_FILE)
|
||||
return $retval
|
||||
}
|
||||
|
||||
stop() {
|
||||
echo -n "Stopping ${NAME}: "
|
||||
killproc -p $PID_FILE $NAME
|
||||
retval=$?
|
||||
echo
|
||||
# Non-root users don't have enough permission to remove pid and lock files.
|
||||
# So, the opentsdb_restart.py cannot get rid of the files, and the command
|
||||
# "service opentsdb status" will complain about the existing pid file.
|
||||
# Makes the pid file empty.
|
||||
echo > $PID_FILE
|
||||
[ $retval -eq 0 ] && (rm -f $PID_FILE && rm -f $LOCK_FILE)
|
||||
return $retval
|
||||
}
|
||||
|
||||
restart() {
|
||||
stop
|
||||
start
|
||||
}
|
||||
|
||||
reload() {
|
||||
restart
|
||||
}
|
||||
|
||||
force_reload() {
|
||||
restart
|
||||
}
|
||||
|
||||
rh_status() {
|
||||
# run checks to determine if the service is running or use generic status
|
||||
status -p $PID_FILE -l $LOCK_FILE $NAME
|
||||
}
|
||||
|
||||
rh_status_q() {
|
||||
rh_status >/dev/null 2>&1
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
rh_status_q && exit 0
|
||||
$1
|
||||
;;
|
||||
stop)
|
||||
rh_status_q || exit 0
|
||||
$1
|
||||
;;
|
||||
restart)
|
||||
$1
|
||||
;;
|
||||
reload)
|
||||
rh_status_q || exit 7
|
||||
$1
|
||||
;;
|
||||
force-reload)
|
||||
force_reload
|
||||
;;
|
||||
status)
|
||||
rh_status
|
||||
;;
|
||||
condrestart|try-restart)
|
||||
rh_status_q || exit 0
|
||||
restart
|
||||
;;
|
||||
*)
|
||||
echo "Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
|
||||
exit 2
|
||||
esac
|
||||
|
||||
exit $?
|
||||
#!/bin/bash
|
||||
#
|
||||
# tarbitratord This shell script takes care of starting and stopping tarbitrator.
|
||||
#
|
||||
# chkconfig: 2345 99 01
|
||||
# description: tarbitrator is an arbitrator used in TDengine cluster.
|
||||
#
|
||||
#
|
||||
### BEGIN INIT INFO
|
||||
# Provides: taoscluster
|
||||
# Required-Start: $network $local_fs $remote_fs
|
||||
# Required-Stop: $network $local_fs $remote_fs
|
||||
# Short-Description: start and stop tarbitrator
|
||||
# Description: tarbitrator is an arbitrator used in TDengine cluster.
|
||||
### END INIT INFO
|
||||
|
||||
# Source init functions
|
||||
. /etc/init.d/functions
|
||||
|
||||
# Maximum number of open files
|
||||
MAX_OPEN_FILES=65535
|
||||
|
||||
# Default program options
|
||||
NAME=tarbitrator
|
||||
PROG=/usr/local/taos/bin/tarbitrator
|
||||
USER=root
|
||||
GROUP=root
|
||||
|
||||
# Default directories
|
||||
LOCK_DIR=/var/lock/subsys
|
||||
PID_DIR=/var/run/$NAME
|
||||
|
||||
# Set file names
|
||||
LOCK_FILE=$LOCK_DIR/$NAME
|
||||
PID_FILE=$PID_DIR/$NAME.pid
|
||||
|
||||
[ -e $PID_DIR ] || mkdir -p $PID_DIR
|
||||
|
||||
PROG_OPTS=""
|
||||
|
||||
start() {
|
||||
echo -n "Starting ${NAME}: "
|
||||
# check identity
|
||||
curid="`id -u -n`"
|
||||
if [ "$curid" != root ] && [ "$curid" != "$USER" ] ; then
|
||||
echo "Must be run as root or $USER, but was run as $curid"
|
||||
return 1
|
||||
fi
|
||||
# Sets the maximum number of open file descriptors allowed.
|
||||
ulimit -n $MAX_OPEN_FILES
|
||||
curulimit="`ulimit -n`"
|
||||
if [ "$curulimit" -lt $MAX_OPEN_FILES ] ; then
|
||||
echo "'ulimit -n' must be greater than or equal to $MAX_OPEN_FILES, is $curulimit"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ "`id -u -n`" == root ] ; then
|
||||
# Changes the owner of the lock, and the pid files to allow
|
||||
# non-root OpenTSDB daemons to run /usr/share/opentsdb/bin/opentsdb_restart.py.
|
||||
touch $LOCK_FILE && chown $USER:$GROUP $LOCK_FILE
|
||||
touch $PID_FILE && chown $USER:$GROUP $PID_FILE
|
||||
daemon --user $USER --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
|
||||
else
|
||||
# Don't have to change user.
|
||||
daemon --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
|
||||
fi
|
||||
retval=$?
|
||||
sleep 2
|
||||
echo
|
||||
[ $retval -eq 0 ] && (findproc > $PID_FILE && touch $LOCK_FILE)
|
||||
return $retval
|
||||
}
|
||||
|
||||
stop() {
|
||||
echo -n "Stopping ${NAME}: "
|
||||
killproc -p $PID_FILE $NAME
|
||||
retval=$?
|
||||
echo
|
||||
# Non-root users don't have enough permission to remove pid and lock files.
|
||||
# So, the opentsdb_restart.py cannot get rid of the files, and the command
|
||||
# "service opentsdb status" will complain about the existing pid file.
|
||||
# Makes the pid file empty.
|
||||
echo > $PID_FILE
|
||||
[ $retval -eq 0 ] && (rm -f $PID_FILE && rm -f $LOCK_FILE)
|
||||
return $retval
|
||||
}
|
||||
|
||||
restart() {
|
||||
stop
|
||||
start
|
||||
}
|
||||
|
||||
reload() {
|
||||
restart
|
||||
}
|
||||
|
||||
force_reload() {
|
||||
restart
|
||||
}
|
||||
|
||||
rh_status() {
|
||||
# run checks to determine if the service is running or use generic status
|
||||
status -p $PID_FILE -l $LOCK_FILE $NAME
|
||||
}
|
||||
|
||||
rh_status_q() {
|
||||
rh_status >/dev/null 2>&1
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
rh_status_q && exit 0
|
||||
$1
|
||||
;;
|
||||
stop)
|
||||
rh_status_q || exit 0
|
||||
$1
|
||||
;;
|
||||
restart)
|
||||
$1
|
||||
;;
|
||||
reload)
|
||||
rh_status_q || exit 7
|
||||
$1
|
||||
;;
|
||||
force-reload)
|
||||
force_reload
|
||||
;;
|
||||
status)
|
||||
rh_status
|
||||
;;
|
||||
condrestart|try-restart)
|
||||
rh_status_q || exit 0
|
||||
restart
|
||||
;;
|
||||
*)
|
||||
echo "Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
|
||||
exit 2
|
||||
esac
|
||||
|
||||
exit $?
|
||||
|
|
|
@@ -1,236 +1,236 @@
|
|||
%define homepath /usr/local/taos
|
||||
%define userlocalpath /usr/local
|
||||
%define cfg_install_dir /etc/taos
|
||||
%define __strip /bin/true
|
||||
|
||||
Name: tdengine
|
||||
Version: %{_version}
|
||||
Release: 3%{?dist}
|
||||
Summary: tdengine from taosdata
|
||||
Group: Application/Database
|
||||
License: AGPL
|
||||
URL: www.taosdata.com
|
||||
AutoReqProv: no
|
||||
|
||||
#BuildRoot: %_topdir/BUILDROOT
|
||||
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
|
||||
|
||||
#Prefix: /usr/local/taos
|
||||
|
||||
#BuildRequires:
|
||||
#Requires:
|
||||
|
||||
%description
|
||||
Big Data Platform Designed and Optimized for IoT
|
||||
|
||||
#"prep" Nothing needs to be done
|
||||
#%prep
|
||||
#%setup -q
|
||||
#%setup -T
|
||||
|
||||
#"build" Nothing needs to be done
|
||||
#%build
|
||||
#%configure
|
||||
#make %{?_smp_mflags}
|
||||
|
||||
%install
|
||||
#make install DESTDIR=%{buildroot}
|
||||
rm -rf %{buildroot}
|
||||
|
||||
echo topdir: %{_topdir}
|
||||
echo version: %{_version}
|
||||
echo buildroot: %{buildroot}
|
||||
|
||||
libfile="libtaos.so.%{_version}"
|
||||
|
||||
# create install path, and cp file
|
||||
mkdir -p %{buildroot}%{homepath}/bin
|
||||
mkdir -p %{buildroot}%{homepath}/cfg
|
||||
#mkdir -p %{buildroot}%{homepath}/connector
|
||||
mkdir -p %{buildroot}%{homepath}/driver
|
||||
mkdir -p %{buildroot}%{homepath}/examples
|
||||
mkdir -p %{buildroot}%{homepath}/include
|
||||
#mkdir -p %{buildroot}%{homepath}/init.d
|
||||
mkdir -p %{buildroot}%{homepath}/script
|
||||
|
||||
cp %{_compiledir}/../packaging/cfg/taos.cfg %{buildroot}%{homepath}/cfg
|
||||
if [ -f %{_compiledir}/test/cfg/taosadapter.toml ]; then
|
||||
cp %{_compiledir}/test/cfg/taosadapter.toml %{buildroot}%{homepath}/cfg
|
||||
fi
|
||||
if [ -f %{_compiledir}/test/cfg/taosadapter.service ]; then
|
||||
cp %{_compiledir}/test/cfg/taosadapter.service %{buildroot}%{homepath}/cfg
|
||||
fi
|
||||
#cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d
|
||||
cp %{_compiledir}/../packaging/tools/post.sh %{buildroot}%{homepath}/script
|
||||
cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/script
|
||||
cp %{_compiledir}/../packaging/tools/startPre.sh %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/../packaging/tools/set_core.sh %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
|
||||
#cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin
|
||||
|
||||
if [ -f %{_compiledir}/build/bin/taosadapter ]; then
|
||||
cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||:
|
||||
fi
|
||||
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
|
||||
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
|
||||
cp %{_compiledir}/../src/inc/taosdef.h %{buildroot}%{homepath}/include
|
||||
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
|
||||
#cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
|
||||
#cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
|
||||
#cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
|
||||
#cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||:
|
||||
cp -r %{_compiledir}/../examples/* %{buildroot}%{homepath}/examples
|
||||
|
||||
if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
|
||||
mkdir -p %{buildroot}%{userlocalpath}/bin
|
||||
mkdir -p %{buildroot}%{userlocalpath}/lib
|
||||
mkdir -p %{buildroot}%{userlocalpath}/lib/pkgconfig
|
||||
mkdir -p %{buildroot}%{userlocalpath}/include
|
||||
mkdir -p %{buildroot}%{userlocalpath}/include/jemalloc
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share/doc
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share/doc/jemalloc
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share/man
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share/man/man3
|
||||
|
||||
cp %{_compiledir}/build/bin/jemalloc-config %{buildroot}%{userlocalpath}/bin/
|
||||
if [ -f %{_compiledir}/build/bin/jemalloc.sh ]; then
|
||||
cp %{_compiledir}/build/bin/jemalloc.sh %{buildroot}%{userlocalpath}/bin/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/bin/jeprof ]; then
|
||||
cp %{_compiledir}/build/bin/jeprof %{buildroot}%{userlocalpath}/bin/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/include/jemalloc/jemalloc.h ]; then
|
||||
cp %{_compiledir}/build/include/jemalloc/jemalloc.h %{buildroot}%{userlocalpath}/include/jemalloc/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/lib/libjemalloc.so.2 ]; then
|
||||
cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/
|
||||
ln -sf libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/libjemalloc.so
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
|
||||
cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{userlocalpath}/lib/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
|
||||
cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{userlocalpath}/lib/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then
|
||||
cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{userlocalpath}/lib/pkgconfig/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/share/doc/jemalloc/jemalloc.html ]; then
|
||||
cp %{_compiledir}/build/share/doc/jemalloc/jemalloc.html %{buildroot}%{userlocalpath}/share/doc/jemalloc/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/share/man/man3/jemalloc.3 ]; then
|
||||
cp %{_compiledir}/build/share/man/man3/jemalloc.3 %{buildroot}%{userlocalpath}/share/man/man3/
|
||||
fi
|
||||
fi
|
||||
|
||||
#Scripts executed before installation
|
||||
%pre
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
# Stop the service if running
|
||||
if pidof taosd &> /dev/null; then
|
||||
if pidof systemd &> /dev/null; then
|
||||
${csudo}systemctl stop taosd || :
|
||||
elif $(which service &> /dev/null); then
|
||||
${csudo}service taosd stop || :
|
||||
else
|
||||
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
fi
|
||||
echo "Stop taosd service success!"
|
||||
sleep 1
|
||||
fi
|
||||
# if taos.cfg already exist, remove it
|
||||
if [ -f %{cfg_install_dir}/taos.cfg ]; then
|
||||
${csudo}rm -f %{cfg_install_dir}/cfg/taos.cfg || :
|
||||
fi
|
||||
|
||||
# if taosadapter.toml already exist, remove it
|
||||
if [ -f %{cfg_install_dir}/taosadapter.toml ]; then
|
||||
${csudo}rm -f %{cfg_install_dir}/cfg/taosadapter.toml || :
|
||||
fi
|
||||
|
||||
# there must not be any libtaos.so* files left, otherwise ln -s will fail
|
||||
${csudo}rm -f %{homepath}/driver/libtaos* || :
|
||||
|
||||
#Scripts executed after installation
|
||||
%post
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
cd %{homepath}/script
|
||||
${csudo}./post.sh
|
||||
|
||||
# Scripts executed before uninstall
|
||||
%preun
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
# only call preun.sh when the package is removed, not when it is updated (2)
|
||||
if [ $1 -eq 0 ];then
|
||||
#cd %{homepath}/script
|
||||
#${csudo}./preun.sh
|
||||
|
||||
if [ -f %{homepath}/script/preun.sh ]; then
|
||||
cd %{homepath}/script
|
||||
${csudo}./preun.sh
|
||||
else
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
inc_link_dir="/usr/include"
|
||||
|
||||
data_link_dir="/usr/local/taos/data"
|
||||
log_link_dir="/usr/local/taos/log"
|
||||
cfg_link_dir="/usr/local/taos/cfg"
|
||||
|
||||
# Remove all links
|
||||
${csudo}rm -f ${bin_link_dir}/taos || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosd || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosadapter || :
|
||||
${csudo}rm -f ${cfg_link_dir}/* || :
|
||||
${csudo}rm -f ${inc_link_dir}/taos.h || :
|
||||
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
|
||||
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
|
||||
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
||||
|
||||
${csudo}rm -f ${log_link_dir} || :
|
||||
${csudo}rm -f ${data_link_dir} || :
|
||||
|
||||
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Scripts executed after uninstall
|
||||
%postun
|
||||
|
||||
# clean build dir
|
||||
%clean
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
${csudo}rm -rf %{buildroot}
|
||||
|
||||
#Specify the files to be packaged
|
||||
%files
|
||||
/*
|
||||
#%doc
|
||||
|
||||
#Setting default permissions
|
||||
%defattr (-,root,root,0755)
|
||||
#%{prefix}
|
||||
|
||||
#%changelog
|
||||
%define homepath /usr/local/taos
|
||||
%define userlocalpath /usr/local
|
||||
%define cfg_install_dir /etc/taos
|
||||
%define __strip /bin/true
|
||||
|
||||
Name: tdengine
|
||||
Version: %{_version}
|
||||
Release: 3%{?dist}
|
||||
Summary: tdengine from taosdata
|
||||
Group: Application/Database
|
||||
License: AGPL
|
||||
URL: www.taosdata.com
|
||||
AutoReqProv: no
|
||||
|
||||
#BuildRoot: %_topdir/BUILDROOT
|
||||
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
|
||||
|
||||
#Prefix: /usr/local/taos
|
||||
|
||||
#BuildRequires:
|
||||
#Requires:
|
||||
|
||||
%description
|
||||
Big Data Platform Designed and Optimized for IoT
|
||||
|
||||
#"prep" Nothing needs to be done
|
||||
#%prep
|
||||
#%setup -q
|
||||
#%setup -T
|
||||
|
||||
#"build" Nothing needs to be done
|
||||
#%build
|
||||
#%configure
|
||||
#make %{?_smp_mflags}
|
||||
|
||||
%install
|
||||
#make install DESTDIR=%{buildroot}
|
||||
rm -rf %{buildroot}
|
||||
|
||||
echo topdir: %{_topdir}
|
||||
echo version: %{_version}
|
||||
echo buildroot: %{buildroot}
|
||||
|
||||
libfile="libtaos.so.%{_version}"
|
||||
|
||||
# create install path, and cp file
|
||||
mkdir -p %{buildroot}%{homepath}/bin
|
||||
mkdir -p %{buildroot}%{homepath}/cfg
|
||||
#mkdir -p %{buildroot}%{homepath}/connector
|
||||
mkdir -p %{buildroot}%{homepath}/driver
|
||||
mkdir -p %{buildroot}%{homepath}/examples
|
||||
mkdir -p %{buildroot}%{homepath}/include
|
||||
#mkdir -p %{buildroot}%{homepath}/init.d
|
||||
mkdir -p %{buildroot}%{homepath}/script
|
||||
|
||||
cp %{_compiledir}/../packaging/cfg/taos.cfg %{buildroot}%{homepath}/cfg
|
||||
if [ -f %{_compiledir}/test/cfg/taosadapter.toml ]; then
|
||||
cp %{_compiledir}/test/cfg/taosadapter.toml %{buildroot}%{homepath}/cfg
|
||||
fi
|
||||
if [ -f %{_compiledir}/test/cfg/taosadapter.service ]; then
|
||||
cp %{_compiledir}/test/cfg/taosadapter.service %{buildroot}%{homepath}/cfg
|
||||
fi
|
||||
#cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d
|
||||
cp %{_compiledir}/../packaging/tools/post.sh %{buildroot}%{homepath}/script
|
||||
cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/script
|
||||
cp %{_compiledir}/../packaging/tools/startPre.sh %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/../packaging/tools/set_core.sh %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
|
||||
#cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin
|
||||
|
||||
if [ -f %{_compiledir}/build/bin/taosadapter ]; then
|
||||
cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||:
|
||||
fi
|
||||
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
|
||||
cp %{_compiledir}/../include/client/taos.h %{buildroot}%{homepath}/include
|
||||
cp %{_compiledir}/../include/common/taosdef.h %{buildroot}%{homepath}/include
|
||||
cp %{_compiledir}/../include/util/taoserror.h %{buildroot}%{homepath}/include
|
||||
#cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
|
||||
#cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
|
||||
#cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
|
||||
#cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||:
|
||||
cp -r %{_compiledir}/../examples/* %{buildroot}%{homepath}/examples
|
||||
|
||||
if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
|
||||
mkdir -p %{buildroot}%{userlocalpath}/bin
|
||||
mkdir -p %{buildroot}%{userlocalpath}/lib
|
||||
mkdir -p %{buildroot}%{userlocalpath}/lib/pkgconfig
|
||||
mkdir -p %{buildroot}%{userlocalpath}/include
|
||||
mkdir -p %{buildroot}%{userlocalpath}/include/jemalloc
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share/doc
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share/doc/jemalloc
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share/man
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share/man/man3
|
||||
|
||||
cp %{_compiledir}/build/bin/jemalloc-config %{buildroot}%{userlocalpath}/bin/
|
||||
if [ -f %{_compiledir}/build/bin/jemalloc.sh ]; then
|
||||
cp %{_compiledir}/build/bin/jemalloc.sh %{buildroot}%{userlocalpath}/bin/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/bin/jeprof ]; then
|
||||
cp %{_compiledir}/build/bin/jeprof %{buildroot}%{userlocalpath}/bin/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/include/jemalloc/jemalloc.h ]; then
|
||||
cp %{_compiledir}/build/include/jemalloc/jemalloc.h %{buildroot}%{userlocalpath}/include/jemalloc/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/lib/libjemalloc.so.2 ]; then
|
||||
cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/
|
||||
ln -sf libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/libjemalloc.so
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
|
||||
cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{userlocalpath}/lib/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
|
||||
cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{userlocalpath}/lib/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then
|
||||
cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{userlocalpath}/lib/pkgconfig/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/share/doc/jemalloc/jemalloc.html ]; then
|
||||
cp %{_compiledir}/build/share/doc/jemalloc/jemalloc.html %{buildroot}%{userlocalpath}/share/doc/jemalloc/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/share/man/man3/jemalloc.3 ]; then
|
||||
cp %{_compiledir}/build/share/man/man3/jemalloc.3 %{buildroot}%{userlocalpath}/share/man/man3/
|
||||
fi
|
||||
fi
|
||||
|
||||
#Scripts executed before installation
|
||||
%pre
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
# Stop the service if running
|
||||
if pidof taosd &> /dev/null; then
|
||||
if pidof systemd &> /dev/null; then
|
||||
${csudo}systemctl stop taosd || :
|
||||
elif $(which service &> /dev/null); then
|
||||
${csudo}service taosd stop || :
|
||||
else
|
||||
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
fi
|
||||
echo "Stop taosd service success!"
|
||||
sleep 1
|
||||
fi
|
||||
# if taos.cfg already exist, remove it
|
||||
if [ -f %{cfg_install_dir}/taos.cfg ]; then
|
||||
${csudo}rm -f %{cfg_install_dir}/cfg/taos.cfg || :
|
||||
fi
|
||||
|
||||
# if taosadapter.toml already exist, remove it
|
||||
if [ -f %{cfg_install_dir}/taosadapter.toml ]; then
|
||||
${csudo}rm -f %{cfg_install_dir}/cfg/taosadapter.toml || :
|
||||
fi
|
||||
|
||||
# there must not be any libtaos.so* files left, otherwise ln -s will fail
|
||||
${csudo}rm -f %{homepath}/driver/libtaos* || :
|
||||
|
||||
#Scripts executed after installation
|
||||
%post
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
cd %{homepath}/script
|
||||
${csudo}./post.sh
|
||||
|
||||
# Scripts executed before uninstall
|
||||
%preun
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
# only call preun.sh when the package is removed, not when it is updated (2)
|
||||
if [ $1 -eq 0 ];then
|
||||
#cd %{homepath}/script
|
||||
#${csudo}./preun.sh
|
||||
|
||||
if [ -f %{homepath}/script/preun.sh ]; then
|
||||
cd %{homepath}/script
|
||||
${csudo}./preun.sh
|
||||
else
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
inc_link_dir="/usr/include"
|
||||
|
||||
data_link_dir="/usr/local/taos/data"
|
||||
log_link_dir="/usr/local/taos/log"
|
||||
cfg_link_dir="/usr/local/taos/cfg"
|
||||
|
||||
# Remove all links
|
||||
${csudo}rm -f ${bin_link_dir}/taos || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosd || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosadapter || :
|
||||
${csudo}rm -f ${cfg_link_dir}/* || :
|
||||
${csudo}rm -f ${inc_link_dir}/taos.h || :
|
||||
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
|
||||
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
|
||||
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
||||
|
||||
${csudo}rm -f ${log_link_dir} || :
|
||||
${csudo}rm -f ${data_link_dir} || :
|
||||
|
||||
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Scripts executed after uninstall
|
||||
%postun
|
||||
|
||||
# clean build dir
|
||||
%clean
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
${csudo}rm -rf %{buildroot}
|
||||
|
||||
#Specify the files to be packaged
|
||||
%files
|
||||
/*
|
||||
#%doc
|
||||
|
||||
#Setting default permissions
|
||||
%defattr (-,root,root,0755)
|
||||
#%{prefix}
|
||||
|
||||
#%changelog
|
||||
|
|
|
@@ -1,52 +1,52 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
CSI=$(echo -e "\033[")
|
||||
CRED="${CSI}1;31m"
|
||||
CFAILURE="$CRED"
|
||||
CEND="${CSI}0m"
|
||||
if [ -n "$(grep 'Aliyun Linux release' /etc/issue)" -o -e /etc/redhat-release ]; then
|
||||
OS=CentOS
|
||||
[ -n "$(grep ' 7\.' /etc/redhat-release 2> /dev/null)" ] && CentOS_RHEL_version=7
|
||||
[ -n "$(grep ' 6\.' /etc/redhat-release 2> /dev/null)" -o -n "$(grep 'Aliyun Linux release6 15' /etc/issue)" ] && CentOS_RHEL_version=6
|
||||
[ -n "$(grep ' 5\.' /etc/redhat-release 2> /dev/null)" -o -n "$(grep 'Aliyun Linux release5' /etc/issue)" ] && CentOS_RHEL_version=5
|
||||
elif [ -n "$(grep 'Amazon Linux AMI release' /etc/issue)" -o -e /etc/system-release ]; then
|
||||
OS=CentOS
|
||||
CentOS_RHEL_version=6
|
||||
elif [ -n "$(grep 'bian' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Debian" ]; then
|
||||
OS=Debian
|
||||
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
|
||||
Debian_version=$(lsb_release -sr | awk -F. '{print $1}')
|
||||
elif [ -n "$(grep 'Deepin' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Deepin" ]; then
|
||||
OS=Debian
|
||||
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
|
||||
Debian_version=$(lsb_release -sr | awk -F. '{print $1}')
|
||||
elif [ -n "$(grep 'Kali GNU/Linux Rolling' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Kali" ]; then
|
||||
OS=Debian
|
||||
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
|
||||
if [ -n "$(grep 'VERSION="2016.*"' /etc/os-release)" ]; then
|
||||
Debian_version=8
|
||||
else
|
||||
echo "${CFAILURE}Does not support this OS, Please contact the author! ${CEND}"
|
||||
kill -9 $$
|
||||
fi
|
||||
elif [ -n "$(grep 'Ubuntu' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Ubuntu" -o -n "$(grep 'Linux Mint' /etc/issue)" ]; then
|
||||
OS=Ubuntu
|
||||
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
|
||||
Ubuntu_version=$(lsb_release -sr | awk -F. '{print $1}')
|
||||
[ -n "$(grep 'Linux Mint 18' /etc/issue)" ] && Ubuntu_version=16
|
||||
elif [ -n "$(grep 'elementary' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == 'elementary' ]; then
|
||||
OS=Ubuntu
|
||||
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
|
||||
Ubuntu_version=16
|
||||
else
|
||||
echo "${CFAILURE}Does not support this OS, Please contact the author! ${CEND}"
|
||||
kill -9 $$
|
||||
fi
|
||||
|
||||
echo "${CFAILURE}${OS}${CEND}"
|
||||
if [ "$OS" == 'CentOS' ]; then
|
||||
echo ${CentOS_RHEL_version}
|
||||
else
|
||||
echo ${Ubuntu_version}
|
||||
fi
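# Illustrative output of the detection above: a CentOS 7 host prints "CentOS"
# (coloured via ${CFAILURE}) followed by "7"; an Ubuntu 20.x host prints
# "Ubuntu" followed by "20".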
|
||||
|
||||
#!/bin/bash
|
||||
#
|
||||
CSI=$(echo -e "\033[")
|
||||
CRED="${CSI}1;31m"
|
||||
CFAILURE="$CRED"
|
||||
CEND="${CSI}0m"
|
||||
if [ -n "$(grep 'Aliyun Linux release' /etc/issue)" -o -e /etc/redhat-release ]; then
|
||||
OS=CentOS
|
||||
[ -n "$(grep ' 7\.' /etc/redhat-release 2> /dev/null)" ] && CentOS_RHEL_version=7
|
||||
[ -n "$(grep ' 6\.' /etc/redhat-release 2> /dev/null)" -o -n "$(grep 'Aliyun Linux release6 15' /etc/issue)" ] && CentOS_RHEL_version=6
|
||||
[ -n "$(grep ' 5\.' /etc/redhat-release 2> /dev/null)" -o -n "$(grep 'Aliyun Linux release5' /etc/issue)" ] && CentOS_RHEL_version=5
|
||||
elif [ -n "$(grep 'Amazon Linux AMI release' /etc/issue)" -o -e /etc/system-release ]; then
|
||||
OS=CentOS
|
||||
CentOS_RHEL_version=6
|
||||
elif [ -n "$(grep 'bian' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Debian" ]; then
|
||||
OS=Debian
|
||||
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
|
||||
Debian_version=$(lsb_release -sr | awk -F. '{print $1}')
|
||||
elif [ -n "$(grep 'Deepin' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Deepin" ]; then
|
||||
OS=Debian
|
||||
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
|
||||
Debian_version=$(lsb_release -sr | awk -F. '{print $1}')
|
||||
elif [ -n "$(grep 'Kali GNU/Linux Rolling' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Kali" ]; then
|
||||
OS=Debian
|
||||
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
|
||||
if [ -n "$(grep 'VERSION="2016.*"' /etc/os-release)" ]; then
|
||||
Debian_version=8
|
||||
else
|
||||
echo "${CFAILURE}Does not support this OS, Please contact the author! ${CEND}"
|
||||
kill -9 $$
|
||||
fi
|
||||
elif [ -n "$(grep 'Ubuntu' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Ubuntu" -o -n "$(grep 'Linux Mint' /etc/issue)" ]; then
|
||||
OS=Ubuntu
|
||||
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
|
||||
Ubuntu_version=$(lsb_release -sr | awk -F. '{print $1}')
|
||||
[ -n "$(grep 'Linux Mint 18' /etc/issue)" ] && Ubuntu_version=16
|
||||
elif [ -n "$(grep 'elementary' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == 'elementary' ]; then
|
||||
OS=Ubuntu
|
||||
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
|
||||
Ubuntu_version=16
|
||||
else
|
||||
echo "${CFAILURE}Does not support this OS, Please contact the author! ${CEND}"
|
||||
kill -9 $$
|
||||
fi
|
||||
|
||||
echo "${CFAILURE}${OS}${CEND}"
|
||||
if [ "$OS" == 'CentOS' ]; then
|
||||
echo ${CentOS_RHEL_version}
|
||||
else
|
||||
echo ${Ubuntu_version}
|
||||
fi
|
||||
|
||||
|
|
|
@ -1,21 +1,21 @@
#!/bin/bash
#

log_dir=$1
result_file=$2

if [ ! -n "$1" ];then
  echo "Please input the directory of taosdlog."
  echo "usage: ./get_client.sh <taosdlog directory> <result file>"
  exit 1
else
  log_dir=$1
fi

if [ ! -n "$2" ];then
  result_file=clientInfo.txt
else
  result_file=$2
fi

grep "new TCP connection" ${log_dir}/taosdlog.* | sed -e "s/0x.* from / /"|sed -e "s/,.*$//"|sed -e "s/:[0-9]*$//"|sort -r|uniq -f 2|sort -k 3 -r|uniq -f 2 > ${result_file}
@ -1,14 +1,14 @@
#!/bin/bash
#
# This file is used to install TAOS time-series database on linux systems. The operating system
# is required to use systemd to manage services at boot

set -e
# set -x

# -----------------------Variables definition---------------------
OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2)
len=$(echo ${#OS})
len=$((len-2))
retval=$(echo -ne ${OS:1:${len}} | cut -d" " -f1)
echo -ne $retval
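Usage sketch: the script prints the first word of the NAME= field from /etc/*-release.

./get_os.sh    # e.g. prints "Ubuntu" or "CentOS"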
@ -1,15 +1,15 @@
#!/bin/bash
#
# This file is used to install TAOS time-series database on linux systems. The operating system
# is required to use systemd to manage services at boot

set -e
# set -x

# -----------------------Variables definition---------------------
verinfo=$(cat $1 | grep " version" | cut -d '"' -f2)
verinfo=$(echo $verinfo | tr "\n" " ")
len=$(echo ${#verinfo})
len=$((len-1))
retval=$(echo -ne ${verinfo:0:${len}})
echo -ne $retval
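Usage sketch; the argument is any file that carries a quoted " version" string, so the path below is only a hypothetical example:

./get_version.sh src/util/src/version.c    # hypothetical file containing: char version[64] = "3.0.0.0";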
@ -485,6 +485,17 @@ function install_service() {
  # fi
}

function install_config() {
  if [ ! -f ${cfg_install_dir}/${configFile} ]; then
    ${csudo}mkdir -p ${cfg_install_dir}
    [ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir}
    ${csudo}chmod 644 ${cfg_install_dir}/*
  fi

  ${csudo}cp -f ${script_dir}/cfg/${configFile} ${install_main_dir}/cfg/${configFile}.org
  ${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg
}

function install_TDengine() {
  # Start to install
  echo -e "${GREEN}Start to install TDengine...${NC}"
@ -500,7 +511,7 @@ function install_TDengine() {
  # For installing new
  install_bin
  install_service
  #install_config
  install_config

  # Ask if to start the service
  #echo
@ -539,7 +550,7 @@ function install_TDengine() {
    echo
  else # Only install client
    install_bin
    #install_config
    install_config
    echo
    echo -e "\033[44;32;1mTDengine client is installed successfully!${NC}"
  fi
@ -1,339 +1,339 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# This file is used to install database on linux systems. The operating system
|
||||
# is required to use systemd to manage services at boot
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
|
||||
bin_link_dir="/usr/bin"
|
||||
#inc_link_dir="/usr/include"
|
||||
|
||||
#install main path
|
||||
install_main_dir="/usr/local/tarbitrator"
|
||||
|
||||
# old bin dir
|
||||
bin_dir="/usr/local/tarbitrator/bin"
|
||||
|
||||
service_config_dir="/etc/systemd/system"
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
csudo=""
|
||||
if command -v sudo >/dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
update_flag=0
|
||||
|
||||
initd_mod=0
|
||||
service_mod=2
|
||||
if pidof systemd &>/dev/null; then
|
||||
service_mod=0
|
||||
elif $(which service &>/dev/null); then
|
||||
service_mod=1
|
||||
service_config_dir="/etc/init.d"
|
||||
if $(which chkconfig &>/dev/null); then
|
||||
initd_mod=1
|
||||
elif $(which insserv &>/dev/null); then
|
||||
initd_mod=2
|
||||
elif $(which update-rc.d &>/dev/null); then
|
||||
initd_mod=3
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
|
||||
# get the operating system type for using the corresponding init file
|
||||
# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
|
||||
#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
|
||||
if [[ -e /etc/os-release ]]; then
|
||||
osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) || :
|
||||
else
|
||||
osinfo=""
|
||||
fi
|
||||
#echo "osinfo: ${osinfo}"
|
||||
os_type=0
|
||||
if echo $osinfo | grep -qwi "ubuntu"; then
|
||||
# echo "This is ubuntu system"
|
||||
os_type=1
|
||||
elif echo $osinfo | grep -qwi "debian"; then
|
||||
# echo "This is debian system"
|
||||
os_type=1
|
||||
elif echo $osinfo | grep -qwi "Kylin"; then
|
||||
# echo "This is Kylin system"
|
||||
os_type=1
|
||||
elif echo $osinfo | grep -qwi "centos"; then
|
||||
# echo "This is centos system"
|
||||
os_type=2
|
||||
elif echo $osinfo | grep -qwi "fedora"; then
|
||||
# echo "This is fedora system"
|
||||
os_type=2
|
||||
else
|
||||
echo " osinfo: ${osinfo}"
|
||||
echo " This is an officially unverified linux system,"
|
||||
echo " if there are any problems with the installation and operation, "
|
||||
echo " please feel free to contact taosdata.com for support."
|
||||
os_type=1
|
||||
fi
|
||||
|
||||
function kill_tarbitrator() {
|
||||
pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_main_path() {
|
||||
#create install main dir and all sub dir
|
||||
${csudo}rm -rf ${install_main_dir} || :
|
||||
${csudo}mkdir -p ${install_main_dir}
|
||||
${csudo}mkdir -p ${install_main_dir}/bin
|
||||
#${csudo}mkdir -p ${install_main_dir}/include
|
||||
${csudo}mkdir -p ${install_main_dir}/init.d
|
||||
}
|
||||
|
||||
function install_bin() {
|
||||
# Remove links
|
||||
${csudo}rm -f ${bin_link_dir}/rmtarbitrator || :
|
||||
${csudo}rm -f ${bin_link_dir}/tarbitrator || :
|
||||
${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*
|
||||
|
||||
#Make link
|
||||
[ -x ${install_main_dir}/bin/remove_arbi.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_arbi.sh ${bin_link_dir}/rmtarbitrator || :
|
||||
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
|
||||
}
|
||||
|
||||
function install_header() {
|
||||
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
|
||||
${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
|
||||
${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
|
||||
${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
|
||||
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
|
||||
}
|
||||
|
||||
function install_jemalloc() {
|
||||
jemalloc_dir=${script_dir}/jemalloc
|
||||
|
||||
if [ -d ${jemalloc_dir} ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/bin
|
||||
|
||||
if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/bin/jeprof ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
|
||||
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
|
||||
fi
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/share/man/man3
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
|
||||
fi
|
||||
|
||||
if [ -d /etc/ld.so.conf.d ]; then
|
||||
echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf >/dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
|
||||
${csudo}ldconfig
|
||||
else
|
||||
echo "/etc/ld.so.conf.d not found!"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function clean_service_on_sysvinit() {
|
||||
if pidof tarbitrator &>/dev/null; then
|
||||
${csudo}service tarbitratord stop || :
|
||||
fi
|
||||
|
||||
if ((${initd_mod} == 1)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo}chkconfig --del tarbitratord || :
|
||||
fi
|
||||
elif ((${initd_mod} == 2)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo}insserv -r tarbitratord || :
|
||||
fi
|
||||
elif ((${initd_mod} == 3)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo}update-rc.d -f tarbitratord remove || :
|
||||
fi
|
||||
fi
|
||||
|
||||
${csudo}rm -f ${service_config_dir}/tarbitratord || :
|
||||
|
||||
if $(which init &>/dev/null); then
|
||||
${csudo}init q || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_service_on_sysvinit() {
|
||||
clean_service_on_sysvinit
|
||||
sleep 1
|
||||
|
||||
if ((${os_type} == 1)); then
|
||||
${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
|
||||
${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord
|
||||
elif ((${os_type} == 2)); then
|
||||
${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
|
||||
${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord
|
||||
fi
|
||||
|
||||
if ((${initd_mod} == 1)); then
|
||||
${csudo}chkconfig --add tarbitratord || :
|
||||
${csudo}chkconfig --level 2345 tarbitratord on || :
|
||||
elif ((${initd_mod} == 2)); then
|
||||
${csudo}insserv tarbitratord || :
|
||||
${csudo}insserv -d tarbitratord || :
|
||||
elif ((${initd_mod} == 3)); then
|
||||
${csudo}update-rc.d tarbitratord defaults || :
|
||||
fi
|
||||
}
|
||||
|
||||
function clean_service_on_systemd() {
|
||||
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
|
||||
if systemctl is-active --quiet tarbitratord; then
|
||||
echo "tarbitrator is running, stopping it..."
|
||||
${csudo}systemctl stop tarbitratord &>/dev/null || echo &>/dev/null
|
||||
fi
|
||||
${csudo}systemctl disable tarbitratord &>/dev/null || echo &>/dev/null
|
||||
|
||||
${csudo}rm -f ${tarbitratord_service_config}
|
||||
}
|
||||
|
||||
function install_service_on_systemd() {
|
||||
clean_service_on_systemd
|
||||
|
||||
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
|
||||
|
||||
${csudo}bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
|
||||
${csudo}systemctl enable tarbitratord
|
||||
}
|
||||
|
||||
function install_service() {
|
||||
if ((${service_mod} == 0)); then
|
||||
install_service_on_systemd
|
||||
elif ((${service_mod} == 1)); then
|
||||
install_service_on_sysvinit
|
||||
else
|
||||
kill_tarbitrator
|
||||
fi
|
||||
}
|
||||
|
||||
function update_TDengine() {
|
||||
# Start to update
|
||||
echo -e "${GREEN}Start to update TDengine's arbitrator ...${NC}"
|
||||
# Stop the service if running
|
||||
if pidof tarbitrator &>/dev/null; then
|
||||
if ((${service_mod} == 0)); then
|
||||
${csudo}systemctl stop tarbitratord || :
|
||||
elif ((${service_mod} == 1)); then
|
||||
${csudo}service tarbitratord stop || :
|
||||
else
|
||||
kill_tarbitrator
|
||||
fi
|
||||
sleep 1
|
||||
fi
|
||||
|
||||
install_main_path
|
||||
#install_header
|
||||
install_bin
|
||||
install_service
|
||||
install_jemalloc
|
||||
|
||||
echo
|
||||
if ((${service_mod} == 0)); then
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}"
|
||||
elif ((${service_mod} == 1)); then
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}"
|
||||
fi
|
||||
echo
|
||||
echo -e "\033[44;32;1mTDengine's arbitrator is updated successfully!${NC}"
|
||||
}
|
||||
|
||||
function install_TDengine() {
|
||||
# Start to install
|
||||
echo -e "${GREEN}Start to install TDengine's arbitrator ...${NC}"
|
||||
|
||||
install_main_path
|
||||
#install_header
|
||||
install_bin
|
||||
install_service
|
||||
install_jemalloc
|
||||
|
||||
echo
|
||||
if ((${service_mod} == 0)); then
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}"
|
||||
elif ((${service_mod} == 1)); then
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}"
|
||||
fi
|
||||
|
||||
echo -e "\033[44;32;1mTDengine's arbitrator is installed successfully!${NC}"
|
||||
echo
|
||||
}
|
||||
|
||||
## ==============================Main program starts from here============================
|
||||
# Install server and client
|
||||
if [ -x ${bin_dir}/tarbitrator ]; then
|
||||
update_flag=1
|
||||
update_TDengine
|
||||
else
|
||||
install_TDengine
|
||||
fi
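For readability, the unit file that install_service_on_systemd assembles echo by echo comes out as shown below; the heredoc form is only an equivalent sketch, not how the script actually writes it:

${csudo}bash -c "cat > ${service_config_dir}/tarbitratord.service" <<'EOF'
[Unit]
Description=TDengine arbitrator service
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
ExecStart=/usr/bin/tarbitrator
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target
EOF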
@ -1,320 +1,305 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# This file is used to install TDengine client on linux systems. The operating system
|
||||
# is required to use systemd to manage services at boot
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
|
||||
dataDir="/var/lib/taos"
|
||||
logDir="/var/log/taos"
|
||||
productName="TDengine"
|
||||
installDir="/usr/local/taos"
|
||||
configDir="/etc/taos"
|
||||
serverName="taosd"
|
||||
clientName="taos"
|
||||
uninstallScript="rmtaos"
|
||||
configFile="taos.cfg"
|
||||
tarName="taos.tar.gz"
|
||||
|
||||
osType=Linux
|
||||
pagMode=full
|
||||
verMode=edge
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
# Dynamic directory
|
||||
data_dir=${dataDir}
|
||||
log_dir=${logDir}
|
||||
else
|
||||
script_dir=`dirname $0`
|
||||
cd ${script_dir}
|
||||
script_dir="$(pwd)"
|
||||
data_dir=${dataDir}
|
||||
log_dir=~/${productName}/log
|
||||
fi
|
||||
|
||||
log_link_dir="${installDir}/log"
|
||||
|
||||
cfg_install_dir=${configDir}
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
else
|
||||
bin_link_dir="/usr/local/bin"
|
||||
lib_link_dir="/usr/local/lib"
|
||||
inc_link_dir="/usr/local/include"
|
||||
fi
|
||||
|
||||
#install main path
|
||||
install_main_dir="${installDir}"
|
||||
|
||||
# old bin dir
|
||||
bin_dir="${installDir}/bin"
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
update_flag=0
|
||||
|
||||
function kill_client() {
|
||||
pid=$(ps -ef | grep "${clientName}" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_main_path() {
|
||||
#create install main dir and all sub dir
|
||||
${csudo}rm -rf ${install_main_dir} || :
|
||||
${csudo}mkdir -p ${install_main_dir}
|
||||
${csudo}mkdir -p ${install_main_dir}/cfg
|
||||
${csudo}mkdir -p ${install_main_dir}/bin
|
||||
${csudo}mkdir -p ${install_main_dir}/driver
|
||||
if [ $productName == "TDengine" ]; then
|
||||
${csudo}mkdir -p ${install_main_dir}/examples
|
||||
fi
|
||||
${csudo}mkdir -p ${install_main_dir}/include
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
${csudo}mkdir -p ${install_main_dir}/connector
|
||||
fi
|
||||
}
|
||||
|
||||
function install_bin() {
|
||||
# Remove links
|
||||
${csudo}rm -f ${bin_link_dir}/${clientName} || :
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
||||
fi
|
||||
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
|
||||
${csudo}rm -f ${bin_link_dir}/set_core || :
|
||||
|
||||
${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*
|
||||
|
||||
#Make link
|
||||
[ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || :
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
|
||||
fi
|
||||
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/${uninstallScript} || :
|
||||
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
|
||||
}
|
||||
|
||||
function clean_lib() {
|
||||
sudo rm -f /usr/lib/libtaos.* || :
|
||||
sudo rm -rf ${lib_dir} || :
|
||||
}
|
||||
|
||||
function install_lib() {
|
||||
# Remove links
|
||||
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
||||
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
|
||||
#${csudo}rm -rf ${v15_java_app_dir} || :
|
||||
|
||||
${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/*
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
|
||||
${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
|
||||
|
||||
if [ -d "${lib64_link_dir}" ]; then
|
||||
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
|
||||
${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
|
||||
fi
|
||||
else
|
||||
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
|
||||
${csudo}ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
|
||||
fi
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}ldconfig
|
||||
else
|
||||
${csudo}update_dyld_shared_cache
|
||||
fi
|
||||
}
|
||||
|
||||
function install_header() {
|
||||
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
|
||||
${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
|
||||
${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
|
||||
${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
|
||||
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
|
||||
}
|
||||
|
||||
function install_jemalloc() {
|
||||
jemalloc_dir=${script_dir}/jemalloc
|
||||
|
||||
if [ -d ${jemalloc_dir} ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/bin
|
||||
|
||||
if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/bin/jeprof ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
|
||||
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
|
||||
fi
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/share/man/man3
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
|
||||
fi
|
||||
|
||||
if [ -d /etc/ld.so.conf.d ]; then
|
||||
echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
|
||||
${csudo}ldconfig
|
||||
else
|
||||
echo "/etc/ld.so.conf.d not found!"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function install_config() {
|
||||
if [ ! -f ${cfg_install_dir}/${configFile} ]; then
|
||||
${csudo}mkdir -p ${cfg_install_dir}
|
||||
[ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir}
|
||||
${csudo}chmod 644 ${cfg_install_dir}/*
|
||||
fi
|
||||
|
||||
${csudo}cp -f ${script_dir}/cfg/${configFile} ${install_main_dir}/cfg/${configFile}.org
|
||||
${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg
|
||||
}
|
||||
|
||||
|
||||
function install_log() {
|
||||
${csudo}rm -rf ${log_dir} || :
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
|
||||
else
|
||||
mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
|
||||
fi
|
||||
${csudo}ln -s ${log_dir} ${install_main_dir}/log
|
||||
}
|
||||
|
||||
function install_connector() {
|
||||
${csudo}cp -rf ${script_dir}/connector/ ${install_main_dir}/
|
||||
}
|
||||
|
||||
function install_examples() {
|
||||
if [ -d ${script_dir}/examples ]; then
|
||||
${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
|
||||
fi
|
||||
}
|
||||
|
||||
function update_TDengine() {
|
||||
# Start to update
|
||||
if [ ! -e ${tarName} ]; then
|
||||
echo "File ${tarName} does not exist"
|
||||
exit 1
|
||||
fi
|
||||
tar -zxf ${tarName}
|
||||
|
||||
echo -e "${GREEN}Start to update ${productName} client...${NC}"
|
||||
# Stop the client shell if running
|
||||
if pidof ${clientName} &> /dev/null; then
|
||||
kill_client
|
||||
sleep 1
|
||||
fi
|
||||
|
||||
install_main_path
|
||||
|
||||
install_log
|
||||
install_header
|
||||
install_lib
|
||||
install_jemalloc
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_connector
|
||||
fi
|
||||
install_examples
|
||||
install_bin
|
||||
install_config
|
||||
|
||||
echo
|
||||
echo -e "\033[44;32;1m${productName} client is updated successfully!${NC}"
|
||||
|
||||
rm -rf $(tar -tf ${tarName})
|
||||
}
|
||||
|
||||
function install_TDengine() {
|
||||
# Start to install
|
||||
if [ ! -e ${tarName} ]; then
|
||||
echo "File ${tarName} does not exist"
|
||||
exit 1
|
||||
fi
|
||||
tar -zxf ${tarName}
|
||||
|
||||
echo -e "${GREEN}Start to install ${productName} client...${NC}"
|
||||
|
||||
install_main_path
|
||||
install_log
|
||||
install_header
|
||||
install_lib
|
||||
install_jemalloc
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_connector
|
||||
fi
|
||||
install_examples
|
||||
install_bin
|
||||
install_config
|
||||
|
||||
echo
|
||||
echo -e "\033[44;32;1m${productName} client is installed successfully!${NC}"
|
||||
|
||||
rm -rf $(tar -tf ${tarName})
|
||||
}
|
||||
|
||||
|
||||
## ==============================Main program starts from here============================
|
||||
# Install or update the client
|
||||
# if the server is already installed, don't install the client
|
||||
if [ -e ${bin_dir}/${serverName} ]; then
|
||||
echo -e "\033[44;32;1mThere are already installed ${productName} server, so don't need install client!${NC}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ -x ${bin_dir}/${clientName} ]; then
|
||||
update_flag=1
|
||||
update_TDengine
|
||||
else
|
||||
install_TDengine
|
||||
fi
|
||||
#!/bin/bash
|
||||
#
|
||||
# This file is used to install TDengine client on linux systems. The operating system
|
||||
# is required to use systemd to manage services at boot
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
|
||||
dataDir="/var/lib/taos"
|
||||
logDir="/var/log/taos"
|
||||
productName="TDengine"
|
||||
installDir="/usr/local/taos"
|
||||
configDir="/etc/taos"
|
||||
serverName="taosd"
|
||||
clientName="taos"
|
||||
uninstallScript="rmtaos"
|
||||
configFile="taos.cfg"
|
||||
|
||||
osType=Linux
|
||||
pagMode=full
|
||||
verMode=edge
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
# Dynamic directory
|
||||
data_dir=${dataDir}
|
||||
log_dir=${logDir}
|
||||
else
|
||||
script_dir=`dirname $0`
|
||||
cd ${script_dir}
|
||||
script_dir="$(pwd)"
|
||||
data_dir=${dataDir}
|
||||
log_dir=~/${productName}/log
|
||||
fi
|
||||
|
||||
log_link_dir="${installDir}/log"
|
||||
|
||||
cfg_install_dir=${configDir}
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
else
|
||||
bin_link_dir="/usr/local/bin"
|
||||
lib_link_dir="/usr/local/lib"
|
||||
inc_link_dir="/usr/local/include"
|
||||
fi
|
||||
|
||||
#install main path
|
||||
install_main_dir="${installDir}"
|
||||
|
||||
# old bin dir
|
||||
bin_dir="${installDir}/bin"
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
update_flag=0
|
||||
|
||||
function kill_client() {
|
||||
pid=$(ps -ef | grep "${clientName}" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_main_path() {
|
||||
#create install main dir and all sub dir
|
||||
${csudo}rm -rf ${install_main_dir} || :
|
||||
${csudo}mkdir -p ${install_main_dir}
|
||||
${csudo}mkdir -p ${install_main_dir}/cfg
|
||||
${csudo}mkdir -p ${install_main_dir}/bin
|
||||
${csudo}mkdir -p ${install_main_dir}/driver
|
||||
if [ $productName == "TDengine" ]; then
|
||||
${csudo}mkdir -p ${install_main_dir}/examples
|
||||
fi
|
||||
${csudo}mkdir -p ${install_main_dir}/include
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
${csudo}mkdir -p ${install_main_dir}/connector
|
||||
fi
|
||||
}
|
||||
|
||||
function install_bin() {
|
||||
# Remove links
|
||||
${csudo}rm -f ${bin_link_dir}/${clientName} || :
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
||||
fi
|
||||
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
|
||||
${csudo}rm -f ${bin_link_dir}/set_core || :
|
||||
|
||||
${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*
|
||||
|
||||
#Make link
|
||||
[ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || :
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
|
||||
fi
|
||||
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/${uninstallScript} || :
|
||||
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
|
||||
}
|
||||
|
||||
function clean_lib() {
|
||||
sudo rm -f /usr/lib/libtaos.* || :
|
||||
sudo rm -rf ${lib_dir} || :
|
||||
}
|
||||
|
||||
function install_lib() {
|
||||
# Remove links
|
||||
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
||||
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
|
||||
#${csudo}rm -rf ${v15_java_app_dir} || :
|
||||
|
||||
${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/*
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
|
||||
${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
|
||||
|
||||
if [ -d "${lib64_link_dir}" ]; then
|
||||
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
|
||||
${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
|
||||
fi
|
||||
else
|
||||
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
|
||||
${csudo}ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
|
||||
fi
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}ldconfig
|
||||
else
|
||||
${csudo}update_dyld_shared_cache
|
||||
fi
|
||||
}
|
||||
|
||||
function install_header() {
|
||||
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
|
||||
${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
|
||||
${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
|
||||
${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
|
||||
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
|
||||
}
|
||||
|
||||
function install_jemalloc() {
|
||||
jemalloc_dir=${script_dir}/jemalloc
|
||||
|
||||
if [ -d ${jemalloc_dir} ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/bin
|
||||
|
||||
if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/bin/jeprof ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
|
||||
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
|
||||
fi
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/share/man/man3
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
|
||||
fi
|
||||
|
||||
if [ -d /etc/ld.so.conf.d ]; then
|
||||
echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
|
||||
${csudo}ldconfig
|
||||
else
|
||||
echo "/etc/ld.so.conf.d not found!"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function install_config() {
|
||||
if [ ! -f ${cfg_install_dir}/${configFile} ]; then
|
||||
${csudo}mkdir -p ${cfg_install_dir}
|
||||
[ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir}
|
||||
${csudo}chmod 644 ${cfg_install_dir}/*
|
||||
fi
|
||||
|
||||
${csudo}cp -f ${script_dir}/cfg/${configFile} ${install_main_dir}/cfg/${configFile}.org
|
||||
${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg
|
||||
}
|
||||
|
||||
|
||||
function install_log() {
|
||||
${csudo}rm -rf ${log_dir} || :
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
|
||||
else
|
||||
mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
|
||||
fi
|
||||
${csudo}ln -s ${log_dir} ${install_main_dir}/log
|
||||
}
|
||||
|
||||
function install_connector() {
|
||||
${csudo}cp -rf ${script_dir}/connector/ ${install_main_dir}/
|
||||
}
|
||||
|
||||
function install_examples() {
|
||||
if [ -d ${script_dir}/examples ]; then
|
||||
${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
|
||||
fi
|
||||
}
|
||||
|
||||
function update_TDengine() {
|
||||
# Start to update
|
||||
echo -e "${GREEN}Start to update ${productName} client...${NC}"
|
||||
# Stop the client shell if running
|
||||
if pidof ${clientName} &> /dev/null; then
|
||||
kill_client
|
||||
sleep 1
|
||||
fi
|
||||
|
||||
install_main_path
|
||||
|
||||
install_log
|
||||
install_header
|
||||
install_lib
|
||||
install_jemalloc
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_connector
|
||||
fi
|
||||
install_examples
|
||||
install_bin
|
||||
install_config
|
||||
|
||||
echo
|
||||
echo -e "\033[44;32;1m${productName} client is updated successfully!${NC}"
|
||||
}
|
||||
|
||||
function install_TDengine() {
|
||||
# Start to install
|
||||
echo -e "${GREEN}Start to install ${productName} client...${NC}"
|
||||
|
||||
install_main_path
|
||||
install_log
|
||||
install_header
|
||||
install_lib
|
||||
install_jemalloc
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_connector
|
||||
fi
|
||||
install_examples
|
||||
install_bin
|
||||
install_config
|
||||
|
||||
echo
|
||||
echo -e "\033[44;32;1m${productName} client is installed successfully!${NC}"
|
||||
|
||||
rm -rf $(tar -tf ${tarName})
|
||||
}
|
||||
|
||||
|
||||
## ==============================Main program starts from here============================
|
||||
# Install or update the client
|
||||
# if the server is already installed, don't install the client
|
||||
if [ -e ${bin_dir}/${serverName} ]; then
|
||||
echo -e "\033[44;32;1mThere are already installed ${productName} server, so don't need install client!${NC}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ -x ${bin_dir}/${clientName} ]; then
|
||||
update_flag=1
|
||||
update_TDengine
|
||||
else
|
||||
install_TDengine
|
||||
fi
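A minimal usage sketch, assuming the client package has already been unpacked next to the script (the package name is hypothetical):

tar -xzf TDengine-client-3.0.0.0-Linux-x64.tar.gz    # hypothetical package name
cd TDengine-client-3.0.0.0
./install_client.sh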
|
||||
|
|
|
@ -1,71 +1,71 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Generate arbitrator's tar.gz setup package for all OS systems
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
version=$2
|
||||
build_time=$3
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
pagMode=$8
|
||||
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
|
||||
productName="TDengine"
|
||||
|
||||
# create compressed install file.
|
||||
build_dir="${compile_dir}/build"
|
||||
code_dir="${top_dir}/src"
|
||||
release_dir="${top_dir}/release"
|
||||
|
||||
#package_name='linux'
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_dir="${release_dir}/${productName}-enterprise-arbitrator-${version}"
|
||||
else
|
||||
install_dir="${release_dir}/${productName}-arbitrator-${version}"
|
||||
fi
|
||||
|
||||
# Directories and files.
|
||||
bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi.sh"
|
||||
install_files="${script_dir}/install_arbi.sh"
|
||||
|
||||
#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
|
||||
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
|
||||
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
|
||||
|
||||
# make directories.
|
||||
mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi.sh || :
|
||||
#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || :
|
||||
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
|
||||
|
||||
cd ${release_dir}
|
||||
|
||||
# install_dir already distinguishes cluster from edge, so this code is commented out
|
||||
pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
|
||||
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
|
||||
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
pkg_name=${pkg_name}
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
|
||||
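The verType branch above only renames the package for beta and preRelease builds; the stable branch's pkg_name=${pkg_name} is a no-op kept for symmetry. An equivalent sketch with a case statement (same variables assumed):
case "$verType" in
  beta|preRelease) pkg_name=${install_dir}-${verType}-${osType}-${cpuType} ;;
  stable) ;;   # keep the default package name
  *) echo "unknown verType, neither stable nor beta"; exit 1 ;;
esac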
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
|
||||
exitcode=$?
|
||||
if [ "$exitcode" != "0" ]; then
|
||||
echo "tar ${pkg_name}.tar.gz error !!!"
|
||||
exit $exitcode
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
||||
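One caveat in the tar step above: because the command ends in "|| :", $? is already 0 by the time exitcode is read, so the error branch can never fire. A minimal sketch that still tolerates the failure under set -e while capturing tar's real status:
set +e
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files
exitcode=$?
set -e
if [ "$exitcode" != "0" ]; then
  echo "tar ${pkg_name}.tar.gz error !!!"
  exit $exitcode
fi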
#!/bin/bash
|
||||
#
|
||||
# Generate the arbitrator's tar.gz setup package for all OS systems
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
version=$2
|
||||
build_time=$3
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
pagMode=$8
|
||||
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
|
||||
productName="TDengine"
|
||||
|
||||
# create compressed install file.
|
||||
build_dir="${compile_dir}/build"
|
||||
code_dir="${top_dir}"
|
||||
release_dir="${top_dir}/release"
|
||||
|
||||
#package_name='linux'
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_dir="${release_dir}/${productName}-enterprise-arbitrator-${version}"
|
||||
else
|
||||
install_dir="${release_dir}/${productName}-arbitrator-${version}"
|
||||
fi
|
||||
|
||||
# Directories and files.
|
||||
bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi.sh"
|
||||
install_files="${script_dir}/install_arbi.sh"
|
||||
|
||||
#header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h"
|
||||
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
|
||||
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
|
||||
|
||||
# make directories.
|
||||
mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi.sh || :
|
||||
#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || :
|
||||
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
|
||||
|
||||
cd ${release_dir}
|
||||
|
||||
# install_dir already distinguishes cluster from edge, so this code is commented out
|
||||
pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
|
||||
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
|
||||
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
pkg_name=${pkg_name}
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
|
||||
exitcode=$?
|
||||
if [ "$exitcode" != "0" ]; then
|
||||
echo "tar ${pkg_name}.tar.gz error !!!"
|
||||
exit $exitcode
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
||||
|
|
|
@@ -1,246 +1,246 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Generate tar.gz package for the Linux client on all OS systems
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
version=$2
|
||||
build_time=$3
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
pagMode=$8
|
||||
dbName=$9
|
||||
|
||||
productName="TDengine"
|
||||
clientName="taos"
|
||||
configFile="taos.cfg"
|
||||
tarName="taos.tar.gz"
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
else
|
||||
script_dir=$(dirname $0)
|
||||
cd ${script_dir}
|
||||
script_dir="$(pwd)"
|
||||
top_dir=${script_dir}/../..
|
||||
fi
|
||||
|
||||
# create compressed install file.
|
||||
build_dir="${compile_dir}/build"
|
||||
code_dir="${top_dir}/src"
|
||||
release_dir="${top_dir}/release"
|
||||
|
||||
#package_name='linux'
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_dir="${release_dir}/${productName}-enterprise-client-${version}"
|
||||
else
|
||||
install_dir="${release_dir}/${productName}-client-${version}"
|
||||
fi
|
||||
|
||||
# Directories and files.
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
strip ${build_dir}/bin/${clientName}
|
||||
bin_files="${build_dir}/bin/${clientName} \
|
||||
${script_dir}/remove_client.sh"
|
||||
else
|
||||
bin_files="${build_dir}/bin/${clientName} \
|
||||
${script_dir}/remove_client.sh \
|
||||
${script_dir}/set_core.sh \
|
||||
${script_dir}/get_client.sh"
|
||||
fi
|
||||
lib_files="${build_dir}/lib/libtaos.so.${version}"
|
||||
else
|
||||
bin_files="${build_dir}/bin/${clientName} ${script_dir}/remove_client.sh"
|
||||
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
|
||||
fi
|
||||
|
||||
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
|
||||
if [ "$dbName" != "taos" ]; then
|
||||
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
|
||||
else
|
||||
cfg_dir="${top_dir}/packaging/cfg"
|
||||
fi
|
||||
|
||||
install_files="${script_dir}/install_client.sh"
|
||||
|
||||
# make directories.
|
||||
mkdir -p ${install_dir}
|
||||
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
|
||||
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile}
|
||||
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/*
|
||||
|
||||
if [ -f ${build_dir}/bin/jemalloc-config ]; then
|
||||
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
|
||||
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
|
||||
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
|
||||
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
|
||||
fi
|
||||
if [ -f ${build_dir}/bin/jeprof ]; then
|
||||
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
|
||||
fi
|
||||
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
|
||||
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
|
||||
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
|
||||
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
|
||||
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
|
||||
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
|
||||
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
|
||||
fi
|
||||
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
|
||||
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
|
||||
fi
|
||||
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
|
||||
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
|
||||
fi
|
||||
fi
|
||||
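The jemalloc staging above repeats one if/cp pair per optional file. An equivalent, shorter sketch (assuming the same layout under ${build_dir} and the same destination tree) drives the copies from a single manifest:
jemalloc_files="bin/jemalloc-config bin/jemalloc.sh bin/jeprof \
  include/jemalloc/jemalloc.h lib/libjemalloc.so.2 lib/libjemalloc.a \
  lib/libjemalloc_pic.a lib/pkgconfig/jemalloc.pc \
  share/doc/jemalloc/jemalloc.html share/man/man3/jemalloc.3"
for f in ${jemalloc_files}; do
  if [ -f ${build_dir}/${f} ]; then
    # mirror the source sub-directory under ${install_dir}/jemalloc
    mkdir -p ${install_dir}/jemalloc/$(dirname ${f})
    cp ${build_dir}/${f} ${install_dir}/jemalloc/$(dirname ${f})
  fi
done
# recreate the unversioned soname link only when the shared library was copied
if [ -f ${install_dir}/jemalloc/lib/libjemalloc.so.2 ]; then
  ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi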
|
||||
cd ${install_dir}
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
tar -zcv -f ${tarName} * --remove-files || :
|
||||
else
|
||||
tar -zcv -f ${tarName} * || :
|
||||
mv ${tarName} ..
|
||||
rm -rf ./*
|
||||
mv ../${tarName} .
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
||||
cp ${install_files} ${install_dir}
|
||||
if [ "$osType" == "Darwin" ]; then
|
||||
sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client.sh >>install_client_temp.sh
|
||||
mv install_client_temp.sh ${install_dir}/install_client.sh
|
||||
fi
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_client.sh >>install_client_temp.sh
|
||||
mv install_client_temp.sh ${install_dir}/install_client.sh
|
||||
fi
|
||||
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client.sh >>install_client_temp.sh
|
||||
mv install_client_temp.sh ${install_dir}/install_client.sh
|
||||
fi
|
||||
chmod a+x ${install_dir}/install_client.sh
|
||||
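A side note on the three sed blocks above: they append (>>) to install_client_temp.sh, so a stale temp file left behind by an interrupted run would be glued onto the rewritten script. A sketch of the same Darwin rewrite with a truncating redirect:
if [ "$osType" == "Darwin" ]; then
  sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client.sh > install_client_temp.sh
  mv install_client_temp.sh ${install_dir}/install_client.sh
fi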
|
||||
if [[ $productName == "TDengine" ]]; then
|
||||
# Copy example code
|
||||
mkdir -p ${install_dir}/examples
|
||||
examples_dir="${top_dir}/examples"
|
||||
cp -r ${examples_dir}/c ${install_dir}/examples
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
cp -r ${examples_dir}/JDBC ${install_dir}/examples
|
||||
cp -r ${examples_dir}/matlab ${install_dir}/examples
|
||||
cp -r ${examples_dir}/python ${install_dir}/examples
|
||||
cp -r ${examples_dir}/R ${install_dir}/examples
|
||||
cp -r ${examples_dir}/go ${install_dir}/examples
|
||||
cp -r ${examples_dir}/nodejs ${install_dir}/examples
|
||||
cp -r ${examples_dir}/C# ${install_dir}/examples
|
||||
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../src/kit/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
|
||||
fi
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
# Copy connector
|
||||
connector_dir="${code_dir}/connector"
|
||||
mkdir -p ${install_dir}/connector
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
|
||||
fi
|
||||
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
|
||||
cp -r ${connector_dir}/go ${install_dir}/connector
|
||||
else
|
||||
echo "WARNING: go connector not found, please check if want to use it!"
|
||||
fi
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
|
||||
rm -rf ${install_dir}/connector/python/.git ||:
|
||||
# cp -r ${connector_dir}/python ${install_dir}/connector
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-node ${install_dir}/connector/nodejs
|
||||
rm -rf ${install_dir}/connector/nodejs/.git ||:
|
||||
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-dotnet ${install_dir}/connector/dotnet
|
||||
rm -rf ${install_dir}/connector/dotnet/.git ||:
|
||||
# cp -r ${connector_dir}/nodejs ${install_dir}/connector
|
||||
git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
|
||||
rm -rf ${install_dir}/connector/rust/.git ||:
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
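The connector sources above are fetched with git clone at packaging time, so a network failure aborts the whole build under set -e. A hypothetical soft-fail wrapper (clone_connector is not part of the original script) could make the clones best-effort:
# hypothetical helper: clone a connector repo but only warn on failure
clone_connector() {
  repo=$1
  dest=$2
  git clone --depth 1 ${repo} ${dest} && rm -rf ${dest}/.git || \
    echo "WARNING: failed to clone ${repo}; the connector will be missing from the package"
}
clone_connector https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
clone_connector https://github.com/taosdata/taos-connector-node ${install_dir}/connector/nodejs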
# Copy driver
|
||||
mkdir -p ${install_dir}/driver
|
||||
cp ${lib_files} ${install_dir}/driver
|
||||
|
||||
# Copy connector
|
||||
connector_dir="${code_dir}/connector"
|
||||
mkdir -p ${install_dir}/connector
|
||||
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
|
||||
fi
|
||||
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
|
||||
cp -r ${connector_dir}/go ${install_dir}/connector
|
||||
else
|
||||
echo "WARNING: go connector not found, please check if want to use it!"
|
||||
fi
|
||||
cp -r ${connector_dir}/python ${install_dir}/connector
|
||||
cp -r ${connector_dir}/nodejs ${install_dir}/connector
|
||||
fi
|
||||
# Copy release note
|
||||
# cp ${script_dir}/release_note ${install_dir}
|
||||
|
||||
# exit 1
|
||||
|
||||
cd ${release_dir}
|
||||
|
||||
# install_dir already distinguishes cluster from edge, so this code is commented out
|
||||
pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
|
||||
# if [ "$verMode" == "cluster" ]; then
|
||||
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
# elif [ "$verMode" == "edge" ]; then
|
||||
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
# else
|
||||
# echo "unknow verMode, nor cluster or edge"
|
||||
# exit 1
|
||||
# fi
|
||||
|
||||
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
|
||||
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
pkg_name=${pkg_name}
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
pkg_name=${pkg_name}-Lite
|
||||
fi
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
|
||||
else
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || :
|
||||
mv "$(basename ${pkg_name}).tar.gz" ..
|
||||
rm -rf ./*
|
||||
mv ../"$(basename ${pkg_name}).tar.gz" .
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
||||
#!/bin/bash
|
||||
#
|
||||
# Generate tar.gz package for the Linux client on all OS systems
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
version=$2
|
||||
build_time=$3
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
pagMode=$8
|
||||
dbName=$9
|
||||
|
||||
productName="TDengine"
|
||||
clientName="taos"
|
||||
configFile="taos.cfg"
|
||||
tarName="taos.tar.gz"
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
else
|
||||
script_dir=$(dirname $0)
|
||||
cd ${script_dir}
|
||||
script_dir="$(pwd)"
|
||||
top_dir=${script_dir}/../..
|
||||
fi
|
||||
|
||||
# create compressed install file.
|
||||
build_dir="${compile_dir}/build"
|
||||
code_dir="${top_dir}"
|
||||
release_dir="${top_dir}/release"
|
||||
|
||||
#package_name='linux'
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_dir="${release_dir}/${productName}-enterprise-client-${version}"
|
||||
else
|
||||
install_dir="${release_dir}/${productName}-client-${version}"
|
||||
fi
|
||||
|
||||
# Directories and files.
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
strip ${build_dir}/bin/${clientName}
|
||||
bin_files="${build_dir}/bin/${clientName} \
|
||||
${script_dir}/remove_client.sh"
|
||||
else
|
||||
bin_files="${build_dir}/bin/${clientName} \
|
||||
${script_dir}/remove_client.sh \
|
||||
${script_dir}/set_core.sh \
|
||||
${script_dir}/get_client.sh"
|
||||
fi
|
||||
lib_files="${build_dir}/lib/libtaos.so.${version}"
|
||||
else
|
||||
bin_files="${build_dir}/bin/${clientName} ${script_dir}/remove_client.sh"
|
||||
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
|
||||
fi
|
||||
|
||||
header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h"
|
||||
if [ "$dbName" != "taos" ]; then
|
||||
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
|
||||
else
|
||||
cfg_dir="${top_dir}/packaging/cfg"
|
||||
fi
|
||||
|
||||
install_files="${script_dir}/install_client.sh"
|
||||
|
||||
# make directories.
|
||||
mkdir -p ${install_dir}
|
||||
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
|
||||
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile}
|
||||
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/*
|
||||
|
||||
if [ -f ${build_dir}/bin/jemalloc-config ]; then
|
||||
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
|
||||
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
|
||||
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
|
||||
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
|
||||
fi
|
||||
if [ -f ${build_dir}/bin/jeprof ]; then
|
||||
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
|
||||
fi
|
||||
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
|
||||
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
|
||||
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
|
||||
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
|
||||
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
|
||||
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
|
||||
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
|
||||
fi
|
||||
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
|
||||
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
|
||||
fi
|
||||
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
|
||||
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
|
||||
fi
|
||||
fi
|
||||
|
||||
cd ${install_dir}
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
tar -zcv -f ${tarName} * --remove-files || :
|
||||
else
|
||||
tar -zcv -f ${tarName} * || :
|
||||
mv ${tarName} ..
|
||||
rm -rf ./*
|
||||
mv ../${tarName} .
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
||||
cp ${install_files} ${install_dir}
|
||||
if [ "$osType" == "Darwin" ]; then
|
||||
sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client.sh >>install_client_temp.sh
|
||||
mv install_client_temp.sh ${install_dir}/install_client.sh
|
||||
fi
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_client.sh >>install_client_temp.sh
|
||||
mv install_client_temp.sh ${install_dir}/install_client.sh
|
||||
fi
|
||||
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client.sh >>install_client_temp.sh
|
||||
mv install_client_temp.sh ${install_dir}/install_client.sh
|
||||
fi
|
||||
chmod a+x ${install_dir}/install_client.sh
|
||||
|
||||
if [[ $productName == "TDengine" ]]; then
|
||||
# Copy example code
|
||||
mkdir -p ${install_dir}/examples
|
||||
examples_dir="${top_dir}/examples"
|
||||
cp -r ${examples_dir}/c ${install_dir}/examples
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
cp -r ${examples_dir}/JDBC ${install_dir}/examples
|
||||
cp -r ${examples_dir}/matlab ${install_dir}/examples
|
||||
cp -r ${examples_dir}/python ${install_dir}/examples
|
||||
cp -r ${examples_dir}/R ${install_dir}/examples
|
||||
cp -r ${examples_dir}/go ${install_dir}/examples
|
||||
cp -r ${examples_dir}/nodejs ${install_dir}/examples
|
||||
cp -r ${examples_dir}/C# ${install_dir}/examples
|
||||
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../tools/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
|
||||
fi
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
# Copy connector
|
||||
connector_dir="${code_dir}/connector"
|
||||
mkdir -p ${install_dir}/connector
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
|
||||
fi
|
||||
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
|
||||
cp -r ${connector_dir}/go ${install_dir}/connector
|
||||
else
|
||||
echo "WARNING: go connector not found, please check if want to use it!"
|
||||
fi
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
|
||||
rm -rf ${install_dir}/connector/python/.git ||:
|
||||
# cp -r ${connector_dir}/python ${install_dir}/connector
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-node ${install_dir}/connector/nodejs
|
||||
rm -rf ${install_dir}/connector/nodejs/.git ||:
|
||||
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-dotnet ${install_dir}/connector/dotnet
|
||||
rm -rf ${install_dir}/connector/dotnet/.git ||:
|
||||
# cp -r ${connector_dir}/nodejs ${install_dir}/connector
|
||||
git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
|
||||
rm -rf ${install_dir}/connector/rust/.git ||:
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
# Copy driver
|
||||
mkdir -p ${install_dir}/driver
|
||||
cp ${lib_files} ${install_dir}/driver
|
||||
|
||||
# Copy connector
|
||||
connector_dir="${code_dir}/connector"
|
||||
mkdir -p ${install_dir}/connector
|
||||
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
|
||||
fi
|
||||
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
|
||||
cp -r ${connector_dir}/go ${install_dir}/connector
|
||||
else
|
||||
echo "WARNING: go connector not found, please check if want to use it!"
|
||||
fi
|
||||
cp -r ${connector_dir}/python ${install_dir}/connector || :
|
||||
cp -r ${connector_dir}/nodejs ${install_dir}/connector || :
|
||||
fi
|
||||
# Copy release note
|
||||
# cp ${script_dir}/release_note ${install_dir}
|
||||
|
||||
# exit 1
|
||||
|
||||
cd ${release_dir}
|
||||
|
||||
# install_dir already distinguishes cluster from edge, so this code is commented out
|
||||
pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
|
||||
# if [ "$verMode" == "cluster" ]; then
|
||||
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
# elif [ "$verMode" == "edge" ]; then
|
||||
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
# else
|
||||
# echo "unknow verMode, nor cluster or edge"
|
||||
# exit 1
|
||||
# fi
|
||||
|
||||
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
|
||||
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
pkg_name=${pkg_name}
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
pkg_name=${pkg_name}-Lite
|
||||
fi
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
|
||||
else
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || :
|
||||
mv "$(basename ${pkg_name}).tar.gz" ..
|
||||
rm -rf ./*
|
||||
mv ../"$(basename ${pkg_name}).tar.gz" .
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
||||
|
|
|
@@ -1,378 +1,378 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Generate tar.gz package for all OS systems
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
version=$2
|
||||
build_time=$3
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
pagMode=$8
|
||||
versionComp=$9
|
||||
dbName=${10}
|
||||
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
|
||||
productName="TDengine"
|
||||
serverName="taosd"
|
||||
clientName="taos"
|
||||
configFile="taos.cfg"
|
||||
tarName="taos.tar.gz"
|
||||
dumpName="taosdump"
|
||||
benchmarkName="taosBenchmark"
|
||||
toolsName="taostools"
|
||||
adapterName="taosadapter"
|
||||
defaultPasswd="taosdata"
|
||||
|
||||
# create compressed install file.
|
||||
build_dir="${compile_dir}/build"
|
||||
code_dir="${top_dir}/src"
|
||||
release_dir="${top_dir}/release"
|
||||
|
||||
#package_name='linux'
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_dir="${release_dir}/${productName}-enterprise-server-${version}"
|
||||
else
|
||||
install_dir="${release_dir}/${productName}-server-${version}"
|
||||
fi
|
||||
|
||||
if [ -d ${top_dir}/src/kit/taos-tools/packaging/deb ]; then
|
||||
cd ${top_dir}/src/kit/taos-tools/packaging/deb
|
||||
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
|
||||
|
||||
taostools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}')
|
||||
taostools_install_dir="${release_dir}/${clientName}Tools-${taostools_ver}"
|
||||
|
||||
cd ${curr_dir}
|
||||
else
|
||||
taostools_install_dir="${release_dir}/${clientName}Tools-${version}"
|
||||
fi
|
||||
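The taos-tools version above comes from git describe; the sed/awk pipeline strips the ver- prefix and keeps only the leading version component. A quick check of the expression in isolation (the tag value below is hypothetical):
echo "ver-2.1.0-3-gabc1234" | sed -e 's/ver-//g' | awk -F '-' '{print $1}'
# prints: 2.1.0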
|
||||
# Directories and files
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
strip ${build_dir}/bin/${serverName}
|
||||
strip ${build_dir}/bin/${clientName}
|
||||
# the lite version doesn't include taosadapter, so the RESTful interface is unavailable
|
||||
bin_files="${build_dir}/bin/${serverName} ${build_dir}/bin/${clientName} ${script_dir}/remove.sh ${script_dir}/startPre.sh ${build_dir}/bin/taosBenchmark"
|
||||
taostools_bin_files=""
|
||||
else
|
||||
|
||||
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${build_dir}/bin/TDinsight.sh \
|
||||
&& echo "TDinsight.sh downloaded!" \
|
||||
|| echo "failed to download TDinsight.sh"
|
||||
# download TDinsight caches
|
||||
orig_pwd=$(pwd)
|
||||
tdinsight_caches=""
|
||||
cd ${build_dir}/bin/ && \
|
||||
chmod +x TDinsight.sh
|
||||
tdinsight_caches=$(./TDinsight.sh --download-only | xargs -i printf "${build_dir}/bin/{} ")
|
||||
cd $orig_pwd
|
||||
echo "TDinsight caches: $tdinsight_caches"
|
||||
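The cache list above is built with xargs -i, GNU xargs' deprecated spelling of -I {}. An equivalent line with the current option (same behaviour assumed):
tdinsight_caches=$(./TDinsight.sh --download-only | xargs -I{} printf "${build_dir}/bin/{} ")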
|
||||
taostools_bin_files=" ${build_dir}/bin/taosdump \
|
||||
${build_dir}/bin/taosBenchmark \
|
||||
${build_dir}/bin/TDinsight.sh \
|
||||
$tdinsight_caches"
|
||||
|
||||
bin_files="${build_dir}/bin/${serverName} \
|
||||
${build_dir}/bin/${clientName} \
|
||||
${taostools_bin_files} \
|
||||
${build_dir}/bin/taosadapter \
|
||||
${build_dir}/bin/tarbitrator\
|
||||
${script_dir}/remove.sh \
|
||||
${script_dir}/set_core.sh \
|
||||
${script_dir}/run_taosd_and_taosadapter.sh \
|
||||
${script_dir}/startPre.sh \
|
||||
${script_dir}/taosd-dump-cfg.gdb"
|
||||
fi
|
||||
|
||||
lib_files="${build_dir}/lib/libtaos.so.${version}"
|
||||
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
|
||||
|
||||
if [ "$dbName" != "taos" ]; then
|
||||
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
|
||||
else
|
||||
cfg_dir="${top_dir}/packaging/cfg"
|
||||
fi
|
||||
|
||||
install_files="${script_dir}/install.sh"
|
||||
nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
|
||||
|
||||
init_file_deb=${script_dir}/../deb/taosd
|
||||
init_file_rpm=${script_dir}/../rpm/taosd
|
||||
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
|
||||
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
|
||||
|
||||
# make directories.
|
||||
mkdir -p ${install_dir}
|
||||
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
|
||||
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile}
|
||||
|
||||
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
|
||||
cp ${compile_dir}/test/cfg/taosadapter.toml ${install_dir}/cfg || :
|
||||
fi
|
||||
|
||||
if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
|
||||
cp ${compile_dir}/test/cfg/taosadapter.service ${install_dir}/cfg || :
|
||||
fi
|
||||
|
||||
if [ -f "${cfg_dir}/${serverName}.service" ]; then
|
||||
cp ${cfg_dir}/${serverName}.service ${install_dir}/cfg || :
|
||||
fi
|
||||
|
||||
if [ -f "${top_dir}/packaging/cfg/tarbitratord.service" ]; then
|
||||
cp ${top_dir}/packaging/cfg/tarbitratord.service ${install_dir}/cfg || :
|
||||
fi
|
||||
|
||||
if [ -f "${top_dir}/packaging/cfg/nginxd.service" ]; then
|
||||
cp ${top_dir}/packaging/cfg/nginxd.service ${install_dir}/cfg || :
|
||||
fi
|
||||
|
||||
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/${serverName}.deb
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/${serverName}.rpm
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
|
||||
|
||||
if [ $adapterName != "taosadapter" ]; then
|
||||
mv ${install_dir}/cfg/taosadapter.toml ${install_dir}/cfg/$adapterName.toml
|
||||
sed -i "s/path = \"\/var\/log\/taos\"/path = \"\/var\/log\/${productName}\"/g" ${install_dir}/cfg/$adapterName.toml
|
||||
sed -i "s/password = \"taosdata\"/password = \"${defaultPasswd}\"/g" ${install_dir}/cfg/$adapterName.toml
|
||||
|
||||
mv ${install_dir}/cfg/taosadapter.service ${install_dir}/cfg/$adapterName.service
|
||||
sed -i "s/TDengine/${productName}/g" ${install_dir}/cfg/$adapterName.service
|
||||
sed -i "s/taosAdapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service
|
||||
sed -i "s/taosadapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service
|
||||
|
||||
mv ${install_dir}/bin/taosadapter ${install_dir}/bin/${adapterName}
|
||||
mv ${install_dir}/bin/run_taosd_and_taosadapter.sh ${install_dir}/bin/run_${serverName}_and_${adapterName}.sh
|
||||
mv ${install_dir}/bin/taosd-dump-cfg.gdb ${install_dir}/bin/${serverName}-dump-cfg.gdb
|
||||
fi
|
||||
|
||||
if [ -n "${taostools_bin_files}" ]; then
|
||||
mkdir -p ${taostools_install_dir} || echo -e "failed to create ${taostools_install_dir}"
|
||||
mkdir -p ${taostools_install_dir}/bin \
|
||||
&& cp ${taostools_bin_files} ${taostools_install_dir}/bin \
|
||||
&& chmod a+x ${taostools_install_dir}/bin/* || :
|
||||
|
||||
if [ -f ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh ]; then
|
||||
cp ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh \
|
||||
${taostools_install_dir}/ > /dev/null \
|
||||
&& chmod a+x ${taostools_install_dir}/install-taostools.sh \
|
||||
|| echo -e "failed to copy install-taostools.sh"
|
||||
else
|
||||
echo -e "install-taostools.sh not found"
|
||||
fi
|
||||
|
||||
if [ -f ${top_dir}/src/kit/taos-tools/packaging/tools/uninstall-taostools.sh ]; then
|
||||
cp ${top_dir}/src/kit/taos-tools/packaging/tools/uninstall-taostools.sh \
|
||||
${taostools_install_dir}/ > /dev/null \
|
||||
&& chmod a+x ${taostools_install_dir}/uninstall-taostools.sh \
|
||||
|| echo -e "failed to copy uninstall-taostools.sh"
|
||||
else
|
||||
echo -e "uninstall-taostools.sh not found"
|
||||
fi
|
||||
|
||||
if [ -f ${build_dir}/lib/libavro.so.23.0.0 ]; then
|
||||
mkdir -p ${taostools_install_dir}/avro/{lib,lib/pkgconfig} || echo -e "failed to create ${taostools_install_dir}/avro"
|
||||
cp ${build_dir}/lib/libavro.* ${taostools_install_dir}/avro/lib
|
||||
cp ${build_dir}/lib/pkgconfig/avro-c.pc ${taostools_install_dir}/avro/lib/pkgconfig
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -f ${build_dir}/bin/jemalloc-config ]; then
|
||||
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
|
||||
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
|
||||
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
|
||||
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
|
||||
fi
|
||||
if [ -f ${build_dir}/bin/jeprof ]; then
|
||||
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
|
||||
fi
|
||||
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
|
||||
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
|
||||
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
|
||||
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
|
||||
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
|
||||
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
|
||||
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
|
||||
fi
|
||||
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
|
||||
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
|
||||
fi
|
||||
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
|
||||
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >>remove_temp.sh
|
||||
mv remove_temp.sh ${install_dir}/bin/remove.sh
|
||||
|
||||
mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
|
||||
cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
|
||||
rm -rf ${install_dir}/nginxd/png
|
||||
|
||||
if [ "$cpuType" == "aarch64" ]; then
|
||||
cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
|
||||
elif [ "$cpuType" == "aarch32" ]; then
|
||||
cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
|
||||
fi
|
||||
rm -rf ${install_dir}/nginxd/sbin/arm
|
||||
fi
|
||||
|
||||
cd ${install_dir}
|
||||
tar -zcv -f ${tarName} * --remove-files || :
|
||||
exitcode=$?
|
||||
if [ "$exitcode" != "0" ]; then
|
||||
echo "tar ${tarName} error !!!"
|
||||
exit $exitcode
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
||||
cp ${install_files} ${install_dir}
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install.sh >>install_temp.sh
|
||||
mv install_temp.sh ${install_dir}/install.sh
|
||||
fi
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install.sh >>install_temp.sh
|
||||
mv install_temp.sh ${install_dir}/install.sh
|
||||
fi
|
||||
chmod a+x ${install_dir}/install.sh
|
||||
|
||||
if [[ $dbName == "taos" ]]; then
|
||||
# Copy example code
|
||||
mkdir -p ${install_dir}/examples
|
||||
examples_dir="${top_dir}/examples"
|
||||
cp -r ${examples_dir}/c ${install_dir}/examples
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
if [ -d ${examples_dir}/JDBC/connectionPools/target ]; then
|
||||
rm -rf ${examples_dir}/JDBC/connectionPools/target
|
||||
fi
|
||||
if [ -d ${examples_dir}/JDBC/JDBCDemo/target ]; then
|
||||
rm -rf ${examples_dir}/JDBC/JDBCDemo/target
|
||||
fi
|
||||
if [ -d ${examples_dir}/JDBC/mybatisplus-demo/target ]; then
|
||||
rm -rf ${examples_dir}/JDBC/mybatisplus-demo/target
|
||||
fi
|
||||
if [ -d ${examples_dir}/JDBC/springbootdemo/target ]; then
|
||||
rm -rf ${examples_dir}/JDBC/springbootdemo/target
|
||||
fi
|
||||
if [ -d ${examples_dir}/JDBC/SpringJdbcTemplate/target ]; then
|
||||
rm -rf ${examples_dir}/JDBC/SpringJdbcTemplate/target
|
||||
fi
|
||||
if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then
|
||||
rm -rf ${examples_dir}/JDBC/taosdemo/target
|
||||
fi
|
||||
|
||||
cp -r ${examples_dir}/JDBC ${install_dir}/examples
|
||||
cp -r ${examples_dir}/matlab ${install_dir}/examples
|
||||
cp -r ${examples_dir}/python ${install_dir}/examples
|
||||
cp -r ${examples_dir}/R ${install_dir}/examples
|
||||
cp -r ${examples_dir}/go ${install_dir}/examples
|
||||
cp -r ${examples_dir}/nodejs ${install_dir}/examples
|
||||
cp -r ${examples_dir}/C# ${install_dir}/examples
|
||||
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../src/kit/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
|
||||
fi
|
||||
fi
|
||||
|
||||
# Copy driver
|
||||
mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt
|
||||
|
||||
# Copy connector
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
connector_dir="${code_dir}/connector"
|
||||
mkdir -p ${install_dir}/connector
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
|
||||
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
|
||||
cp -r ${connector_dir}/go ${install_dir}/connector
|
||||
else
|
||||
echo "WARNING: go connector not found, please check if want to use it!"
|
||||
fi
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
|
||||
rm -rf ${install_dir}/connector/python/.git ||:
|
||||
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-node ${install_dir}/connector/nodejs
|
||||
rm -rf ${install_dir}/connector/nodejs/.git ||:
|
||||
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-dotnet ${install_dir}/connector/dotnet
|
||||
rm -rf ${install_dir}/connector/dotnet/.git ||:
|
||||
|
||||
git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
|
||||
rm -rf ${install_dir}/connector/rust/.git ||:
|
||||
# cp -r ${connector_dir}/python ${install_dir}/connector
|
||||
# cp -r ${connector_dir}/nodejs ${install_dir}/connector
|
||||
fi
|
||||
fi
|
||||
|
||||
# Copy release note
|
||||
cp ${script_dir}/release_note ${install_dir}
|
||||
|
||||
# exit 1
|
||||
|
||||
cd ${release_dir}
|
||||
|
||||
# install_dir already distinguishes cluster from edge, so this code is commented out
|
||||
pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
|
||||
taostools_pkg_name=${taostools_install_dir}-${osType}-${cpuType}
|
||||
|
||||
# if [ "$verMode" == "cluster" ]; then
|
||||
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
# elif [ "$verMode" == "edge" ]; then
|
||||
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
# else
|
||||
# echo "unknow verMode, nor cluster or edge"
|
||||
# exit 1
|
||||
# fi
|
||||
|
||||
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
|
||||
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
|
||||
taostools_pkg_name=${taostools_install_dir}-${verType}-${osType}-${cpuType}
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
pkg_name=${pkg_name}
|
||||
taostools_pkg_name=${taostools_pkg_name}
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
pkg_name=${pkg_name}-Lite
|
||||
fi
|
||||
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" "$(basename ${install_dir})" --remove-files || :
|
||||
exitcode=$?
|
||||
if [ "$exitcode" != "0" ]; then
|
||||
echo "tar ${pkg_name}.tar.gz error !!!"
|
||||
exit $exitcode
|
||||
fi
|
||||
|
||||
if [ -n "${taostools_bin_files}" ]; then
|
||||
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${taostools_install_dir}/bin/TDinsight.sh && echo "TDinsight.sh downloaded!" || echo "failed to download TDinsight.sh"
|
||||
tar -zcv -f "$(basename ${taostools_pkg_name}).tar.gz" "$(basename ${taostools_install_dir})" --remove-files || :
|
||||
exitcode=$?
|
||||
if [ "$exitcode" != "0" ]; then
|
||||
echo "tar ${taostools_pkg_name}.tar.gz error !!!"
|
||||
exit $exitcode
|
||||
fi
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
||||
#!/bin/bash
|
||||
#
|
||||
# Generate tar.gz package for all OS systems
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
version=$2
|
||||
build_time=$3
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
pagMode=$8
|
||||
versionComp=$9
|
||||
dbName=${10}
|
||||
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
|
||||
productName="TDengine"
|
||||
serverName="taosd"
|
||||
clientName="taos"
|
||||
configFile="taos.cfg"
|
||||
tarName="taos.tar.gz"
|
||||
dumpName="taosdump"
|
||||
benchmarkName="taosBenchmark"
|
||||
toolsName="taostools"
|
||||
adapterName="taosadapter"
|
||||
defaultPasswd="taosdata"
|
||||
|
||||
# create compressed install file.
|
||||
build_dir="${compile_dir}/build"
|
||||
code_dir="${top_dir}"
|
||||
release_dir="${top_dir}/release"
|
||||
|
||||
#package_name='linux'
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_dir="${release_dir}/${productName}-enterprise-server-${version}"
|
||||
else
|
||||
install_dir="${release_dir}/${productName}-server-${version}"
|
||||
fi
|
||||
|
||||
if [ -d ${top_dir}/tools/taos-tools/packaging/deb ]; then
|
||||
cd ${top_dir}/tools/taos-tools/packaging/deb
|
||||
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
|
||||
|
||||
taostools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}')
|
||||
taostools_install_dir="${release_dir}/${clientName}Tools-${taostools_ver}"
|
||||
|
||||
cd ${curr_dir}
|
||||
else
|
||||
taostools_install_dir="${release_dir}/${clientName}Tools-${version}"
|
||||
fi
|
||||
|
||||
# Directories and files
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
strip ${build_dir}/bin/${serverName}
|
||||
strip ${build_dir}/bin/${clientName}
|
||||
# the lite version doesn't include taosadapter, so the RESTful interface is unavailable
|
||||
bin_files="${build_dir}/bin/${serverName} ${build_dir}/bin/${clientName} ${script_dir}/remove.sh ${script_dir}/startPre.sh ${build_dir}/bin/taosBenchmark"
|
||||
taostools_bin_files=""
|
||||
else
|
||||
|
||||
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${build_dir}/bin/TDinsight.sh \
|
||||
&& echo "TDinsight.sh downloaded!" \
|
||||
|| echo "failed to download TDinsight.sh"
|
||||
# download TDinsight caches
|
||||
orig_pwd=$(pwd)
|
||||
tdinsight_caches=""
|
||||
cd ${build_dir}/bin/ && \
|
||||
chmod +x TDinsight.sh
|
||||
tdinsight_caches=$(./TDinsight.sh --download-only | xargs -i printf "${build_dir}/bin/{} ")
|
||||
cd $orig_pwd
|
||||
echo "TDinsight caches: $tdinsight_caches"
|
||||
|
||||
taostools_bin_files=" ${build_dir}/bin/taosdump \
|
||||
${build_dir}/bin/taosBenchmark \
|
||||
${build_dir}/bin/TDinsight.sh \
|
||||
$tdinsight_caches"
|
||||
|
||||
bin_files="${build_dir}/bin/${serverName} \
|
||||
${build_dir}/bin/${clientName} \
|
||||
${taostools_bin_files} \
|
||||
${build_dir}/bin/taosadapter \
|
||||
${build_dir}/bin/tarbitrator\
|
||||
${script_dir}/remove.sh \
|
||||
${script_dir}/set_core.sh \
|
||||
${script_dir}/run_taosd_and_taosadapter.sh \
|
||||
${script_dir}/startPre.sh \
|
||||
${script_dir}/taosd-dump-cfg.gdb"
|
||||
fi
|
||||
|
||||
lib_files="${build_dir}/lib/libtaos.so.${version}"
|
||||
header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h"
|
||||
|
||||
if [ "$dbName" != "taos" ]; then
|
||||
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
|
||||
else
|
||||
cfg_dir="${top_dir}/packaging/cfg"
|
||||
fi
|
||||
|
||||
install_files="${script_dir}/install.sh"
|
||||
nginx_dir="${top_dir}/../enterprise/src/plugins/web"
|
||||
|
||||
init_file_deb=${script_dir}/../deb/taosd
|
||||
init_file_rpm=${script_dir}/../rpm/taosd
|
||||
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
|
||||
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
|
||||
|
||||
# make directories.
|
||||
mkdir -p ${install_dir}
|
||||
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
|
||||
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile}
|
||||
|
||||
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
|
||||
cp ${compile_dir}/test/cfg/taosadapter.toml ${install_dir}/cfg || :
|
||||
fi
|
||||
|
||||
if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
|
||||
cp ${compile_dir}/test/cfg/taosadapter.service ${install_dir}/cfg || :
|
||||
fi
|
||||
|
||||
if [ -f "${cfg_dir}/${serverName}.service" ]; then
|
||||
cp ${cfg_dir}/${serverName}.service ${install_dir}/cfg || :
|
||||
fi
|
||||
|
||||
if [ -f "${top_dir}/packaging/cfg/tarbitratord.service" ]; then
|
||||
cp ${top_dir}/packaging/cfg/tarbitratord.service ${install_dir}/cfg || :
|
||||
fi
|
||||
|
||||
if [ -f "${top_dir}/packaging/cfg/nginxd.service" ]; then
|
||||
cp ${top_dir}/packaging/cfg/nginxd.service ${install_dir}/cfg || :
|
||||
fi
|
||||
|
||||
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/${serverName}.deb
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/${serverName}.rpm
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
|
||||
|
||||
if [ $adapterName != "taosadapter" ]; then
|
||||
mv ${install_dir}/cfg/taosadapter.toml ${install_dir}/cfg/$adapterName.toml
|
||||
sed -i "s/path = \"\/var\/log\/taos\"/path = \"\/var\/log\/${productName}\"/g" ${install_dir}/cfg/$adapterName.toml
|
||||
sed -i "s/password = \"taosdata\"/password = \"${defaultPasswd}\"/g" ${install_dir}/cfg/$adapterName.toml
|
||||
|
||||
mv ${install_dir}/cfg/taosadapter.service ${install_dir}/cfg/$adapterName.service
|
||||
sed -i "s/TDengine/${productName}/g" ${install_dir}/cfg/$adapterName.service
|
||||
sed -i "s/taosAdapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service
|
||||
sed -i "s/taosadapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service
|
||||
|
||||
mv ${install_dir}/bin/taosadapter ${install_dir}/bin/${adapterName}
|
||||
mv ${install_dir}/bin/run_taosd_and_taosadapter.sh ${install_dir}/bin/run_${serverName}_and_${adapterName}.sh
|
||||
mv ${install_dir}/bin/taosd-dump-cfg.gdb ${install_dir}/bin/${serverName}-dump-cfg.gdb
|
||||
fi
|
||||
|
||||
if [ -n "${taostools_bin_files}" ]; then
|
||||
mkdir -p ${taostools_install_dir} || echo -e "failed to create ${taostools_install_dir}"
|
||||
mkdir -p ${taostools_install_dir}/bin \
|
||||
&& cp ${taostools_bin_files} ${taostools_install_dir}/bin \
|
||||
&& chmod a+x ${taostools_install_dir}/bin/* || :
|
||||
|
||||
if [ -f ${top_dir}/tools/taos-tools/packaging/tools/install-taostools.sh ]; then
|
||||
cp ${top_dir}/tools/taos-tools/packaging/tools/install-taostools.sh \
|
||||
${taostools_install_dir}/ > /dev/null \
|
||||
&& chmod a+x ${taostools_install_dir}/install-taostools.sh \
|
||||
|| echo -e "failed to copy install-taostools.sh"
|
||||
else
|
||||
echo -e "install-taostools.sh not found"
|
||||
fi
|
||||
|
||||
if [ -f ${top_dir}/tools/taos-tools/packaging/tools/uninstall-taostools.sh ]; then
|
||||
cp ${top_dir}/tools/taos-tools/packaging/tools/uninstall-taostools.sh \
|
||||
${taostools_install_dir}/ > /dev/null \
|
||||
&& chmod a+x ${taostools_install_dir}/uninstall-taostools.sh \
|
||||
|| echo -e "failed to copy uninstall-taostools.sh"
|
||||
else
|
||||
echo -e "uninstall-taostools.sh not found"
|
||||
fi
|
||||
|
||||
if [ -f ${build_dir}/lib/libavro.so.23.0.0 ]; then
|
||||
mkdir -p ${taostools_install_dir}/avro/{lib,lib/pkgconfig} || echo -e "failed to create ${taostools_install_dir}/avro"
|
||||
cp ${build_dir}/lib/libavro.* ${taostools_install_dir}/avro/lib
|
||||
cp ${build_dir}/lib/pkgconfig/avro-c.pc ${taostools_install_dir}/avro/lib/pkgconfig
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -f ${build_dir}/bin/jemalloc-config ]; then
|
||||
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
|
||||
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
|
||||
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
|
||||
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
|
||||
fi
|
||||
if [ -f ${build_dir}/bin/jeprof ]; then
|
||||
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
|
||||
fi
|
||||
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
|
||||
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
|
||||
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
|
||||
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
|
||||
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
|
||||
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
|
||||
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
|
||||
fi
|
||||
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
|
||||
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
|
||||
fi
|
||||
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
|
||||
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >>remove_temp.sh
|
||||
mv remove_temp.sh ${install_dir}/bin/remove.sh
|
||||
|
||||
mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
|
||||
cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
|
||||
rm -rf ${install_dir}/nginxd/png
|
||||
|
||||
if [ "$cpuType" == "aarch64" ]; then
|
||||
cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
|
||||
elif [ "$cpuType" == "aarch32" ]; then
|
||||
cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
|
||||
fi
|
||||
rm -rf ${install_dir}/nginxd/sbin/arm
|
||||
fi
|
||||
|
||||
cd ${install_dir}
|
||||
tar -zcv -f ${tarName} * --remove-files || :
|
||||
exitcode=$?
|
||||
if [ "$exitcode" != "0" ]; then
|
||||
echo "tar ${tarName} error !!!"
|
||||
exit $exitcode
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
||||
cp ${install_files} ${install_dir}
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install.sh >>install_temp.sh
|
||||
mv install_temp.sh ${install_dir}/install.sh
|
||||
fi
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install.sh >>install_temp.sh
|
||||
mv install_temp.sh ${install_dir}/install.sh
|
||||
fi
|
||||
chmod a+x ${install_dir}/install.sh
|
||||
|
||||
if [[ $dbName == "taos" ]]; then
|
||||
# Copy example code
|
||||
mkdir -p ${install_dir}/examples
|
||||
examples_dir="${top_dir}/examples"
|
||||
cp -r ${examples_dir}/c ${install_dir}/examples
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
if [ -d ${examples_dir}/JDBC/connectionPools/target ]; then
|
||||
rm -rf ${examples_dir}/JDBC/connectionPools/target
|
||||
fi
|
||||
if [ -d ${examples_dir}/JDBC/JDBCDemo/target ]; then
|
||||
rm -rf ${examples_dir}/JDBC/JDBCDemo/target
|
||||
fi
|
||||
if [ -d ${examples_dir}/JDBC/mybatisplus-demo/target ]; then
|
||||
rm -rf ${examples_dir}/JDBC/mybatisplus-demo/target
|
||||
fi
|
||||
if [ -d ${examples_dir}/JDBC/springbootdemo/target ]; then
|
||||
rm -rf ${examples_dir}/JDBC/springbootdemo/target
|
||||
fi
|
||||
if [ -d ${examples_dir}/JDBC/SpringJdbcTemplate/target ]; then
|
||||
rm -rf ${examples_dir}/JDBC/SpringJdbcTemplate/target
|
||||
fi
|
||||
if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then
|
||||
rm -rf ${examples_dir}/JDBC/taosdemo/target
|
||||
fi
|
||||
|
||||
cp -r ${examples_dir}/JDBC ${install_dir}/examples
|
||||
cp -r ${examples_dir}/matlab ${install_dir}/examples
|
||||
cp -r ${examples_dir}/python ${install_dir}/examples
|
||||
cp -r ${examples_dir}/R ${install_dir}/examples
|
||||
cp -r ${examples_dir}/go ${install_dir}/examples
|
||||
cp -r ${examples_dir}/nodejs ${install_dir}/examples
|
||||
cp -r ${examples_dir}/C# ${install_dir}/examples
|
||||
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../tools/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
|
||||
fi
|
||||
fi
|
||||
|
||||
# Copy driver
|
||||
mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt
|
||||
|
||||
# Copy connector
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
connector_dir="${code_dir}/connector"
|
||||
mkdir -p ${install_dir}/connector
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
|
||||
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
|
||||
cp -r ${connector_dir}/go ${install_dir}/connector
|
||||
else
|
||||
echo "WARNING: go connector not found, please check if want to use it!"
|
||||
fi
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
|
||||
rm -rf ${install_dir}/connector/python/.git ||:
|
||||
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-node ${install_dir}/connector/nodejs
|
||||
rm -rf ${install_dir}/connector/nodejs/.git ||:
|
||||
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-dotnet ${install_dir}/connector/dotnet
|
||||
rm -rf ${install_dir}/connector/dotnet/.git ||:
|
||||
|
||||
git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
|
||||
rm -rf ${install_dir}/connector/rust/.git ||:
|
||||
# cp -r ${connector_dir}/python ${install_dir}/connector
|
||||
# cp -r ${connector_dir}/nodejs ${install_dir}/connector
|
||||
fi
|
||||
fi
|
||||
|
||||
# Copy release note
|
||||
cp ${script_dir}/release_note ${install_dir}
|
||||
|
||||
# exit 1
|
||||
|
||||
cd ${release_dir}
|
||||
|
||||
# install_dir already distinguishes cluster from edge, so this code is commented out
|
||||
pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
|
||||
taostools_pkg_name=${taostools_install_dir}-${osType}-${cpuType}
|
||||
|
||||
# if [ "$verMode" == "cluster" ]; then
|
||||
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
# elif [ "$verMode" == "edge" ]; then
|
||||
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
# else
|
||||
# echo "unknow verMode, nor cluster or edge"
|
||||
# exit 1
|
||||
# fi
|
||||
|
||||
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
|
||||
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
|
||||
taostools_pkg_name=${taostools_install_dir}-${verType}-${osType}-${cpuType}
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
pkg_name=${pkg_name}
|
||||
taostools_pkg_name=${taostools_pkg_name}
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
pkg_name=${pkg_name}-Lite
|
||||
fi
|
||||
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" "$(basename ${install_dir})" --remove-files || :
|
||||
exitcode=$?
|
||||
if [ "$exitcode" != "0" ]; then
|
||||
echo "tar ${pkg_name}.tar.gz error !!!"
|
||||
exit $exitcode
|
||||
fi
|
||||
|
||||
if [ -n "${taostools_bin_files}" ]; then
|
||||
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${taostools_install_dir}/bin/TDinsight.sh && echo "TDinsight.sh downloaded!" || echo "failed to download TDinsight.sh"
|
||||
tar -zcv -f "$(basename ${taostools_pkg_name}).tar.gz" "$(basename ${taostools_install_dir})" --remove-files || :
|
||||
exitcode=$?
|
||||
if [ "$exitcode" != "0" ]; then
|
||||
echo "tar ${taostools_pkg_name}.tar.gz error !!!"
|
||||
exit $exitcode
|
||||
fi
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
||||
|
|
File diff suppressed because it is too large
|
@@ -1,142 +1,142 @@
#!/bin/bash
#
# Script to stop the service and uninstall TSDB

RED='\033[0;31m'
GREEN='\033[1;32m'
NC='\033[0m'

bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"

data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
cfg_link_dir="/usr/local/taos/cfg"

service_config_dir="/etc/systemd/system"
taos_service_name="taosd"

csudo=""
if command -v sudo > /dev/null; then
  csudo="sudo "
fi

initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
  service_mod=0
elif $(which service &> /dev/null); then
  service_mod=1
  service_config_dir="/etc/init.d"
  if $(which chkconfig &> /dev/null); then
    initd_mod=1
  elif $(which insserv &> /dev/null); then
    initd_mod=2
  elif $(which update-rc.d &> /dev/null); then
    initd_mod=3
  else
    service_mod=2
  fi
else
  service_mod=2
fi

function kill_taosadapter() {
  pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}')
  if [ -n "$pid" ]; then
    ${csudo}kill -9 $pid || :
  fi
}

function kill_taosd() {
  pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
  if [ -n "$pid" ]; then
    ${csudo}kill -9 $pid || :
  fi
}

function clean_service_on_systemd() {
  taosadapter_service_config="${service_config_dir}/taosadapter.service"
  if systemctl is-active --quiet taosadapter; then
    echo "taosadapter is running, stopping it..."
    ${csudo}systemctl stop taosadapter &> /dev/null || echo &> /dev/null
  fi

  taosd_service_config="${service_config_dir}/${taos_service_name}.service"

  if systemctl is-active --quiet ${taos_service_name}; then
    echo "TDengine taosd is running, stopping it..."
    ${csudo}systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null
  fi
  ${csudo}systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null

  ${csudo}rm -f ${taosd_service_config}

  [ -f ${taosadapter_service_config} ] && ${csudo}rm -f ${taosadapter_service_config}
}

function clean_service_on_sysvinit() {
  #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
  #${csudo}sed -i "\|${restart_config_str}|d" /etc/inittab || :

  if pidof taosd &> /dev/null; then
    echo "TDengine taosd is running, stopping it..."
    ${csudo}service taosd stop || :
  fi

  if ((${initd_mod}==1)); then
    ${csudo}chkconfig --del taosd || :
  elif ((${initd_mod}==2)); then
    ${csudo}insserv -r taosd || :
  elif ((${initd_mod}==3)); then
    ${csudo}update-rc.d -f taosd remove || :
  fi

  ${csudo}rm -f ${service_config_dir}/taosd || :

  if $(which init &> /dev/null); then
    ${csudo}init q || :
  fi
}

function clean_service() {
  if ((${service_mod}==0)); then
    clean_service_on_systemd
  elif ((${service_mod}==1)); then
    clean_service_on_sysvinit
  else
    # must stop taosd manually
    kill_taosadapter
    kill_taosd
  fi
}

# Stop service and disable booting start.
clean_service

# Remove all links
${csudo}rm -f ${bin_link_dir}/taos || :
${csudo}rm -f ${bin_link_dir}/taosd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taosBenchmark || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${cfg_link_dir}/*.new || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :

${csudo}rm -f ${log_link_dir} || :
${csudo}rm -f ${data_link_dir} || :

if ((${service_mod}==2)); then
  kill_taosadapter
  kill_taosd
fi

echo -e "${GREEN}TDengine is removed successfully!${NC}"
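As a quick sanity check after the uninstall script above has run, the following standard commands (nothing TDengine-specific is assumed beyond the paths the script itself removes) should all report that the pieces are gone:

    systemctl status taosd 2>&1 | head -n 1        # expect something like "Unit taosd.service could not be found."
    command -v taos || echo "taos link removed from /usr/bin"
    ls /usr/lib/libtaos.* 2>/dev/null || echo "client libraries unlinked"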
@ -1,136 +1,136 @@
taos-1.6.4.0 (Release on 2019-12-01)
Bug fixed:
1. Look for possible causes of file corruption and fix them
2. Encapsulate memory allocation functions to reduce the possibility of crashes
3. Increase Arm64 compilation options
4. Remove most of the warnings in the code
5. Provide a variety of connector usage documents
6. Network connection can be selected between UDP and TCP
7. Allow the maximum number of tags to be 32
8. Bugs reported by users

taos-1.5.2.6 (Release on 2019-05-13)
Bug fixed:
- Nchar strings were sometimes wrongly truncated on Windows
- Importing data from a file threw an "invalid SQL" error

taos-1.5.2.5 (Release on 2019-05-13)
Bug fixed:
- Long-timespan data import sometimes affected query results
- Synchronization of cluster dnodes worked incorrectly when importing

taos-1.5.2.4 (Release on 2019-05-10)
New Features:
- Optimized Windows client installation: users no longer need to copy taos.dll manually
- Changed the priority of taos.cfg and the JDBC URL: parameters in the JDBC URL now have a higher priority than parameters in taos.cfg
Bug fixed:
- Expired data files were not deleted correctly
- Importing occasionally returned an "affected rows" count larger than 0 even though 0 rows were actually written to the DB
- The commit log was occupied by too many import-to-file requests, which blocked further data importing
- The cloud service showed a wrong number of available days for the current balance
- Other minor issues

taos-1.5.1 (Release on 2019-04-09)
New Features:
- Maximum number of rows returned by "top/bottom" methods increased from 20 to 100
- Improved the performance of "first/last" methods
- Increased system stability
Bug fixed:
- Connection failure when querying huge STables through TCP
- The primary timestamp was occasionally returned as NULL in some queries
- Operation failure when updating a tag value to NULL
- Stream calculation couldn't start on certain occasions

taos-1.5.0 (Release on 2019-03-11)
New Features:
- New syntax to automatically create tables when inserting values into non-existing tables
- New syntax "slimit/soffset" to paginate groups in a query result set
- Support "top/bottom" queries on a supertable
- High-performance statistical aggregation function "apercentile"
- Remove "first_t/last_t" functions; improve the performance of the "first/last" functions
- Add pre-aggregation for bool type values
- Support fixed-length streaming computation, i.e. users may define an end time for a stream
- New Java API for SQL subscription; supports table/supertable/SQL query subscription
Bug fixed:
- Data file broken issue when frequently using "import"
- Using "spread" on a super table may return negative values
- RPC bug where random network packets might cause the RPC module to crash

taos-1.4.15 (Released on 2019-01-23)
New Features:
- JDBC Driver now supports configuring timezone, locale, cfgdir in the JDBC URL
- A new API is added to validate if a table creation SQL statement is syntactically correct without actually creating that table
Bugs Fixed:
- "select last(*) from STable" sometimes returned an incorrect number of rows
- JDBC driver method ResultSetMetaData.getColumnClassName() returned wrong values.
- Web shell automatically changed query strings to lower case

taos-1.4.14 (Released on 2018-12-22)
New Features:
- C Driver support for integration with Python
- JDBC Driver support for integration with R and MATLAB

taos-1.4.13 (Released on 2018-12-14)
Bugs Fixed:
- Clients failed to connect to the server due to unexpected and invalid packets received by the server.
Features Added:
- Add support for HikariCP in the TSDB JDBC driver.

taos-1.4.12 (Released on 2018-12-08)
Bugs Fixed:
- Querying data while inserting into the database might return incomplete resultsets.
Features Added:
- A new Python driver is added.
- Increased system stability.
- Changed the meaning of the database configuration parameter 'ablocks'. 'ablocks' used to refer to the total number of cache blocks in memory; now it refers to the average number of cache blocks for each table in memory.

taos-1.4.11 (Released on 2018-11-23)
Bugs Fixed:
- Thread memory leak during high-frequency committing.
- Master dnode selection failure caused by accidental network issues.
Features Added:
- Change keyword "metrics" to "stables", i.e. supertables; the previous query "show metrics" is now changed to "show stables".
- Add an error message mechanism in the C# driver. An error with message "Failed to connect to server" is thrown when fetching data experiences a network connection interruption during data transmission.

taos-1.4.10 (Released on 2018-11-13)
Bugs Fixed:
- Taosdump failed while exporting extremely large datasets to a .sql file.
- Commit status did not change correctly if the last commit was triggered by the commit threshold time (ctime) and no more new data was written to the DB during the next ctime period.
Features Added:
- Support importing historical data from the Telegraf interface.
- Support the MyBatis framework in the TSDB JDBC Driver.
- Change result set row indexing in the JDBC Driver. Result set row indexes now start from 1 instead of 0.

taos-1.4.9 (Released on 2018-11-02)
Bugs Fixed:
- Dumping data using UTF-8 format in the client shell failed.
- Tag queries failed using the C# Driver.
- Committing data to disk failed if DB files were corrupted.
- Continuously pressing Ctrl+C in the client shell multiple times produced a segmentation fault.
Features Added:
- Changed the display pattern in the shell for taosdump.
- Add a check on the status of an existing resultset before firing a new query in a single JDBC connection. A connection can only have a single open resultset, and the resultset must be closed before one can execute new queries.

taos-1.4.7 (Released on 2018-10-25)
Bug Fixed:
- UTF-8 encoding in the JDBC Driver did not give the correct Chinese characters.
- Fix crash error when the WHERE clause is too long.
Features Added:
- Add a check on database properties; force ablocks to be at least (4 * tables) in a vnode.
- Check if pVgroup is empty in sdb.

taos-1.4.6 (Released on 2018-10-21)
Bug Fixed:
- Fix wrong symbol added while exporting a CSV file.
Features Added:
- Update grafana plugins.
- Update Python drivers.
- Add error code explanations in the JDBC Driver.
- Prohibit login when the server and client versions do not match.

taos-1.4.5 (Released on 2018-10-17)
Bug Fixed:
- Fix HTTP request truncation bug in the Telegraf interface.
Features Added:
- Support nchar and null objects in the JDBC Driver.
@ -1,130 +1,130 @@
#!/bin/bash
#
# Script to stop the service and uninstall TDengine's arbitrator

set -e
#set -x

verMode=edge

RED='\033[0;31m'
GREEN='\033[1;32m'
NC='\033[0m'

#install main path
install_main_dir="/usr/local/tarbitrator"
bin_link_dir="/usr/bin"
#inc_link_dir="/usr/include"

service_config_dir="/etc/systemd/system"
tarbitrator_service_name="tarbitratord"
csudo=""
if command -v sudo > /dev/null; then
  csudo="sudo "
fi

initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
  service_mod=0
elif $(which service &> /dev/null); then
  service_mod=1
  service_config_dir="/etc/init.d"
  if $(which chkconfig &> /dev/null); then
    initd_mod=1
  elif $(which insserv &> /dev/null); then
    initd_mod=2
  elif $(which update-rc.d &> /dev/null); then
    initd_mod=3
  else
    service_mod=2
  fi
else
  service_mod=2
fi

function kill_tarbitrator() {
  pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
  if [ -n "$pid" ]; then
    ${csudo}kill -9 $pid || :
  fi
}
function clean_bin() {
  # Remove link
  ${csudo}rm -f ${bin_link_dir}/tarbitrator || :
}

function clean_header() {
  # Remove link
  ${csudo}rm -f ${inc_link_dir}/taos.h || :
  ${csudo}rm -f ${inc_link_dir}/taosdef.h || :
  ${csudo}rm -f ${inc_link_dir}/taoserror.h || :
}

function clean_log() {
  # Remove link
  ${csudo}rm -rf /arbitrator.log || :
}

function clean_service_on_systemd() {
  tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"

  if systemctl is-active --quiet ${tarbitrator_service_name}; then
    echo "TDengine tarbitrator is running, stopping it..."
    ${csudo}systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
  fi
  ${csudo}systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null

  ${csudo}rm -f ${tarbitratord_service_config}
}

function clean_service_on_sysvinit() {
  if pidof tarbitrator &> /dev/null; then
    echo "TDengine's tarbitrator is running, stopping it..."
    ${csudo}service tarbitratord stop || :
  fi

  if ((${initd_mod}==1)); then
    if [ -e ${service_config_dir}/tarbitratord ]; then
      ${csudo}chkconfig --del tarbitratord || :
    fi
  elif ((${initd_mod}==2)); then
    if [ -e ${service_config_dir}/tarbitratord ]; then
      ${csudo}insserv -r tarbitratord || :
    fi
  elif ((${initd_mod}==3)); then
    if [ -e ${service_config_dir}/tarbitratord ]; then
      ${csudo}update-rc.d -f tarbitratord remove || :
    fi
  fi

  ${csudo}rm -f ${service_config_dir}/tarbitratord || :

  if $(which init &> /dev/null); then
    ${csudo}init q || :
  fi
}

function clean_service() {
  if ((${service_mod}==0)); then
    clean_service_on_systemd
  elif ((${service_mod}==1)); then
    clean_service_on_sysvinit
  else
    # must manual stop
    kill_tarbitrator
  fi
}

# Stop service and disable booting start.
clean_service
# Remove binary file and links
clean_bin
# Remove header file.
##clean_header
# Remove log file
clean_log

${csudo}rm -rf ${install_main_dir}

echo -e "${GREEN}TDengine's arbitrator is removed successfully!${NC}"
@ -1,85 +1,85 @@
#!/bin/bash
#
# Script to stop the client and uninstall database, but retain the config and log files.
set -e
# set -x

RED='\033[0;31m'
GREEN='\033[1;32m'
NC='\033[0m'

installDir="/usr/local/taos"
clientName="taos"
uninstallScript="rmtaos"

#install main path
install_main_dir=${installDir}

log_link_dir=${installDir}/log
cfg_link_dir=${installDir}/cfg
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"

csudo=""
if command -v sudo > /dev/null; then
  csudo="sudo "
fi

function kill_client() {
  if [ -n "$(pidof ${clientName})" ]; then
    ${csudo}kill -9 $pid || :
  fi
}

function clean_bin() {
  # Remove link
  ${csudo}rm -f ${bin_link_dir}/${clientName} || :
  ${csudo}rm -f ${bin_link_dir}/taosdemo || :
  ${csudo}rm -f ${bin_link_dir}/taosdump || :
  ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
  ${csudo}rm -f ${bin_link_dir}/set_core || :
}

function clean_lib() {
  # Remove link
  ${csudo}rm -f ${lib_link_dir}/libtaos.* || :
  ${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
  #${csudo}rm -rf ${v15_java_app_dir} || :
}

function clean_header() {
  # Remove link
  ${csudo}rm -f ${inc_link_dir}/taos.h || :
  ${csudo}rm -f ${inc_link_dir}/taosdef.h || :
  ${csudo}rm -f ${inc_link_dir}/taoserror.h || :
}

function clean_config() {
  # Remove link
  ${csudo}rm -f ${cfg_link_dir}/* || :
}

function clean_log() {
  # Remove link
  ${csudo}rm -rf ${log_link_dir} || :
}

# Stop client.
kill_client
# Remove binary file and links
clean_bin
# Remove header file.
clean_header
# Remove lib file
clean_lib
# Remove link log directory
clean_log
# Remove link configuration file
clean_config

${csudo}rm -rf ${install_main_dir}

echo -e "${GREEN}TDengine client is removed successfully!${NC}"
echo
@ -1,39 +1,39 @@
#!/bin/bash

# This script is used to repair links when you want to move TDengine
# data to other places and to access data.

# Read link path
read -p "Please enter link directory such as /var/lib/taos/tsdb: " linkDir

while true; do
  if [ ! -d $linkDir ]; then
    read -p "Path does not exist, please enter the correct link path:" linkDir
    continue
  fi
  break
done

declare -A dirHash

for linkFile in $(find -L $linkDir -xtype l); do
  targetFile=$(readlink -f $linkFile)
  echo "targetFile: ${targetFile}"
  # TODO : Extract directory part and basename part
  dirName=$(dirname $(dirname ${targetFile}))
  baseName=$(basename $(dirname ${targetFile}))/$(basename ${targetFile})

  # TODO :
  newDir="${dirHash["$dirName"]}"
  if [ -z "${dirHash["$dirName"]}" ]; then
    read -p "Please enter the directory to replace ${dirName}:" newDir

    read -p "Do you want to replace all[y/N]?" replaceAll
    if [[ ( "${replaceAll}" == "y") || ( "${replaceAll}" == "Y") ]]; then
      dirHash["$dirName"]="$newDir"
    fi
  fi

  # Replace the link
  ln -sf "${newDir}/${baseName}" "${linkFile}"
done
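For example (the paths are hypothetical, purely to illustrate what the loop above does): if /var/lib/taos/tsdb/vnode/vnode2/wal is a dangling link that used to point at /mnt/disk1/taos/vnode2/wal and the data now lives under /mnt/disk2/taos, entering /mnt/disk2/taos at the prompt makes the script effectively run:

    ln -sf /mnt/disk2/taos/vnode2/wal /var/lib/taos/tsdb/vnode/vnode2/wal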
@ -1,3 +1,3 @@
#!/bin/bash
[[ -x /usr/bin/taosadapter ]] && /usr/bin/taosadapter &
taosd
@ -1,40 +1,40 @@
#!/bin/bash
#
# This script configures core dump settings for when taosd crashes

# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'

# set -e
# set -x
corePath=$1

csudo=""
if command -v sudo > /dev/null; then
  csudo="sudo "
fi

if [[ ! -n ${corePath} ]]; then
  echo -e -n "${GREEN}Please enter a file directory to save the coredump file${NC}:"
  read corePath
  while true; do
    if [[ ! -z "$corePath" ]]; then
      break
    else
      read -p "Please enter a file directory to save the coredump file:" corePath
    fi
  done
fi

ulimit -c unlimited
${csudo}sed -i '/ulimit -c unlimited/d' /etc/profile ||:
${csudo}sed -i '$a\ulimit -c unlimited' /etc/profile ||:
source /etc/profile

${csudo}mkdir -p ${corePath} ||:
${csudo}sysctl -w kernel.core_pattern=${corePath}/core-%e-%p ||:
${csudo}echo "${corePath}/core-%e-%p" | ${csudo}tee /proc/sys/kernel/core_pattern ||:
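A quick way to confirm the settings above took effect (standard commands, nothing project-specific; corePath is whatever directory was entered when the script ran):

    cat /proc/sys/kernel/core_pattern   # should print ${corePath}/core-%e-%p
    ulimit -c                           # should print "unlimited" in a new login shell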
@ -1,144 +1,144 @@
# Usage:
# sudo gdb -x ./taosd-dump-cfg.gdb

define attach_pidof
  if $argc != 1
    help attach_pidof
  else
    shell echo -e "\
set \$PID = "$(echo $(pidof $arg0) 0 | cut -d " " -f 1)"\n\
if \$PID > 0\n\
attach "$(pidof -s $arg0)"\n\
else\n\
print \"Process '"$arg0"' not found\"\n\
end" > /tmp/gdb.pidof
    source /tmp/gdb.pidof
  end
end

document attach_pidof
Attach to process by name
Usage: attach_pidof PROG_NAME
end

set $TAOS_CFG_VTYPE_INT8 = 0
set $TAOS_CFG_VTYPE_INT16 = 1
set $TAOS_CFG_VTYPE_INT32 = 2
set $TAOS_CFG_VTYPE_FLOAT = 3
set $TAOS_CFG_VTYPE_STRING = 4
set $TAOS_CFG_VTYPE_IPSTR = 5
set $TAOS_CFG_VTYPE_DIRECTORY = 6

set $TSDB_CFG_CTYPE_B_CONFIG = 1U
set $TSDB_CFG_CTYPE_B_SHOW = 2U
set $TSDB_CFG_CTYPE_B_LOG = 4U
set $TSDB_CFG_CTYPE_B_CLIENT = 8U
set $TSDB_CFG_CTYPE_B_OPTION = 16U
set $TSDB_CFG_CTYPE_B_NOT_PRINT = 32U

set $TSDB_CFG_PRINT_LEN = 53

define print_blank
  if $argc == 1
    set $blank_len = $arg0
    while $blank_len > 0
      printf "%s", " "
      set $blank_len = $blank_len - 1
    end
  end
end

define dump_cfg
  if $argc != 1
    help dump_cfg
  else
    set $blen = $TSDB_CFG_PRINT_LEN - (int)strlen($arg0.option)
    if $blen < 0
      $blen = 0
    end
    #printf "%s: %d\n", "******blen: ", $blen
    printf "%s: ", $arg0.option
    print_blank $blen

    if $arg0.valType == $TAOS_CFG_VTYPE_INT8
      printf "%d\n", *((int8_t *) $arg0.ptr)
    else
      if $arg0.valType == $TAOS_CFG_VTYPE_INT16
        printf "%d\n", *((int16_t *) $arg0.ptr)
      else
        if $arg0.valType == $TAOS_CFG_VTYPE_INT32
          printf "%d\n", *((int32_t *) $arg0.ptr)
        else
          if $arg0.valType == $TAOS_CFG_VTYPE_FLOAT
            printf "%f\n", *((float *) $arg0.ptr)
          else
            printf "%s\n", $arg0.ptr
          end
        end
      end
    end
  end
end

document dump_cfg
Dump a cfg entry
Usage: dump_cfg cfg
end

set pagination off

attach_pidof taosd

set $idx=0
#print tsGlobalConfigNum
#set $end=$1
set $end=tsGlobalConfigNum

p "*=*=*=*=*=*=*=*=*= taos global config:"
#while ($idx .lt. $end)
while ($idx < $end)
  # print tsGlobalConfig[$idx].option
  set $cfg = tsGlobalConfig[$idx]
  set $tsce = tscEmbedded
  # p "1"
  if ($tsce == 0)
    if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_CLIENT)
    end
  else
    if $cfg.cfgType & $TSDB_CFG_CTYPE_B_NOT_PRINT
    else
      if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_SHOW)
      else
        dump_cfg $cfg
      end
    end
  end

  set $idx=$idx+1
end

set $idx=0

p "*=*=*=*=*=*=*=*=*= taos local config:"
while ($idx < $end)
  set $cfg = tsGlobalConfig[$idx]
  set $tsce = tscEmbedded
  if ($tsce == 0)
    if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_CLIENT)
    end
  else
    if $cfg.cfgType & $TSDB_CFG_CTYPE_B_NOT_PRINT
    else
      if ($cfg.cfgType & $TSDB_CFG_CTYPE_B_SHOW)
      else
        dump_cfg $cfg
      end
    end
  end

  set $idx=$idx+1
end

detach

quit
@ -60,7 +60,7 @@ static void registerRequest(SRequestObj *pRequest) {
static void deregisterRequest(SRequestObj *pRequest) {
assert(pRequest != NULL);

STscObj *pTscObj = pRequest->pTscObj;
STscObj * pTscObj = pRequest->pTscObj;
SInstanceSummary *pActivity = &pTscObj->pAppInfo->summary;

int32_t currentInst = atomic_sub_fetch_64((int64_t *)&pActivity->currentRequests, 1);

@ -91,7 +91,6 @@ static bool clientRpcRfp(int32_t code) {
}
}


// TODO refactor
void *openTransporter(const char *user, const char *auth, int32_t numOfThread) {
SRpcInit rpcInit;

@ -105,10 +104,6 @@ void *openTransporter(const char *user, const char *auth, int32_t numOfThread) {
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.user = (char *)user;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.ckey = "key";
rpcInit.spi = 1;
rpcInit.secret = (char *)auth;

void *pDnodeConn = rpcOpen(&rpcInit);
if (pDnodeConn == NULL) {
tscError("failed to init connection to server");

@ -318,7 +313,7 @@ int taos_options_imp(TSDB_OPTION option, const char *str) {
return 0;
}

SConfig *pCfg = taosGetCfg();
SConfig * pCfg = taosGetCfg();
SConfigItem *pItem = NULL;

switch (option) {

@ -291,7 +291,7 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList

SQueryResult res = {.code = 0, .numOfRows = 0, .msgSize = ERROR_MSG_BUF_DEFAULT_SIZE, .msg = pRequest->msgBuf};
int32_t code = schedulerExecJob(pTransporter, pNodeList, pDag, &pRequest->body.queryJob, pRequest->sqlstr,
pRequest->metric.start, &res);
pRequest->metric.start, &res);
if (code != TSDB_CODE_SUCCESS) {
if (pRequest->body.queryJob != 0) {
schedulerFreeJob(pRequest->body.queryJob);

@ -325,7 +325,7 @@ int32_t getQueryPlan(SRequestObj* pRequest, SQuery* pQuery, SArray** pNodeList)
int32_t validateSversion(SRequestObj* pRequest, void* res) {
SArray* pArray = NULL;
int32_t code = 0;


if (TDMT_VND_SUBMIT == pRequest->type) {
SSubmitRsp* pRsp = (SSubmitRsp*)res;
if (pRsp->nBlocks <= 0) {

@ -337,10 +337,10 @@ int32_t validateSversion(SRequestObj* pRequest, void* res) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_OUT_OF_MEMORY;
}


for (int32_t i = 0; i < pRsp->nBlocks; ++i) {
SSubmitBlkRsp *blk = pRsp->pBlocks + i;
STbSVersion tbSver = {.tbFName = blk->tblFName, .sver = blk->sver};
SSubmitBlkRsp* blk = pRsp->pBlocks + i;
STbSVersion tbSver = {.tbFName = blk->tblFName, .sver = blk->sver};
taosArrayPush(pArray, &tbSver);
}
} else if (TDMT_VND_QUERY == pRequest->type) {

@ -381,7 +381,7 @@ void freeRequestRes(SRequestObj* pRequest, void* res) {
if (NULL == res) {
return;
}


if (TDMT_VND_SUBMIT == pRequest->type) {
tFreeSSubmitRsp((SSubmitRsp*)res);
} else if (TDMT_VND_QUERY == pRequest->type) {

@ -1038,7 +1038,6 @@ TSDB_SERVER_STATUS taos_check_server_status(const char* fqdn, int port, char* de
SRpcInit rpcInit = {0};
char pass[TSDB_PASSWORD_LEN + 1] = {0};

taosEncryptPass_c((uint8_t*)("_pwd"), strlen("_pwd"), pass);
rpcInit.label = "CHK";
rpcInit.numOfThreads = 1;
rpcInit.cfp = NULL;

@ -1046,9 +1045,6 @@ TSDB_SERVER_STATUS taos_check_server_status(const char* fqdn, int port, char* de
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.user = "_dnd";
rpcInit.ckey = "_key";
rpcInit.spi = 1;
rpcInit.secret = pass;

clientRpc = rpcOpen(&rpcInit);
if (clientRpc == NULL) {

@ -126,7 +126,6 @@ static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
syncGetEpSet(vnodeGetSyncHandle(pVnode->pImpl), &newEpSet);
newEpSet.inUse = (newEpSet.inUse + 1) % newEpSet.numOfEps;
tmsgSendRedirectRsp(&rsp, &newEpSet);

} else if (ret == TAOS_SYNC_PROPOSE_OTHER_ERROR) {
rsp.code = TSDB_CODE_SYN_INTERNAL_ERROR;
tmsgSendRsp(&rsp);

@ -103,7 +103,7 @@ static inline int32_t dmPushToProcQueue(SProc *proc, SProcQueue *queue, SRpcMsg
return -1;
}

if (ftype == DND_FUNC_REQ && IsReq(pMsg) && pMsg->code == 0 && handle != 0) {
if (ftype == DND_FUNC_REQ && IsReq(pMsg) && pMsg->code == 0 && handle != 0 && pMsg->info.noResp == 0) {
if (taosHashPut(proc->hash, &handle, sizeof(int64_t), &pMsg->info, sizeof(SRpcConnInfo)) != 0) {
taosThreadMutexUnlock(&queue->mutex);
return -1;

@ -49,9 +49,9 @@ int32_t dmProcessNodeMsg(SMgmtWrapper *pWrapper, SRpcMsg *pMsg) {
}

static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
SDnodeTrans *pTrans = &pDnode->trans;
SDnodeTrans * pTrans = &pDnode->trans;
int32_t code = -1;
SRpcMsg *pMsg = NULL;
SRpcMsg * pMsg = NULL;
bool needRelease = false;
SDnodeHandle *pHandle = &pTrans->msgHandles[TMSG_INDEX(pRpc->msgType)];
SMgmtWrapper *pWrapper = NULL;

@ -179,11 +179,11 @@ int32_t dmInitMsgHandle(SDnode *pDnode) {

for (EDndNodeType ntype = DNODE; ntype < NODE_END; ++ntype) {
SMgmtWrapper *pWrapper = &pDnode->wrappers[ntype];
SArray *pArray = (*pWrapper->func.getHandlesFp)();
SArray * pArray = (*pWrapper->func.getHandlesFp)();
if (pArray == NULL) return -1;

for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) {
SMgmtHandle *pMgmt = taosArrayGet(pArray, i);
SMgmtHandle * pMgmt = taosArrayGet(pArray, i);
SDnodeHandle *pHandle = &pTrans->msgHandles[TMSG_INDEX(pMgmt->msgType)];
if (pMgmt->needCheckVgId) {
pHandle->needCheckVgId = pMgmt->needCheckVgId;

@ -200,47 +200,6 @@ int32_t dmInitMsgHandle(SDnode *pDnode) {
return 0;
}

static void dmSendRpcRedirectRsp(const SRpcMsg *pMsg) {
SDnode *pDnode = dmInstance();
SEpSet epSet = {0};
dmGetMnodeEpSet(&pDnode->data, &epSet);

dDebug("RPC %p, req is redirected, num:%d use:%d", pMsg->info.handle, epSet.numOfEps, epSet.inUse);
for (int32_t i = 0; i < epSet.numOfEps; ++i) {
dDebug("mnode index:%d %s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port);
if (strcmp(epSet.eps[i].fqdn, tsLocalFqdn) == 0 && epSet.eps[i].port == tsServerPort) {
epSet.inUse = (i + 1) % epSet.numOfEps;
}

epSet.eps[i].port = htons(epSet.eps[i].port);
}

SMEpSet msg = {.epSet = epSet};
int32_t len = tSerializeSMEpSet(NULL, 0, &msg);

SRpcMsg rsp = {
.code = TSDB_CODE_RPC_REDIRECT,
.info = pMsg->info,
.contLen = len,
};
rsp.pCont = rpcMallocCont(len);
tSerializeSMEpSet(rsp.pCont, len, &msg);
rpcSendResponse(&rsp);

rpcFreeCont(pMsg->pCont);
}

static inline void dmSendRecv(SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp) {
SDnode *pDnode = dmInstance();
if (pDnode->status != DND_STAT_RUNNING) {
pRsp->code = TSDB_CODE_NODE_OFFLINE;
rpcFreeCont(pReq->pCont);
pReq->pCont = NULL;
} else {
rpcSendRecv(pDnode->trans.clientRpc, pEpSet, pReq, pRsp);
}
}

static inline int32_t dmSendReq(const SEpSet *pEpSet, SRpcMsg *pMsg) {
SDnode *pDnode = dmInstance();
if (pDnode->status != DND_STAT_RUNNING) {

@ -257,39 +216,38 @@ static inline int32_t dmSendReq(const SEpSet *pEpSet, SRpcMsg *pMsg) {

static inline void dmSendRsp(SRpcMsg *pMsg) {
SMgmtWrapper *pWrapper = pMsg->info.wrapper;
if (pMsg->code == TSDB_CODE_NODE_REDIRECT) {
dmSendRpcRedirectRsp(pMsg);
if (InChildProc(pWrapper)) {
dmPutToProcPQueue(&pWrapper->proc, pMsg, DND_FUNC_RSP);
rpcFreeCont(pMsg->pCont);
pMsg->pCont = NULL;
} else {
if (InChildProc(pWrapper)) {
dmPutToProcPQueue(&pWrapper->proc, pMsg, DND_FUNC_RSP);
} else {
rpcSendResponse(pMsg);
}
rpcSendResponse(pMsg);
}
}

static inline void dmSendRedirectRsp(SRpcMsg *pMsg, const SEpSet *pNewEpSet) {
SMgmtWrapper *pWrapper = pMsg->info.wrapper;
if (InChildProc(pWrapper)) {
dmPutToProcPQueue(&pWrapper->proc, pMsg, DND_FUNC_RSP);
} else {
SRpcMsg rsp = {0};
SMEpSet msg = {.epSet = *pNewEpSet};
int32_t len = tSerializeSMEpSet(NULL, 0, &msg);
rsp.pCont = rpcMallocCont(len);
rsp.contLen = len;
tSerializeSMEpSet(rsp.pCont, len, &msg);
SRpcMsg rsp = {.code = TSDB_CODE_RPC_REDIRECT, .info = pMsg->info};
SMEpSet msg = {.epSet = *pNewEpSet};
int32_t contLen = tSerializeSMEpSet(NULL, 0, &msg);

rsp.code = TSDB_CODE_RPC_REDIRECT;
rsp.info = pMsg->info;
rpcSendResponse(&rsp);
rsp.pCont = rpcMallocCont(contLen);
if (rsp.pCont == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
} else {
tSerializeSMEpSet(rsp.pCont, contLen, &msg);
rsp.contLen = contLen;
}
dmSendRsp(&rsp);
rpcFreeCont(pMsg->pCont);
pMsg->pCont = NULL;
}

static inline void dmRegisterBrokenLinkArg(SRpcMsg *pMsg) {
SMgmtWrapper *pWrapper = pMsg->info.wrapper;
if (InChildProc(pWrapper)) {
dmPutToProcPQueue(&pWrapper->proc, pMsg, DND_FUNC_REGIST);
rpcFreeCont(pMsg->pCont);
pMsg->pCont = NULL;
} else {
rpcRegisterBrokenLinkArg(pMsg);
}

@ -318,15 +276,9 @@ int32_t dmInitClient(SDnode *pDnode) {
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.user = INTERNAL_USER;
rpcInit.ckey = INTERNAL_CKEY;
rpcInit.spi = 1;
rpcInit.parent = pDnode;
rpcInit.rfp = rpcRfp;

char pass[TSDB_PASSWORD_LEN + 1] = {0};
taosEncryptPass_c((uint8_t *)(INTERNAL_SECRET), strlen(INTERNAL_SECRET), pass);
rpcInit.secret = pass;

pTrans->clientRpc = rpcOpen(&rpcInit);
if (pTrans->clientRpc == NULL) {
dError("failed to init dnode rpc client");

@ -391,3 +343,34 @@ SMsgCb dmGetMsgcb(SDnode *pDnode) {
};
return msgCb;
}

static void dmSendMnodeRedirectRsp(SRpcMsg *pMsg) {
SDnode *pDnode = dmInstance();
SEpSet epSet = {0};
dmGetMnodeEpSet(&pDnode->data, &epSet);

dDebug("msg:%p, is redirected, num:%d use:%d", pMsg, epSet.numOfEps, epSet.inUse);
for (int32_t i = 0; i < epSet.numOfEps; ++i) {
dDebug("mnode index:%d %s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port);
if (strcmp(epSet.eps[i].fqdn, tsLocalFqdn) == 0 && epSet.eps[i].port == tsServerPort) {
epSet.inUse = (i + 1) % epSet.numOfEps;
}

epSet.eps[i].port = htons(epSet.eps[i].port);
}

SRpcMsg rsp = {.code = TSDB_CODE_RPC_REDIRECT, .info = pMsg->info};
SMEpSet msg = {.epSet = epSet};
int32_t contLen = tSerializeSMEpSet(NULL, 0, &msg);
rsp.pCont = rpcMallocCont(contLen);
if (rsp.pCont == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
} else {
tSerializeSMEpSet(rsp.pCont, contLen, &msg);
rsp.contLen = contLen;
}

dmSendRsp(&rsp);
rpcFreeCont(pMsg->pCont);
pMsg->pCont = NULL;
}

@ -48,10 +48,10 @@ void TestClient::DoInit() {
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.idleTime = 30 * 1000;
rpcInit.user = (char*)this->user;
rpcInit.ckey = (char*)"key";
// rpcInit.ckey = (char*)"key";
rpcInit.parent = this;
rpcInit.secret = (char*)secretEncrypt;
rpcInit.spi = 1;
// rpcInit.secret = (char*)secretEncrypt;
// rpcInit.spi = 1;

clientRpc = rpcOpen(&rpcInit);
ASSERT(clientRpc);

@ -85,10 +85,9 @@ int vnodeAsyncCommit(SVnode* pVnode);
int32_t vnodeSyncOpen(SVnode* pVnode, char* path);
int32_t vnodeSyncStart(SVnode* pVnode);
void vnodeSyncClose(SVnode* pVnode);
void vnodeSyncSetQ(SVnode* pVnode, void* qHandle);
void vnodeSyncSetRpc(SVnode* pVnode, void* rpcHandle);
int32_t vnodeSyncEqMsg(void* qHandle, SRpcMsg* pMsg);
int32_t vnodeSendMsg(void* rpcHandle, const SEpSet* pEpSet, SRpcMsg* pMsg);
void vnodeSyncSetMsgCb(SVnode* pVnode);
int32_t vnodeSyncEqMsg(const SMsgCb* msgcb, SRpcMsg* pMsg);
int32_t vnodeSyncSendMsg(const SEpSet* pEpSet, SRpcMsg* pMsg);
void vnodeSyncCommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
void vnodeSyncPreCommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
void vnodeSyncRollBackCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);

@ -112,19 +112,18 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
if (pIter == NULL) break;
|
||||
pExec = (STqExec*)pIter;
|
||||
if (pExec->subType == TOPIC_SUB_TYPE__DB) {
|
||||
if (isAdd) {
|
||||
continue;
|
||||
} else {
|
||||
if (!isAdd) {
|
||||
int32_t sz = taosArrayGetSize(tbUidList);
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
|
||||
taosHashPut(pExec->pDropTbUid, &tbUid, sizeof(int64_t), NULL, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
for (int32_t i = 0; i < 5; i++) {
|
||||
int32_t code = qUpdateQualifiedTableId(pExec->task[i], tbUidList, isAdd);
|
||||
ASSERT(code == 0);
|
||||
} else {
|
||||
for (int32_t i = 0; i < 5; i++) {
|
||||
int32_t code = qUpdateQualifiedTableId(pExec->task[i], tbUidList, isAdd);
|
||||
ASSERT(code == 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
|
@ -1059,6 +1058,57 @@ int32_t tqProcessStreamTrigger(STQ* pTq, void* data, int32_t dataLen, int32_t wo
|
|||
return 0;
|
||||
}

int32_t tqProcessStreamTriggerNew(STQ* pTq, SSubmitReq* data) {
  SStreamDataSubmit* pSubmit = NULL;

  // build data
  pSubmit = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM);
  if (pSubmit == NULL) return -1;
  pSubmit->dataRef = taosMemoryMalloc(sizeof(int32_t));
  if (pSubmit->dataRef == NULL) goto FAIL;
  *pSubmit->dataRef = 1;
  pSubmit->data = data;
  pSubmit->type = STREAM_INPUT__DATA_BLOCK;

  void* pIter = NULL;
  while (1) {
    pIter = taosHashIterate(pTq->pStreamTasks, pIter);
    if (pIter == NULL) break;
    SStreamTask* pTask = (SStreamTask*)pIter;
    if (pTask->inputType == TASK_INPUT_TYPE__SUMBIT_BLOCK) {
      streamEnqueueDataSubmit(pTask, pSubmit);
      // TODO cal back pressure
    }
    // check run
    int8_t execStatus = atomic_load_8(&pTask->status);
    if (execStatus == TASK_STATUS__IDLE || execStatus == TASK_STATUS__CLOSING) {
      SStreamTaskRunReq* pReq = taosMemoryMalloc(sizeof(SStreamTaskRunReq));
      if (pReq == NULL) continue;
      // TODO: do we need htonl?
      pReq->head.vgId = pTq->pVnode->config.vgId;
      pReq->streamId = pTask->streamId;
      pReq->taskId = pTask->taskId;
      SRpcMsg msg = {
          .msgType = 0,
          .pCont = pReq,
          .contLen = sizeof(SStreamTaskRunReq),
      };
      tmsgPutToQueue(&pTq->pVnode->msgCb, FETCH_QUEUE, &msg);
    }
  }
  streamDataSubmitRefDec(pSubmit);

  return 0;
FAIL:
  if (pSubmit) {
    if (pSubmit->dataRef) {
      taosMemoryFree(pSubmit->dataRef);
    }
    taosFreeQitem(pSubmit);
  }
  return -1;
}
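The submit block built above is shared by every stream task that enqueues it: dataRef is a heap-allocated counter that starts at 1 for the producer, each enqueue presumably takes its own reference, and the trailing streamDataSubmitRefDec drops the producer's reference so the payload is released once the last consumer is done. A minimal standalone model of that pattern is sketched below; the names are hypothetical and C11 atomics stand in for the project's atomic_* wrappers and taosMemory allocators.

#include <stdatomic.h>
#include <stdlib.h>

typedef struct {
  void       *data;     /* shared payload */
  atomic_int *dataRef;  /* heap-allocated counter, starts at 1 for the producer */
} SketchSubmit;

static void sketchSubmitRefInc(SketchSubmit *s) { atomic_fetch_add(s->dataRef, 1); }

static void sketchSubmitRefDec(SketchSubmit *s) {
  /* atomic_fetch_sub returns the previous value; whoever drops it to 0 frees. */
  if (atomic_fetch_sub(s->dataRef, 1) == 1) {
    free(s->data);
    free(s->dataRef);
    free(s);
  }
}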
|
||||
int32_t tqProcessTaskExec(STQ* pTq, char* msg, int32_t msgLen, int32_t workerId) {
|
||||
SStreamTaskExecReq req;
|
||||
tDecodeSStreamTaskExecReq(msg, &req);
|
||||
|
|
|
@ -34,21 +34,11 @@ STqReadHandle* tqInitSubmitMsgScanner(SMeta* pMeta) {
|
|||
|
||||
int32_t tqReadHandleSetMsg(STqReadHandle* pReadHandle, SSubmitReq* pMsg, int64_t ver) {
|
||||
pReadHandle->pMsg = pMsg;
|
||||
// pMsg->length = htonl(pMsg->length);
|
||||
// pMsg->numOfBlocks = htonl(pMsg->numOfBlocks);
|
||||
|
||||
// iterate and convert
|
||||
if (tInitSubmitMsgIter(pMsg, &pReadHandle->msgIter) < 0) return -1;
|
||||
while (true) {
|
||||
if (tGetSubmitMsgNext(&pReadHandle->msgIter, &pReadHandle->pBlock) < 0) return -1;
|
||||
if (pReadHandle->pBlock == NULL) break;
|
||||
|
||||
// pReadHandle->pBlock->uid = htobe64(pReadHandle->pBlock->uid);
|
||||
// pReadHandle->pBlock->suid = htobe64(pReadHandle->pBlock->suid);
|
||||
// pReadHandle->pBlock->sversion = htonl(pReadHandle->pBlock->sversion);
|
||||
// pReadHandle->pBlock->dataLen = htonl(pReadHandle->pBlock->dataLen);
|
||||
// pReadHandle->pBlock->schemaLen = htonl(pReadHandle->pBlock->schemaLen);
|
||||
// pReadHandle->pBlock->numOfRows = htons(pReadHandle->pBlock->numOfRows);
|
||||
}
|
||||
|
||||
if (tInitSubmitMsgIter(pMsg, &pReadHandle->msgIter) < 0) return -1;
|
||||
|
|
|
@ -180,8 +180,7 @@ void vnodeClose(SVnode *pVnode) {
|
|||
|
||||
// start the sync timer after the queue is ready
|
||||
int32_t vnodeStart(SVnode *pVnode) {
|
||||
vnodeSyncSetQ(pVnode, NULL);
|
||||
vnodeSyncSetRpc(pVnode, NULL);
|
||||
vnodeSyncSetMsgCb(pVnode);
|
||||
vnodeSyncStart(pVnode);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -27,9 +27,8 @@ int32_t vnodeSyncOpen(SVnode *pVnode, char *path) {
|
|||
syncInfo.pWal = pVnode->pWal;
|
||||
|
||||
syncInfo.pFsm = syncVnodeMakeFsm(pVnode);
|
||||
syncInfo.rpcClient = NULL;
|
||||
syncInfo.FpSendMsg = vnodeSendMsg;
|
||||
syncInfo.queue = NULL;
|
||||
syncInfo.msgcb = NULL;
|
||||
syncInfo.FpSendMsg = vnodeSyncSendMsg;
|
||||
syncInfo.FpEqMsg = vnodeSyncEqMsg;
|
||||
|
||||
pVnode->sync = syncOpen(&syncInfo);
|
||||
|
@ -53,31 +52,13 @@ void vnodeSyncClose(SVnode *pVnode) {
|
|||
syncStop(pVnode->sync);
|
||||
}
|
||||
|
||||
void vnodeSyncSetQ(SVnode *pVnode, void *qHandle) { syncSetQ(pVnode->sync, (void *)(&(pVnode->msgCb))); }
|
||||
void vnodeSyncSetMsgCb(SVnode *pVnode) { syncSetMsgCb(pVnode->sync, &pVnode->msgCb); }
|
||||
|
||||
void vnodeSyncSetRpc(SVnode *pVnode, void *rpcHandle) { syncSetRpc(pVnode->sync, (void *)(&(pVnode->msgCb))); }
|
||||
int32_t vnodeSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) { return tmsgPutToQueue(msgcb, SYNC_QUEUE, pMsg); }
|
||||
|
||||
int32_t vnodeSyncEqMsg(void *qHandle, SRpcMsg *pMsg) {
|
||||
int32_t ret = 0;
|
||||
SMsgCb *pMsgCb = qHandle;
|
||||
if (pMsgCb->queueFps[SYNC_QUEUE] != NULL) {
|
||||
tmsgPutToQueue(qHandle, SYNC_QUEUE, pMsg);
|
||||
} else {
|
||||
vError("vnodeSyncEqMsg queue is NULL, SYNC_QUEUE:%d", SYNC_QUEUE);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int32_t vnodeSendMsg(void *rpcHandle, const SEpSet *pEpSet, SRpcMsg *pMsg) {
|
||||
int32_t ret = 0;
|
||||
SMsgCb *pMsgCb = rpcHandle;
|
||||
if (pMsgCb->queueFps[SYNC_QUEUE] != NULL) {
|
||||
pMsg->info.noResp = 1;
|
||||
tmsgSendReq(pEpSet, pMsg);
|
||||
} else {
|
||||
vError("vnodeSendMsg queue is NULL, SYNC_QUEUE:%d", SYNC_QUEUE);
|
||||
}
|
||||
return ret;
|
||||
int32_t vnodeSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) {
|
||||
pMsg->info.noResp = 1;
|
||||
return tmsgSendReq(pEpSet, pMsg);
|
||||
}
|
||||
|
||||
int32_t vnodeSyncGetSnapshotCb(struct SSyncFSM *pFsm, SSnapshot *pSnapshot) {
|
||||
|
|
|
@ -27,16 +27,16 @@
|
|||
#include "trpc.h"
|
||||
|
||||
typedef struct SUdfdContext {
|
||||
uv_loop_t *loop;
|
||||
uv_loop_t * loop;
|
||||
uv_pipe_t ctrlPipe;
|
||||
uv_signal_t intrSignal;
|
||||
char listenPipeName[PATH_MAX + UDF_LISTEN_PIPE_NAME_LEN + 2];
|
||||
uv_pipe_t listeningPipe;
|
||||
|
||||
void *clientRpc;
|
||||
void * clientRpc;
|
||||
SCorEpSet mgmtEp;
|
||||
uv_mutex_t udfsMutex;
|
||||
SHashObj *udfsHash;
|
||||
SHashObj * udfsHash;
|
||||
|
||||
bool printVersion;
|
||||
} SUdfdContext;
|
||||
|
@ -45,7 +45,7 @@ SUdfdContext global;
|
|||
|
||||
typedef struct SUdfdUvConn {
|
||||
uv_stream_t *client;
|
||||
char *inputBuf;
|
||||
char * inputBuf;
|
||||
int32_t inputLen;
|
||||
int32_t inputCap;
|
||||
int32_t inputTotal;
|
||||
|
@ -65,25 +65,25 @@ typedef struct SUdf {
|
|||
uv_mutex_t lock;
|
||||
uv_cond_t condReady;
|
||||
|
||||
char name[TSDB_FUNC_NAME_LEN];
|
||||
int8_t funcType;
|
||||
int8_t scriptType;
|
||||
int8_t outputType;
|
||||
char name[TSDB_FUNC_NAME_LEN];
|
||||
int8_t funcType;
|
||||
int8_t scriptType;
|
||||
int8_t outputType;
|
||||
int32_t outputLen;
|
||||
int32_t bufSize;
|
||||
|
||||
char path[PATH_MAX];
|
||||
char path[PATH_MAX];
|
||||
|
||||
uv_lib_t lib;
|
||||
uv_lib_t lib;
|
||||
|
||||
TUdfScalarProcFunc scalarProcFunc;
|
||||
TUdfScalarProcFunc scalarProcFunc;
|
||||
|
||||
TUdfAggStartFunc aggStartFunc;
|
||||
TUdfAggProcessFunc aggProcFunc;
|
||||
TUdfAggFinishFunc aggFinishFunc;
|
||||
TUdfAggStartFunc aggStartFunc;
|
||||
TUdfAggProcessFunc aggProcFunc;
|
||||
TUdfAggFinishFunc aggFinishFunc;
|
||||
|
||||
TUdfInitFunc initFunc;
|
||||
TUdfDestroyFunc destroyFunc;
|
||||
TUdfInitFunc initFunc;
|
||||
TUdfDestroyFunc destroyFunc;
|
||||
} SUdf;
|
||||
|
||||
// TODO: add private udf structure.
|
||||
|
@ -98,9 +98,9 @@ typedef enum EUdfdRpcReqRspType {
|
|||
|
||||
typedef struct SUdfdRpcSendRecvInfo {
|
||||
EUdfdRpcReqRspType rpcType;
|
||||
int32_t code;
|
||||
void* param;
|
||||
uv_sem_t resultSem;
|
||||
int32_t code;
|
||||
void * param;
|
||||
uv_sem_t resultSem;
|
||||
} SUdfdRpcSendRecvInfo;
|
||||
|
||||
void udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) {
|
||||
|
@ -136,7 +136,7 @@ void udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) {
|
|||
tDeserializeSRetrieveFuncRsp(pMsg->pCont, pMsg->contLen, &retrieveRsp);
|
||||
|
||||
SFuncInfo *pFuncInfo = (SFuncInfo *)taosArrayGet(retrieveRsp.pFuncInfos, 0);
|
||||
SUdf* udf = msgInfo->param;
|
||||
SUdf * udf = msgInfo->param;
|
||||
udf->funcType = pFuncInfo->funcType;
|
||||
udf->scriptType = pFuncInfo->scriptType;
|
||||
udf->outputType = pFuncInfo->outputType;
|
||||
|
@ -145,7 +145,8 @@ void udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) {
|
|||
|
||||
char path[PATH_MAX] = {0};
|
||||
snprintf(path, sizeof(path), "%s/lib%s.so", "/tmp", pFuncInfo->name);
|
||||
TdFilePtr file = taosOpenFile(path, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_READ | TD_FILE_TRUNC | TD_FILE_AUTO_DEL);
|
||||
TdFilePtr file =
|
||||
taosOpenFile(path, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_READ | TD_FILE_TRUNC | TD_FILE_AUTO_DEL);
|
||||
// TODO check for failure of flush to disk
|
||||
taosWriteFile(file, pFuncInfo->pCode, pFuncInfo->codeSize);
|
||||
taosCloseFile(&file);
|
||||
|
@ -168,11 +169,11 @@ int32_t udfdFillUdfInfoFromMNode(void *clientRpc, char *udfName, SUdf *udf) {
|
|||
taosArrayPush(retrieveReq.pFuncNames, udfName);
|
||||
|
||||
int32_t contLen = tSerializeSRetrieveFuncReq(NULL, 0, &retrieveReq);
|
||||
void *pReq = rpcMallocCont(contLen);
|
||||
void * pReq = rpcMallocCont(contLen);
|
||||
tSerializeSRetrieveFuncReq(pReq, contLen, &retrieveReq);
|
||||
taosArrayDestroy(retrieveReq.pFuncNames);
|
||||
|
||||
SUdfdRpcSendRecvInfo* msgInfo = taosMemoryCalloc(1, sizeof(SUdfdRpcSendRecvInfo));
|
||||
SUdfdRpcSendRecvInfo *msgInfo = taosMemoryCalloc(1, sizeof(SUdfdRpcSendRecvInfo));
|
||||
msgInfo->rpcType = UDFD_RPC_RETRIVE_FUNC;
|
||||
msgInfo->param = udf;
|
||||
uv_sem_init(&msgInfo->resultSem, 0);
|
||||
|
@ -194,7 +195,7 @@ int32_t udfdFillUdfInfoFromMNode(void *clientRpc, char *udfName, SUdf *udf) {
|
|||
int32_t udfdConnectToMnode() {
|
||||
SConnectReq connReq = {0};
|
||||
connReq.connType = CONN_TYPE__UDFD;
|
||||
tstrncpy(connReq.app, "udfd",sizeof(connReq.app));
|
||||
tstrncpy(connReq.app, "udfd", sizeof(connReq.app));
|
||||
tstrncpy(connReq.user, TSDB_DEFAULT_USER, sizeof(connReq.user));
|
||||
char pass[TSDB_PASSWORD_LEN + 1] = {0};
|
||||
taosEncryptPass_c((uint8_t *)(TSDB_DEFAULT_PASS), strlen(TSDB_DEFAULT_PASS), pass);
|
||||
|
@ -203,7 +204,7 @@ int32_t udfdConnectToMnode() {
|
|||
connReq.startTime = htobe64(taosGetTimestampMs());
|
||||
|
||||
int32_t contLen = tSerializeSConnectReq(NULL, 0, &connReq);
|
||||
void* pReq = rpcMallocCont(contLen);
|
||||
void * pReq = rpcMallocCont(contLen);
|
||||
tSerializeSConnectReq(pReq, contLen, &connReq);
|
||||
|
||||
SUdfdRpcSendRecvInfo *msgInfo = taosMemoryCalloc(1, sizeof(SUdfdRpcSendRecvInfo));
|
||||
|
@ -240,17 +241,17 @@ int32_t udfdLoadUdf(char *udfName, SUdf *udf) {
    return TSDB_CODE_UDF_LOAD_UDF_FAILURE;
  }

  char initFuncName[TSDB_FUNC_NAME_LEN+5] = {0};
  char  initFuncName[TSDB_FUNC_NAME_LEN + 5] = {0};
  char *initSuffix = "_init";
  strcpy(initFuncName, udfName);
  strncat(initFuncName, initSuffix, strlen(initSuffix));
  uv_dlsym(&udf->lib, initFuncName, (void**)(&udf->initFunc));
  uv_dlsym(&udf->lib, initFuncName, (void **)(&udf->initFunc));

  char destroyFuncName[TSDB_FUNC_NAME_LEN+5] = {0};
  char  destroyFuncName[TSDB_FUNC_NAME_LEN + 5] = {0};
  char *destroySuffix = "_destroy";
  strcpy(destroyFuncName, udfName);
  strncat(destroyFuncName, destroySuffix, strlen(destroySuffix));
  uv_dlsym(&udf->lib, destroyFuncName, (void**)(&udf->destroyFunc));
  uv_dlsym(&udf->lib, destroyFuncName, (void **)(&udf->destroyFunc));

  if (udf->funcType == TSDB_FUNC_TYPE_SCALAR) {
    char processFuncName[TSDB_FUNC_NAME_LEN] = {0};

@ -270,87 +271,86 @@ int32_t udfdLoadUdf(char *udfName, SUdf *udf) {
    strncpy(finishFuncName, processFuncName, strlen(processFuncName));
    strncat(finishFuncName, finishSuffix, strlen(finishSuffix));
    uv_dlsym(&udf->lib, finishFuncName, (void **)(&udf->aggFinishFunc));
    //TODO: merge
    // TODO: merge
  }
  return 0;
}
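udfdLoadUdf resolves the plugin entry points by convention: the loaded library is expected to export symbols named "<udfName>_init" and "<udfName>_destroy", which are looked up with uv_dlsym and stored on the SUdf handle. The same convention can be sketched with plain POSIX dlsym and a bounded snprintf in place of strcpy/strncat; the names below are illustrative only, not part of the udfd code.

#include <dlfcn.h>
#include <stdio.h>

typedef int  (*sketch_init_fn)(void);
typedef void (*sketch_destroy_fn)(void);

/* Resolve "<name>_init" and "<name>_destroy" from an already-opened library handle. */
static int sketchResolveUdfSymbols(void *lib, const char *name,
                                   sketch_init_fn *initFn, sketch_destroy_fn *destroyFn) {
  char sym[256] = {0};

  snprintf(sym, sizeof(sym), "%s_init", name);
  *initFn = (sketch_init_fn)dlsym(lib, sym);

  snprintf(sym, sizeof(sym), "%s_destroy", name);
  *destroyFn = (sketch_destroy_fn)dlsym(lib, sym);

  return (*initFn != NULL && *destroyFn != NULL) ? 0 : -1;
}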
void udfdProcessSetupRequest(SUvUdfWork* uvUdf, SUdfRequest* request) {
|
||||
// TODO: tracable id from client. connect, setup, call, teardown
|
||||
fnInfo( "setup request. seq num: %" PRId64 ", udf name: %s", request->seqNum, request->setup.udfName);
|
||||
SUdfSetupRequest *setup = &request->setup;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SUdf *udf = NULL;
|
||||
uv_mutex_lock(&global.udfsMutex);
|
||||
SUdf **udfInHash = taosHashGet(global.udfsHash, request->setup.udfName, strlen(request->setup.udfName));
|
||||
if (udfInHash) {
|
||||
++(*udfInHash)->refCount;
|
||||
udf = *udfInHash;
|
||||
uv_mutex_unlock(&global.udfsMutex);
|
||||
} else {
|
||||
SUdf *udfNew = taosMemoryCalloc(1, sizeof(SUdf));
|
||||
udfNew->refCount = 1;
|
||||
udfNew->state = UDF_STATE_INIT;
|
||||
void udfdProcessSetupRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
|
||||
// TODO: tracable id from client. connect, setup, call, teardown
|
||||
fnInfo("setup request. seq num: %" PRId64 ", udf name: %s", request->seqNum, request->setup.udfName);
|
||||
SUdfSetupRequest *setup = &request->setup;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SUdf * udf = NULL;
|
||||
uv_mutex_lock(&global.udfsMutex);
|
||||
SUdf **udfInHash = taosHashGet(global.udfsHash, request->setup.udfName, strlen(request->setup.udfName));
|
||||
if (udfInHash) {
|
||||
++(*udfInHash)->refCount;
|
||||
udf = *udfInHash;
|
||||
uv_mutex_unlock(&global.udfsMutex);
|
||||
} else {
|
||||
SUdf *udfNew = taosMemoryCalloc(1, sizeof(SUdf));
|
||||
udfNew->refCount = 1;
|
||||
udfNew->state = UDF_STATE_INIT;
|
||||
|
||||
uv_mutex_init(&udfNew->lock);
|
||||
uv_cond_init(&udfNew->condReady);
|
||||
udf = udfNew;
|
||||
taosHashPut(global.udfsHash, request->setup.udfName, strlen(request->setup.udfName), &udfNew, sizeof(&udfNew));
|
||||
uv_mutex_unlock(&global.udfsMutex);
|
||||
uv_mutex_init(&udfNew->lock);
|
||||
uv_cond_init(&udfNew->condReady);
|
||||
udf = udfNew;
|
||||
taosHashPut(global.udfsHash, request->setup.udfName, strlen(request->setup.udfName), &udfNew, sizeof(&udfNew));
|
||||
uv_mutex_unlock(&global.udfsMutex);
|
||||
}
|
||||
|
||||
uv_mutex_lock(&udf->lock);
|
||||
if (udf->state == UDF_STATE_INIT) {
|
||||
udf->state = UDF_STATE_LOADING;
|
||||
code = udfdLoadUdf(setup->udfName, udf);
|
||||
if (udf->initFunc) {
|
||||
udf->initFunc();
|
||||
}
|
||||
|
||||
uv_mutex_lock(&udf->lock);
|
||||
if (udf->state == UDF_STATE_INIT) {
|
||||
udf->state = UDF_STATE_LOADING;
|
||||
code = udfdLoadUdf(setup->udfName, udf);
|
||||
if (udf->initFunc) {
|
||||
udf->initFunc();
|
||||
}
|
||||
udf->state = UDF_STATE_READY;
|
||||
uv_cond_broadcast(&udf->condReady);
|
||||
uv_mutex_unlock(&udf->lock);
|
||||
} else {
|
||||
while (udf->state != UDF_STATE_READY) {
|
||||
uv_cond_wait(&udf->condReady, &udf->lock);
|
||||
}
|
||||
uv_mutex_unlock(&udf->lock);
|
||||
udf->state = UDF_STATE_READY;
|
||||
uv_cond_broadcast(&udf->condReady);
|
||||
uv_mutex_unlock(&udf->lock);
|
||||
} else {
|
||||
while (udf->state != UDF_STATE_READY) {
|
||||
uv_cond_wait(&udf->condReady, &udf->lock);
|
||||
}
|
||||
SUdfcFuncHandle *handle = taosMemoryMalloc(sizeof(SUdfcFuncHandle));
|
||||
handle->udf = udf;
|
||||
uv_mutex_unlock(&udf->lock);
|
||||
}
|
||||
SUdfcFuncHandle *handle = taosMemoryMalloc(sizeof(SUdfcFuncHandle));
|
||||
handle->udf = udf;
|
||||
|
||||
SUdfResponse rsp;
|
||||
rsp.seqNum = request->seqNum;
|
||||
rsp.type = request->type;
|
||||
rsp.code = code;
|
||||
rsp.setupRsp.udfHandle = (int64_t)(handle);
|
||||
rsp.setupRsp.outputType = udf->outputType;
|
||||
rsp.setupRsp.outputLen = udf->outputLen;
|
||||
rsp.setupRsp.bufSize = udf->bufSize;
|
||||
SUdfResponse rsp;
|
||||
rsp.seqNum = request->seqNum;
|
||||
rsp.type = request->type;
|
||||
rsp.code = code;
|
||||
rsp.setupRsp.udfHandle = (int64_t)(handle);
|
||||
rsp.setupRsp.outputType = udf->outputType;
|
||||
rsp.setupRsp.outputLen = udf->outputLen;
|
||||
rsp.setupRsp.bufSize = udf->bufSize;
|
||||
|
||||
int32_t len = encodeUdfResponse(NULL, &rsp);
|
||||
rsp.msgLen = len;
|
||||
void *bufBegin = taosMemoryMalloc(len);
|
||||
void *buf = bufBegin;
|
||||
encodeUdfResponse(&buf, &rsp);
|
||||
int32_t len = encodeUdfResponse(NULL, &rsp);
|
||||
rsp.msgLen = len;
|
||||
void *bufBegin = taosMemoryMalloc(len);
|
||||
void *buf = bufBegin;
|
||||
encodeUdfResponse(&buf, &rsp);
|
||||
|
||||
uvUdf->output = uv_buf_init(bufBegin, len);
|
||||
uvUdf->output = uv_buf_init(bufBegin, len);
|
||||
|
||||
taosMemoryFree(uvUdf->input.base);
|
||||
return;
|
||||
taosMemoryFree(uvUdf->input.base);
|
||||
return;
|
||||
}
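The setup path above follows a load-once pattern: the first caller that finds the UDF in UDF_STATE_INIT flips it to the loading state, loads the library and runs the init function, then broadcasts condReady, while concurrent callers block on the condition variable until the state becomes ready. A compact standalone model of that handshake, using pthreads rather than the libuv mutex and condvar used here, might look like the following (names are hypothetical):

#include <pthread.h>

typedef enum { SK_INIT, SK_LOADING, SK_READY } SketchState;

typedef struct {
  pthread_mutex_t lock;
  pthread_cond_t  condReady;
  SketchState     state;
} SketchUdf;

/* doLoad runs exactly once; every other caller waits until it has finished. */
static void sketchEnsureLoaded(SketchUdf *u, void (*doLoad)(SketchUdf *)) {
  pthread_mutex_lock(&u->lock);
  if (u->state == SK_INIT) {
    u->state = SK_LOADING;
    doLoad(u);                 /* load library, resolve symbols, run init */
    u->state = SK_READY;
    pthread_cond_broadcast(&u->condReady);
  } else {
    while (u->state != SK_READY) {
      pthread_cond_wait(&u->condReady, &u->lock);
    }
  }
  pthread_mutex_unlock(&u->lock);
}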
|
||||
|
||||
void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
|
||||
SUdfCallRequest *call = &request->call;
|
||||
fnDebug("%" PRId64 "call request. call type %d, handle: %" PRIx64, request->seqNum, call->callType,
|
||||
call->udfHandle);
|
||||
SUdfcFuncHandle *handle = (SUdfcFuncHandle *)(call->udfHandle);
|
||||
SUdf *udf = handle->udf;
|
||||
SUdfResponse response = {0};
|
||||
SUdfResponse *rsp = &response;
|
||||
fnDebug("%" PRId64 "call request. call type %d, handle: %" PRIx64, request->seqNum, call->callType, call->udfHandle);
|
||||
SUdfcFuncHandle * handle = (SUdfcFuncHandle *)(call->udfHandle);
|
||||
SUdf * udf = handle->udf;
|
||||
SUdfResponse response = {0};
|
||||
SUdfResponse * rsp = &response;
|
||||
SUdfCallResponse *subRsp = &rsp->callRsp;
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
switch(call->callType) {
|
||||
switch (call->callType) {
|
||||
case TSDB_UDF_CALL_SCALA_PROC: {
|
||||
SUdfColumn output = {0};
|
||||
|
||||
|
@ -363,9 +363,7 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
|
|||
break;
|
||||
}
|
||||
case TSDB_UDF_CALL_AGG_INIT: {
|
||||
SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize),
|
||||
.bufLen= udf->bufSize,
|
||||
.numOfResult = 0};
|
||||
SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize), .bufLen = udf->bufSize, .numOfResult = 0};
|
||||
udf->aggStartFunc(&outBuf);
|
||||
subRsp->resultBuf = outBuf;
|
||||
break;
|
||||
|
@ -373,9 +371,7 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
|
|||
case TSDB_UDF_CALL_AGG_PROC: {
|
||||
SUdfDataBlock input = {0};
|
||||
convertDataBlockToUdfDataBlock(&call->block, &input);
|
||||
SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize),
|
||||
.bufLen= udf->bufSize,
|
||||
.numOfResult = 0};
|
||||
SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize), .bufLen = udf->bufSize, .numOfResult = 0};
|
||||
code = udf->aggProcFunc(&input, &call->interBuf, &outBuf);
|
||||
freeUdfInterBuf(&call->interBuf);
|
||||
freeUdfDataDataBlock(&input);
|
||||
|
@ -384,9 +380,7 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
|
|||
break;
|
||||
}
|
||||
case TSDB_UDF_CALL_AGG_FIN: {
|
||||
SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize),
|
||||
.bufLen= udf->bufSize,
|
||||
.numOfResult = 0};
|
||||
SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize), .bufLen = udf->bufSize, .numOfResult = 0};
|
||||
code = udf->aggFinishFunc(&call->interBuf, &outBuf);
|
||||
freeUdfInterBuf(&call->interBuf);
|
||||
subRsp->resultBuf = outBuf;
|
||||
|
@ -429,20 +423,19 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
|
|||
}
|
||||
default:
|
||||
break;
|
||||
|
||||
}
|
||||
|
||||
taosMemoryFree(uvUdf->input.base);
|
||||
return;
|
||||
}
|
||||
|
||||
void udfdProcessTeardownRequest(SUvUdfWork* uvUdf, SUdfRequest* request) {
|
||||
void udfdProcessTeardownRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
|
||||
SUdfTeardownRequest *teardown = &request->teardown;
|
||||
fnInfo("teardown. seq number: %" PRId64 ", handle:%" PRIx64, request->seqNum, teardown->udfHandle);
|
||||
SUdfcFuncHandle *handle = (SUdfcFuncHandle *)(teardown->udfHandle);
|
||||
SUdf *udf = handle->udf;
|
||||
bool unloadUdf = false;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SUdf * udf = handle->udf;
|
||||
bool unloadUdf = false;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
uv_mutex_lock(&global.udfsMutex);
|
||||
udf->refCount--;
|
||||
|
@ -568,7 +561,7 @@ bool isUdfdUvMsgComplete(SUdfdUvConn *pipe) {
|
|||
}
|
||||
|
||||
void udfdHandleRequest(SUdfdUvConn *conn) {
|
||||
uv_work_t *work = taosMemoryMalloc(sizeof(uv_work_t));
|
||||
uv_work_t * work = taosMemoryMalloc(sizeof(uv_work_t));
|
||||
SUvUdfWork *udfWork = taosMemoryMalloc(sizeof(SUvUdfWork));
|
||||
udfWork->client = conn->client;
|
||||
udfWork->input = uv_buf_init(conn->inputBuf, conn->inputLen);
|
||||
|
@ -653,11 +646,11 @@ static bool udfdRpcRfp(int32_t code) {
|
|||
}
|
||||
}
|
||||
|
||||
int initEpSetFromCfg(const char* firstEp, const char* secondEp, SCorEpSet* pEpSet) {
|
||||
int initEpSetFromCfg(const char *firstEp, const char *secondEp, SCorEpSet *pEpSet) {
|
||||
pEpSet->version = 0;
|
||||
|
||||
// init mnode ip set
|
||||
SEpSet* mgmtEpSet = &(pEpSet->epSet);
|
||||
SEpSet *mgmtEpSet = &(pEpSet->epSet);
|
||||
mgmtEpSet->numOfEps = 0;
|
||||
mgmtEpSet->inUse = 0;
|
||||
|
||||
|
@ -694,7 +687,6 @@ int initEpSetFromCfg(const char* firstEp, const char* secondEp, SCorEpSet* pEpSe
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int32_t udfdOpenClientRpc() {
|
||||
SRpcInit rpcInit = {0};
|
||||
rpcInit.label = "UDFD";
|
||||
|
@ -704,15 +696,9 @@ int32_t udfdOpenClientRpc() {
|
|||
rpcInit.connType = TAOS_CONN_CLIENT;
|
||||
rpcInit.idleTime = tsShellActivityTimer * 1000;
|
||||
rpcInit.user = TSDB_DEFAULT_USER;
|
||||
rpcInit.ckey = "key";
|
||||
rpcInit.spi = 1;
|
||||
rpcInit.parent = &global;
|
||||
rpcInit.rfp = udfdRpcRfp;
|
||||
|
||||
char pass[TSDB_PASSWORD_LEN + 1] = {0};
|
||||
taosEncryptPass_c((uint8_t *)(TSDB_DEFAULT_PASS), strlen(TSDB_DEFAULT_PASS), pass);
|
||||
rpcInit.secret = pass;
|
||||
|
||||
global.clientRpc = rpcOpen(&rpcInit);
|
||||
if (global.clientRpc == NULL) {
|
||||
fnError("failed to init dnode rpc client");
|
||||
|
@ -823,7 +809,7 @@ static int32_t udfdUvInit() {
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void udfdCloseWalkCb(uv_handle_t* handle, void* arg) {
|
||||
static void udfdCloseWalkCb(uv_handle_t *handle, void *arg) {
|
||||
if (!uv_is_closing(handle)) {
|
||||
uv_close(handle, NULL);
|
||||
}
|
||||
|
@ -883,7 +869,7 @@ int main(int argc, char *argv[]) {
|
|||
int32_t retryMnodeTimes = 0;
|
||||
int32_t code = 0;
|
||||
while (retryMnodeTimes++ < TSDB_MAX_REPLICA) {
|
||||
uv_sleep(500 * ( 1 << retryMnodeTimes));
|
||||
uv_sleep(500 * (1 << retryMnodeTimes));
|
||||
code = udfdConnectToMnode();
|
||||
if (code == 0) {
|
||||
break;
|
||||
|
|
|
@ -26,7 +26,7 @@ int32_t udf2_start(SUdfInterBuf *buf) {

int32_t udf2(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) {
  double sumSquares = *(double*)interBuf->buf;
  int8_t numOutput = 0;
  int8_t numNotNull = 0;
  for (int32_t i = 0; i < block->numOfCols; ++i) {
    SUdfColumn* col = block->udfCols[i];
    if (!(col->colMeta.type == TSDB_DATA_TYPE_INT ||

@ -56,15 +56,14 @@ int32_t udf2(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInte
        default:
          break;
      }
      numOutput = 1;
      ++numNotNull;
    }
  }

  if (numOutput == 1) {
    *(double*)(newInterBuf->buf) = sumSquares;
    newInterBuf->bufLen = sizeof(double);
  }
  if (interBuf->numOfResult == 0 && numOutput == 0) {
  *(double*)(newInterBuf->buf) = sumSquares;
  newInterBuf->bufLen = sizeof(double);

  if (interBuf->numOfResult == 0 && numNotNull == 0) {
    newInterBuf->numOfResult = 0;
  } else {
    newInterBuf->numOfResult = 1;
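The change to udf2 replaces the numOutput flag with a numNotNull counter, so the intermediate buffer is only marked empty when neither the previous partial result nor the current block contributed a non-null value. The control flow can be modeled in isolation like this, with hypothetical names and plain doubles standing in for the SUdfInterBuf API:

#include <stdbool.h>
#include <stddef.h>

typedef struct {
  double sumSquares;
  int    numOfResult;  /* 0 means "no partial result yet" */
} SketchInterBuf;

/* Fold one block of values into the running sum of squares. */
static void sketchAggProcess(const SketchInterBuf *in, const double *vals, const bool *isNull,
                             size_t n, SketchInterBuf *out) {
  double sum = (in->numOfResult > 0) ? in->sumSquares : 0.0;
  int    numNotNull = 0;

  for (size_t i = 0; i < n; ++i) {
    if (isNull[i]) continue;
    sum += vals[i] * vals[i];
    ++numNotNull;
  }

  out->sumSquares = sum;
  /* Only an all-null block on top of an empty partial result stays empty. */
  out->numOfResult = (in->numOfResult == 0 && numNotNull == 0) ? 0 : 1;
}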
|
|
|
@ -22,7 +22,7 @@
|
|||
#define MAX_INDEX_KEY_LEN 256 // test only, change later
|
||||
|
||||
#define MEM_TERM_LIMIT 10 * 10000
|
||||
#define MEM_THRESHOLD 1024 * 1024
|
||||
#define MEM_THRESHOLD 64 * 1024
|
||||
#define MEM_ESTIMATE_RADIO 1.5
|
||||
|
||||
static void indexMemRef(MemTable* tbl);
|
||||
|
|
|
@ -99,7 +99,7 @@ void fstUnFinishedNodesAddSuffix(FstUnFinishedNodes* nodes, FstSlice bs, Output
|
|||
if (fstSliceIsEmpty(s)) {
|
||||
return;
|
||||
}
|
||||
size_t sz = taosArrayGetSize(nodes->stack) - 1;
|
||||
int32_t sz = taosArrayGetSize(nodes->stack) - 1;
|
||||
FstBuilderNodeUnfinished* un = taosArrayGet(nodes->stack, sz);
|
||||
assert(un->last == NULL);
|
||||
|
||||
|
@ -130,11 +130,11 @@ void fstUnFinishedNodesAddSuffix(FstUnFinishedNodes* nodes, FstSlice bs, Output
uint64_t fstUnFinishedNodesFindCommPrefix(FstUnFinishedNodes* node, FstSlice bs) {
  FstSlice* s = &bs;

  size_t   ssz = taosArrayGetSize(node->stack);  // stack size
  int32_t  ssz = taosArrayGetSize(node->stack);  // stack size
  uint64_t count = 0;
  int32_t  lsz;  // data len
  uint8_t* data = fstSliceData(s, &lsz);
  for (size_t i = 0; i < ssz && i < lsz; i++) {
  for (int32_t i = 0; i < ssz && i < lsz; i++) {
    FstBuilderNodeUnfinished* un = taosArrayGet(node->stack, i);
    if (un->last->inp == data[i]) {
      count++;
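The switch from size_t to int32_t loop counters in this hunk removes a signed/unsigned mismatch: lsz is a signed int32_t filled in by fstSliceData, and comparing it against a size_t index promotes the signed side to unsigned, which draws compiler warnings and, should the length ever be negative, silently turns the bound into a huge unsigned value. A tiny, self-contained illustration of the promotion hazard:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int32_t lsz = -1;   /* e.g. an error value from a length query */
  size_t  hits = 0;

  /* With a size_t index, lsz is converted to size_t, so -1 becomes SIZE_MAX and
   * the guard no longer stops the loop (this also triggers -Wsign-compare). */
  for (size_t i = 0; i < 4 && i < lsz; i++) {
    hits++;
  }
  printf("iterations with size_t index: %zu\n", hits);   /* prints 4, not 0 */

  /* With a matching signed index the comparison behaves as written. */
  int32_t hits2 = 0;
  for (int32_t i = 0; i < 4 && i < lsz; i++) {
    hits2++;
  }
  printf("iterations with int32_t index: %d\n", hits2);  /* prints 0 */
  return 0;
}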
@ -147,8 +147,8 @@ uint64_t fstUnFinishedNodesFindCommPrefix(FstUnFinishedNodes* node, FstSlice bs)
|
|||
uint64_t fstUnFinishedNodesFindCommPrefixAndSetOutput(FstUnFinishedNodes* node, FstSlice bs, Output in, Output* out) {
|
||||
FstSlice* s = &bs;
|
||||
|
||||
size_t lsz = (size_t)(s->end - s->start + 1); // data len
|
||||
size_t ssz = taosArrayGetSize(node->stack); // stack size
|
||||
int32_t lsz = (size_t)(s->end - s->start + 1); // data len
|
||||
int32_t ssz = taosArrayGetSize(node->stack); // stack size
|
||||
*out = in;
|
||||
uint64_t i = 0;
|
||||
for (i = 0; i < lsz && i < ssz; i++) {
|
||||
|
@ -245,7 +245,7 @@ void fstStateCompileForOneTrans(FstCountingWriter* w, CompiledAddr addr, FstTran
|
|||
return;
|
||||
}
|
||||
void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuilderNode* node) {
|
||||
size_t sz = taosArrayGetSize(node->trans);
|
||||
int32_t sz = taosArrayGetSize(node->trans);
|
||||
assert(sz <= 256);
|
||||
|
||||
uint8_t tSize = 0;
|
||||
|
@ -253,7 +253,7 @@ void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuil
|
|||
|
||||
// finalOutput.is_zero()
|
||||
bool anyOuts = (node->finalOutput != 0);
|
||||
for (size_t i = 0; i < sz; i++) {
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
FstTransition* t = taosArrayGet(node->trans, i);
|
||||
tSize = TMAX(tSize, packDeltaSize(addr, t->addr));
|
||||
oSize = TMAX(oSize, packSize(t->out));
|
||||
|
@ -301,7 +301,7 @@ void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuil
|
|||
/// for (uint8_t i = 0; i < 256; i++) {
|
||||
// index[i] = 255;
|
||||
///}
|
||||
for (size_t i = 0; i < sz; i++) {
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
FstTransition* t = taosArrayGet(node->trans, i);
|
||||
index[t->inp] = i;
|
||||
// fstPackDeltaIn(w, addr, t->addr, tSize);
|
||||
|
@ -731,7 +731,7 @@ bool fstNodeFindInput(FstNode* node, uint8_t b, uint64_t* res) {
|
|||
}
|
||||
|
||||
bool fstNodeCompile(FstNode* node, void* w, CompiledAddr lastAddr, CompiledAddr addr, FstBuilderNode* builderNode) {
|
||||
size_t sz = taosArrayGetSize(builderNode->trans);
|
||||
int32_t sz = taosArrayGetSize(builderNode->trans);
|
||||
assert(sz < 256);
|
||||
if (sz == 0 && builderNode->isFinal && builderNode->finalOutput == 0) {
|
||||
return true;
|
||||
|
@ -959,8 +959,8 @@ void fstBuilderNodeUnfinishedAddOutputPrefix(FstBuilderNodeUnfinished* unNode, O
|
|||
if (FST_BUILDER_NODE_IS_FINAL(unNode->node)) {
|
||||
unNode->node->finalOutput += out;
|
||||
}
|
||||
size_t sz = taosArrayGetSize(unNode->node->trans);
|
||||
for (size_t i = 0; i < sz; i++) {
|
||||
int32_t sz = taosArrayGetSize(unNode->node->trans);
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
FstTransition* trn = taosArrayGet(unNode->node->trans, i);
|
||||
trn->out += out;
|
||||
}
|
||||
|
@ -1077,7 +1077,7 @@ bool fstGet(Fst* fst, FstSlice* b, Output* out) {
|
|||
tOut = tOut + FST_NODE_FINAL_OUTPUT(root);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < taosArrayGetSize(nodes); i++) {
|
||||
for (int32_t i = 0; i < taosArrayGetSize(nodes); i++) {
|
||||
FstNode** node = (FstNode**)taosArrayGet(nodes, i);
|
||||
fstNodeDestroy(*node);
|
||||
}
|
||||
|
@ -1352,7 +1352,7 @@ StreamWithStateResult* streamWithStateNextWith(StreamWithState* sws, StreamCallb
|
|||
StreamState s2 = {.node = nextNode, .trans = 0, .out = {.null = false, .out = out}, .autState = nextState};
|
||||
taosArrayPush(sws->stack, &s2);
|
||||
|
||||
size_t isz = taosArrayGetSize(sws->inp);
|
||||
int32_t isz = taosArrayGetSize(sws->inp);
|
||||
uint8_t* buf = (uint8_t*)taosMemoryMalloc(isz * sizeof(uint8_t));
|
||||
for (uint32_t i = 0; i < isz; i++) {
|
||||
buf[i] = *(uint8_t*)taosArrayGet(sws->inp, i);
|
||||
|
|
|
@ -116,7 +116,7 @@ TFileCache* tfileCacheCreate(const char* path) {
|
|||
continue;
|
||||
}
|
||||
TFileHeader* header = &reader->header;
|
||||
ICacheKey key = {.suid = header->suid, .colName = header->colName, .nColName = strlen(header->colName)};
|
||||
ICacheKey key = {.suid = header->suid, .colName = header->colName, .nColName = (int32_t)strlen(header->colName)};
|
||||
|
||||
char buf[128] = {0};
|
||||
int32_t sz = indexSerialCacheKey(&key, buf);
|
||||
|
@ -230,7 +230,7 @@ static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
|
|||
indexInfo("index: %" PRIu64 ", col: %s, colVal: %s, found table info in tindex, time cost: %" PRIu64 "us",
|
||||
tem->suid, tem->colName, tem->colVal, cost);
|
||||
|
||||
ret = tfileReaderLoadTableIds((TFileReader*)reader, offset, tr->total);
|
||||
ret = tfileReaderLoadTableIds((TFileReader*)reader, (int32_t)offset, tr->total);
|
||||
cost = taosGetTimestampUs() - et;
|
||||
indexInfo("index: %" PRIu64 ", col: %s, colVal: %s, load all table info, time cost: %" PRIu64 "us", tem->suid,
|
||||
tem->colName, tem->colVal, cost);
|
||||
|
@ -890,7 +890,7 @@ static int tfileWriteFooter(TFileWriter* write) {
|
|||
char buf[sizeof(tfileMagicNumber) + 1] = {0};
|
||||
void* pBuf = (void*)buf;
|
||||
taosEncodeFixedU64((void**)(void*)&pBuf, tfileMagicNumber);
|
||||
int nwrite = write->ctx->write(write->ctx, buf, strlen(buf));
|
||||
int nwrite = write->ctx->write(write->ctx, buf, (int32_t)strlen(buf));
|
||||
|
||||
indexInfo("tfile write footer size: %d", write->ctx->size(write->ctx));
|
||||
assert(nwrite == sizeof(tfileMagicNumber));
|
||||
|
|
|
@ -37,14 +37,14 @@ static int iBinarySearch(SArray *arr, int s, int e, uint64_t k) {
|
|||
}
|
||||
|
||||
void iIntersection(SArray *inters, SArray *final) {
|
||||
int32_t sz = taosArrayGetSize(inters);
|
||||
int32_t sz = (int32_t)taosArrayGetSize(inters);
|
||||
if (sz <= 0) {
|
||||
return;
|
||||
}
|
||||
MergeIndex *mi = taosMemoryCalloc(sz, sizeof(MergeIndex));
|
||||
for (int i = 0; i < sz; i++) {
|
||||
SArray *t = taosArrayGetP(inters, i);
|
||||
mi[i].len = taosArrayGetSize(t);
|
||||
mi[i].len = (int32_t)taosArrayGetSize(t);
|
||||
mi[i].idx = 0;
|
||||
}
|
||||
|
||||
|
@ -70,7 +70,7 @@ void iIntersection(SArray *inters, SArray *final) {
|
|||
taosMemoryFreeClear(mi);
|
||||
}
|
||||
void iUnion(SArray *inters, SArray *final) {
|
||||
int32_t sz = taosArrayGetSize(inters);
|
||||
int32_t sz = (int32_t)taosArrayGetSize(inters);
|
||||
if (sz <= 0) {
|
||||
return;
|
||||
}
|
||||
|
@ -82,7 +82,7 @@ void iUnion(SArray *inters, SArray *final) {
|
|||
MergeIndex *mi = taosMemoryCalloc(sz, sizeof(MergeIndex));
|
||||
for (int i = 0; i < sz; i++) {
|
||||
SArray *t = taosArrayGetP(inters, i);
|
||||
mi[i].len = taosArrayGetSize(t);
|
||||
mi[i].len = (int32_t)taosArrayGetSize(t);
|
||||
mi[i].idx = 0;
|
||||
}
|
||||
while (1) {
|
||||
|
@ -117,8 +117,8 @@ void iUnion(SArray *inters, SArray *final) {
|
|||
}
|
||||
|
||||
void iExcept(SArray *total, SArray *except) {
|
||||
int32_t tsz = taosArrayGetSize(total);
|
||||
int32_t esz = taosArrayGetSize(except);
|
||||
int32_t tsz = (int32_t)taosArrayGetSize(total);
|
||||
int32_t esz = (int32_t)taosArrayGetSize(except);
|
||||
if (esz == 0 || tsz == 0) {
|
||||
return;
|
||||
}
|
||||
|
@ -141,7 +141,10 @@ int uidCompare(const void *a, const void *b) {
  // add more version compare
  uint64_t u1 = *(uint64_t *)a;
  uint64_t u2 = *(uint64_t *)b;
  return u1 - u2;
  if (u1 == u2) {
    return 0;
  }
  return u1 < u2 ? -1 : 1;
}
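The old "return u1 - u2;" is the classic comparator bug: the uint64_t difference is truncated to int, so two keys that differ only in the high bits, or by a multiple of 2^32, can compare as equal or with the wrong sign. Returning an explicit -1/0/1, as the new code does, is the safe form; a standalone qsort comparator written the same way:

#include <stdint.h>
#include <stdlib.h>

/* Three-way compare for uint64_t keys; never derives the result from subtraction. */
static int sketchU64Compare(const void *a, const void *b) {
  uint64_t u1 = *(const uint64_t *)a;
  uint64_t u2 = *(const uint64_t *)b;
  if (u1 == u2) return 0;
  return (u1 < u2) ? -1 : 1;
}

/* Usage: qsort(uids, nUids, sizeof(uint64_t), sketchU64Compare); */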
int verdataCompare(const void *a, const void *b) {
|
||||
SIdxVerdata *va = (SIdxVerdata *)a;
|
||||
|
|
|
@ -92,7 +92,19 @@ target_link_libraries (jsonUT
|
|||
index
|
||||
)
|
||||
|
||||
#add_test(
|
||||
# NAME index_test
|
||||
# COMMAND indexTest
|
||||
#)
|
||||
add_test(
|
||||
NAME idxtest
|
||||
COMMAND indexTest
|
||||
)
|
||||
add_test(
|
||||
NAME idxJsonUT
|
||||
COMMAND jsonUT
|
||||
)
|
||||
add_test(
|
||||
NAME idxUtilUT
|
||||
COMMAND UtilUT
|
||||
)
|
||||
add_test(
|
||||
NAME idxFstUT
|
||||
COMMAND fstUT
|
||||
)
|
||||
|
|
|
@ -48,7 +48,7 @@ class FstWriter {
|
|||
|
||||
class FstReadMemory {
|
||||
public:
|
||||
FstReadMemory(size_t size, const std::string& fileName = "/tmp/tindex.tindex") {
|
||||
FstReadMemory(int32_t size, const std::string& fileName = "/tmp/tindex.tindex") {
|
||||
_wc = writerCtxCreate(TFile, fileName.c_str(), true, 64 * 1024);
|
||||
_w = fstCountingWriterCreate(_wc);
|
||||
_size = size;
|
||||
|
@ -152,7 +152,7 @@ class FstReadMemory {
|
|||
Fst* _fst;
|
||||
FstSlice _s;
|
||||
WriterCtx* _wc;
|
||||
size_t _size;
|
||||
int32_t _size;
|
||||
};
|
||||
|
||||
#define L 100
|
||||
|
|
|
@ -714,7 +714,7 @@ class IndexObj {
|
|||
return numOfTable;
|
||||
}
|
||||
int ReadMultiMillonData(const std::string& colName, const std::string& colVal = "Hello world",
|
||||
size_t numOfTable = 100 * 10000) {
|
||||
size_t numOfTable = 100) {
|
||||
std::string tColVal = colVal;
|
||||
|
||||
int colValSize = tColVal.size();
|
||||
|
@ -896,7 +896,7 @@ TEST_F(IndexEnv2, testIndex_TrigeFlush) {
|
|||
// r
|
||||
std::cout << "failed to init" << std::endl;
|
||||
}
|
||||
int numOfTable = 100 * 10000;
|
||||
int numOfTable = 100 * 100;
|
||||
index->WriteMillonData("tag1", "Hello Wolrd", numOfTable);
|
||||
int target = index->SearchOne("tag1", "Hello Wolrd");
|
||||
std::cout << "Get Index: " << target << std::endl;
|
||||
|
@ -910,8 +910,8 @@ static void single_write_and_search(IndexObj* idx) {
|
|||
static void multi_write_and_search(IndexObj* idx) {
|
||||
int target = idx->SearchOne("tag1", "Hello");
|
||||
target = idx->SearchOne("tag2", "Test");
|
||||
idx->WriteMultiMillonData("tag1", "hello world test", 100 * 10000);
|
||||
idx->WriteMultiMillonData("tag2", "world test nothing", 100 * 10000);
|
||||
idx->WriteMultiMillonData("tag1", "hello world test", 100 * 100);
|
||||
idx->WriteMultiMillonData("tag2", "world test nothing", 100 * 10);
|
||||
}
|
||||
TEST_F(IndexEnv2, testIndex_serarch_cache_and_tfile) {
|
||||
std::string path = "/tmp/cache_and_tfile";
|
||||
|
@ -920,8 +920,8 @@ TEST_F(IndexEnv2, testIndex_serarch_cache_and_tfile) {
|
|||
}
|
||||
index->PutOne("tag1", "Hello");
|
||||
index->PutOne("tag2", "Test");
|
||||
index->WriteMultiMillonData("tag1", "Hello", 100 * 10000);
|
||||
index->WriteMultiMillonData("tag2", "Test", 100 * 10000);
|
||||
index->WriteMultiMillonData("tag1", "Hello", 100 * 100);
|
||||
index->WriteMultiMillonData("tag2", "Test", 100 * 100);
|
||||
std::thread threads[NUM_OF_THREAD];
|
||||
|
||||
for (int i = 0; i < NUM_OF_THREAD; i++) {
|
||||
|
@ -949,49 +949,49 @@ TEST_F(IndexEnv2, testIndex_MultiWrite_and_MultiRead) {
|
|||
}
|
||||
}
|
||||
|
||||
TEST_F(IndexEnv2, testIndex_restart) {
|
||||
std::string path = "/tmp/cache_and_tfile";
|
||||
if (index->Init(path) != 0) {
|
||||
}
|
||||
index->SearchOneTarget("tag1", "Hello", 10);
|
||||
index->SearchOneTarget("tag2", "Test", 10);
|
||||
}
|
||||
TEST_F(IndexEnv2, testIndex_restart1) {
|
||||
std::string path = "/tmp/cache_and_tfile";
|
||||
if (index->Init(path) != 0) {
|
||||
}
|
||||
index->ReadMultiMillonData("tag1", "coding");
|
||||
index->SearchOneTarget("tag1", "Hello", 10);
|
||||
index->SearchOneTarget("tag2", "Test", 10);
|
||||
}
|
||||
// TEST_F(IndexEnv2, testIndex_restart) {
|
||||
// std::string path = "/tmp/cache_and_tfile";
|
||||
// if (index->Init(path) != 0) {
|
||||
// }
|
||||
// index->SearchOneTarget("tag1", "Hello", 10);
|
||||
// index->SearchOneTarget("tag2", "Test", 10);
|
||||
//}
|
||||
// TEST_F(IndexEnv2, testIndex_restart1) {
|
||||
// std::string path = "/tmp/cache_and_tfile";
|
||||
// if (index->Init(path) != 0) {
|
||||
// }
|
||||
// index->ReadMultiMillonData("tag1", "coding");
|
||||
// index->SearchOneTarget("tag1", "Hello", 10);
|
||||
// index->SearchOneTarget("tag2", "Test", 10);
|
||||
//}
|
||||
|
||||
TEST_F(IndexEnv2, testIndex_read_performance) {
|
||||
std::string path = "/tmp/cache_and_tfile";
|
||||
if (index->Init(path) != 0) {
|
||||
}
|
||||
index->PutOneTarge("tag1", "Hello", 12);
|
||||
index->PutOneTarge("tag1", "Hello", 15);
|
||||
index->ReadMultiMillonData("tag1", "Hello");
|
||||
std::cout << "reader sz: " << index->SearchOne("tag1", "Hello") << std::endl;
|
||||
assert(3 == index->SearchOne("tag1", "Hello"));
|
||||
}
|
||||
TEST_F(IndexEnv2, testIndexMultiTag) {
|
||||
std::string path = "/tmp/multi_tag";
|
||||
if (index->Init(path) != 0) {
|
||||
}
|
||||
int64_t st = taosGetTimestampUs();
|
||||
int32_t num = 1000 * 10000;
|
||||
index->WriteMultiMillonData("tag1", "xxxxxxxxxxxxxxx", num);
|
||||
std::cout << "numOfRow: " << num << "\ttime cost:" << taosGetTimestampUs() - st << std::endl;
|
||||
// index->WriteMultiMillonData("tag2", "xxxxxxxxxxxxxxxxxxxxxxxxx", 100 * 10000);
|
||||
}
|
||||
// TEST_F(IndexEnv2, testIndex_read_performance) {
|
||||
// std::string path = "/tmp/cache_and_tfile";
|
||||
// if (index->Init(path) != 0) {
|
||||
// }
|
||||
// index->PutOneTarge("tag1", "Hello", 12);
|
||||
// index->PutOneTarge("tag1", "Hello", 15);
|
||||
// index->ReadMultiMillonData("tag1", "Hello");
|
||||
// std::cout << "reader sz: " << index->SearchOne("tag1", "Hello") << std::endl;
|
||||
// assert(3 == index->SearchOne("tag1", "Hello"));
|
||||
//}
|
||||
// TEST_F(IndexEnv2, testIndexMultiTag) {
|
||||
// std::string path = "/tmp/multi_tag";
|
||||
// if (index->Init(path) != 0) {
|
||||
// }
|
||||
// int64_t st = taosGetTimestampUs();
|
||||
// int32_t num = 1000 * 10000;
|
||||
// index->WriteMultiMillonData("tag1", "xxxxxxxxxxxxxxx", num);
|
||||
// std::cout << "numOfRow: " << num << "\ttime cost:" << taosGetTimestampUs() - st << std::endl;
|
||||
// // index->WriteMultiMillonData("tag2", "xxxxxxxxxxxxxxxxxxxxxxxxx", 100 * 10000);
|
||||
//}
|
||||
TEST_F(IndexEnv2, testLongComVal1) {
|
||||
std::string path = "/tmp/long_colVal";
|
||||
if (index->Init(path) != 0) {
|
||||
}
|
||||
// gen colVal by randstr
|
||||
std::string randstr = "xxxxxxxxxxxxxxxxx";
|
||||
index->WriteMultiMillonData("tag1", randstr, 100 * 10000);
|
||||
index->WriteMultiMillonData("tag1", randstr, 100 * 1000);
|
||||
}
|
||||
|
||||
TEST_F(IndexEnv2, testLongComVal2) {
|
||||
|
@ -1000,7 +1000,7 @@ TEST_F(IndexEnv2, testLongComVal2) {
|
|||
}
|
||||
// gen colVal by randstr
|
||||
std::string randstr = "abcccc fdadfafdafda";
|
||||
index->WriteMultiMillonData("tag1", randstr, 100 * 10000);
|
||||
index->WriteMultiMillonData("tag1", randstr, 100 * 1000);
|
||||
}
|
||||
TEST_F(IndexEnv2, testLongComVal3) {
|
||||
std::string path = "/tmp/long_colVal";
|
||||
|
@ -1008,7 +1008,7 @@ TEST_F(IndexEnv2, testLongComVal3) {
|
|||
}
|
||||
// gen colVal by randstr
|
||||
std::string randstr = "Yes, coding and coding and coding";
|
||||
index->WriteMultiMillonData("tag1", randstr, 100 * 10000);
|
||||
index->WriteMultiMillonData("tag1", randstr, 100 * 1000);
|
||||
}
|
||||
TEST_F(IndexEnv2, testLongComVal4) {
|
||||
std::string path = "/tmp/long_colVal";
|
||||
|
@ -1016,7 +1016,7 @@ TEST_F(IndexEnv2, testLongComVal4) {
|
|||
}
|
||||
// gen colVal by randstr
|
||||
std::string randstr = "111111 bac fdadfa";
|
||||
index->WriteMultiMillonData("tag1", randstr, 100 * 10000);
|
||||
index->WriteMultiMillonData("tag1", randstr, 100 * 100);
|
||||
}
|
||||
TEST_F(IndexEnv2, testIndex_read_performance1) {
|
||||
std::string path = "/tmp/cache_and_tfile";
|
||||
|
@ -1026,7 +1026,7 @@ TEST_F(IndexEnv2, testIndex_read_performance1) {
|
|||
index->PutOneTarge("tag1", "Hello", 15);
|
||||
index->ReadMultiMillonData("tag1", "Hello", 1000);
|
||||
std::cout << "reader sz: " << index->SearchOne("tag1", "Hello") << std::endl;
|
||||
assert(3 == index->SearchOne("tag1", "Hello"));
|
||||
EXPECT_EQ(2, index->SearchOne("tag1", "Hello"));
|
||||
}
|
||||
TEST_F(IndexEnv2, testIndex_read_performance2) {
|
||||
std::string path = "/tmp/cache_and_tfile";
|
||||
|
@ -1034,9 +1034,9 @@ TEST_F(IndexEnv2, testIndex_read_performance2) {
|
|||
}
|
||||
index->PutOneTarge("tag1", "Hello", 12);
|
||||
index->PutOneTarge("tag1", "Hello", 15);
|
||||
index->ReadMultiMillonData("tag1", "Hello", 1000 * 10);
|
||||
index->ReadMultiMillonData("tag1", "Hello", 1000);
|
||||
std::cout << "reader sz: " << index->SearchOne("tag1", "Hello") << std::endl;
|
||||
assert(3 == index->SearchOne("tag1", "Hello"));
|
||||
EXPECT_EQ(2, index->SearchOne("tag1", "Hello"));
|
||||
}
|
||||
TEST_F(IndexEnv2, testIndex_read_performance3) {
|
||||
std::string path = "/tmp/cache_and_tfile";
|
||||
|
@ -1044,9 +1044,9 @@ TEST_F(IndexEnv2, testIndex_read_performance3) {
|
|||
}
|
||||
index->PutOneTarge("tag1", "Hello", 12);
|
||||
index->PutOneTarge("tag1", "Hello", 15);
|
||||
index->ReadMultiMillonData("tag1", "Hello", 1000 * 100);
|
||||
index->ReadMultiMillonData("tag1", "Hello", 1000);
|
||||
std::cout << "reader sz: " << index->SearchOne("tag1", "Hello") << std::endl;
|
||||
assert(3 == index->SearchOne("tag1", "Hello"));
|
||||
EXPECT_EQ(2, index->SearchOne("tag1", "Hello"));
|
||||
}
|
||||
TEST_F(IndexEnv2, testIndex_read_performance4) {
|
||||
std::string path = "/tmp/cache_and_tfile";
|
||||
|
@ -1054,9 +1054,9 @@ TEST_F(IndexEnv2, testIndex_read_performance4) {
|
|||
}
|
||||
index->PutOneTarge("tag10", "Hello", 12);
|
||||
index->PutOneTarge("tag12", "Hello", 15);
|
||||
index->ReadMultiMillonData("tag10", "Hello", 1000 * 100);
|
||||
index->ReadMultiMillonData("tag10", "Hello", 1000);
|
||||
std::cout << "reader sz: " << index->SearchOne("tag1", "Hello") << std::endl;
|
||||
assert(3 == index->SearchOne("tag10", "Hello"));
|
||||
EXPECT_EQ(1, index->SearchOne("tag10", "Hello"));
|
||||
}
|
||||
TEST_F(IndexEnv2, testIndex_cache_del) {
|
||||
std::string path = "/tmp/cache_and_tfile";
|
||||
|
@ -1108,7 +1108,7 @@ TEST_F(IndexEnv2, testIndex_del) {
|
|||
index->Del("tag10", "Hello", 11);
|
||||
EXPECT_EQ(98, index->SearchOne("tag10", "Hello"));
|
||||
|
||||
index->WriteMultiMillonData("tag10", "xxxxxxxxxxxxxx", 100 * 10000);
|
||||
index->WriteMultiMillonData("tag10", "xxxxxxxxxxxxxx", 100 * 100);
|
||||
index->Del("tag10", "Hello", 17);
|
||||
EXPECT_EQ(97, index->SearchOne("tag10", "Hello"));
|
||||
}
|
||||
|
|
|
@ -154,7 +154,7 @@ TEST_F(JsonEnv, testWriteMillonData) {
|
|||
|
||||
SIndexMultiTerm* terms = indexMultiTermCreate();
|
||||
indexMultiTermAdd(terms, term);
|
||||
for (size_t i = 0; i < 100; i++) {
|
||||
for (size_t i = 0; i < 10; i++) {
|
||||
tIndexJsonPut(index, terms, i);
|
||||
}
|
||||
indexMultiTermDestroy(terms);
|
||||
|
@ -162,14 +162,14 @@ TEST_F(JsonEnv, testWriteMillonData) {
|
|||
{
|
||||
std::string colName("voltagefdadfa");
|
||||
std::string colVal("abxxxxxxxxxxxx");
|
||||
for (int i = 0; i < 1000; i++) {
|
||||
for (int i = 0; i < 10; i++) {
|
||||
colVal[i % colVal.size()] = '0' + i % 128;
|
||||
SIndexTerm* term = indexTermCreate(1, ADD_VALUE, TSDB_DATA_TYPE_BINARY, colName.c_str(), colName.size(),
|
||||
colVal.c_str(), colVal.size());
|
||||
|
||||
SIndexMultiTerm* terms = indexMultiTermCreate();
|
||||
indexMultiTermAdd(terms, term);
|
||||
for (size_t i = 0; i < 1000; i++) {
|
||||
for (size_t i = 0; i < 100; i++) {
|
||||
tIndexJsonPut(index, terms, i);
|
||||
}
|
||||
indexMultiTermDestroy(terms);
|
||||
|
@ -199,7 +199,7 @@ TEST_F(JsonEnv, testWriteMillonData) {
|
|||
SArray* result = taosArrayInit(1, sizeof(uint64_t));
|
||||
indexMultiTermQueryAdd(mq, q, QUERY_TERM);
|
||||
tIndexJsonSearch(index, mq, result);
|
||||
assert(100 == taosArrayGetSize(result));
|
||||
EXPECT_EQ(10, taosArrayGetSize(result));
|
||||
indexMultiTermQueryDestroy(mq);
|
||||
}
|
||||
{
|
||||
|
@ -229,7 +229,7 @@ TEST_F(JsonEnv, testWriteMillonData) {
|
|||
SArray* result = taosArrayInit(1, sizeof(uint64_t));
|
||||
indexMultiTermQueryAdd(mq, q, QUERY_GREATER_EQUAL);
|
||||
tIndexJsonSearch(index, mq, result);
|
||||
assert(100 == taosArrayGetSize(result));
|
||||
EXPECT_EQ(10, taosArrayGetSize(result));
|
||||
indexMultiTermQueryDestroy(mq);
|
||||
}
|
||||
}
|
||||
|
@ -385,7 +385,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
|
|||
|
||||
SIndexMultiTerm* terms = indexMultiTermCreate();
|
||||
indexMultiTermAdd(terms, term);
|
||||
for (size_t i = 0; i < 100000; i++) {
|
||||
for (size_t i = 0; i < 1000; i++) {
|
||||
tIndexJsonPut(index, terms, i);
|
||||
}
|
||||
indexMultiTermDestroy(terms);
|
||||
|
@ -523,7 +523,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT2) {
|
|||
{
|
||||
int val = 10;
|
||||
std::string colName("test1");
|
||||
for (int i = 0; i < 10000; i++) {
|
||||
for (int i = 0; i < 1000; i++) {
|
||||
val += 1;
|
||||
WriteData(index, colName, TSDB_DATA_TYPE_INT, &val, sizeof(val), i);
|
||||
}
|
||||
|
@ -532,7 +532,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT2) {
|
|||
int val = 10;
|
||||
std::string colName("test2xxx");
|
||||
std::string colVal("xxxxxxxxxxxxxxx");
|
||||
for (int i = 0; i < 100000; i++) {
|
||||
for (int i = 0; i < 1000; i++) {
|
||||
val += 1;
|
||||
WriteData(index, colName, TSDB_DATA_TYPE_BINARY, (void*)(colVal.c_str()), colVal.size(), i);
|
||||
}
|
||||
|
@ -542,14 +542,14 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT2) {
|
|||
std::string colName("test1");
|
||||
int val = 9;
|
||||
Search(index, colName, TSDB_DATA_TYPE_INT, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
|
||||
EXPECT_EQ(10000, taosArrayGetSize(res));
|
||||
EXPECT_EQ(1000, taosArrayGetSize(res));
|
||||
}
|
||||
{
|
||||
SArray* res = NULL;
|
||||
std::string colName("test2xxx");
|
||||
std::string colVal("xxxxxxxxxxxxxxx");
|
||||
Search(index, colName, TSDB_DATA_TYPE_BINARY, (void*)(colVal.c_str()), colVal.size(), QUERY_TERM, &res);
|
||||
EXPECT_EQ(100000, taosArrayGetSize(res));
|
||||
EXPECT_EQ(1000, taosArrayGetSize(res));
|
||||
}
|
||||
}
|
||||
TEST_F(JsonEnv, testWriteJsonTfileAndCache_FLOAT) {
|
||||
|
|
|
@ -53,6 +53,8 @@ static bool afterGroupBy(ESqlClause clause) { return clause > SQL_CLAUSE_GROUP_B
|
|||
|
||||
static bool beforeHaving(ESqlClause clause) { return clause < SQL_CLAUSE_HAVING; }
|
||||
|
||||
static bool afterHaving(ESqlClause clause) { return clause > SQL_CLAUSE_HAVING; }
|
||||
|
||||
static int32_t addNamespace(STranslateContext* pCxt, void* pTable) {
|
||||
size_t currTotalLevel = taosArrayGetSize(pCxt->pNsLevel);
|
||||
if (currTotalLevel > pCxt->currLevel) {
|
||||
|
@ -276,6 +278,10 @@ static bool isScanPseudoColumnFunc(const SNode* pNode) {
|
|||
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsScanPseudoColumnFunc(((SFunctionNode*)pNode)->funcId));
|
||||
}
|
||||
|
||||
static bool isNonstandardSQLFunc(const SNode* pNode) {
|
||||
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsNonstandardSQLFunc(((SFunctionNode*)pNode)->funcId));
|
||||
}
|
||||
|
||||
static bool isDistinctOrderBy(STranslateContext* pCxt) {
|
||||
return (SQL_CLAUSE_ORDER_BY == pCxt->currClause && pCxt->pCurrStmt->isDistinct);
|
||||
}
|
||||
|
@ -433,6 +439,7 @@ static EDealRes translateColumnWithoutPrefix(STranslateContext* pCxt, SColumnNod
|
|||
SArray* pTables = taosArrayGetP(pCxt->pNsLevel, pCxt->currLevel);
|
||||
size_t nums = taosArrayGetSize(pTables);
|
||||
bool found = false;
|
||||
bool isInternalPk = isInternalPrimaryKey(pCol);
|
||||
for (size_t i = 0; i < nums; ++i) {
|
||||
STableNode* pTable = taosArrayGetP(pTables, i);
|
||||
if (findAndSetColumn(pCol, pTable)) {
|
||||
|
@ -440,10 +447,13 @@ static EDealRes translateColumnWithoutPrefix(STranslateContext* pCxt, SColumnNod
|
|||
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_AMBIGUOUS_COLUMN, pCol->colName);
|
||||
}
|
||||
found = true;
|
||||
if (isInternalPk) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
if (isInternalPrimaryKey(pCol)) {
|
||||
if (isInternalPk) {
|
||||
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_INTERNAL_PK);
|
||||
} else {
|
||||
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, pCol->colName);
|
||||
|
@ -703,10 +713,13 @@ static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) {
|
|||
return DEAL_RES_CONTINUE;
|
||||
}
|
||||
|
||||
static EDealRes haveAggFunction(SNode* pNode, void* pContext) {
|
||||
static EDealRes haveAggOrNonstdFunction(SNode* pNode, void* pContext) {
|
||||
if (isAggFunc(pNode)) {
|
||||
*((bool*)pContext) = true;
|
||||
return DEAL_RES_END;
|
||||
} else if (isNonstandardSQLFunc(pNode)) {
|
||||
*((bool*)pContext) = true;
|
||||
return DEAL_RES_END;
|
||||
}
|
||||
return DEAL_RES_CONTINUE;
|
||||
}
|
||||
|
@ -743,6 +756,12 @@ static int32_t rewriteCountStar(STranslateContext* pCxt, SFunctionNode* pCount)
|
|||
return code;
|
||||
}
|
||||
|
||||
static bool hasInvalidFuncNesting(SNodeList* pParameterList) {
|
||||
bool hasInvalidFunc = false;
|
||||
nodesWalkExprs(pParameterList, haveAggOrNonstdFunction, &hasInvalidFunc);
|
||||
return hasInvalidFunc;
|
||||
}
|
||||
|
||||
static EDealRes translateFunction(STranslateContext* pCxt, SFunctionNode* pFunc) {
|
||||
SFmGetFuncInfoParam param = {.pCtg = pCxt->pParseCxt->pCatalog,
|
||||
.pRpc = pCxt->pParseCxt->pTransporter,
|
||||
|
@ -754,11 +773,12 @@ static EDealRes translateFunction(STranslateContext* pCxt, SFunctionNode* pFunc)
|
|||
if (beforeHaving(pCxt->currClause)) {
|
||||
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_ILLEGAL_USE_AGG_FUNCTION);
|
||||
}
|
||||
bool haveAggFunc = false;
|
||||
nodesWalkExprs(pFunc->pParameterList, haveAggFunction, &haveAggFunc);
|
||||
if (haveAggFunc) {
|
||||
if (hasInvalidFuncNesting(pFunc->pParameterList)) {
|
||||
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_AGG_FUNC_NESTING);
|
||||
}
|
||||
if (pCxt->pCurrStmt->hasNonstdSQLFunc) {
|
||||
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
|
||||
}
|
||||
|
||||
pCxt->pCurrStmt->hasAggFuncs = true;
|
||||
pCxt->pCurrStmt->isTimeOrderQuery = false;
|
||||
|
@ -784,6 +804,15 @@ static EDealRes translateFunction(STranslateContext* pCxt, SFunctionNode* pFunc)
|
|||
}
|
||||
}
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == pCxt->errCode && fmIsNonstandardSQLFunc(pFunc->funcId)) {
|
||||
if (SQL_CLAUSE_SELECT != pCxt->currClause || pCxt->pCurrStmt->hasNonstdSQLFunc || pCxt->pCurrStmt->hasAggFuncs) {
|
||||
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
|
||||
}
|
||||
if (hasInvalidFuncNesting(pFunc->pParameterList)) {
|
||||
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_AGG_FUNC_NESTING);
|
||||
}
|
||||
pCxt->pCurrStmt->hasNonstdSQLFunc = true;
|
||||
}
|
||||
return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
|
||||
}
|
||||
|
||||
|
@ -952,6 +981,7 @@ typedef struct CheckAggColCoexistCxt {
|
|||
STranslateContext* pTranslateCxt;
|
||||
bool existAggFunc;
|
||||
bool existCol;
|
||||
bool existNonstdFunc;
|
||||
int32_t selectFuncNum;
|
||||
} CheckAggColCoexistCxt;
|
||||
|
||||
|
@ -962,6 +992,10 @@ static EDealRes doCheckAggColCoexist(SNode* pNode, void* pContext) {
|
|||
pCxt->existAggFunc = true;
|
||||
return DEAL_RES_IGNORE_CHILD;
|
||||
}
|
||||
if (isNonstandardSQLFunc(pNode)) {
|
||||
pCxt->existNonstdFunc = true;
|
||||
return DEAL_RES_IGNORE_CHILD;
|
||||
}
|
||||
if (isScanPseudoColumnFunc(pNode) || QUERY_NODE_COLUMN == nodeType(pNode)) {
|
||||
pCxt->existCol = true;
|
||||
}
|
||||
|
@ -972,16 +1006,21 @@ static int32_t checkAggColCoexist(STranslateContext* pCxt, SSelectStmt* pSelect)
  if (NULL != pSelect->pGroupByList) {
    return TSDB_CODE_SUCCESS;
  }
  CheckAggColCoexistCxt cxt = {.pTranslateCxt = pCxt, .existAggFunc = false, .existCol = false};
  CheckAggColCoexistCxt cxt = {
      .pTranslateCxt = pCxt, .existAggFunc = false, .existCol = false, .existNonstdFunc = false};
  nodesWalkExprs(pSelect->pProjectionList, doCheckAggColCoexist, &cxt);
  if (!pSelect->isDistinct) {
    nodesWalkExprs(pSelect->pOrderByList, doCheckAggColCoexist, &cxt);
  }
  if (1 == cxt.selectFuncNum) {
    return rewriteColsToSelectValFunc(pCxt, pSelect);
  } else if ((cxt.selectFuncNum > 1 || cxt.existAggFunc || NULL != pSelect->pWindow) && cxt.existCol) {
  }
  if ((cxt.selectFuncNum > 1 || cxt.existAggFunc || NULL != pSelect->pWindow) && cxt.existCol) {
    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_SINGLE_GROUP);
  }
  if (cxt.existNonstdFunc && cxt.existCol) {
    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
  }
  return TSDB_CODE_SUCCESS;
}
|
||||
|
||||
|
|
|
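For orientation, here is a minimal standalone sketch of the walker-with-flag pattern that haveAggOrNonstdFunction and hasInvalidFuncNesting implement above; the Node type, field names, and main() below are invented for illustration and are not TDengine's nodes API.

#include <stdbool.h>
#include <stdio.h>

typedef enum { NODE_COLUMN, NODE_VALUE, NODE_AGG_FUNC, NODE_NONSTD_FUNC } ENodeType;

typedef struct Node {
  ENodeType    type;
  struct Node *child;   /* a single child is enough for the sketch */
} Node;

/* Walker callback: set the flag and stop as soon as an aggregate or
 * non-standard function is seen (mirrors haveAggOrNonstdFunction). */
static bool flagAggOrNonstd(const Node *n, bool *flag) {
  if (n->type == NODE_AGG_FUNC || n->type == NODE_NONSTD_FUNC) {
    *flag = true;
    return false;   /* stop walking */
  }
  return true;      /* keep walking */
}

/* Walk a parameter chain; true means "invalid nesting found"
 * (mirrors hasInvalidFuncNesting). */
static bool hasInvalidNesting(const Node *param) {
  bool found = false;
  for (const Node *n = param; n != NULL && flagAggOrNonstd(n, &found); n = n->child) {
  }
  return found;
}

int main(void) {
  Node col = {NODE_COLUMN, NULL};
  Node agg = {NODE_AGG_FUNC, &col};   /* e.g. COUNT(c1) used as a parameter */
  printf("plain column nested: %d\n", hasInvalidNesting(&col));  /* 0 */
  printf("agg func nested:     %d\n", hasInvalidNesting(&agg));  /* 1 */
  return 0;
}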
@ -164,6 +164,9 @@ static char* getSyntaxErrFormat(int32_t errCode) {
return "Invalid function name";
case TSDB_CODE_PAR_COMMENT_TOO_LONG:
return "Comment too long";
case TSDB_CODE_PAR_NOT_ALLOWED_FUNC:
return "Some functions are allowed only in the SELECT list of a query "
"and cannot be mixed with other non-scalar functions or columns.";
case TSDB_CODE_OUT_OF_MEMORY:
return "Out of memory";
default:
@ -121,6 +121,26 @@ TEST_F(ParserSelectTest, selectFunc) {
run("SELECT MAX(c1), c2 FROM t1 STATE_WINDOW(c3)");
}
TEST_F(ParserSelectTest, nonstdFunc) {
useDb("root", "test");
run("SELECT DIFF(c1) FROM t1");
// run("SELECT DIFF(c1) FROM t1 INTERVAL(10s)");
}
TEST_F(ParserSelectTest, nonstdFuncSemanticCheck) {
useDb("root", "test");
run("SELECT DIFF(c1), c2 FROM t1", TSDB_CODE_PAR_NOT_ALLOWED_FUNC, PARSER_STAGE_TRANSLATE);
run("SELECT DIFF(c1), tbname FROM t1", TSDB_CODE_PAR_NOT_ALLOWED_FUNC, PARSER_STAGE_TRANSLATE);
run("SELECT DIFF(c1), count(*) FROM t1", TSDB_CODE_PAR_NOT_ALLOWED_FUNC, PARSER_STAGE_TRANSLATE);
run("SELECT DIFF(c1), CSUM(c1) FROM t1", TSDB_CODE_PAR_NOT_ALLOWED_FUNC, PARSER_STAGE_TRANSLATE);
}
TEST_F(ParserSelectTest, clause) {
useDb("root", "test");
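The new tests exercise the rule added in translateFunction and checkAggColCoexist: a non-standard function such as DIFF is accepted only in the SELECT list and only when it is not mixed with columns, pseudo-columns, aggregates, or another non-standard function. Below is a hedged sketch of that decision expressed as a plain predicate; the flag names echo the SSelectStmt fields, but the code is illustrative only, not the parser's implementation.

#include <stdbool.h>
#include <stdio.h>

typedef struct {
  bool inSelectClause;    /* is the function in the SELECT list?           */
  bool hasNonstdSQLFunc;  /* another DIFF/CSUM-style function already?     */
  bool hasAggFuncs;       /* an aggregate already present?                 */
  bool hasPlainColumn;    /* a bare column or tbname in the projection?    */
} SelectFlags;

/* true = the non-standard function is accepted,
 * false = rejected (TSDB_CODE_PAR_NOT_ALLOWED_FUNC in the real parser). */
static bool nonstdFuncAllowed(SelectFlags f) {
  if (!f.inSelectClause) return false;
  if (f.hasNonstdSQLFunc || f.hasAggFuncs) return false;
  if (f.hasPlainColumn) return false;
  return true;
}

int main(void) {
  SelectFlags ok  = {true, false, false, false};   /* SELECT DIFF(c1) FROM t1           */
  SelectFlags col = {true, false, false, true};    /* SELECT DIFF(c1), c2 FROM t1       */
  SelectFlags agg = {true, false, true,  false};   /* SELECT DIFF(c1), count(*) FROM t1 */
  printf("%d %d %d\n", nonstdFuncAllowed(ok), nonstdFuncAllowed(col), nonstdFuncAllowed(agg));
  return 0;
}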
@ -24,9 +24,9 @@
#define SPLIT_FLAG_TEST_MASK(val, mask) (((val) & (mask)) != 0)
typedef struct SSplitContext {
int32_t queryId;
int32_t groupId;
bool split;
uint64_t queryId;
int32_t groupId;
bool split;
} SSplitContext;
typedef int32_t (*FSplit)(SSplitContext* pCxt, SLogicSubplan* pSubplan);
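SSplitContext.queryId is widened from int32_t to uint64_t, presumably so the splitter carries the full 64-bit query id without truncation. A tiny sketch of the loss the narrower field would risk; the sample id is made up.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint64_t queryId = 0x123456789ABCDEF0ULL;  /* hypothetical 64-bit query id */
  uint32_t narrow  = (uint32_t)queryId;      /* old 32-bit field width: high bits dropped */
  uint64_t wide    = queryId;                /* new uint64_t field: value preserved */
  printf("narrow: %" PRIx32 "\n", narrow);
  printf("wide:   %" PRIx64 "\n", wide);
  return 0;
}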
@ -292,7 +292,7 @@ int32_t qwRegisterQueryBrokenLinkArg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn) {
req->taskId = htobe64(tId);
req->refId = htobe64(rId);
SRpcMsg pMsg = {
SRpcMsg brokenMsg = {
.msgType = TDMT_VND_DROP_TASK,
.pCont = req,
.contLen = sizeof(STaskDropReq),

@ -300,7 +300,7 @@ int32_t qwRegisterQueryBrokenLinkArg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn) {
.info = *pConn,
};
tmsgRegisterBrokenLinkArg(&pMsg);
tmsgRegisterBrokenLinkArg(&brokenMsg);
return TSDB_CODE_SUCCESS;
}

@ -326,7 +326,7 @@ int32_t qwRegisterHbBrokenLinkArg(SQWorker *mgmt, uint64_t sId, SRpcHandleInfo *
QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
SRpcMsg pMsg = {
SRpcMsg brokenMsg = {
.msgType = TDMT_VND_QUERY_HEARTBEAT,
.pCont = msg,
.contLen = msgSize,

@ -334,7 +334,7 @@ int32_t qwRegisterHbBrokenLinkArg(SQWorker *mgmt, uint64_t sId, SRpcHandleInfo *
.info = *pConn,
};
tmsgRegisterBrokenLinkArg(&pMsg);
tmsgRegisterBrokenLinkArg(&brokenMsg);
return TSDB_CODE_SUCCESS;
}
@ -247,6 +247,19 @@ int32_t streamTaskExec2(SStreamTask* pTask, SMsgCb* pMsgCb) {
void* data = NULL;
taosGetQitem(pTask->inputQAll, &data);
if (data == NULL) break;
streamTaskExecImpl(pTask, data, pRes);
taosFreeQitem(data);
if (taosArrayGetSize(pRes) != 0) {
SStreamDataBlock* resQ = taosAllocateQitem(sizeof(void**), DEF_QITEM);
resQ->type = STREAM_INPUT__DATA_BLOCK;
resQ->blocks = pRes;
taosWriteQitem(pTask->outputQ, resQ);
pRes = taosArrayInit(0, sizeof(SSDataBlock));
if (pRes == NULL) goto FAIL;
}
}
atomic_store_8(&pTask->status, TASK_STATUS__IDLE);

@ -298,62 +311,66 @@ int32_t streamTaskSink(SStreamTask* pTask, SMsgCb* pMsgCb) {
}
// dispatch
if (pTask->dispatchType == TASK_DISPATCH__INPLACE) {
SRpcMsg dispatchMsg = {0};
if (streamBuildExecMsg(pTask, pRes, &dispatchMsg, NULL) < 0) {
ASSERT(0);
return -1;
}
int32_t qType;
if (pTask->dispatchMsgType == TDMT_VND_TASK_PIPE_EXEC || pTask->dispatchMsgType == TDMT_SND_TASK_PIPE_EXEC) {
qType = FETCH_QUEUE;
} else if (pTask->dispatchMsgType == TDMT_VND_TASK_MERGE_EXEC ||
pTask->dispatchMsgType == TDMT_SND_TASK_MERGE_EXEC) {
qType = MERGE_QUEUE;
} else if (pTask->dispatchMsgType == TDMT_VND_TASK_WRITE_EXEC) {
qType = WRITE_QUEUE;
} else {
ASSERT(0);
}
tmsgPutToQueue(pMsgCb, qType, &dispatchMsg);
} else if (pTask->dispatchType == TASK_DISPATCH__FIXED) {
SRpcMsg dispatchMsg = {0};
SEpSet* pEpSet = NULL;
if (streamBuildExecMsg(pTask, pRes, &dispatchMsg, &pEpSet) < 0) {
ASSERT(0);
return -1;
}
tmsgSendReq(pEpSet, &dispatchMsg);
} else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) {
SHashObj* pShuffleRes = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
if (pShuffleRes == NULL) {
return -1;
}
int32_t sz = taosArrayGetSize(pRes);
for (int32_t i = 0; i < sz; i++) {
SSDataBlock* pDataBlock = taosArrayGet(pRes, i);
SArray* pArray = taosHashGet(pShuffleRes, &pDataBlock->info.groupId, sizeof(int64_t));
if (pArray == NULL) {
pArray = taosArrayInit(0, sizeof(SSDataBlock));
if (pArray == NULL) {
return -1;
}
taosHashPut(pShuffleRes, &pDataBlock->info.groupId, sizeof(int64_t), &pArray, sizeof(void*));
// TODO dispatch guard
int8_t outputStatus = atomic_load_8(&pTask->outputStatus);
if (outputStatus == TASK_OUTPUT_STATUS__NORMAL) {
if (pTask->dispatchType == TASK_DISPATCH__INPLACE) {
SRpcMsg dispatchMsg = {0};
if (streamBuildExecMsg(pTask, pRes, &dispatchMsg, NULL) < 0) {
ASSERT(0);
return -1;
}
taosArrayPush(pArray, pDataBlock);
}
if (streamShuffleDispatch(pTask, pMsgCb, pShuffleRes) < 0) {
return -1;
}
int32_t qType;
if (pTask->dispatchMsgType == TDMT_VND_TASK_PIPE_EXEC || pTask->dispatchMsgType == TDMT_SND_TASK_PIPE_EXEC) {
qType = FETCH_QUEUE;
} else if (pTask->dispatchMsgType == TDMT_VND_TASK_MERGE_EXEC ||
pTask->dispatchMsgType == TDMT_SND_TASK_MERGE_EXEC) {
qType = MERGE_QUEUE;
} else if (pTask->dispatchMsgType == TDMT_VND_TASK_WRITE_EXEC) {
qType = WRITE_QUEUE;
} else {
ASSERT(0);
}
tmsgPutToQueue(pMsgCb, qType, &dispatchMsg);
} else {
ASSERT(pTask->dispatchType == TASK_DISPATCH__NONE);
} else if (pTask->dispatchType == TASK_DISPATCH__FIXED) {
SRpcMsg dispatchMsg = {0};
SEpSet* pEpSet = NULL;
if (streamBuildExecMsg(pTask, pRes, &dispatchMsg, &pEpSet) < 0) {
ASSERT(0);
return -1;
}
tmsgSendReq(pEpSet, &dispatchMsg);
} else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) {
SHashObj* pShuffleRes = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
if (pShuffleRes == NULL) {
return -1;
}
int32_t sz = taosArrayGetSize(pRes);
for (int32_t i = 0; i < sz; i++) {
SSDataBlock* pDataBlock = taosArrayGet(pRes, i);
SArray* pArray = taosHashGet(pShuffleRes, &pDataBlock->info.groupId, sizeof(int64_t));
if (pArray == NULL) {
pArray = taosArrayInit(0, sizeof(SSDataBlock));
if (pArray == NULL) {
return -1;
}
taosHashPut(pShuffleRes, &pDataBlock->info.groupId, sizeof(int64_t), &pArray, sizeof(void*));
}
taosArrayPush(pArray, pDataBlock);
}
if (streamShuffleDispatch(pTask, pMsgCb, pShuffleRes) < 0) {
return -1;
}
} else {
ASSERT(pTask->dispatchType == TASK_DISPATCH__NONE);
}
}
}
return 0;
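The reworked streamTaskSink only dispatches while the task's outputStatus reads TASK_OUTPUT_STATUS__NORMAL and then routes the built message to a queue chosen from the dispatch message type. A self-contained C11 sketch of that gate-and-route shape; the enums, Task struct, and function names below are stand-ins, not the stream module's real types.

#include <stdatomic.h>
#include <stdio.h>

enum { OUTPUT_NORMAL, OUTPUT_BLOCKED };
enum { MSG_PIPE_EXEC, MSG_MERGE_EXEC, MSG_WRITE_EXEC };
enum { FETCH_Q, MERGE_Q, WRITE_Q, NO_Q = -1 };

typedef struct {
  atomic_int outputStatus;
  int        dispatchMsgType;
} Task;

/* Pick the target queue from the dispatch message type
 * (mirrors the qType selection in streamTaskSink). */
static int pickQueue(int msgType) {
  switch (msgType) {
    case MSG_PIPE_EXEC:  return FETCH_Q;
    case MSG_MERGE_EXEC: return MERGE_Q;
    case MSG_WRITE_EXEC: return WRITE_Q;
    default:             return NO_Q;
  }
}

/* Dispatch only while the downstream is not blocked. */
static int trySink(Task *t) {
  if (atomic_load(&t->outputStatus) != OUTPUT_NORMAL) {
    return -1;                      /* skip; retry after a dispatch response */
  }
  int q = pickQueue(t->dispatchMsgType);
  if (q == NO_Q) return -1;
  printf("dispatching to queue %d\n", q);
  return 0;
}

int main(void) {
  Task t;
  atomic_init(&t.outputStatus, OUTPUT_NORMAL);
  t.dispatchMsgType = MSG_MERGE_EXEC;
  trySink(&t);                                    /* dispatches to MERGE_Q */
  atomic_store(&t.outputStatus, OUTPUT_BLOCKED);
  printf("blocked: %d\n", trySink(&t));           /* -1, nothing sent      */
  return 0;
}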
@ -406,11 +423,32 @@ int32_t streamTaskProcessDispatchReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStream
return 0;
}
int32_t streamTaskProcessDispatchRsp(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchRsp* pRsp) {
atomic_store_8(&pTask->inputStatus, pRsp->inputStatus);
if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) {
// TODO: init recover timer
}
// continue dispatch
streamTaskSink(pTask, pMsgCb);
return 0;
}
int32_t streamTaskProcessRunReq(SStreamTask* pTask, SMsgCb* pMsgCb) {
streamTaskExec2(pTask, pMsgCb);
streamTaskSink(pTask, pMsgCb);
return 0;
}
int32_t streamTaskProcessRecoverReq(SStreamTask* pTask, char* msg) {
//
return 0;
}
int32_t streamTaskProcessRecoverRsp(SStreamTask* pTask, char* msg) {
//
return 0;
}
int32_t streamExecTask(SStreamTask* pTask, SMsgCb* pMsgCb, const void* input, int32_t inputType, int32_t workId) {
SArray* pRes = NULL;
// source
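streamTaskProcessDispatchRsp copies the receiver's input status back into the sender and, when the receiver reports TASK_INPUT_STATUS__BLOCKED, is intended to arm a recover timer (still a TODO in this commit) rather than push more data. A hedged sketch of that backpressure handshake with simplified stand-in types, not the stream module's actual structures.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { INPUT_NORMAL, INPUT_BLOCKED };

typedef struct {
  atomic_int inputStatus;   /* last status reported by the downstream task */
} Sender;

/* Downstream's reply to a dispatch: its current input status. */
typedef struct {
  int inputStatus;
} DispatchRsp;

/* Mirror of the dispatch-response handling: record the status and decide
 * whether to keep dispatching or back off and retry later. */
static bool onDispatchRsp(Sender *s, const DispatchRsp *rsp) {
  atomic_store(&s->inputStatus, rsp->inputStatus);
  if (rsp->inputStatus == INPUT_BLOCKED) {
    /* the real code would start a recover timer here and re-sink on expiry */
    return false;
  }
  return true;   /* safe to continue sinking immediately */
}

int main(void) {
  Sender s;
  atomic_init(&s.inputStatus, INPUT_NORMAL);
  DispatchRsp ok = {INPUT_NORMAL}, full = {INPUT_BLOCKED};
  printf("continue: %d\n", onDispatchRsp(&s, &ok));    /* 1 */
  printf("continue: %d\n", onDispatchRsp(&s, &full));  /* 0 */
  return 0;
}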
@ -36,10 +36,10 @@ typedef struct SSyncIO {
STaosQueue *pMsgQ;
STaosQset * pQset;
TdThread consumerTid;
void * serverRpc;
void * clientRpc;
SEpSet myAddr;
void *serverRpc;
void *clientRpc;
SEpSet myAddr;
SMsgCb msgcb;
tmr_h qTimer;
int32_t qTimerMS;

@ -65,8 +65,8 @@ extern SSyncIO *gSyncIO;
int32_t syncIOStart(char *host, uint16_t port);
int32_t syncIOStop();
int32_t syncIOSendMsg(void *clientRpc, const SEpSet *pEpSet, SRpcMsg *pMsg);
int32_t syncIOEqMsg(void *queue, SRpcMsg *pMsg);
int32_t syncIOSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg);
int32_t syncIOEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg);
int32_t syncIOQTimerStart();
int32_t syncIOQTimerStop();
@ -159,11 +159,10 @@ typedef struct SSyncNode {
char configPath[TSDB_FILENAME_LEN * 2];
// sync io
SWal* pWal;
void* rpcClient;
int32_t (*FpSendMsg)(void* rpcClient, const SEpSet* pEpSet, SRpcMsg* pMsg);
void* queue;
int32_t (*FpEqMsg)(void* queue, SRpcMsg* pMsg);
SWal* pWal;
const SMsgCb* msgcb;
int32_t (*FpSendMsg)(const SEpSet* pEpSet, SRpcMsg* pMsg);
int32_t (*FpEqMsg)(const SMsgCb* msgcb, SRpcMsg* pMsg);
// init internal
SNodeInfo myNodeInfo;
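Across the sync headers, the pair of raw handles (rpcClient, queue) plus handle-taking callbacks collapses into a single SMsgCb pointer with narrower callback signatures. A before/after sketch of this refactoring pattern; EpSet, Msg, MsgCb, and the functions below are placeholders, not the real trpc/tmsgcb API.

#include <stdio.h>

typedef struct { int dummy; } EpSet;
typedef struct { int payload; } Msg;
typedef struct { const char *name; } MsgCb;   /* stands in for SMsgCb */

/* Before: every caller threads an opaque handle through the callback. */
typedef int (*SendFnOld)(void *rpcClient, const EpSet *eps, Msg *m);
typedef int (*EnqueueFnOld)(void *queue, Msg *m);

/* After: the endpoint owns its context; callers pass only the message
 * (FpSendMsg) or a shared callback struct (FpEqMsg). */
typedef int (*SendFnNew)(const EpSet *eps, Msg *m);
typedef int (*EnqueueFnNew)(const MsgCb *msgcb, Msg *m);

static int sendNew(const EpSet *eps, Msg *m) {
  (void)eps;
  printf("send payload %d via the endpoint's own client\n", m->payload);
  return 0;
}

static int enqueueNew(const MsgCb *msgcb, Msg *m) {
  printf("enqueue payload %d on %s\n", m->payload, msgcb->name);
  return 0;
}

int main(void) {
  SendFnNew    fpSend = sendNew;
  EnqueueFnNew fpEq   = enqueueNew;
  MsgCb cb = {"write-queue"};
  Msg   m  = {42};
  fpSend(NULL, &m);
  fpEq(&cb, &m);
  return 0;
}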
@ -66,7 +66,7 @@ int32_t syncIOStop() {
return ret;
}
int32_t syncIOSendMsg(void *clientRpc, const SEpSet *pEpSet, SRpcMsg *pMsg) {
int32_t syncIOSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) {
assert(pEpSet->inUse == 0);
assert(pEpSet->numOfEps == 1);

@ -83,11 +83,11 @@ int32_t syncIOSendMsg(void *clientRpc, const SEpSet *pEpSet, SRpcMsg *pMsg) {
pMsg->info.handle = NULL;
pMsg->info.noResp = 1;
rpcSendRequest(clientRpc, pEpSet, pMsg, NULL);
rpcSendRequest(gSyncIO->clientRpc, pEpSet, pMsg, NULL);
return ret;
}
int32_t syncIOEqMsg(void *queue, SRpcMsg *pMsg) {
int32_t syncIOEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
int32_t ret = 0;
char logBuf[128];
syncRpcMsgLog2((char *)"==syncIOEqMsg==", pMsg);

@ -96,7 +96,7 @@ int32_t syncIOEqMsg(void *queue, SRpcMsg *pMsg) {
pTemp = taosAllocateQitem(sizeof(SRpcMsg), DEF_QITEM);
memcpy(pTemp, pMsg, sizeof(SRpcMsg));
STaosQueue *pMsgQ = queue;
STaosQueue *pMsgQ = gSyncIO->pMsgQ;
taosWriteQitem(pMsgQ, pTemp);
return ret;

@ -183,9 +183,6 @@ static int32_t syncIOStartInternal(SSyncIO *io) {
rpcInit.sessions = 100;
rpcInit.idleTime = 100;
rpcInit.user = "sync-io";
rpcInit.secret = "sync-io";
rpcInit.ckey = "key";
rpcInit.spi = 0;
rpcInit.connType = TAOS_CONN_CLIENT;
io->clientRpc = rpcOpen(&rpcInit);

@ -206,7 +203,6 @@ static int32_t syncIOStartInternal(SSyncIO *io) {
rpcInit.cfp = syncIOProcessRequest;
rpcInit.sessions = 1000;
rpcInit.idleTime = 2 * 1500;
rpcInit.afp = syncIOAuth;
rpcInit.parent = io;
rpcInit.connType = TAOS_CONN_SERVER;
@ -240,26 +240,14 @@ int32_t syncGetAndDelRespRpc(int64_t rid, uint64_t index, SRpcMsg* msg) {
return ret;
}
void syncSetQ(int64_t rid, void* queue) {
void syncSetMsgCb(int64_t rid, const SMsgCb *msgcb) {
SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
if (pSyncNode == NULL) {
sTrace("syncSetQ get pSyncNode is NULL, rid:%ld", rid);
return;
}
assert(rid == pSyncNode->rid);
pSyncNode->queue = queue;
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
}
void syncSetRpc(int64_t rid, void* rpcHandle) {
SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
if (pSyncNode == NULL) {
sTrace("syncSetRpc get pSyncNode is NULL, rid:%ld", rid);
return;
}
assert(rid == pSyncNode->rid);
pSyncNode->rpcClient = rpcHandle;
pSyncNode->msgcb = msgcb;
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
}

@ -332,7 +320,7 @@ int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak) {
SRpcMsg rpcMsg;
syncClientRequest2RpcMsg(pSyncMsg, &rpcMsg);
if (pSyncNode->FpEqMsg != NULL) {
pSyncNode->FpEqMsg(pSyncNode->queue, &rpcMsg);
pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg);
} else {
sTrace("syncPropose pSyncNode->FpEqMsg is NULL");
}

@ -375,9 +363,8 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pSyncInfo) {
snprintf(pSyncNode->configPath, sizeof(pSyncNode->configPath), "%s/raft_config.json", pSyncInfo->path);
pSyncNode->pWal = pSyncInfo->pWal;
pSyncNode->rpcClient = pSyncInfo->rpcClient;
pSyncNode->msgcb = pSyncInfo->msgcb;
pSyncNode->FpSendMsg = pSyncInfo->FpSendMsg;
pSyncNode->queue = pSyncInfo->queue;
pSyncNode->FpEqMsg = pSyncInfo->FpEqMsg;
// init raft config

@ -691,7 +678,7 @@ int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRp
// htonl
syncUtilMsgHtoN(pMsg->pCont);
pSyncNode->FpSendMsg(pSyncNode->rpcClient, &epSet, pMsg);
pSyncNode->FpSendMsg(&epSet, pMsg);
} else {
sTrace("syncNodeSendMsgById pSyncNode->FpSendMsg is NULL");
}

@ -706,7 +693,7 @@ int32_t syncNodeSendMsgByInfo(const SNodeInfo* nodeInfo, SSyncNode* pSyncNode, S
// htonl
syncUtilMsgHtoN(pMsg->pCont);
pSyncNode->FpSendMsg(pSyncNode->rpcClient, &epSet, pMsg);
pSyncNode->FpSendMsg(&epSet, pMsg);
} else {
sTrace("syncNodeSendMsgByInfo pSyncNode->FpSendMsg is NULL");
}

@ -728,12 +715,12 @@ cJSON* syncNode2Json(const SSyncNode* pSyncNode) {
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->pWal);
cJSON_AddStringToObject(pRoot, "pWal", u64buf);
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->rpcClient);
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->msgcb);
cJSON_AddStringToObject(pRoot, "rpcClient", u64buf);
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpSendMsg);
cJSON_AddStringToObject(pRoot, "FpSendMsg", u64buf);
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->queue);
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->msgcb);
cJSON_AddStringToObject(pRoot, "queue", u64buf);
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpEqMsg);
cJSON_AddStringToObject(pRoot, "FpEqMsg", u64buf);

@ -1095,7 +1082,7 @@ static void syncNodeEqPingTimer(void* param, void* tmrId) {
syncTimeout2RpcMsg(pSyncMsg, &rpcMsg);
syncRpcMsgLog2((char*)"==syncNodeEqPingTimer==", &rpcMsg);
if (pSyncNode->FpEqMsg != NULL) {
pSyncNode->FpEqMsg(pSyncNode->queue, &rpcMsg);
pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg);
} else {
sTrace("syncNodeEqPingTimer pSyncNode->FpEqMsg is NULL");
}

@ -1118,7 +1105,7 @@ static void syncNodeEqElectTimer(void* param, void* tmrId) {
syncTimeout2RpcMsg(pSyncMsg, &rpcMsg);
syncRpcMsgLog2((char*)"==syncNodeEqElectTimer==", &rpcMsg);
if (pSyncNode->FpEqMsg != NULL) {
pSyncNode->FpEqMsg(pSyncNode->queue, &rpcMsg);
pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg);
} else {
sTrace("syncNodeEqElectTimer pSyncNode->FpEqMsg is NULL");
}

@ -1145,7 +1132,7 @@ static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) {
syncTimeout2RpcMsg(pSyncMsg, &rpcMsg);
syncRpcMsgLog2((char*)"==syncNodeEqHeartbeatTimer==", &rpcMsg);
if (pSyncNode->FpEqMsg != NULL) {
pSyncNode->FpEqMsg(pSyncNode->queue, &rpcMsg);
pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg);
} else {
sTrace("syncNodeEqHeartbeatTimer pSyncNode->FpEqMsg is NULL");
}

@ -1175,10 +1162,10 @@ static int32_t syncNodeEqNoop(SSyncNode* ths) {
assert(pSyncMsg->dataLen == entryLen);
memcpy(pSyncMsg->data, serialized, entryLen);
SRpcMsg rpcMsg;
SRpcMsg rpcMsg = {0};
syncClientRequest2RpcMsg(pSyncMsg, &rpcMsg);
if (ths->FpEqMsg != NULL) {
ths->FpEqMsg(ths->queue, &rpcMsg);
ths->FpEqMsg(ths->msgcb, &rpcMsg);
} else {
sTrace("syncNodeEqNoop pSyncNode->FpEqMsg is NULL");
}
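syncSetQ and syncSetRpc are merged into syncSetMsgCb, keeping the same shape: acquire the node by reference id, bail out if it is gone, assign the field, release the reference. A generic sketch of that guarded-setter pattern over a tiny handle table; the table and the acquire/release helpers are invented for the example and are not taosAcquireRef/taosReleaseRef.

#include <stddef.h>
#include <stdio.h>

typedef struct { const char *name; } MsgCb;
typedef struct { long rid; const MsgCb *msgcb; int refs; } Node;

#define MAX_NODES 4
static Node *gTable[MAX_NODES];

/* Invented stand-ins for a reference-counted handle registry. */
static Node *acquireRef(long rid) {
  for (int i = 0; i < MAX_NODES; ++i) {
    if (gTable[i] != NULL && gTable[i]->rid == rid) { gTable[i]->refs++; return gTable[i]; }
  }
  return NULL;
}
static void releaseRef(Node *n) { if (n != NULL) n->refs--; }

/* Mirrors the syncSetMsgCb shape: look up, check, assign, release. */
static void setMsgCb(long rid, const MsgCb *cb) {
  Node *n = acquireRef(rid);
  if (n == NULL) {
    printf("node %ld not found\n", rid);
    return;
  }
  n->msgcb = cb;
  releaseRef(n);
}

int main(void) {
  Node node = {.rid = 7, .msgcb = NULL, .refs = 0};
  gTable[0] = &node;
  MsgCb cb = {"vnode-write"};
  setMsgCb(7, &cb);   /* assigns the callback */
  setMsgCb(8, &cb);   /* prints "not found"   */
  printf("msgcb=%s refs=%d\n", node.msgcb ? node.msgcb->name : "none", node.refs);
  return 0;
}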
@ -100,9 +100,8 @@ SWal* createWal(char* path, int32_t vgId) {
int64_t createSyncNode(int32_t replicaNum, int32_t myIndex, int32_t vgId, SWal* pWal, char* path, bool isStandBy) {
SSyncInfo syncInfo;
syncInfo.vgId = vgId;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = createFsm();
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s_sync_replica%d_index%d", path, replicaNum, myIndex);

@ -44,9 +44,8 @@ SWal* createWal(char* path, int32_t vgId) {
SSyncNode* createSyncNode(int32_t replicaNum, int32_t myIndex, int32_t vgId, SWal* pWal, char* path) {
SSyncInfo syncInfo;
syncInfo.vgId = vgId;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = NULL;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s_sync_replica%d_index%d", path, replicaNum, myIndex);

@ -31,9 +31,8 @@ SSyncNode *pSyncNode;
SSyncNode *syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./");

@ -25,9 +25,7 @@ SSyncFSM* pFsm;
SSyncNode* syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./");

@ -99,7 +97,7 @@ int main(int argc, char** argv) {
SyncPingReply* pSyncMsg = syncPingReplyBuild2(&pSyncNode->myRaftId, &pSyncNode->myRaftId, 1000, "syncEnqTest");
SRpcMsg rpcMsg;
syncPingReply2RpcMsg(pSyncMsg, &rpcMsg);
pSyncNode->FpEqMsg(pSyncNode->queue, &rpcMsg);
pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg);
taosMsleep(1000);
}

@ -43,7 +43,7 @@ int main() {
SRpcMsg rpcMsg;
syncPingReply2RpcMsg(pSyncMsg, &rpcMsg);
syncIOSendMsg(gSyncIO->clientRpc, &epSet, &rpcMsg);
syncIOSendMsg(&epSet, &rpcMsg);
taosSsleep(1);
}

@ -25,9 +25,8 @@ SSyncFSM* pFsm;
SSyncNode* syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./");

@ -103,7 +102,7 @@ int main(int argc, char** argv) {
SEpSet epSet;
syncUtilnodeInfo2EpSet(&pSyncNode->myNodeInfo, &epSet);
pSyncNode->FpSendMsg(pSyncNode->rpcClient, &epSet, &rpcMsg);
pSyncNode->FpSendMsg(&epSet, &rpcMsg);
taosMsleep(1000);
}

@ -28,9 +28,8 @@ SSyncNode* pSyncNode;
SSyncNode* syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./");

@ -25,9 +25,8 @@ SSyncFSM* pFsm;
SSyncNode* syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./sync_init_test");

@ -25,9 +25,8 @@ SSyncFSM* pFsm;
SSyncNode* syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./");

@ -25,9 +25,8 @@ SSyncFSM* pFsm;
SSyncNode* syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./");

@ -25,9 +25,8 @@ SSyncFSM* pFsm;
SSyncNode* syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./");

@ -97,9 +97,8 @@ SWal* createWal(char* path, int32_t vgId) {
int64_t createSyncNode(int32_t replicaNum, int32_t myIndex, int32_t vgId, SWal* pWal, char* path) {
SSyncInfo syncInfo;
syncInfo.vgId = vgId;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = createFsm();
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s_sync_replica%d_index%d", path, replicaNum, myIndex);

@ -83,9 +83,8 @@ void initFsm() {
SSyncNode *syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", pDir);

@ -200,7 +199,7 @@ int main(int argc, char **argv) {
SyncClientRequest *pSyncClientRequest = pMsg1;
SRpcMsg rpcMsg;
syncClientRequest2RpcMsg(pSyncClientRequest, &rpcMsg);
gSyncNode->FpEqMsg(gSyncNode->queue, &rpcMsg);
gSyncNode->FpEqMsg(gSyncNode->msgcb, &rpcMsg);
taosMsleep(1000);
}

@ -27,9 +27,8 @@ SSyncNode* pSyncNode;
SSyncNode* syncNodeInit() {
syncInfo.vgId = 1234;
syncInfo.rpcClient = gSyncIO->clientRpc;
syncInfo.msgcb = &gSyncIO->msgcb;
syncInfo.FpSendMsg = syncIOSendMsg;
syncInfo.queue = gSyncIO->pMsgQ;
syncInfo.FpEqMsg = syncIOEqMsg;
syncInfo.pFsm = pFsm;
snprintf(syncInfo.path, sizeof(syncInfo.path), "%s", "./");
Some files were not shown because too many files have changed in this diff.