Merge branch '3.0' into cpwu/3.0

commit b0cb4fe6a2

Jenkinsfile2 (137 changed lines)
@@ -33,16 +33,17 @@ def abort_previous(){
milestone(buildNumber)
}
def pre_test(){
sh 'hostname'
sh '''
hostname
date
sudo rmtaos || echo "taosd has not installed"
'''
sh '''
killall -9 taosd ||echo "no taosd running"
killall -9 gdb || echo "no gdb running"
killall -9 python3.8 || echo "no python program running"
cd ${WK}
git reset --hard
git fetch || git fetch
cd ${WKC}
git reset --hard
git fetch || git fetch
'''
script {
if (env.CHANGE_TARGET == 'master') {

@@ -81,10 +82,10 @@ def pre_test(){
git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git log|head -n20
git log -5
cd ${WK}
git pull >/dev/null
git log|head -n20
git log -5
'''
} else if (env.CHANGE_URL =~ /\/TDinternal\//) {
sh '''

@@ -92,10 +93,10 @@ def pre_test(){
git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git log|head -n20
git log -5
cd ${WKC}
git pull >/dev/null
git log|head -n20
git log -5
'''
} else {
sh '''

@@ -106,21 +107,10 @@ def pre_test(){
cd ${WKC}
git submodule update --init --recursive
'''
sh '''
cd ${WK}
export TZ=Asia/Harbin
date
rm -rf debug
mkdir debug
cd debug
cmake .. > /dev/null
make -j4> /dev/null
'''
sh '''
cd ${WKPY}
git reset --hard
git pull
pip3 install .
'''
return 1
}

@@ -131,12 +121,14 @@ def pre_test_win(){
time /t
taskkill /f /t /im python.exe
taskkill /f /t /im bash.exe
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDengine
rd /s /Q C:\\workspace\\%EXECUTOR_NUMBER%\\TDengine\\debug
rd /s /Q C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug
exit 0
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDengine
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git reset --hard
git fetch || git fetch
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git reset --hard
git fetch || git fetch
git checkout -f

@@ -144,39 +136,73 @@ def pre_test_win(){
script {
if (env.CHANGE_TARGET == 'master') {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDengine
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git checkout master
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git checkout master
'''
} else if(env.CHANGE_TARGET == '2.0') {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDengine
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git checkout 2.0
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git checkout 2.0
'''
} else if(env.CHANGE_TARGET == '3.0') {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDengine
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git checkout 3.0
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git checkout 3.0
'''
} else {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDengine
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git checkout develop
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git checkout develop
'''
}
}
script {
if (env.CHANGE_URL =~ /\/TDengine\//) {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git pull
git log -5
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git pull
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git log -5
'''
} else if (env.CHANGE_URL =~ /\/TDinternal\//) {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git pull
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git log -5
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git pull
git log -5
'''
} else {
sh '''
echo "unmatched reposiotry ${CHANGE_URL}"
'''
}
}
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDengine
git branch
git pull || git pull
git fetch origin +refs/pull/%CHANGE_ID%/merge
git checkout -qf FETCH_HEAD
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git submodule update --init --recursive
'''
}
def pre_test_build_win() {
bat '''
echo "building ..."
time /t
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDengine
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
mkdir debug
cd debug
call "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat" x64

@@ -192,6 +218,7 @@ pipeline {
agent none
options { skipDefaultCheckout() }
environment{
WKDIR = '/var/lib/jenkins/workspace'
WK = '/var/lib/jenkins/workspace/TDinternal'
WKC = '/var/lib/jenkins/workspace/TDinternal/community'
WKPY = '/var/lib/jenkins/workspace/taos-connector-python'

@@ -206,39 +233,23 @@ pipeline {
changeRequest()
}
steps {
timeout(time: 45, unit: 'MINUTES'){
timeout(time: 40, unit: 'MINUTES'){
pre_test()
script {
if (env.CHANGE_URL =~ /\/TDengine\//) {
sh '''
cd ${WK}/debug
ctest -VV
'''
sh '''
export LD_LIBRARY_PATH=${WK}/debug/build/lib
cd ${WKC}/tests/system-test
./fulltest.sh
'''
} else if (env.CHANGE_URL =~ /\/TDinternal\//) {
sh '''
cd ${WKC}/debug
ctest -VV
'''
sh '''
export LD_LIBRARY_PATH=${WKC}/debug/build/lib
cd ${WKC}/tests/system-test
./fulltest.sh
'''
} else {
sh '''
echo "unmatched reposiotry ${CHANGE_URL}"
'''
}
sh '''
cd ${WKC}/tests/parallel_test
date
time ./container_build.sh -w ${WKDIR} -t 8 -e
rm -f /tmp/cases.task
./collect_cases.sh -e
'''
sh '''
cd ${WKC}/tests/parallel_test
export DEFAULT_RETRY_TIME=2
date
timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME} -l ${WKDIR}/log -o 480
'''
}
sh '''
cd ${WKC}/tests
./test-all.sh b1fq
'''
}
}
}
@@ -14,25 +14,6 @@ MESSAGE(STATUS "Project binary files output path: " ${PROJECT_BINARY_DIR})
MESSAGE(STATUS "Project executable files output path: " ${EXECUTABLE_OUTPUT_PATH})
MESSAGE(STATUS "Project library files output path: " ${LIBRARY_OUTPUT_PATH})

find_package(Git QUIET)
if(GIT_FOUND AND EXISTS "${TD_SOURCE_DIR}/.git")
# Update submodules as needed
option(GIT_SUBMODULE "Check submodules during build" ON)
if(GIT_SUBMODULE)
message(STATUS "Submodule update")
execute_process(COMMAND cd ${TD_SOURCE_DIR} && ${GIT_EXECUTABLE} submodule update --init --recursive
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
RESULT_VARIABLE GIT_SUBMOD_RESULT)
if(NOT GIT_SUBMOD_RESULT EQUAL "0")
message(WARNING "git submodule update --init --recursive failed with ${GIT_SUBMOD_RESULT}, please checkout submodules")
endif()
endif()
endif()

if(NOT EXISTS "${TD_SOURCE_DIR}/tools/taos-tools/CMakeLists.txt")
message(WARNING "The submodules were not downloaded! GIT_SUBMODULE was turned off or failed. Please update submodules manually if you need build them.")
endif()

if (NOT DEFINED TD_GRANT)
SET(TD_GRANT FALSE)
endif()
@@ -1,5 +1,5 @@
IF (TD_LINUX)
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/make_install.sh")
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.sh")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
INSTALL(CODE "execute_process(COMMAND bash ${TD_MAKE_INSTALL_SH} ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Linux ${TD_VER_NUMBER})")
ELSEIF (TD_WINDOWS)
@@ -78,6 +78,12 @@ option(
OFF
)

option(
BUILD_WITH_BDB
"If build with BDB"
OFF
)

option(
BUILD_WITH_LUCENE
"If build with lucene"
@@ -78,9 +78,9 @@ if(${BUILD_WITH_UV})
endif(${BUILD_WITH_UV})

# bdb
#if(${BUILD_WITH_BDB})
#cat("${TD_SUPPORT_DIR}/bdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#endif(${BUILD_WITH_BDB})
if(${BUILD_WITH_BDB})
cat("${TD_SUPPORT_DIR}/bdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_BDB})

# sqlite
if(${BUILD_WITH_SQLITE})
@@ -365,7 +365,7 @@ if(${BUILD_ADDR2LINE})
if(HAVE_LIBELF_H OR HAVE_LIBELF_LIBELF_H)
target_link_libraries(libdwarf PUBLIC libelf)
endif()
target_include_directories(libdwarf SYSTEM PUBLIC "libdwarf/src/lib/libdwarf" ${CMAKE_BINARY_DIR}/contrib)
target_include_directories(libdwarf SYSTEM PUBLIC "libdwarf/src/lib/libdwarf" ${CMAKE_CURRENT_BINARY_DIR})
file(READ "addr2line/addr2line.c" ADDR2LINE_CONTENT)
string(REPLACE "static int" "int" ADDR2LINE_CONTENT "${ADDR2LINE_CONTENT}")
string(REPLACE "static void" "void" ADDR2LINE_CONTENT "${ADDR2LINE_CONTENT}")
@@ -7,9 +7,9 @@ if(${BUILD_WITH_LUCENE})
add_subdirectory(lucene)
endif(${BUILD_WITH_LUCENE})

#if(${BUILD_WITH_BDB})
#add_subdirectory(bdb)
#endif(${BUILD_WITH_BDB})
if(${BUILD_WITH_BDB})
add_subdirectory(bdb)
endif(${BUILD_WITH_BDB})

if(${BUILD_WITH_SQLITE})
add_subdirectory(sqlite)
@@ -61,7 +61,7 @@ int32_t init_env() {
taos_free_result(pRes);

pRes =
taos_query(pConn, "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)");
taos_query(pConn, "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int)");
if (taos_errno(pRes) != 0) {
printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
return -1;

@@ -106,8 +106,8 @@ int32_t create_topic() {
}
taos_free_result(pRes);

/*pRes = taos_query(pConn, "create topic topic_ctb_column as abc1");*/
pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1, c2, c3 from st1");
pRes = taos_query(pConn, "create topic topic_ctb_column as abc1");
/*pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1, c2, c3 from st1");*/
if (taos_errno(pRes) != 0) {
printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
return -1;
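The two hunks above repeat the same query/check/free pattern around taos_query. A minimal sketch of that pattern, assuming the standard TDengine client header (taos.h) and an already opened connection; run_ddl is a hypothetical helper name, not part of the example file:

#include <stdio.h>
#include "taos.h"

/* Hypothetical helper: run one statement and report failure the same way the example does. */
static int32_t run_ddl(TAOS* pConn, const char* sql) {
  TAOS_RES* pRes = taos_query(pConn, sql);
  if (taos_errno(pRes) != 0) {
    printf("failed to run \"%s\", reason:%s\n", sql, taos_errstr(pRes));
    taos_free_result(pRes);
    return -1;
  }
  taos_free_result(pRes);
  return 0;
}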
@@ -37,7 +37,8 @@ typedef enum {
TSDB_STREAM_TABLE = 4, // table created from stream computing
TSDB_TEMP_TABLE = 5, // temp table created by nest query
TSDB_SYSTEM_TABLE = 6,
TSDB_TABLE_MAX = 7
TSDB_TSMA_TABLE = 7, // time-range-wise sma
TSDB_TABLE_MAX = 8
} ETableType;

typedef enum {
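A small sketch of how the widened enum might be range-checked, based only on the values visible in this hunk; isValidTableType is a hypothetical helper:

#include <stdbool.h>
#include <stdint.h>

/* Any defined table type is a positive value below TSDB_TABLE_MAX (now 8, since TSDB_TSMA_TABLE = 7). */
static inline bool isValidTableType(int8_t type) {
  return type > 0 && type < TSDB_TABLE_MAX;
}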
@@ -201,18 +201,17 @@ typedef struct SExprInfo {
} SExprInfo;

typedef struct {
const char* key;
int32_t keyLen;
uint8_t type;
int16_t length;
const char* key;
int32_t keyLen;
uint8_t type;
union{
const char* value;
int64_t i;
uint64_t u;
double d;
float f;
int64_t i;
uint64_t u;
double d;
float f;
};
int32_t valueLen;
int32_t length;
} SSmlKv;

#define QUERY_ASC_FORWARD_STEP 1
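A hedged sketch of filling the reworked SSmlKv for a numeric field, using only the members shown above; the TSDB_DATA_TYPE_BIGINT type id is an assumption borrowed from the usual TDengine type macros:

SSmlKv kv = {0};
kv.key    = "voltage";
kv.keyLen = 7;
kv.type   = TSDB_DATA_TYPE_BIGINT;  /* assumption: numeric type id defined elsewhere */
kv.i      = 220;                    /* value goes through the new anonymous union */
kv.length = sizeof(int64_t);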
@ -34,7 +34,6 @@ extern int32_t tsVersion;
|
|||
extern int32_t tsStatusInterval;
|
||||
|
||||
// common
|
||||
extern int32_t tsMaxConnections;
|
||||
extern int32_t tsMaxShellConns;
|
||||
extern int32_t tsShellActivityTimer;
|
||||
extern int32_t tsCompressMsgSize;
|
||||
|
@ -52,6 +51,7 @@ extern int32_t tsVnodeShmSize;
|
|||
extern int32_t tsQnodeShmSize;
|
||||
extern int32_t tsSnodeShmSize;
|
||||
extern int32_t tsBnodeShmSize;
|
||||
extern int32_t tsNumOfShmThreads;
|
||||
|
||||
// queue & threads
|
||||
extern int32_t tsNumOfRpcThreads;
|
||||
|
@ -68,6 +68,7 @@ extern int32_t tsNumOfQnodeQueryThreads;
|
|||
extern int32_t tsNumOfQnodeFetchThreads;
|
||||
extern int32_t tsNumOfSnodeSharedThreads;
|
||||
extern int32_t tsNumOfSnodeUniqueThreads;
|
||||
extern int64_t tsRpcQueueMemoryAllowed;
|
||||
|
||||
// monitor
|
||||
extern bool tsEnableMonitor;
|
||||
|
|
|
@@ -1670,6 +1670,7 @@ typedef struct SVDropStbReq {
int32_t tEncodeSVDropStbReq(SEncoder* pCoder, const SVDropStbReq* pReq);
int32_t tDecodeSVDropStbReq(SDecoder* pCoder, SVDropStbReq* pReq);

// TDMT_VND_CREATE_TABLE ==============
#define TD_CREATE_IF_NOT_EXISTS 0x1
typedef struct SVCreateTbReq {
int32_t flags;

@@ -1759,6 +1760,43 @@ typedef struct {
int32_t tEncodeSVDropTbBatchRsp(SEncoder* pCoder, const SVDropTbBatchRsp* pRsp);
int32_t tDecodeSVDropTbBatchRsp(SDecoder* pCoder, SVDropTbBatchRsp* pRsp);

// TDMT_VND_ALTER_TABLE =====================
typedef struct {
const char* tbName;
int8_t action;
const char* colName;
// TSDB_ALTER_TABLE_ADD_COLUMN
int8_t type;
int8_t flags;
int32_t bytes;
// TSDB_ALTER_TABLE_DROP_COLUMN
// TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES
int32_t colModBytes;
// TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME
const char* colNewName;
// TSDB_ALTER_TABLE_UPDATE_TAG_VAL
const char* tagName;
int8_t isNull;
uint32_t nTagVal;
const uint8_t* pTagVal;
// TSDB_ALTER_TABLE_UPDATE_OPTIONS
int8_t updateTTL;
int32_t newTTL;
int8_t updateComment;
const char* newComment;
} SVAlterTbReq;

int32_t tEncodeSVAlterTbReq(SEncoder* pEncoder, const SVAlterTbReq* pReq);
int32_t tDecodeSVAlterTbReq(SDecoder* pDecoder, SVAlterTbReq* pReq);

typedef struct {
int32_t code;
} SVAlterTbRsp;

int32_t tEncodeSVAlterTbRsp(SEncoder* pEncoder, const SVAlterTbRsp* pRsp);
int32_t tDecodeSVAlterTbRsp(SDecoder* pDecoder, SVAlterTbRsp* pRsp);
// ======================

typedef struct {
SMsgHead head;
int64_t uid;
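A hedged sketch of populating SVAlterTbReq for the add-column case, using only the fields above; the TSDB_ALTER_TABLE_ADD_COLUMN action id and the column type macro are assumptions (they are only named in comments here), and the request would then go through tEncodeSVAlterTbReq:

SVAlterTbReq req = {0};
req.tbName  = "d1001";
req.action  = TSDB_ALTER_TABLE_ADD_COLUMN;  /* assumption: action constant defined elsewhere in tmsg.h */
req.colName = "c_new";
req.type    = TSDB_DATA_TYPE_INT;           /* assumption: column type id */
req.bytes   = sizeof(int32_t);
/* serialize with tEncodeSVAlterTbReq(pEncoder, &req) before sending to the vnode */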
@ -2160,26 +2198,23 @@ int32_t tSerializeSMDropSmaReq(void* buf, int32_t bufLen, SMDropSmaReq* pReq);
|
|||
int32_t tDeserializeSMDropSmaReq(void* buf, int32_t bufLen, SMDropSmaReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
int8_t version; // for compatibility(default 0)
|
||||
int8_t intervalUnit; // MACRO: TIME_UNIT_XXX
|
||||
int8_t slidingUnit; // MACRO: TIME_UNIT_XXX
|
||||
int8_t timezoneInt; // sma data expired if timezone changes.
|
||||
char indexName[TSDB_INDEX_NAME_LEN];
|
||||
int32_t exprLen;
|
||||
int32_t tagsFilterLen;
|
||||
int64_t indexUid;
|
||||
tb_uid_t tableUid; // super/child/common table uid
|
||||
int64_t interval;
|
||||
int64_t offset; // use unit by precision of DB
|
||||
int64_t sliding;
|
||||
char* expr; // sma expression
|
||||
char* tagsFilter;
|
||||
int8_t version; // for compatibility(default 0)
|
||||
int8_t intervalUnit; // MACRO: TIME_UNIT_XXX
|
||||
int8_t slidingUnit; // MACRO: TIME_UNIT_XXX
|
||||
int8_t timezoneInt; // sma data expired if timezone changes.
|
||||
char indexName[TSDB_INDEX_NAME_LEN];
|
||||
int32_t exprLen;
|
||||
int32_t tagsFilterLen;
|
||||
int64_t indexUid;
|
||||
tb_uid_t tableUid; // super/child/common table uid
|
||||
int64_t interval;
|
||||
int64_t offset; // use unit by precision of DB
|
||||
int64_t sliding;
|
||||
const char* expr; // sma expression
|
||||
const char* tagsFilter;
|
||||
} STSma; // Time-range-wise SMA
|
||||
|
||||
typedef struct {
|
||||
int64_t ver; // use a general definition
|
||||
STSma tSma;
|
||||
} SVCreateTSmaReq;
|
||||
typedef STSma SVCreateTSmaReq;
|
||||
|
||||
typedef struct {
|
||||
int8_t type; // 0 status report, 1 update data
|
||||
|
@ -2188,7 +2223,6 @@ typedef struct {
|
|||
} STSmaMsg;
|
||||
|
||||
typedef struct {
|
||||
int64_t ver; // use a general definition
|
||||
int64_t indexUid;
|
||||
char indexName[TSDB_INDEX_NAME_LEN];
|
||||
} SVDropTSmaReq;
|
||||
|
@ -2197,28 +2231,21 @@ typedef struct {
|
|||
int tmp; // TODO: to avoid compile error
|
||||
} SVCreateTSmaRsp, SVDropTSmaRsp;
|
||||
|
||||
#if 0
|
||||
int32_t tSerializeSVCreateTSmaReq(void** buf, SVCreateTSmaReq* pReq);
|
||||
void* tDeserializeSVCreateTSmaReq(void* buf, SVCreateTSmaReq* pReq);
|
||||
int32_t tSerializeSVDropTSmaReq(void** buf, SVDropTSmaReq* pReq);
|
||||
void* tDeserializeSVDropTSmaReq(void* buf, SVDropTSmaReq* pReq);
|
||||
#endif
|
||||
|
||||
// RSma: Rollup SMA
|
||||
typedef struct {
|
||||
int64_t interval;
|
||||
int32_t retention; // unit: day
|
||||
uint16_t days; // unit: day
|
||||
int8_t intervalUnit;
|
||||
} SSmaParams;
|
||||
int32_t tEncodeSVCreateTSmaReq(SEncoder* pCoder, const SVCreateTSmaReq* pReq);
|
||||
int32_t tDecodeSVCreateTSmaReq(SDecoder* pCoder, SVCreateTSmaReq* pReq);
|
||||
int32_t tEncodeSVDropTSmaReq(SEncoder* pCoder, const SVDropTSmaReq* pReq);
|
||||
int32_t tDecodeSVDropTSmaReq(SDecoder* pCoder, SVDropTSmaReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
STSma tsma;
|
||||
float xFilesFactor;
|
||||
SArray* smaParams; // SSmaParams
|
||||
} SRSma;
|
||||
|
||||
typedef struct {
|
||||
uint32_t number;
|
||||
STSma* tSma;
|
||||
int32_t number;
|
||||
STSma* tSma;
|
||||
} STSmaWrapper;
|
||||
|
||||
static FORCE_INLINE void tdDestroyTSma(STSma* pSma) {
|
||||
|
@ -2245,96 +2272,26 @@ static FORCE_INLINE void* tdFreeTSmaWrapper(STSmaWrapper* pSW) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static FORCE_INLINE int32_t tEncodeTSma(void** buf, const STSma* pSma) {
|
||||
int32_t tlen = 0;
|
||||
int32_t tEncodeSVCreateTSmaReq(SEncoder* pCoder, const SVCreateTSmaReq* pReq);
|
||||
int32_t tDecodeSVCreateTSmaReq(SDecoder* pCoder, SVCreateTSmaReq* pReq);
|
||||
|
||||
tlen += taosEncodeFixedI8(buf, pSma->version);
|
||||
tlen += taosEncodeFixedI8(buf, pSma->intervalUnit);
|
||||
tlen += taosEncodeFixedI8(buf, pSma->slidingUnit);
|
||||
tlen += taosEncodeFixedI8(buf, pSma->timezoneInt);
|
||||
tlen += taosEncodeString(buf, pSma->indexName);
|
||||
tlen += taosEncodeFixedI32(buf, pSma->exprLen);
|
||||
tlen += taosEncodeFixedI32(buf, pSma->tagsFilterLen);
|
||||
tlen += taosEncodeFixedI64(buf, pSma->indexUid);
|
||||
tlen += taosEncodeFixedI64(buf, pSma->tableUid);
|
||||
tlen += taosEncodeFixedI64(buf, pSma->interval);
|
||||
tlen += taosEncodeFixedI64(buf, pSma->offset);
|
||||
tlen += taosEncodeFixedI64(buf, pSma->sliding);
|
||||
int32_t tEncodeTSma(SEncoder* pCoder, const STSma* pSma);
|
||||
int32_t tDecodeTSma(SDecoder* pCoder, STSma* pSma);
|
||||
|
||||
if (pSma->exprLen > 0) {
|
||||
tlen += taosEncodeString(buf, pSma->expr);
|
||||
static int32_t tEncodeTSmaWrapper(SEncoder* pEncoder, const STSmaWrapper* pReq) {
|
||||
if (tEncodeI32(pEncoder, pReq->number) < 0) return -1;
|
||||
for (int32_t i = 0; i < pReq->number; ++i) {
|
||||
tEncodeTSma(pEncoder, pReq->tSma + i);
|
||||
}
|
||||
|
||||
if (pSma->tagsFilterLen > 0) {
|
||||
tlen += taosEncodeString(buf, pSma->tagsFilter);
|
||||
}
|
||||
|
||||
return tlen;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static FORCE_INLINE int32_t tEncodeTSmaWrapper(void** buf, const STSmaWrapper* pSW) {
|
||||
int32_t tlen = 0;
|
||||
|
||||
tlen += taosEncodeFixedU32(buf, pSW->number);
|
||||
for (uint32_t i = 0; i < pSW->number; ++i) {
|
||||
tlen += tEncodeTSma(buf, pSW->tSma + i);
|
||||
static int32_t tDecodeTSmaWrapper(SDecoder* pDecoder, STSmaWrapper* pReq) {
|
||||
if (tDecodeI32(pDecoder, &pReq->number) < 0) return -1;
|
||||
for (int32_t i = 0; i < pReq->number; ++i) {
|
||||
tDecodeTSma(pDecoder, pReq->tSma + i);
|
||||
}
|
||||
return tlen;
|
||||
}
|
||||
|
||||
static FORCE_INLINE void* tDecodeTSma(void* buf, STSma* pSma) {
|
||||
buf = taosDecodeFixedI8(buf, &pSma->version);
|
||||
buf = taosDecodeFixedI8(buf, &pSma->intervalUnit);
|
||||
buf = taosDecodeFixedI8(buf, &pSma->slidingUnit);
|
||||
buf = taosDecodeFixedI8(buf, &pSma->timezoneInt);
|
||||
buf = taosDecodeStringTo(buf, pSma->indexName);
|
||||
buf = taosDecodeFixedI32(buf, &pSma->exprLen);
|
||||
buf = taosDecodeFixedI32(buf, &pSma->tagsFilterLen);
|
||||
buf = taosDecodeFixedI64(buf, &pSma->indexUid);
|
||||
buf = taosDecodeFixedI64(buf, &pSma->tableUid);
|
||||
buf = taosDecodeFixedI64(buf, &pSma->interval);
|
||||
buf = taosDecodeFixedI64(buf, &pSma->offset);
|
||||
buf = taosDecodeFixedI64(buf, &pSma->sliding);
|
||||
|
||||
if (pSma->exprLen > 0) {
|
||||
if ((buf = taosDecodeString(buf, &pSma->expr)) == NULL) {
|
||||
tdDestroyTSma(pSma);
|
||||
return NULL;
|
||||
}
|
||||
} else {
|
||||
pSma->expr = NULL;
|
||||
}
|
||||
|
||||
if (pSma->tagsFilterLen > 0) {
|
||||
if ((buf = taosDecodeString(buf, &pSma->tagsFilter)) == NULL) {
|
||||
tdDestroyTSma(pSma);
|
||||
return NULL;
|
||||
}
|
||||
} else {
|
||||
pSma->tagsFilter = NULL;
|
||||
}
|
||||
|
||||
return buf;
|
||||
}
|
||||
|
||||
static FORCE_INLINE void* tDecodeTSmaWrapper(void* buf, STSmaWrapper* pSW) {
|
||||
buf = taosDecodeFixedU32(buf, &pSW->number);
|
||||
|
||||
pSW->tSma = (STSma*)taosMemoryCalloc(pSW->number, sizeof(STSma));
|
||||
if (pSW->tSma == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for (uint32_t i = 0; i < pSW->number; ++i) {
|
||||
if ((buf = tDecodeTSma(buf, pSW->tSma + i)) == NULL) {
|
||||
for (uint32_t j = i; j >= 0; --i) {
|
||||
tdDestroyTSma(pSW->tSma + j);
|
||||
}
|
||||
taosMemoryFree(pSW->tSma);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
return buf;
|
||||
return 0;
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
|
@@ -2574,6 +2531,14 @@ static FORCE_INLINE void tDeleteSMqAskEpRsp(SMqAskEpRsp* pRsp) {
taosArrayDestroyEx(pRsp->topics, (void (*)(void*))tDeleteSMqSubTopicEp);
}

typedef struct {
void* data;
} SStreamDispatchReq;

typedef struct {
int8_t status;
} SStreamDispatchRsp;

#define TD_AUTO_CREATE_TABLE 0x1
typedef struct {
int64_t suid;
@ -37,11 +37,12 @@ typedef enum {
|
|||
QUEUE_MAX,
|
||||
} EQueueType;
|
||||
|
||||
typedef int32_t (*PutToQueueFp)(SMgmtWrapper* pWrapper, SRpcMsg* pReq);
|
||||
typedef int32_t (*GetQueueSizeFp)(SMgmtWrapper* pWrapper, int32_t vgId, EQueueType qtype);
|
||||
typedef int32_t (*PutToQueueFp)(void *pMgmt, SRpcMsg* pReq);
|
||||
typedef int32_t (*GetQueueSizeFp)(void *pMgmt, int32_t vgId, EQueueType qtype);
|
||||
typedef int32_t (*SendReqFp)(SMgmtWrapper* pWrapper, const SEpSet* epSet, SRpcMsg* pReq);
|
||||
typedef int32_t (*SendMnodeReqFp)(SMgmtWrapper* pWrapper, SRpcMsg* pReq);
|
||||
typedef void (*SendRspFp)(SMgmtWrapper* pWrapper, const SRpcMsg* pRsp);
|
||||
typedef void (*SendMnodeRecvFp)(SMgmtWrapper* pWrapper, SRpcMsg* pReq, SRpcMsg* pRsp);
|
||||
typedef void (*SendRedirectRspFp)(SMgmtWrapper* pWrapper, const SRpcMsg* pRsp, const SEpSet* pNewEpSet);
|
||||
typedef void (*RegisterBrokenLinkArgFp)(SMgmtWrapper* pWrapper, SRpcMsg* pMsg);
|
||||
typedef void (*ReleaseHandleFp)(SMgmtWrapper* pWrapper, void* handle, int8_t type);
|
||||
|
@ -49,23 +50,26 @@ typedef void (*ReportStartup)(SMgmtWrapper* pWrapper, const char* name, const ch
|
|||
|
||||
typedef struct {
|
||||
SMgmtWrapper* pWrapper;
|
||||
void* pMgmt;
|
||||
void* clientRpc;
|
||||
PutToQueueFp queueFps[QUEUE_MAX];
|
||||
GetQueueSizeFp qsizeFp;
|
||||
SendReqFp sendReqFp;
|
||||
SendRspFp sendRspFp;
|
||||
SendMnodeRecvFp sendMnodeRecvFp;
|
||||
SendRedirectRspFp sendRedirectRspFp;
|
||||
RegisterBrokenLinkArgFp registerBrokenLinkArgFp;
|
||||
ReleaseHandleFp releaseHandleFp;
|
||||
ReportStartup reportStartupFp;
|
||||
void* clientRpc;
|
||||
} SMsgCb;
|
||||
|
||||
void tmsgSetDefaultMsgCb(const SMsgCb* pMsgCb);
|
||||
int32_t tmsgPutToQueue(const SMsgCb* pMsgCb, EQueueType qtype, SRpcMsg* pReq);
|
||||
int32_t tmsgGetQueueSize(const SMsgCb* pMsgCb, int32_t vgId, EQueueType qtype);
|
||||
int32_t tmsgSendReq(const SMsgCb* pMsgCb, const SEpSet* epSet, SRpcMsg* pReq);
|
||||
void tmsgSendRsp(const SRpcMsg* pRsp);
|
||||
void tmsgSendRedirectRsp(const SRpcMsg* pRsp, const SEpSet* pNewEpSet);
|
||||
void tmsgSendRsp(SRpcMsg* pRsp);
|
||||
void tmsgSendMnodeRecv(SRpcMsg* pReq, SRpcMsg* pRsp);
|
||||
void tmsgSendRedirectRsp(SRpcMsg* pRsp, const SEpSet* pNewEpSet);
|
||||
void tmsgRegisterBrokenLinkArg(const SMsgCb* pMsgCb, SRpcMsg* pMsg);
|
||||
void tmsgReleaseHandle(void* handle, int8_t type);
|
||||
void tmsgReportStartup(const char* name, const char* desc);
|
||||
|
|
|
@@ -59,10 +59,11 @@ static FORCE_INLINE int64_t taosGetTimestamp(int32_t precision) {
* precision == TSDB_TIME_PRECISION_NANO, it returns timestamp in nanosecond.
*/
static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) {
int64_t factor = (precision == TSDB_TIME_PRECISION_MILLI) ? 1000 :
(precision == TSDB_TIME_PRECISION_MICRO) ? 1000000 : 1000000000;
time_t t = taosTime(NULL);
struct tm * tm= taosLocalTime(&t, NULL);
int64_t factor = (precision == TSDB_TIME_PRECISION_MILLI) ? 1000
: (precision == TSDB_TIME_PRECISION_MICRO) ? 1000000
: 1000000000;
time_t t = taosTime(NULL);
struct tm* tm = taosLocalTime(&t, NULL);
tm->tm_hour = 0;
tm->tm_min = 0;
tm->tm_sec = 0;

@@ -79,13 +80,13 @@ int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* durati

int32_t taosParseTime(const char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth);
void deltaToUtcInitOnce();
char getPrecisionUnit(int32_t precision);
char getPrecisionUnit(int32_t precision);

int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrecision);
int64_t convertTimeFromPrecisionToUnit(int64_t time, int32_t fromPrecision, char toUnit);
int32_t convertStringToTimestamp(int16_t type, char *inputData, int64_t timePrec, int64_t *timeVal);
int32_t convertStringToTimestamp(int16_t type, char* inputData, int64_t timePrec, int64_t* timeVal);

void taosFormatUtcTime(char *buf, int32_t bufLen, int64_t time, int32_t precision);
void taosFormatUtcTime(char* buf, int32_t bufLen, int64_t time, int32_t precision);

#ifdef __cplusplus
}
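A hedged usage sketch of convertTimePrecision as declared above, converting a millisecond timestamp to nanoseconds; the literal value is illustrative only:

/* 2022-04-01T00:00:00 UTC in milliseconds -> nanoseconds */
int64_t ms = 1648771200000LL;
int64_t ns = convertTimePrecision(ms, TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_NANO);
/* ns == 1648771200000000000 */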
@ -173,6 +173,7 @@ typedef struct SqlFunctionCtx {
|
|||
SInputColumnInfoData input;
|
||||
SResultDataInfo resDataInfo;
|
||||
uint32_t order; // data block scanner order: asc|desc
|
||||
uint8_t scanFlag; // record current running step, default: 0
|
||||
////////////////////////////////////////////////////////////////
|
||||
int32_t startRow; // start row index
|
||||
int32_t size; // handled processed row number
|
||||
|
@ -183,7 +184,6 @@ typedef struct SqlFunctionCtx {
|
|||
bool hasNull; // null value exist in current block, TODO remove it
|
||||
bool requireNull; // require null in some function, TODO remove it
|
||||
int32_t columnIndex; // TODO remove it
|
||||
uint8_t currentStage; // record current running step, default: 0
|
||||
bool isAggSet;
|
||||
int64_t startTs; // timestamp range of current query when function is executed on a specific data block, TODO remove it
|
||||
bool stableQuery;
|
||||
|
@ -309,7 +309,7 @@ void qAddUdfInfo(uint64_t id, struct SUdfInfo* pUdfInfo);
|
|||
void qRemoveUdfInfo(uint64_t id, struct SUdfInfo* pUdfInfo);
|
||||
|
||||
/**
|
||||
* create udfd proxy, called once in process that call setupUdf/callUdfxxx/teardownUdf
|
||||
* create udfd proxy, called once in process that call doSetupUdf/callUdfxxx/doTeardownUdf
|
||||
* @return error code
|
||||
*/
|
||||
int32_t udfcOpen();
|
||||
|
|
|
@ -39,16 +39,6 @@ extern "C" {
|
|||
//======================================================================================
|
||||
//begin API to taosd and qworker
|
||||
|
||||
typedef void *UdfcFuncHandle;
|
||||
|
||||
/**
|
||||
* setup udf
|
||||
* @param udf, in
|
||||
* @param handle, out
|
||||
* @return error code
|
||||
*/
|
||||
int32_t setupUdf(char udfName[], UdfcFuncHandle *handle);
|
||||
|
||||
typedef struct SUdfColumnMeta {
|
||||
int16_t type;
|
||||
int32_t bytes;
|
||||
|
@ -95,32 +85,44 @@ typedef struct SUdfInterBuf {
|
|||
char* buf;
|
||||
int8_t numOfResult; //zero or one
|
||||
} SUdfInterBuf;
|
||||
typedef void *UdfcFuncHandle;
|
||||
|
||||
/**
|
||||
* setup udf
|
||||
* @param udf, in
|
||||
* @param funcHandle, out
|
||||
* @return error code
|
||||
*/
|
||||
int32_t doSetupUdf(char udfName[], UdfcFuncHandle *funcHandle);
|
||||
// output: interBuf
|
||||
int32_t callUdfAggInit(UdfcFuncHandle handle, SUdfInterBuf *interBuf);
|
||||
int32_t doCallUdfAggInit(UdfcFuncHandle handle, SUdfInterBuf *interBuf);
|
||||
// input: block, state
|
||||
// output: newState
|
||||
int32_t callUdfAggProcess(UdfcFuncHandle handle, SSDataBlock *block, SUdfInterBuf *state, SUdfInterBuf *newState);
|
||||
int32_t doCallUdfAggProcess(UdfcFuncHandle handle, SSDataBlock *block, SUdfInterBuf *state, SUdfInterBuf *newState);
|
||||
// input: interBuf
|
||||
// output: resultData
|
||||
int32_t callUdfAggFinalize(UdfcFuncHandle handle, SUdfInterBuf *interBuf, SUdfInterBuf *resultData);
|
||||
int32_t doCallUdfAggFinalize(UdfcFuncHandle handle, SUdfInterBuf *interBuf, SUdfInterBuf *resultData);
|
||||
// input: interbuf1, interbuf2
|
||||
// output: resultBuf
|
||||
int32_t callUdfAggMerge(UdfcFuncHandle handle, SUdfInterBuf *interBuf1, SUdfInterBuf *interBuf2, SUdfInterBuf *resultBuf);
|
||||
int32_t doCallUdfAggMerge(UdfcFuncHandle handle, SUdfInterBuf *interBuf1, SUdfInterBuf *interBuf2, SUdfInterBuf *resultBuf);
|
||||
// input: block
|
||||
// output: resultData
|
||||
int32_t callUdfScalarFunc(UdfcFuncHandle handle, SScalarParam *input, int32_t numOfCols, SScalarParam *output);
|
||||
int32_t doCallUdfScalarFunc(UdfcFuncHandle handle, SScalarParam *input, int32_t numOfCols, SScalarParam *output);
|
||||
/**
|
||||
* tearn down udf
|
||||
* @param handle
|
||||
* @return
|
||||
*/
|
||||
int32_t teardownUdf(UdfcFuncHandle handle);
|
||||
int32_t doTeardownUdf(UdfcFuncHandle handle);
|
||||
|
||||
bool udfAggGetEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
|
||||
bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResultCellInfo);
|
||||
int32_t udfAggProcess(struct SqlFunctionCtx *pCtx);
|
||||
int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock);
|
||||
|
||||
int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols, SScalarParam *output);
|
||||
|
||||
int32_t cleanUpUdfs();
|
||||
// end API to taosd and qworker
|
||||
//=============================================================================================================================
|
||||
// begin API to UDF writer.
|
||||
|
|
|
@ -345,6 +345,7 @@ bool nodesIsUnaryOp(const SOperatorNode* pOp);
|
|||
bool nodesIsArithmeticOp(const SOperatorNode* pOp);
|
||||
bool nodesIsComparisonOp(const SOperatorNode* pOp);
|
||||
bool nodesIsJsonOp(const SOperatorNode* pOp);
|
||||
bool nodesIsRegularOp(const SOperatorNode* pOp);
|
||||
|
||||
bool nodesIsTimeorderQuery(const SNode* pQuery);
|
||||
bool nodesIsTimelineQuery(const SNode* pQuery);
|
||||
|
|
|
@ -51,14 +51,12 @@ typedef struct STableComInfo {
|
|||
} STableComInfo;
|
||||
|
||||
typedef struct SIndexMeta {
|
||||
|
||||
#ifdef WINDOWS
|
||||
size_t avoidCompilationErrors;
|
||||
#endif
|
||||
|
||||
} SIndexMeta;
|
||||
|
||||
|
||||
/*
|
||||
* ASSERT(sizeof(SCTableMeta) == 24)
|
||||
* ASSERT(tableType == TSDB_CHILD_TABLE)
|
||||
|
@ -95,7 +93,7 @@ typedef struct SDBVgInfo {
|
|||
int32_t vgVersion;
|
||||
int8_t hashMethod;
|
||||
int32_t numOfTable; // DB's table num, unit is TSDB_TABLE_NUM_UNIT
|
||||
SHashObj *vgHash; //key:vgId, value:SVgroupInfo
|
||||
SHashObj* vgHash; // key:vgId, value:SVgroupInfo
|
||||
} SDBVgInfo;
|
||||
|
||||
typedef struct SUseDbOutput {
|
||||
|
@ -135,7 +133,7 @@ typedef struct SMsgSendInfo {
|
|||
} SMsgSendInfo;
|
||||
|
||||
typedef struct SQueryNodeStat {
|
||||
int32_t tableNum; // vg table number, unit is TSDB_TABLE_NUM_UNIT
|
||||
int32_t tableNum; // vg table number, unit is TSDB_TABLE_NUM_UNIT
|
||||
} SQueryNodeStat;
|
||||
|
||||
int32_t initTaskQueue();
|
||||
|
@ -172,7 +170,7 @@ const SSchema* tGetTbnameColumnSchema();
|
|||
bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags);
|
||||
|
||||
int32_t queryCreateTableMetaFromMsg(STableMetaRsp* msg, bool isSuperTable, STableMeta** pMeta);
|
||||
char *jobTaskStatusStr(int32_t status);
|
||||
char* jobTaskStatusStr(int32_t status);
|
||||
|
||||
SSchema createSchema(int8_t type, int32_t bytes, col_id_t colId, const char* name);
|
||||
|
||||
|
@ -184,62 +182,87 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
|
|||
#define SET_META_TYPE_TABLE(t) (t) = META_TYPE_TABLE
|
||||
#define SET_META_TYPE_BOTH_TABLE(t) (t) = META_TYPE_BOTH_TABLE
|
||||
|
||||
#define NEED_CLIENT_RM_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_PAR_TABLE_NOT_EXIST || (_code) == TSDB_CODE_VND_TB_NOT_EXIST)
|
||||
#define NEED_CLIENT_REFRESH_VG_ERROR(_code) ((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID)
|
||||
#define NEED_CLIENT_RM_TBLMETA_ERROR(_code) \
|
||||
((_code) == TSDB_CODE_PAR_TABLE_NOT_EXIST || (_code) == TSDB_CODE_VND_TB_NOT_EXIST)
|
||||
#define NEED_CLIENT_REFRESH_VG_ERROR(_code) \
|
||||
((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID)
|
||||
#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_TABLE_RECREATED)
|
||||
#define NEED_CLIENT_HANDLE_ERROR(_code) (NEED_CLIENT_RM_TBLMETA_ERROR(_code) || NEED_CLIENT_REFRESH_VG_ERROR(_code) || NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code))
|
||||
#define NEED_CLIENT_HANDLE_ERROR(_code) \
|
||||
(NEED_CLIENT_RM_TBLMETA_ERROR(_code) || NEED_CLIENT_REFRESH_VG_ERROR(_code) || \
|
||||
NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code))
|
||||
|
||||
#define NEED_SCHEDULER_RETRY_ERROR(_code) ((_code) == TSDB_CODE_RPC_REDIRECT || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL)
|
||||
#define NEED_SCHEDULER_RETRY_ERROR(_code) \
|
||||
((_code) == TSDB_CODE_RPC_REDIRECT || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL)
|
||||
|
||||
#define REQUEST_MAX_TRY_TIMES 5
|
||||
|
||||
#define qFatal(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_FATAL) { \
|
||||
taosPrintLog("QRY FATAL ", DEBUG_FATAL, qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
#define qFatal(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_FATAL) { \
|
||||
taosPrintLog("QRY FATAL ", DEBUG_FATAL, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
} while (0)
|
||||
#define qError(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_ERROR) { \
|
||||
taosPrintLog("QRY ERROR ", DEBUG_ERROR, qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
#define qError(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_ERROR) { \
|
||||
taosPrintLog("QRY ERROR ", DEBUG_ERROR, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
} while (0)
|
||||
#define qWarn(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_WARN) { \
|
||||
taosPrintLog("QRY WARN ", DEBUG_WARN, qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
#define qWarn(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_WARN) { \
|
||||
taosPrintLog("QRY WARN ", DEBUG_WARN, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
} while (0)
|
||||
#define qInfo(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_INFO) { \
|
||||
taosPrintLog("QRY ", DEBUG_INFO, qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
#define qInfo(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_INFO) { \
|
||||
taosPrintLog("QRY ", DEBUG_INFO, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
} while (0)
|
||||
#define qDebug(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_DEBUG) { \
|
||||
taosPrintLog("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
#define qDebug(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_DEBUG) { \
|
||||
taosPrintLog("QRY ", DEBUG_DEBUG, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
} while (0)
|
||||
#define qTrace(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_TRACE) { \
|
||||
taosPrintLog("QRY ", DEBUG_TRACE, qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
#define qTrace(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_TRACE) { \
|
||||
taosPrintLog("QRY ", DEBUG_TRACE, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
} while (0)
|
||||
#define qDebugL(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_DEBUG) { \
|
||||
taosPrintLongString("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
#define qDebugL(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_DEBUG) { \
|
||||
taosPrintLongString("QRY ", DEBUG_DEBUG, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define QRY_ERR_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; return _code; } } while (0)
|
||||
#define QRY_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; } return _code; } while (0)
|
||||
#define QRY_ERR_JRET(c) do { code = c; if (code != TSDB_CODE_SUCCESS) { terrno = code; goto _return; } } while (0)
|
||||
|
||||
#define QRY_ERR_RET(c) \
|
||||
do { \
|
||||
int32_t _code = c; \
|
||||
if (_code != TSDB_CODE_SUCCESS) { \
|
||||
terrno = _code; \
|
||||
return _code; \
|
||||
} \
|
||||
} while (0)
|
||||
#define QRY_RET(c) \
|
||||
do { \
|
||||
int32_t _code = c; \
|
||||
if (_code != TSDB_CODE_SUCCESS) { \
|
||||
terrno = _code; \
|
||||
} \
|
||||
return _code; \
|
||||
} while (0)
|
||||
#define QRY_ERR_JRET(c) \
|
||||
do { \
|
||||
code = c; \
|
||||
if (code != TSDB_CODE_SUCCESS) { \
|
||||
terrno = code; \
|
||||
goto _return; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "os.h"
|
||||
#include "tdatablock.h"
|
||||
#include "tmsg.h"
|
||||
#include "tmsgcb.h"
|
||||
|
@ -29,8 +30,23 @@ extern "C" {
|
|||
typedef struct SStreamTask SStreamTask;
|
||||
|
||||
enum {
|
||||
STREAM_TASK_STATUS__RUNNING = 1,
|
||||
STREAM_TASK_STATUS__STOP,
|
||||
TASK_STATUS__IDLE = 1,
|
||||
TASK_STATUS__EXECUTING,
|
||||
TASK_STATUS__CLOSING,
|
||||
};
|
||||
|
||||
enum {
|
||||
TASK_INPUT_STATUS__NORMAL = 1,
|
||||
TASK_INPUT_STATUS__BLOCKED,
|
||||
TASK_INPUT_STATUS__RECOVER,
|
||||
TASK_INPUT_STATUS__STOP,
|
||||
TASK_INPUT_STATUS__FAILED,
|
||||
};
|
||||
|
||||
enum {
|
||||
TASK_OUTPUT_STATUS__NORMAL = 1,
|
||||
TASK_OUTPUT_STATUS__WAIT,
|
||||
TASK_OUTPUT_STATUS__BLOCKED,
|
||||
};
|
||||
|
||||
enum {
|
||||
|
@ -38,10 +54,64 @@ enum {
|
|||
STREAM_CREATED_BY__SMA,
|
||||
};
|
||||
|
||||
enum {
|
||||
STREAM_INPUT__DATA_SUBMIT = 1,
|
||||
STREAM_INPUT__DATA_BLOCK,
|
||||
STREAM_INPUT__CHECKPOINT,
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
int32_t nodeId; // 0 for snode
|
||||
SEpSet epSet;
|
||||
} SStreamTaskEp;
|
||||
int8_t type;
|
||||
|
||||
int32_t sourceVg;
|
||||
int64_t sourceVer;
|
||||
|
||||
int32_t* dataRef;
|
||||
SSubmitReq* data;
|
||||
} SStreamDataSubmit;
|
||||
|
||||
typedef struct {
|
||||
int8_t type;
|
||||
|
||||
int32_t sourceVg;
|
||||
int64_t sourceVer;
|
||||
|
||||
SArray* blocks; // SArray<SSDataBlock*>
|
||||
} SStreamDataBlock;
|
||||
|
||||
typedef struct {
|
||||
int8_t type;
|
||||
} SStreamCheckpoint;
|
||||
|
||||
static FORCE_INLINE SStreamDataSubmit* streamDataSubmitNew(SSubmitReq* pReq) {
|
||||
SStreamDataSubmit* pDataSubmit = (SStreamDataSubmit*)taosMemoryCalloc(1, sizeof(SStreamDataSubmit));
|
||||
if (pDataSubmit == NULL) return NULL;
|
||||
pDataSubmit->data = pReq;
|
||||
pDataSubmit->dataRef = (int32_t*)taosMemoryMalloc(sizeof(int32_t));
|
||||
if (pDataSubmit->data == NULL) goto FAIL;
|
||||
*pDataSubmit->dataRef = 1;
|
||||
return pDataSubmit;
|
||||
FAIL:
|
||||
taosMemoryFree(pDataSubmit);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static FORCE_INLINE void streamDataSubmitRefInc(SStreamDataSubmit* pDataSubmit) {
|
||||
//
|
||||
atomic_add_fetch_32(pDataSubmit->dataRef, 1);
|
||||
}
|
||||
|
||||
static FORCE_INLINE void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit) {
|
||||
int32_t ref = atomic_sub_fetch_32(pDataSubmit->dataRef, 1);
|
||||
ASSERT(ref >= 0);
|
||||
if (ref == 0) {
|
||||
taosMemoryFree(pDataSubmit->data);
|
||||
taosMemoryFree(pDataSubmit->dataRef);
|
||||
}
|
||||
}
|
||||
|
||||
int32_t streamDataBlockEncode(void** buf, const SStreamDataBlock* pOutput);
|
||||
void* streamDataBlockDecode(const void* buf, SStreamDataBlock* pInput);
|
||||
|
||||
typedef struct {
|
||||
void* inputHandle;
|
||||
|
@ -122,9 +192,15 @@ enum {
|
|||
TASK_SINK__FETCH,
|
||||
};
|
||||
|
||||
enum {
|
||||
TASK_INPUT_TYPE__SUMBIT_BLOCK = 1,
|
||||
TASK_INPUT_TYPE__DATA_BLOCK,
|
||||
};
|
||||
|
||||
struct SStreamTask {
|
||||
int64_t streamId;
|
||||
int32_t taskId;
|
||||
int8_t inputType;
|
||||
int8_t status;
|
||||
|
||||
int8_t sourceType;
|
||||
|
@ -155,9 +231,13 @@ struct SStreamTask {
|
|||
STaskDispatcherShuffle shuffleDispatcher;
|
||||
};
|
||||
|
||||
// msg buffer
|
||||
int32_t memUsed;
|
||||
int8_t inputStatus;
|
||||
int8_t outputStatus;
|
||||
|
||||
STaosQueue* inputQ;
|
||||
STaosQall* inputQAll;
|
||||
STaosQueue* outputQ;
|
||||
STaosQall* outputQAll;
|
||||
|
||||
// application storage
|
||||
void* ahandle;
|
||||
|
@ -199,10 +279,16 @@ typedef struct {
|
|||
SArray* res; // SArray<SSDataBlock>
|
||||
} SStreamSinkReq;
|
||||
|
||||
int32_t streamEnqueueData(SStreamTask* pTask, const void* input, int32_t inputType);
|
||||
int32_t streamEnqueueDataSubmit(SStreamTask* pTask, SStreamDataSubmit* input);
|
||||
int32_t streamEnqueueDataBlk(SStreamTask* pTask, SStreamDataBlock* input);
|
||||
int32_t streamDequeueOutput(SStreamTask* pTask, void** output);
|
||||
|
||||
int32_t streamExecTask(SStreamTask* pTask, SMsgCb* pMsgCb, const void* input, int32_t inputType, int32_t workId);
|
||||
|
||||
int32_t streamTaskRun(SStreamTask* pTask);
|
||||
|
||||
int32_t streamTaskHandleInput(SStreamTask* pTask, void* data);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -144,6 +144,7 @@ int32_t syncInit();
|
|||
void syncCleanUp();
|
||||
int64_t syncOpen(const SSyncInfo* pSyncInfo);
|
||||
void syncStart(int64_t rid);
|
||||
void syncStartStandBy(int64_t rid);
|
||||
void syncStop(int64_t rid);
|
||||
int32_t syncReconfig(int64_t rid, const SSyncCfg* pSyncCfg);
|
||||
ESyncState syncGetMyRole(int64_t rid);
|
||||
|
|
|
@ -23,92 +23,92 @@ extern "C" {
|
|||
// If the error is in a third-party library, place this header file under the third-party library header file.
|
||||
// When you want to use this feature, you should find or add the same function in the following section.
|
||||
#ifndef ALLOW_FORBID_FUNC
|
||||
#define __atomic_load_n __ATOMIC_LOAD_N_FUNC_TAOS_FORBID
|
||||
#define __atomic_store_n __ATOMIC_STORE_N_FUNC_TAOS_FORBID
|
||||
#define __atomic_exchange_n __ATOMIC_EXCHANGE_N_FUNC_TAOS_FORBID
|
||||
#define __sync_val_compare_and_swap __SYNC_VAL_COMPARE_AND_SWAP_FUNC_TAOS_FORBID
|
||||
#define __atomic_add_fetch __ATOMIC_ADD_FETCH_FUNC_TAOS_FORBID
|
||||
#define __atomic_fetch_add __ATOMIC_FETCH_ADD_FUNC_TAOS_FORBID
|
||||
#define __atomic_sub_fetch __ATOMIC_SUB_FETCH_FUNC_TAOS_FORBID
|
||||
#define __atomic_fetch_sub __ATOMIC_FETCH_SUB_FUNC_TAOS_FORBID
|
||||
#define __atomic_and_fetch __ATOMIC_AND_FETCH_FUNC_TAOS_FORBID
|
||||
#define __atomic_fetch_and __ATOMIC_FETCH_AND_FUNC_TAOS_FORBID
|
||||
#define __atomic_or_fetch __ATOMIC_OR_FETCH_FUNC_TAOS_FORBID
|
||||
#define __atomic_fetch_or __ATOMIC_FETCH_OR_FUNC_TAOS_FORBID
|
||||
#define __atomic_xor_fetch __ATOMIC_XOR_FETCH_FUNC_TAOS_FORBID
|
||||
#define __atomic_fetch_xor __ATOMIC_FETCH_XOR_FUNC_TAOS_FORBID
|
||||
#define __atomic_load_n __ATOMIC_LOAD_N_FUNC_TAOS_FORBID
|
||||
#define __atomic_store_n __ATOMIC_STORE_N_FUNC_TAOS_FORBID
|
||||
#define __atomic_exchange_n __ATOMIC_EXCHANGE_N_FUNC_TAOS_FORBID
|
||||
#define __sync_val_compare_and_swap __SYNC_VAL_COMPARE_AND_SWAP_FUNC_TAOS_FORBID
|
||||
#define __atomic_add_fetch __ATOMIC_ADD_FETCH_FUNC_TAOS_FORBID
|
||||
#define __atomic_fetch_add __ATOMIC_FETCH_ADD_FUNC_TAOS_FORBID
|
||||
#define __atomic_sub_fetch __ATOMIC_SUB_FETCH_FUNC_TAOS_FORBID
|
||||
#define __atomic_fetch_sub __ATOMIC_FETCH_SUB_FUNC_TAOS_FORBID
|
||||
#define __atomic_and_fetch __ATOMIC_AND_FETCH_FUNC_TAOS_FORBID
|
||||
#define __atomic_fetch_and __ATOMIC_FETCH_AND_FUNC_TAOS_FORBID
|
||||
#define __atomic_or_fetch __ATOMIC_OR_FETCH_FUNC_TAOS_FORBID
|
||||
#define __atomic_fetch_or __ATOMIC_FETCH_OR_FUNC_TAOS_FORBID
|
||||
#define __atomic_xor_fetch __ATOMIC_XOR_FETCH_FUNC_TAOS_FORBID
|
||||
#define __atomic_fetch_xor __ATOMIC_FETCH_XOR_FUNC_TAOS_FORBID
|
||||
#endif
|
||||
|
||||
int8_t atomic_load_8(int8_t volatile *ptr);
|
||||
int8_t atomic_load_8(int8_t volatile *ptr);
|
||||
int16_t atomic_load_16(int16_t volatile *ptr);
|
||||
int32_t atomic_load_32(int32_t volatile *ptr);
|
||||
int64_t atomic_load_64(int64_t volatile *ptr);
|
||||
void* atomic_load_ptr(void *ptr);
|
||||
void atomic_store_8(int8_t volatile *ptr, int8_t val);
|
||||
void atomic_store_16(int16_t volatile *ptr, int16_t val);
|
||||
void atomic_store_32(int32_t volatile *ptr, int32_t val);
|
||||
void atomic_store_64(int64_t volatile *ptr, int64_t val);
|
||||
void atomic_store_ptr(void *ptr, void *val);
|
||||
int8_t atomic_exchange_8(int8_t volatile *ptr, int8_t val);
|
||||
void *atomic_load_ptr(void *ptr);
|
||||
void atomic_store_8(int8_t volatile *ptr, int8_t val);
|
||||
void atomic_store_16(int16_t volatile *ptr, int16_t val);
|
||||
void atomic_store_32(int32_t volatile *ptr, int32_t val);
|
||||
void atomic_store_64(int64_t volatile *ptr, int64_t val);
|
||||
void atomic_store_ptr(void *ptr, void *val);
|
||||
int8_t atomic_exchange_8(int8_t volatile *ptr, int8_t val);
|
||||
int16_t atomic_exchange_16(int16_t volatile *ptr, int16_t val);
|
||||
int32_t atomic_exchange_32(int32_t volatile *ptr, int32_t val);
|
||||
int64_t atomic_exchange_64(int64_t volatile *ptr, int64_t val);
|
||||
void* atomic_exchange_ptr(void *ptr, void *val);
|
||||
int8_t atomic_val_compare_exchange_8(int8_t volatile *ptr, int8_t oldval, int8_t newval);
|
||||
void *atomic_exchange_ptr(void *ptr, void *val);
|
||||
int8_t atomic_val_compare_exchange_8(int8_t volatile *ptr, int8_t oldval, int8_t newval);
|
||||
int16_t atomic_val_compare_exchange_16(int16_t volatile *ptr, int16_t oldval, int16_t newval);
|
||||
int32_t atomic_val_compare_exchange_32(int32_t volatile *ptr, int32_t oldval, int32_t newval);
|
||||
int64_t atomic_val_compare_exchange_64(int64_t volatile *ptr, int64_t oldval, int64_t newval);
|
||||
void* atomic_val_compare_exchange_ptr(void *ptr, void *oldval, void *newval);
|
||||
int8_t atomic_add_fetch_8(int8_t volatile *ptr, int8_t val);
|
||||
void *atomic_val_compare_exchange_ptr(void *ptr, void *oldval, void *newval);
|
||||
int8_t atomic_add_fetch_8(int8_t volatile *ptr, int8_t val);
|
||||
int16_t atomic_add_fetch_16(int16_t volatile *ptr, int16_t val);
|
||||
int32_t atomic_add_fetch_32(int32_t volatile *ptr, int32_t val);
|
||||
int64_t atomic_add_fetch_64(int64_t volatile *ptr, int64_t val);
|
||||
void* atomic_add_fetch_ptr(void *ptr, void *val);
|
||||
int8_t atomic_fetch_add_8(int8_t volatile *ptr, int8_t val);
|
||||
void *atomic_add_fetch_ptr(void *ptr, void *val);
|
||||
int8_t atomic_fetch_add_8(int8_t volatile *ptr, int8_t val);
|
||||
int16_t atomic_fetch_add_16(int16_t volatile *ptr, int16_t val);
|
||||
int32_t atomic_fetch_add_32(int32_t volatile *ptr, int32_t val);
|
||||
int64_t atomic_fetch_add_64(int64_t volatile *ptr, int64_t val);
|
||||
void* atomic_fetch_add_ptr(void *ptr, void *val);
|
||||
int8_t atomic_sub_fetch_8(int8_t volatile *ptr, int8_t val);
|
||||
void *atomic_fetch_add_ptr(void *ptr, void *val);
|
||||
int8_t atomic_sub_fetch_8(int8_t volatile *ptr, int8_t val);
|
||||
int16_t atomic_sub_fetch_16(int16_t volatile *ptr, int16_t val);
|
||||
int32_t atomic_sub_fetch_32(int32_t volatile *ptr, int32_t val);
|
||||
int64_t atomic_sub_fetch_64(int64_t volatile *ptr, int64_t val);
|
||||
void* atomic_sub_fetch_ptr(void *ptr, void *val);
|
||||
int8_t atomic_fetch_sub_8(int8_t volatile *ptr, int8_t val);
|
||||
void *atomic_sub_fetch_ptr(void *ptr, void *val);
|
||||
int8_t atomic_fetch_sub_8(int8_t volatile *ptr, int8_t val);
|
||||
int16_t atomic_fetch_sub_16(int16_t volatile *ptr, int16_t val);
|
||||
int32_t atomic_fetch_sub_32(int32_t volatile *ptr, int32_t val);
|
||||
int64_t atomic_fetch_sub_64(int64_t volatile *ptr, int64_t val);
|
||||
void* atomic_fetch_sub_ptr(void *ptr, void *val);
|
||||
int8_t atomic_and_fetch_8(int8_t volatile *ptr, int8_t val);
|
||||
void *atomic_fetch_sub_ptr(void *ptr, void *val);
|
||||
int8_t atomic_and_fetch_8(int8_t volatile *ptr, int8_t val);
|
||||
int16_t atomic_and_fetch_16(int16_t volatile *ptr, int16_t val);
|
||||
int32_t atomic_and_fetch_32(int32_t volatile *ptr, int32_t val);
|
||||
int64_t atomic_and_fetch_64(int64_t volatile *ptr, int64_t val);
|
||||
void* atomic_and_fetch_ptr(void *ptr, void *val);
|
||||
int8_t atomic_fetch_and_8(int8_t volatile *ptr, int8_t val);
|
||||
void *atomic_and_fetch_ptr(void *ptr, void *val);
|
||||
int8_t atomic_fetch_and_8(int8_t volatile *ptr, int8_t val);
|
||||
int16_t atomic_fetch_and_16(int16_t volatile *ptr, int16_t val);
|
||||
int32_t atomic_fetch_and_32(int32_t volatile *ptr, int32_t val);
|
||||
int64_t atomic_fetch_and_64(int64_t volatile *ptr, int64_t val);
|
||||
void* atomic_fetch_and_ptr(void *ptr, void *val);
|
||||
int8_t atomic_or_fetch_8(int8_t volatile *ptr, int8_t val);
|
||||
void *atomic_fetch_and_ptr(void *ptr, void *val);
|
||||
int8_t atomic_or_fetch_8(int8_t volatile *ptr, int8_t val);
|
||||
int16_t atomic_or_fetch_16(int16_t volatile *ptr, int16_t val);
|
||||
int32_t atomic_or_fetch_32(int32_t volatile *ptr, int32_t val);
|
||||
int64_t atomic_or_fetch_64(int64_t volatile *ptr, int64_t val);
|
||||
void* atomic_or_fetch_ptr(void *ptr, void *val);
|
||||
int8_t atomic_fetch_or_8(int8_t volatile *ptr, int8_t val);
|
||||
void *atomic_or_fetch_ptr(void *ptr, void *val);
|
||||
int8_t atomic_fetch_or_8(int8_t volatile *ptr, int8_t val);
|
||||
int16_t atomic_fetch_or_16(int16_t volatile *ptr, int16_t val);
|
||||
int32_t atomic_fetch_or_32(int32_t volatile *ptr, int32_t val);
|
||||
int64_t atomic_fetch_or_64(int64_t volatile *ptr, int64_t val);
|
||||
void* atomic_fetch_or_ptr(void *ptr, void *val);
|
||||
int8_t atomic_xor_fetch_8(int8_t volatile *ptr, int8_t val);
|
||||
void *atomic_fetch_or_ptr(void *ptr, void *val);
|
||||
int8_t atomic_xor_fetch_8(int8_t volatile *ptr, int8_t val);
|
||||
int16_t atomic_xor_fetch_16(int16_t volatile *ptr, int16_t val);
|
||||
int32_t atomic_xor_fetch_32(int32_t volatile *ptr, int32_t val);
|
||||
int64_t atomic_xor_fetch_64(int64_t volatile *ptr, int64_t val);
|
||||
void* atomic_xor_fetch_ptr(void *ptr, void *val);
|
||||
int8_t atomic_fetch_xor_8(int8_t volatile *ptr, int8_t val);
|
||||
void *atomic_xor_fetch_ptr(void *ptr, void *val);
|
||||
int8_t atomic_fetch_xor_8(int8_t volatile *ptr, int8_t val);
|
||||
int16_t atomic_fetch_xor_16(int16_t volatile *ptr, int16_t val);
|
||||
int32_t atomic_fetch_xor_32(int32_t volatile *ptr, int32_t val);
|
||||
int64_t atomic_fetch_xor_64(int64_t volatile *ptr, int64_t val);
|
||||
void* atomic_fetch_xor_ptr(void *ptr, void *val);
|
||||
void *atomic_fetch_xor_ptr(void *ptr, void *val);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@@ -37,6 +37,7 @@ typedef int32_t TdUcs4;
#define wcstombs WCSTOMBS_FUNC_TAOS_FORBID
#define wcsncpy WCSNCPY_FUNC_TAOS_FORBID
#define wchar_t WCHAR_T_TYPE_TAOS_FORBID
#define strcasestr STR_CASE_STR_FORBID
#endif

#ifdef WINDOWS

@@ -69,6 +70,8 @@ int32_t taosMbsToWchars(TdWchar *pWchars, const char *pStrs, int32_t size);
int32_t taosWcharToMb(char *pStr, TdWchar wchar);
int32_t taosWcharsToMbs(char *pStrs, TdWchar *pWchars, int32_t size);

char *taosStrCaseStr(const char *str, const char *pattern);

#ifdef __cplusplus
}
#endif
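A short usage sketch of the wrapper declared above, which replaces the now-forbidden strcasestr:

const char *sql = "CREATE TABLE d1001 USING meters TAGS (2)";
char *hit = taosStrCaseStr(sql, "create table");
if (hit != NULL) {
  /* case-insensitive match found at `hit` */
}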
@ -89,6 +89,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_REPEAT_INIT TAOS_DEF_ERROR_CODE(0, 0x0115)
|
||||
#define TSDB_CODE_DUP_KEY TAOS_DEF_ERROR_CODE(0, 0x0116)
|
||||
#define TSDB_CODE_NEED_RETRY TAOS_DEF_ERROR_CODE(0, 0x0117)
|
||||
#define TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE TAOS_DEF_ERROR_CODE(0, 0x0118)
|
||||
|
||||
#define TSDB_CODE_REF_NO_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0140)
|
||||
#define TSDB_CODE_REF_FULL TAOS_DEF_ERROR_CODE(0, 0x0141)
|
||||
|
@ -322,6 +323,9 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_VND_SMA_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0516)
|
||||
#define TSDB_CODE_VND_HASH_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x0517)
|
||||
#define TSDB_CODE_VND_TABLE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0518)
|
||||
#define TSDB_CODE_VND_INVALID_TABLE_ACTION TAOS_DEF_ERROR_CODE(0, 0x0519)
|
||||
#define TSDB_CODE_VND_COL_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x051a)
|
||||
#define TSDB_CODE_VND_TABLE_COL_NOT_EXISTS TAOS_DEF_ERROR_CODE(0, 0x051b)
|
||||
|
||||
// tsdb
|
||||
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)
|
||||
|
@ -353,7 +357,8 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_TDB_TABLE_RECREATED TAOS_DEF_ERROR_CODE(0, 0x061A)
|
||||
#define TSDB_CODE_TDB_TDB_ENV_OPEN_ERROR TAOS_DEF_ERROR_CODE(0, 0x061B)
|
||||
#define TSDB_CODE_TDB_NO_SMA_INDEX_IN_META TAOS_DEF_ERROR_CODE(0, 0x061C)
|
||||
#define TSDB_CODE_TDB_INVALID_SMA_STAT TAOS_DEF_ERROR_CODE(0, 0x062D)
|
||||
#define TSDB_CODE_TDB_INVALID_SMA_STAT TAOS_DEF_ERROR_CODE(0, 0x061D)
|
||||
#define TSDB_CODE_TDB_TSMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x061E)
|
||||
|
||||
// query
|
||||
#define TSDB_CODE_QRY_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0700)
|
||||
|
@ -635,6 +640,9 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_PAR_PERMISSION_DENIED TAOS_DEF_ERROR_CODE(0, 0x2644)
|
||||
#define TSDB_CODE_PAR_INVALID_STREAM_QUERY TAOS_DEF_ERROR_CODE(0, 0x2645)
|
||||
#define TSDB_CODE_PAR_INVALID_INTERNAL_PK TAOS_DEF_ERROR_CODE(0, 0x2646)
|
||||
#define TSDB_CODE_PAR_INVALID_TIMELINE_FUNC TAOS_DEF_ERROR_CODE(0, 0x2647)
|
||||
#define TSDB_CODE_PAR_INVALID_PASSWD TAOS_DEF_ERROR_CODE(0, 0x2648)
|
||||
#define TSDB_CODE_PAR_INVALID_ALTER_TABLE TAOS_DEF_ERROR_CODE(0, 0x2649)
|
||||
|
||||
//planner
|
||||
#define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700)
|
||||
|
@ -656,6 +664,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_UDF_LOAD_UDF_FAILURE TAOS_DEF_ERROR_CODE(0, 0x2905)
|
||||
#define TSDB_CODE_UDF_INVALID_STATE TAOS_DEF_ERROR_CODE(0, 0x2906)
|
||||
#define TSDB_CODE_UDF_INVALID_INPUT TAOS_DEF_ERROR_CODE(0, 0x2907)
|
||||
#define TSDB_CODE_UDF_NO_FUNC_HANDLE TAOS_DEF_ERROR_CODE(0, 0x2908)
|
||||
|
||||
#define TSDB_CODE_SML_INVALID_PROTOCOL_TYPE TAOS_DEF_ERROR_CODE(0, 0x3000)
|
||||
#define TSDB_CODE_SML_INVALID_PRECISION_TYPE TAOS_DEF_ERROR_CODE(0, 0x3001)
|
||||
|
|
|
@@ -425,9 +425,12 @@ enum {
SND_WORKER_TYPE__UNIQUE,
};

#define MNODE_HANDLE -1
#define QNODE_HANDLE 1
#define DEFAULT_HANDLE 0
#define MNODE_HANDLE -1
#define QNODE_HANDLE -2
#define SNODE_HANDLE -3
#define VNODE_HANDLE -4
#define BNODE_HANDLE -5

#define TSDB_CONFIG_OPTION_LEN 16
#define TSDB_CONIIG_VALUE_LEN 48
@@ -18,7 +18,6 @@

#include "tcoding.h"
#include "tlist.h"
// #include "tfreelist.h"

#ifdef __cplusplus
extern "C" {
@@ -1,60 +0,0 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _TD_UTIL_FREELIST_H_
#define _TD_UTIL_FREELIST_H_

#include "tlist.h"

#ifdef __cplusplus
extern "C" {
#endif

struct SFreeListNode {
  TD_SLIST_NODE(SFreeListNode);
  char payload[];
};

typedef TD_SLIST(SFreeListNode) SFreeList;

#define TFL_MALLOC(PTR, TYPE, SIZE, LIST)                                  \
  do {                                                                     \
    void *ptr = taosMemoryMalloc((SIZE) + sizeof(struct SFreeListNode));   \
    if (ptr) {                                                             \
      TD_SLIST_PUSH((LIST), (struct SFreeListNode *)ptr);                  \
      ptr = ((struct SFreeListNode *)ptr)->payload;                        \
      (PTR) = (TYPE)(ptr);                                                 \
    } else {                                                               \
      (PTR) = NULL;                                                        \
    }                                                                      \
  } while (0);

#define tFreeListInit(pFL) TD_SLIST_INIT(pFL)

static FORCE_INLINE void tFreeListClear(SFreeList *pFL) {
  struct SFreeListNode *pNode;
  for (;;) {
    pNode = TD_SLIST_HEAD(pFL);
    if (pNode == NULL) break;
    TD_SLIST_POP(pFL);
    taosMemoryFree(pNode);
  }
}

#ifdef __cplusplus
}
#endif

#endif /*_TD_UTIL_FREELIST_H_*/
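For reference, a hedged sketch of how the removed free-list API was typically used; only `TFL_MALLOC`, `tFreeListInit`, `tFreeListClear`, and `SFreeList` come from the header above, while the wrapper function and sizes are illustrative.

```c
#include "tfreelist.h"

// Hedged sketch of a caller of the removed tfreelist.h API.
static void freeListDemo(void) {
  SFreeList fl;
  tFreeListInit(&fl);

  char *buf = NULL;
  TFL_MALLOC(buf, char *, 128, &fl);  // allocate 128 bytes tracked by the list
  if (buf != NULL) {
    // ... fill buf ...
  }

  tFreeListClear(&fl);  // frees every node pushed onto the list in one pass
}
```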
@@ -22,17 +22,12 @@
extern "C" {
#endif

#ifdef WINDOWS
#define tjsonGetNumberValue(pJson, pName, val) -1
#else
#define tjsonGetNumberValue(pJson, pName, val)                   \
  ({                                                             \
    uint64_t _tmp = 0;                                           \
    int32_t _code = tjsonGetUBigIntValue(pJson, pName, &_tmp);   \
    val = _tmp;                                                  \
    _code;                                                       \
  })
#endif
#define tjsonGetNumberValue(pJson, pName, val, code)             \
  do {                                                           \
    uint64_t _tmp = 0;                                           \
    code = tjsonGetUBigIntValue(pJson, pName, &_tmp);            \
    val = _tmp;                                                  \
  } while (0)

typedef void SJson;
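A hedged sketch of a call site for the reworked macro: the error code now comes back through an explicit out-parameter instead of a GNU statement expression, so the same code builds on Windows. The field name "vgId" and the helper function are illustrative; only the macro and `SJson` come from the header above.

```c
#include "tjson.h"

// Hedged sketch: read a numeric JSON field with the new 4-argument macro.
static int32_t readVgId(SJson *pJson, int32_t *pVgId) {
  int32_t code = 0;
  tjsonGetNumberValue(pJson, "vgId", *pVgId, code);  // code receives the tjson result
  return code;  // propagate the parse error instead of relying on ({ ... })
}
```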
@@ -61,6 +61,7 @@ extern int32_t tqDebugFlag;
extern int32_t fsDebugFlag;
extern int32_t metaDebugFlag;
extern int32_t fnDebugFlag;
extern int32_t smaDebugFlag;

int32_t taosInitLog(const char *logName, int32_t maxFiles);
void taosCloseLog();
@@ -17,6 +17,7 @@
#define _TD_UTIL_PROCESS_H_

#include "os.h"
#include "tqueue.h"

#ifdef __cplusplus
extern "C" {

@@ -25,7 +26,7 @@ extern "C" {
typedef enum { PROC_FUNC_REQ = 1, PROC_FUNC_RSP, PROC_FUNC_REGIST, PROC_FUNC_RELEASE } EProcFuncType;

typedef struct SProcObj SProcObj;
typedef void *(*ProcMallocFp)(int32_t contLen);
typedef void *(*ProcMallocFp)(int32_t contLen, EQItype itype);
typedef void *(*ProcFreeFp)(void *pCont);
typedef void (*ProcConsumeFp)(void *parent, void *pHead, int16_t headLen, void *pBody, int32_t bodyLen,
                              EProcFuncType ftype);
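The extra `EQItype` parameter lets the process layer pass the queue item type through to the allocator. A minimal, hypothetical adapter might look like the sketch below; only `ProcMallocFp`'s new signature, `EQItype`, and `taosAllocateQitem` come from this change set, and the function name is an assumption.

```c
// Hedged sketch: a ProcMallocFp matching the new signature that routes
// allocations through the quota-aware queue allocator added in this change.
static void *procQueueMalloc(int32_t contLen, EQItype itype) {
  return taosAllocateQitem(contLen, itype);
}
```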
@@ -48,18 +48,24 @@ typedef struct {
  int32_t threadNum;
} SQueueInfo;

typedef enum {
  DEF_QITEM = 0,
  RPC_QITEM = 1,
} EQItype;

typedef void (*FItem)(SQueueInfo *pInfo, void *pItem);
typedef void (*FItems)(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfItems);

STaosQueue *taosOpenQueue();
void taosCloseQueue(STaosQueue *queue);
void taosSetQueueFp(STaosQueue *queue, FItem itemFp, FItems itemsFp);
void *taosAllocateQitem(int32_t size);
void *taosAllocateQitem(int32_t size, EQItype itype);
void taosFreeQitem(void *pItem);
void taosWriteQitem(STaosQueue *queue, void *pItem);
int32_t taosReadQitem(STaosQueue *queue, void **ppItem);
bool taosQueueEmpty(STaosQueue *queue);
int32_t taosQueueSize(STaosQueue *queue);
int32_t taosQueueItemSize(STaosQueue *queue);
int64_t taosQueueMemorySize(STaosQueue *queue);

STaosQall *taosAllocateQall();
void taosFreeQall(STaosQall *qall);

@@ -77,8 +83,8 @@ int32_t taosGetQueueNumber(STaosQset *qset);
int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FItem *itemFp);
int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, void **ahandle, FItems *itemsFp);
void taosResetQsetThread(STaosQset *qset, void *pItem);
int32_t taosGetQueueItemsNumber(STaosQueue *queue);
int32_t taosGetQsetItemsNumber(STaosQset *qset);

extern int64_t tsRpcQueueMemoryAllowed;

#ifdef __cplusplus
}
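A hedged sketch of the updated allocation flow: items tagged `RPC_QITEM` are counted against `tsRpcQueueMemoryAllowed`, and a NULL return can be mapped to the `TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE` code added earlier in this change. The helper name and the error-mapping choice are illustrative; only the API names shown above come from the change set.

```c
// Hedged sketch: allocate an RPC queue item under the new memory quota and
// map allocation failure to the new error code. enqueueRpcMsg is hypothetical.
static int32_t enqueueRpcMsg(STaosQueue *queue, int32_t contLen) {
  void *pItem = taosAllocateQitem(contLen, RPC_QITEM);  // counted against tsRpcQueueMemoryAllowed
  if (pItem == NULL) {
    return TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE;  // queue memory quota exceeded
  }
  taosWriteQitem(queue, pItem);  // ownership passes to the queue
  return 0;
}
```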
@ -0,0 +1,22 @@
|
|||
[Unit]
|
||||
Description=Nginx For TDengine Service
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=forking
|
||||
PIDFile=/usr/local/nginxd/logs/nginx.pid
|
||||
ExecStart=/usr/local/nginxd/sbin/nginx
|
||||
ExecStop=/usr/local/nginxd/sbin/nginx -s stop
|
||||
TimeoutStopSec=1000000s
|
||||
LimitNOFILE=infinity
|
||||
LimitNPROC=infinity
|
||||
LimitCORE=infinity
|
||||
TimeoutStartSec=0
|
||||
StandardOutput=null
|
||||
Restart=always
|
||||
StartLimitBurst=3
|
||||
StartLimitInterval=60s
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
|
@ -0,0 +1,312 @@
|
|||
########################################################
|
||||
# #
|
||||
# TDengine Configuration #
|
||||
# Any questions, please email support@taosdata.com #
|
||||
# #
|
||||
########################################################
|
||||
|
||||
# first fully qualified domain name (FQDN) for TDengine system
|
||||
# firstEp hostname:6030
|
||||
|
||||
# local fully qualified domain name (FQDN)
|
||||
# fqdn hostname
|
||||
|
||||
# first port number for the connection (12 continuous UDP/TCP port numbers are used)
|
||||
# serverPort 6030
|
||||
|
||||
# log file's directory
|
||||
# logDir /var/log/taos
|
||||
|
||||
# data file's directory
|
||||
# dataDir /var/lib/taos
|
||||
|
||||
# temporary file's directory
|
||||
# tempDir /tmp/
|
||||
|
||||
# the arbitrator's fully qualified domain name (FQDN) for TDengine system, for cluster only
|
||||
# arbitrator arbitrator_hostname:6042
|
||||
|
||||
# number of threads per CPU core
|
||||
# numOfThreadsPerCore 1.0
|
||||
|
||||
# number of threads to commit cache data
|
||||
# numOfCommitThreads 4
|
||||
|
||||
# the proportion of total CPU cores available for query processing
|
||||
# 2.0: the query threads will be set to double of the CPU cores.
|
||||
# 1.0: all CPU cores are available for query processing [default].
|
||||
# 0.5: only half of the CPU cores are available for query.
|
||||
# 0.0: only one core available.
|
||||
# ratioOfQueryCores 1.0
|
||||
|
||||
# the last_row/first/last aggregator will not change the original column name in the result fields
|
||||
keepColumnName 1
|
||||
|
||||
# number of management nodes in the system
|
||||
# numOfMnodes 1
|
||||
|
||||
# enable/disable backuping vnode directory when removing vnode
|
||||
# vnodeBak 1
|
||||
|
||||
# enable/disable installation / usage report
|
||||
# telemetryReporting 1
|
||||
|
||||
# enable/disable load balancing
|
||||
# balance 1
|
||||
|
||||
# role for dnode. 0 - any, 1 - mnode, 2 - dnode
|
||||
# role 0
|
||||
|
||||
# max timer control blocks
|
||||
# maxTmrCtrl 512
|
||||
|
||||
# time interval of system monitor, seconds
|
||||
# monitorInterval 30
|
||||
|
||||
# number of seconds allowed for a dnode to be offline, for cluster only
|
||||
# offlineThreshold 864000
|
||||
|
||||
# RPC re-try timer, millisecond
|
||||
# rpcTimer 300
|
||||
|
||||
# RPC maximum time for ack, seconds.
|
||||
# rpcMaxTime 600
|
||||
|
||||
# time interval of dnode status reporting to mnode, seconds, for cluster only
|
||||
# statusInterval 1
|
||||
|
||||
# time interval of heart beat from shell to dnode, seconds
|
||||
# shellActivityTimer 3
|
||||
|
||||
# minimum sliding window time, milli-second
|
||||
# minSlidingTime 10
|
||||
|
||||
# minimum time window, milli-second
|
||||
# minIntervalTime 10
|
||||
|
||||
# maximum delay before launching a stream computation, milli-second
|
||||
# maxStreamCompDelay 20000
|
||||
|
||||
# maximum delay before launching a stream computation for the first time, milli-second
|
||||
# maxFirstStreamCompDelay 10000
|
||||
|
||||
# retry delay when a stream computation fails, milli-second
|
||||
# retryStreamCompDelay 10
|
||||
|
||||
# the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
|
||||
# streamCompDelayRatio 0.1
|
||||
|
||||
# max number of vgroups per db, 0 means configured automatically
|
||||
# maxVgroupsPerDb 0
|
||||
|
||||
# max number of tables per vnode
|
||||
# maxTablesPerVnode 1000000
|
||||
|
||||
# cache block size (Mbyte)
|
||||
# cache 16
|
||||
|
||||
# number of cache blocks per vnode
|
||||
# blocks 6
|
||||
|
||||
# number of days per DB file
|
||||
# days 10
|
||||
|
||||
# number of days to keep DB file
|
||||
# keep 3650
|
||||
|
||||
# minimum rows of records in file block
|
||||
# minRows 100
|
||||
|
||||
# maximum rows of records in file block
|
||||
# maxRows 4096
|
||||
|
||||
# the number of acknowledgments required for successful data writing
|
||||
# quorum 1
|
||||
|
||||
# enable/disable compression
|
||||
# comp 2
|
||||
|
||||
# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fsync; 2: write wal, and call fsync
|
||||
# walLevel 1
|
||||
|
||||
# if walLevel is set to 2, the period (in ms) at which fsync is executed; if set to 0, fsync is called right away
|
||||
# fsync 3000
|
||||
|
||||
# number of replications, for cluster only
|
||||
# replica 1
|
||||
|
||||
# the compressed rpc message, option:
|
||||
# -1 (no compression)
|
||||
# 0 (all message compressed),
|
||||
# > 0 (rpc message body which larger than this value will be compressed)
|
||||
# compressMsgSize -1
|
||||
|
||||
# query retrieved column data compression option:
|
||||
# -1 (no compression)
|
||||
# 0 (all retrieved column data compressed),
|
||||
# > 0 (any retrieved column size greater than this value all data will be compressed.)
|
||||
# compressColData -1
|
||||
|
||||
# max length of an SQL
|
||||
# maxSQLLength 65480
|
||||
|
||||
# max length of WildCards
|
||||
# maxWildCardsLength 100
|
||||
|
||||
# the maximum number of records allowed for super table time sorting
|
||||
# maxNumOfOrderedRes 100000
|
||||
|
||||
# system time zone
|
||||
# timezone Asia/Shanghai (CST, +0800)
|
||||
# system time zone (for windows 10)
|
||||
# timezone UTC-8
|
||||
|
||||
# system locale
|
||||
# locale en_US.UTF-8
|
||||
|
||||
# default system charset
|
||||
# charset UTF-8
|
||||
|
||||
# max number of connections allowed in dnode
|
||||
# maxShellConns 5000
|
||||
|
||||
# max number of connections allowed in client
|
||||
# maxConnections 5000
|
||||
|
||||
# stop writing logs when the disk size of the log folder is less than this value
|
||||
# minimalLogDirGB 1.0
|
||||
|
||||
# stop writing temporary files when the disk size of the tmp folder is less than this value
|
||||
# minimalTmpDirGB 1.0
|
||||
|
||||
# if disk free space is less than this value, taosd service exit directly within startup process
|
||||
# minimalDataDirGB 2.0
|
||||
|
||||
# the number of vnodes that one mnode is equivalent to in resource consumption
|
||||
# mnodeEqualVnodeNum 4
|
||||
|
||||
# enable/disable http service
|
||||
# http 1
|
||||
|
||||
# enable/disable system monitor
|
||||
# monitor 1
|
||||
|
||||
# enable/disable recording the SQL statements via restful interface
|
||||
# httpEnableRecordSql 0
|
||||
|
||||
# number of threads used to process http requests
|
||||
# httpMaxThreads 2
|
||||
|
||||
# maximum number of rows returned by the restful interface
|
||||
# restfulRowLimit 10240
|
||||
|
||||
# database name must be specified in restful interface if the following parameter is set, off by default
|
||||
# httpDbNameMandatory 1
|
||||
|
||||
# http keep alive, default is 30 seconds
|
||||
# httpKeepAlive 30000
|
||||
|
||||
# The following parameter is used to limit the maximum number of lines in log files.
|
||||
# max number of lines per log file
|
||||
# numOfLogLines 10000000
|
||||
|
||||
# enable/disable async log
|
||||
# asyncLog 1
|
||||
|
||||
# time of keeping log files, days
|
||||
# logKeepDays 0
|
||||
|
||||
|
||||
# The following parameters are used for debug purpose only.
|
||||
# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR
|
||||
# 131: output warning and error
|
||||
# 135: output debug, warning and error
|
||||
# 143: output trace, debug, warning and error to log
|
||||
# 199: output debug, warning and error to both screen and file
|
||||
# 207: output trace, debug, warning and error to both screen and file
|
||||
|
||||
# debug flag for all log type, take effect when non-zero value
|
||||
# debugFlag 0
|
||||
|
||||
# debug flag for meta management messages
|
||||
# mDebugFlag 135
|
||||
|
||||
# debug flag for dnode messages
|
||||
# dDebugFlag 135
|
||||
|
||||
# debug flag for sync module
|
||||
# sDebugFlag 135
|
||||
|
||||
# debug flag for WAL
|
||||
# wDebugFlag 135
|
||||
|
||||
# debug flag for SDB
|
||||
# sdbDebugFlag 135
|
||||
|
||||
# debug flag for RPC
|
||||
# rpcDebugFlag 131
|
||||
|
||||
# debug flag for TAOS TIMER
|
||||
# tmrDebugFlag 131
|
||||
|
||||
# debug flag for TDengine client
|
||||
# cDebugFlag 131
|
||||
|
||||
# debug flag for JNI
|
||||
# jniDebugFlag 131
|
||||
|
||||
# debug flag for storage
|
||||
# uDebugFlag 131
|
||||
|
||||
# debug flag for http server
|
||||
# httpDebugFlag 131
|
||||
|
||||
# debug flag for monitor
|
||||
# monDebugFlag 131
|
||||
|
||||
# debug flag for query
|
||||
# qDebugFlag 131
|
||||
|
||||
# debug flag for vnode
|
||||
# vDebugFlag 131
|
||||
|
||||
# debug flag for TSDB
|
||||
# tsdbDebugFlag 131
|
||||
|
||||
# debug flag for continue query
|
||||
# cqDebugFlag 131
|
||||
|
||||
# enable/disable recording the SQL in taos client
|
||||
# enableRecordSql 0
|
||||
|
||||
# generate core file when service crash
|
||||
# enableCoreFile 1
|
||||
|
||||
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
|
||||
# maxBinaryDisplayWidth 30
|
||||
|
||||
# enable/disable stream (continuous query)
|
||||
# stream 1
|
||||
|
||||
# in the retrieve blocking model, only 50% of query threads will be used for query processing in dnode
|
||||
# retrieveBlockingModel 0
|
||||
|
||||
# the maximum allowed query buffer size in MB during query processing for each data node
|
||||
# -1 no limit (default)
|
||||
# 0 no query allowed, queries are disabled
|
||||
# queryBufferSize -1
|
||||
|
||||
# percent of redundant data in tsdb meta that will trigger meta data compaction, 0 means do not compact
|
||||
# tsdbMetaCompactRatio 0
|
||||
|
||||
# default string type used for storing JSON String, options can be binary/nchar, default is nchar
|
||||
# defaultJSONStrType nchar
|
||||
|
||||
# force TCP transmission
|
||||
# rpcForceTcp 0
|
||||
|
||||
# unit MB. Flush vnode wal file if walSize > walFlushSize and walSize > cache*0.5*blocks
|
||||
# walFlushSize 1024
|
||||
|
||||
# unit Hour. Latency of data migration
|
||||
# keepTimeOffset 0
|
|
@ -0,0 +1,21 @@
|
|||
[Unit]
|
||||
Description=TDengine server service
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart=/usr/bin/taosd
|
||||
ExecStartPre=/usr/local/taos/bin/startPre.sh
|
||||
TimeoutStopSec=1000000s
|
||||
LimitNOFILE=infinity
|
||||
LimitNPROC=infinity
|
||||
LimitCORE=infinity
|
||||
TimeoutStartSec=0
|
||||
StandardOutput=null
|
||||
Restart=always
|
||||
StartLimitBurst=3
|
||||
StartLimitInterval=60s
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
|
@ -0,0 +1,20 @@
|
|||
[Unit]
|
||||
Description=TDengine arbitrator service
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart=/usr/bin/tarbitrator
|
||||
TimeoutStopSec=1000000s
|
||||
LimitNOFILE=infinity
|
||||
LimitNPROC=infinity
|
||||
LimitCORE=infinity
|
||||
TimeoutStartSec=0
|
||||
StandardOutput=null
|
||||
Restart=always
|
||||
StartLimitBurst=3
|
||||
StartLimitInterval=60s
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
|
@ -0,0 +1,252 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# This file is used to install database on linux systems. The operating system
|
||||
# is required to use systemd to manage services at boot
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
verMode=edge
|
||||
pagMode=full
|
||||
|
||||
iplist=""
|
||||
serverFqdn=""
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
script_dir="../release"
|
||||
# Dynamic directory
|
||||
data_dir="/var/lib/taos"
|
||||
log_dir="/var/log/taos"
|
||||
|
||||
data_link_dir="/usr/local/taos/data"
|
||||
log_link_dir="/usr/local/taos/log"
|
||||
|
||||
cfg_install_dir="/etc/taos"
|
||||
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
|
||||
#install main path
|
||||
install_main_dir="/usr/local/taos"
|
||||
|
||||
# old bin dir
|
||||
sbin_dir="/usr/local/taos/bin"
|
||||
|
||||
temp_version=""
|
||||
fin_result=""
|
||||
|
||||
service_config_dir="/etc/systemd/system"
|
||||
nginx_port=6060
|
||||
nginx_dir="/usr/local/nginxd"
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
# ============================= get input parameters =================================================
|
||||
|
||||
# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...]
|
||||
|
||||
# set parameters by default value
|
||||
interactiveFqdn=yes # [yes | no]
|
||||
verType=server # [server | client]
|
||||
initType=systemd # [systemd | service | ...]
|
||||
|
||||
while getopts "hv:d:" arg
|
||||
do
|
||||
case $arg in
|
||||
d)
|
||||
#echo "interactiveFqdn=$OPTARG"
|
||||
script_dir=$( echo $OPTARG )
|
||||
;;
|
||||
h)
|
||||
echo "Usage: `basename $0` -d scripy_path"
|
||||
exit 0
|
||||
;;
|
||||
?) #unknown option
|
||||
echo "unkonw argument"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
#echo "verType=${verType} interactiveFqdn=${interactiveFqdn}"
|
||||
|
||||
function kill_process() {
|
||||
pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function check_file() {
|
||||
#check file whether exists
|
||||
if [ ! -e $1/$2 ];then
|
||||
echo -e "$1/$2 \033[31mnot exists\033[0m!quit"
|
||||
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
|
||||
echo -e $fin_result
|
||||
exit 8
|
||||
fi
|
||||
}
|
||||
|
||||
function get_package_name() {
|
||||
var=$1
|
||||
if [[ $1 =~ 'aarch' ]];then
|
||||
echo ${var::-21}
|
||||
else
|
||||
echo ${var::-17}
|
||||
fi
|
||||
}
|
||||
|
||||
function check_link() {
|
||||
#check Link whether exists or broken
|
||||
if [ -L $1 ] ; then
|
||||
if [ ! -e $1 ] ; then
|
||||
echo -e "$1 \033[31Broken link\033[0m"
|
||||
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
|
||||
echo -e $fin_result
|
||||
exit 8
|
||||
fi
|
||||
else
|
||||
echo -e "$1 \033[31mnot exists\033[0m!quit"
|
||||
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
|
||||
echo -e $fin_result
|
||||
exit 8
|
||||
fi
|
||||
}
|
||||
|
||||
function check_main_path() {
|
||||
#check install main dir and all sub dir
|
||||
main_dir=("" "cfg" "bin" "connector" "driver" "examples" "include" "init.d")
|
||||
for i in "${main_dir[@]}";do
|
||||
check_file ${install_main_dir} $i
|
||||
done
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
nginx_main_dir=("admin" "conf" "html" "sbin" "logs")
|
||||
for i in "${nginx_main_dir[@]}";do
|
||||
check_file ${nginx_dir} $i
|
||||
done
|
||||
fi
|
||||
echo -e "Check main path:\033[32mOK\033[0m!"
|
||||
}
|
||||
|
||||
function check_bin_path() {
|
||||
# check install bin dir and all sub dir
|
||||
bin_dir=("taos" "taosd" "taosadapter" "taosdemo" "remove.sh" "tarbitrator" "set_core.sh")
|
||||
for i in "${bin_dir[@]}";do
|
||||
check_file ${sbin_dir} $i
|
||||
done
|
||||
lbin_dir=("taos" "taosd" "taosadapter" "taosdemo" "rmtaos" "tarbitrator" "set_core")
|
||||
for i in "${lbin_dir[@]}";do
|
||||
check_link ${bin_link_dir}/$i
|
||||
done
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
check_file ${nginx_dir}/sbin nginx
|
||||
fi
|
||||
echo -e "Check bin path:\033[32mOK\033[0m!"
|
||||
}
|
||||
|
||||
function check_lib_path() {
|
||||
# check all links
|
||||
check_link ${lib_link_dir}/libtaos.so
|
||||
check_link ${lib_link_dir}/libtaos.so.1
|
||||
|
||||
if [[ -d ${lib64_link_dir} ]]; then
|
||||
check_link ${lib64_link_dir}/libtaos.so
|
||||
check_link ${lib64_link_dir}/libtaos.so.1
|
||||
fi
|
||||
echo -e "Check lib path:\033[32mOK\033[0m!"
|
||||
}
|
||||
|
||||
function check_header_path() {
|
||||
# check all header
|
||||
header_dir=("taos.h" "taosdef.h" "taoserror.h")
|
||||
for i in "${header_dir[@]}";do
|
||||
check_link ${inc_link_dir}/$i
|
||||
done
|
||||
echo -e "Check bin path:\033[32mOK\033[0m!"
|
||||
}
|
||||
|
||||
function check_taosadapter_config_dir() {
|
||||
# check all config
|
||||
check_file ${cfg_install_dir} taosadapter.toml
|
||||
check_file ${cfg_install_dir} taosadapter.service
|
||||
check_file ${install_main_dir}/cfg taosadapter.toml.org
|
||||
echo -e "Check conf path:\033[32mOK\033[0m!"
|
||||
}
|
||||
|
||||
function check_config_dir() {
|
||||
# check all config
|
||||
check_file ${cfg_install_dir} taos.cfg
|
||||
check_file ${install_main_dir}/cfg taos.cfg.org
|
||||
echo -e "Check conf path:\033[32mOK\033[0m!"
|
||||
}
|
||||
|
||||
function check_log_path() {
|
||||
# check log path
|
||||
check_file ${log_dir}
|
||||
echo -e "Check log path:\033[32mOK\033[0m!"
|
||||
}
|
||||
|
||||
function check_data_path() {
|
||||
# check data path
|
||||
check_file ${data_dir}
|
||||
echo -e "Check data path:\033[32mOK\033[0m!"
|
||||
}
|
||||
|
||||
function install_TDengine() {
|
||||
cd ${script_dir}
|
||||
tar zxf $1
|
||||
temp_version=$(get_package_name $1)
|
||||
cd $(get_package_name $1)
|
||||
echo -e "\033[32muninstall TDengine && install TDengine...\033[0m"
|
||||
rmtaos >/dev/null 2>&1 || echo 'taosd not installed' && echo -e '\n\n' |./install.sh >/dev/null 2>&1
|
||||
echo -e "\033[32mTDengine has been installed!\033[0m"
|
||||
echo -e "\033[32mTDengine is starting...\033[0m"
|
||||
kill_process taos && systemctl start taosd && sleep 10
|
||||
}
|
||||
|
||||
function test_TDengine() {
|
||||
check_main_path
|
||||
check_bin_path
|
||||
check_lib_path
|
||||
check_header_path
|
||||
check_config_dir
|
||||
check_taosadapter_config_dir
|
||||
check_log_path
|
||||
check_data_path
|
||||
result=`taos -s 'create database test ;create table test.tt(ts timestamp ,i int);insert into test.tt values(now,11);select * from test.tt' 2>&1 ||:`
|
||||
if [[ $result =~ "Unable to establish" ]];then
|
||||
echo -e "\033[31mTDengine connect failed\033[0m"
|
||||
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
|
||||
echo -e $fin_result
|
||||
exit 8
|
||||
fi
|
||||
echo -e "Check TDengine connect:\033[32mOK\033[0m!"
|
||||
fin_result=$fin_result"\033[32m$temp_version\033[0m test OK!\n"
|
||||
}
|
||||
# ## ==============================Main program starts from here============================
|
||||
TD_package_name=`ls ${script_dir}/*server*gz |awk -F '/' '{print $NF}' `
|
||||
temp=`pwd`
|
||||
for i in $TD_package_name;do
|
||||
if [[ $i =~ 'enterprise' ]];then
|
||||
verMode="cluster"
|
||||
else
|
||||
verMode=""
|
||||
fi
|
||||
cd $temp
|
||||
install_TDengine $i
|
||||
test_TDengine
|
||||
done
|
||||
echo "============================================================"
|
||||
echo -e $fin_result
|
|
@ -0,0 +1,13 @@
|
|||
Package: tdengine
|
||||
Version: 1.0.0
|
||||
Section: utils
|
||||
Priority: optional
|
||||
#Essential: no
|
||||
#Depends: no
|
||||
#Suggests: no
|
||||
Architecture: amd64
|
||||
Installed-Size: 66666
|
||||
Maintainer: support@taosdata.com
|
||||
Provides: taosdata
|
||||
Homepage: http://taosdata.com
|
||||
Description: Big Data Platform Designed and Optimized for IoT.
|
|
@ -0,0 +1,13 @@
|
|||
#!/bin/bash
|
||||
#set -x
|
||||
#path=`pwd`
|
||||
insmetaPath="/usr/local/taos/script"
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
${csudo}chmod -R 744 ${insmetaPath}
|
||||
cd ${insmetaPath}
|
||||
${csudo}./post.sh
|
|
@ -0,0 +1,2 @@
|
|||
#!/bin/bash
|
||||
|
|
@ -0,0 +1,40 @@
|
|||
#!/bin/bash
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
# Stop the service if running
|
||||
if pidof taosd &> /dev/null; then
|
||||
if pidof systemd &> /dev/null; then
|
||||
${csudo}systemctl stop taosd || :
|
||||
elif $(which service &> /dev/null); then
|
||||
${csudo}service taosd stop || :
|
||||
else
|
||||
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
fi
|
||||
echo "Stop taosd service success!"
|
||||
sleep 1
|
||||
fi
|
||||
|
||||
# if taos.cfg already softlink, remove it
|
||||
cfg_install_dir="/etc/taos"
|
||||
install_main_dir="/usr/local/taos"
|
||||
if [ -f "${install_main_dir}/taos.cfg" ]; then
|
||||
${csudo}rm -f ${install_main_dir}/cfg/taos.cfg || :
|
||||
fi
|
||||
|
||||
if [ -f "${install_main_dir}/taosadapter.toml" ]; then
|
||||
${csudo}rm -f ${install_main_dir}/cfg/taosadapter.toml || :
|
||||
fi
|
||||
|
||||
if [ -f "${install_main_dir}/taosadapter.service" ]; then
|
||||
${csudo}rm -f ${install_main_dir}/cfg/taosadapter.service || :
|
||||
fi
|
||||
|
||||
# there can not libtaos.so*, otherwise ln -s error
|
||||
${csudo}rm -f ${install_main_dir}/driver/libtaos* || :
|
|
@ -0,0 +1,42 @@
|
|||
#!/bin/bash
|
||||
|
||||
insmetaPath="/usr/local/taos/script"
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
${csudo}chmod -R 744 ${insmetaPath} || :
|
||||
#cd ${insmetaPath}
|
||||
#${csudo}./preun.sh
|
||||
if [ -f ${insmetaPath}/preun.sh ]; then
|
||||
cd ${insmetaPath}
|
||||
${csudo}./preun.sh
|
||||
else
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
inc_link_dir="/usr/include"
|
||||
|
||||
data_link_dir="/usr/local/taos/data"
|
||||
log_link_dir="/usr/local/taos/log"
|
||||
cfg_link_dir="/usr/local/taos/cfg"
|
||||
|
||||
# Remove all links
|
||||
${csudo}rm -f ${bin_link_dir}/taos || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosd || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosadapter || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
||||
${csudo}rm -f ${cfg_link_dir}/* || :
|
||||
${csudo}rm -f ${inc_link_dir}/taos.h || :
|
||||
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
||||
|
||||
${csudo}rm -f ${log_link_dir} || :
|
||||
${csudo}rm -f ${data_link_dir} || :
|
||||
|
||||
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
fi
|
||||
|
|
@ -0,0 +1,147 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Generate deb package for ubuntu
|
||||
set -e
|
||||
# set -x
|
||||
|
||||
#curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
output_dir=$2
|
||||
tdengine_ver=$3
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
pkg_dir="${top_dir}/debworkroom"
|
||||
|
||||
#echo "curr_dir: ${curr_dir}"
|
||||
#echo "top_dir: ${top_dir}"
|
||||
#echo "script_dir: ${script_dir}"
|
||||
echo "compile_dir: ${compile_dir}"
|
||||
echo "pkg_dir: ${pkg_dir}"
|
||||
|
||||
if [ -d ${pkg_dir} ]; then
|
||||
rm -rf ${pkg_dir}
|
||||
fi
|
||||
mkdir -p ${pkg_dir}
|
||||
cd ${pkg_dir}
|
||||
|
||||
libfile="libtaos.so.${tdengine_ver}"
|
||||
|
||||
# create install dir
|
||||
install_home_path="/usr/local/taos"
|
||||
mkdir -p ${pkg_dir}${install_home_path}
|
||||
mkdir -p ${pkg_dir}${install_home_path}/bin
|
||||
mkdir -p ${pkg_dir}${install_home_path}/cfg
|
||||
#mkdir -p ${pkg_dir}${install_home_path}/connector
|
||||
mkdir -p ${pkg_dir}${install_home_path}/driver
|
||||
mkdir -p ${pkg_dir}${install_home_path}/examples
|
||||
mkdir -p ${pkg_dir}${install_home_path}/include
|
||||
#mkdir -p ${pkg_dir}${install_home_path}/init.d
|
||||
mkdir -p ${pkg_dir}${install_home_path}/script
|
||||
|
||||
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
|
||||
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
|
||||
cp ${compile_dir}/test/cfg/taosadapter.toml ${pkg_dir}${install_home_path}/cfg || :
|
||||
fi
|
||||
if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
|
||||
cp ${compile_dir}/test/cfg/taosadapter.service ${pkg_dir}${install_home_path}/cfg || :
|
||||
fi
|
||||
|
||||
#cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
|
||||
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
|
||||
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
|
||||
cp ${compile_dir}/../packaging/tools/startPre.sh ${pkg_dir}${install_home_path}/bin
|
||||
cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_path}/bin
|
||||
cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin
|
||||
|
||||
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
|
||||
#cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin
|
||||
|
||||
if [ -f "${compile_dir}/build/bin/taosadapter" ]; then
|
||||
cp ${compile_dir}/build/bin/taosadapter ${pkg_dir}${install_home_path}/bin ||:
|
||||
fi
|
||||
|
||||
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
|
||||
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
|
||||
cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
|
||||
cp ${compile_dir}/../src/inc/taosdef.h ${pkg_dir}${install_home_path}/include
|
||||
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
|
||||
cp -r ${top_dir}/examples/* ${pkg_dir}${install_home_path}/examples
|
||||
#cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
|
||||
#cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
|
||||
#cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
|
||||
#cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||:
|
||||
|
||||
install_user_local_path="/usr/local"
|
||||
|
||||
if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
|
||||
mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
|
||||
cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/
|
||||
if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then
|
||||
cp ${compile_dir}/build/bin/jemalloc.sh ${pkg_dir}${install_user_local_path}/bin/
|
||||
fi
|
||||
if [ -f ${compile_dir}/build/bin/jeprof ]; then
|
||||
cp ${compile_dir}/build/bin/jeprof ${pkg_dir}${install_user_local_path}/bin/
|
||||
fi
|
||||
if [ -f ${compile_dir}/build/include/jemalloc/jemalloc.h ]; then
|
||||
cp ${compile_dir}/build/include/jemalloc/jemalloc.h ${pkg_dir}${install_user_local_path}/include/jemalloc/
|
||||
fi
|
||||
if [ -f ${compile_dir}/build/lib/libjemalloc.so.2 ]; then
|
||||
cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/
|
||||
ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so
|
||||
fi
|
||||
if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
|
||||
cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
|
||||
fi
|
||||
if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
|
||||
cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
|
||||
fi
|
||||
if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
|
||||
cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/
|
||||
fi
|
||||
if [ -f ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ]; then
|
||||
cp ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ${pkg_dir}${install_user_local_path}/share/doc/jemalloc/
|
||||
fi
|
||||
if [ -f ${compile_dir}/build/share/man/man3/jemalloc.3 ]; then
|
||||
cp ${compile_dir}/build/share/man/man3/jemalloc.3 ${pkg_dir}${install_user_local_path}/share/man/man3/
|
||||
fi
|
||||
fi
|
||||
|
||||
cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/
|
||||
chmod 755 ${pkg_dir}/DEBIAN/*
|
||||
|
||||
# modify version of control
|
||||
debver="Version: "$tdengine_ver
|
||||
sed -i "2c$debver" ${pkg_dir}/DEBIAN/control
|
||||
|
||||
#get taos version, then set deb name
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
debname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
|
||||
elif [ "$verMode" == "edge" ]; then
|
||||
debname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType}
|
||||
else
|
||||
echo "unknow verMode, nor cluster or edge"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$verType" == "beta" ]; then
|
||||
debname="TDengine-server-"${tdengine_ver}-${verType}-${osType}-${cpuType}".deb"
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
debname=${debname}".deb"
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# make deb package
|
||||
dpkg -b ${pkg_dir} $debname
|
||||
echo "make deb package success!"
|
||||
|
||||
cp ${pkg_dir}/*.deb ${output_dir}
|
||||
|
||||
# clean temp dir
|
||||
rm -rf ${pkg_dir}
|
|
@ -0,0 +1,95 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Modified from original source: Elastic Search
|
||||
# https://github.com/elasticsearch/elasticsearch
|
||||
# Thank you to the Elastic Search authors
|
||||
#
|
||||
# chkconfig: 2345 99 01
|
||||
#
|
||||
### BEGIN INIT INFO
|
||||
# Provides: TDengine
|
||||
# Required-Start: $local_fs $network $syslog
|
||||
# Required-Stop: $local_fs $network $syslog
|
||||
# Default-Start: 2 3 4 5
|
||||
# Default-Stop: 0 1 6
|
||||
# Short-Description: Starts TDengine taosd
|
||||
# Description: Starts TDengine taosd, a time-series database engine
|
||||
### END INIT INFO
|
||||
|
||||
set -e
|
||||
|
||||
PATH="/bin:/usr/bin:/sbin:/usr/sbin"
|
||||
NAME="TDengine"
|
||||
USER="root"
|
||||
GROUP="root"
|
||||
DAEMON="/usr/local/taos/bin/taosd"
|
||||
DAEMON_OPTS=""
|
||||
|
||||
HTTPD_NAME="taosadapter"
|
||||
DAEMON_HTTPD_NAME=$HTTPD_NAME
|
||||
DAEMON_HTTPD="/usr/local/taos/bin/$HTTPD_NAME"
|
||||
|
||||
PID_FILE="/var/run/$NAME.pid"
|
||||
APPARGS=""
|
||||
|
||||
# Maximum number of open files
|
||||
MAX_OPEN_FILES=65535
|
||||
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
|
||||
log_action_begin_msg "Starting TDengine..."
|
||||
$DAEMON_HTTPD &
|
||||
if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then
|
||||
|
||||
touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE"
|
||||
|
||||
if [ -n "$MAX_OPEN_FILES" ]; then
|
||||
ulimit -n $MAX_OPEN_FILES
|
||||
fi
|
||||
|
||||
start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS
|
||||
|
||||
log_end_msg $?
|
||||
fi
|
||||
;;
|
||||
|
||||
stop)
|
||||
log_action_begin_msg "Stopping TDengine..."
|
||||
pkill -9 $DAEMON_HTTPD_NAME
|
||||
set +e
|
||||
if [ -f "$PID_FILE" ]; then
|
||||
start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null
|
||||
if [ $? -eq 1 ]; then
|
||||
log_action_cont_msg "TSD is not running but pid file exists, cleaning up"
|
||||
elif [ $? -eq 3 ]; then
|
||||
PID="`cat $PID_FILE`"
|
||||
log_failure_msg "Failed to stop TDengine (pid $PID)"
|
||||
exit 1
|
||||
fi
|
||||
rm -f "$PID_FILE"
|
||||
else
|
||||
log_action_cont_msg "TDengine was not running"
|
||||
fi
|
||||
log_action_end_msg 0
|
||||
set -e
|
||||
;;
|
||||
|
||||
restart|force-reload)
|
||||
if [ -f "$PID_FILE" ]; then
|
||||
$0 stop
|
||||
sleep 1
|
||||
fi
|
||||
$0 start
|
||||
;;
|
||||
status)
|
||||
status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME"
|
||||
;;
|
||||
*)
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
exit 0
|
|
@ -0,0 +1,88 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Modified from original source: Elastic Search
|
||||
# https://github.com/elasticsearch/elasticsearch
|
||||
# Thank you to the Elastic Search authors
|
||||
#
|
||||
# chkconfig: 2345 99 01
|
||||
#
|
||||
### BEGIN INIT INFO
|
||||
# Provides: taoscluster
|
||||
# Required-Start: $local_fs $network $syslog
|
||||
# Required-Stop: $local_fs $network $syslog
|
||||
# Default-Start: 2 3 4 5
|
||||
# Default-Stop: 0 1 6
|
||||
# Short-Description: Starts taoscluster tarbitrator
|
||||
# Description: Starts taoscluster tarbitrator, an arbitrator
|
||||
### END INIT INFO
|
||||
|
||||
set -e
|
||||
|
||||
PATH="/bin:/usr/bin:/sbin:/usr/sbin"
|
||||
NAME="taoscluster"
|
||||
USER="root"
|
||||
GROUP="root"
|
||||
DAEMON="/usr/local/taos/bin/tarbitrator"
|
||||
DAEMON_OPTS=""
|
||||
PID_FILE="/var/run/$NAME.pid"
|
||||
APPARGS=""
|
||||
|
||||
# Maximum number of open files
|
||||
MAX_OPEN_FILES=65535
|
||||
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
|
||||
log_action_begin_msg "Starting tarbitrator..."
|
||||
if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then
|
||||
|
||||
touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE"
|
||||
|
||||
if [ -n "$MAX_OPEN_FILES" ]; then
|
||||
ulimit -n $MAX_OPEN_FILES
|
||||
fi
|
||||
|
||||
start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS
|
||||
|
||||
log_end_msg $?
|
||||
fi
|
||||
;;
|
||||
|
||||
stop)
|
||||
log_action_begin_msg "Stopping tarbitrator..."
|
||||
set +e
|
||||
if [ -f "$PID_FILE" ]; then
|
||||
start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null
|
||||
if [ $? -eq 1 ]; then
|
||||
log_action_cont_msg "TSD is not running but pid file exists, cleaning up"
|
||||
elif [ $? -eq 3 ]; then
|
||||
PID="`cat $PID_FILE`"
|
||||
log_failure_msg "Failed to stop tarbitrator (pid $PID)"
|
||||
exit 1
|
||||
fi
|
||||
rm -f "$PID_FILE"
|
||||
else
|
||||
log_action_cont_msg "tarbitrator was not running"
|
||||
fi
|
||||
log_action_end_msg 0
|
||||
set -e
|
||||
;;
|
||||
|
||||
restart|force-reload)
|
||||
if [ -f "$PID_FILE" ]; then
|
||||
$0 stop
|
||||
sleep 1
|
||||
fi
|
||||
$0 start
|
||||
;;
|
||||
status)
|
||||
status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME"
|
||||
;;
|
||||
*)
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
exit 0
|
|
@ -0,0 +1,32 @@
|
|||
FROM ubuntu:18.04
|
||||
|
||||
WORKDIR /root
|
||||
|
||||
ARG pkgFile
|
||||
ARG dirName
|
||||
ARG cpuType
|
||||
RUN echo ${pkgFile} && echo ${dirName}
|
||||
|
||||
COPY ${pkgFile} /root/
|
||||
RUN tar -zxf ${pkgFile}
|
||||
WORKDIR /root/
|
||||
RUN cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root
|
||||
RUN rm /root/${pkgFile}
|
||||
RUN rm -rf /root/${dirName}
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
RUN apt-get clean && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8
|
||||
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \
|
||||
LC_CTYPE=en_US.UTF-8 \
|
||||
LANG=en_US.UTF-8 \
|
||||
LC_ALL=en_US.UTF-8
|
||||
|
||||
COPY ./bin/* /usr/bin/
|
||||
|
||||
ENV TINI_VERSION v0.19.0
|
||||
RUN bash -c 'echo -e "Downloading tini-${cpuType} ..."'
|
||||
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini
|
||||
RUN chmod +x /tini
|
||||
ENTRYPOINT ["/tini", "--", "/usr/bin/entrypoint.sh"]
|
||||
CMD ["taosd"]
|
||||
VOLUME [ "/var/lib/taos", "/var/log/taos", "/corefile" ]
|
|
@ -0,0 +1,664 @@
|
|||
# TDengine Docker Image Quick Reference
|
||||
|
||||
## What is TDengine?
|
||||
|
||||
TDengine is an open-source big data platform under [GNU AGPL v3.0](http://www.gnu.org/licenses/agpl-3.0.html), designed and optimized for the Internet of Things (IoT), Connected Cars, Industrial IoT, and IT Infrastructure and Application Monitoring. Besides being a 10x faster time-series database, it provides caching, stream computing, message queuing and other functionality to reduce the complexity and cost of development and operation.
|
||||
|
||||
- **10x Faster on Insert/Query Speeds**: Through the innovative design on storage, on a single-core machine, over 20K requests can be processed, millions of data points can be ingested, and over 10 million data points can be retrieved in a second. It is 10 times faster than other databases.
|
||||
|
||||
- **1/5 Hardware/Cloud Service Costs**: Compared with typical big data solutions, less than 1/5 of computing resources are required. Via column-based storage and tuned compression algorithms for different data types, less than 1/10 of storage space is needed.
|
||||
|
||||
- **Full Stack for Time-Series Data**: By integrating a database with message queuing, caching, and stream computing features together, it is no longer necessary to integrate Kafka/Redis/HBase/Spark or other software. It makes the system architecture much simpler and more robust.
|
||||
|
||||
- **Powerful Data Analysis**: Whether it is 10 years or one minute ago, data can be queried just by specifying the time range. Data can be aggregated over time, multiple time streams or both. Ad Hoc queries or analyses can be executed via TDengine shell, Python, R or Matlab.
|
||||
|
||||
- **Seamless Integration with Other Tools**: Telegraf, Grafana, Matlab, R, and other tools can be integrated with TDengine without a line of code. MQTT, OPC, Hadoop, Spark, and many others will be integrated soon.
|
||||
|
||||
- **Zero Management, No Learning Curve**: It takes only seconds to download, install, and run it successfully; there are no other dependencies. Automatic partitioning on tables or DBs. Standard SQL is used, with C/C++, Python, JDBC, Go and RESTful connectors.
|
||||
|
||||
## How to use this image
|
||||
|
||||
### Start a TDengine instance with RESTful API exposed
|
||||
|
||||
You can simply use `docker run` to start a TDengine instance and connect to it with RESTful connectors (e.g. [JDBC-RESTful](https://www.taosdata.com/cn/documentation/connector/java)).
|
||||
|
||||
```bash
|
||||
docker run -d --name tdengine -p 6041:6041 tdengine/tdengine
|
||||
```
|
||||
|
||||
This command starts a docker container named `tdengine` with the TDengine server running, and maps the container's HTTP port 6041 to the host's port 6041. If you have `curl` on your host, you can list the databases with the command:
|
||||
|
||||
```bash
|
||||
curl -u root:taosdata -d "show databases" localhost:6041/rest/sql
|
||||
```
|
||||
|
||||
You can execute the `taos` shell command in the container:
|
||||
|
||||
```bash
|
||||
$ docker exec -it tdengine taos
|
||||
|
||||
Welcome to the TDengine shell from Linux, Client Version:2.4.0.0
|
||||
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
|
||||
|
||||
taos> show databases;
|
||||
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
|
||||
====================================================================================================================================================================================================================================================================================
|
||||
log | 2022-01-17 13:57:22.270 | 10 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready |
|
||||
Query OK, 1 row(s) in set (0.002843s)
|
||||
```
|
||||
|
||||
Since TDengine uses the container hostname to establish connections, it is a bit more complex to use the taos shell and native connectors (such as JDBC-JNI) with a TDengine container instance. This is the recommended way to expose ports and use TDengine with Docker in simple cases. If you want to use the taos shell or taosc/connectors smoothly outside the `tdengine` container, see the following use cases and pick the one that matches your needs.
|
||||
|
||||
### Start with host network
|
||||
|
||||
```bash
|
||||
docker run -d --name tdengine --network host tdengine/tdengine
|
||||
```
|
||||
|
||||
Starting the container with the `host` network uses the host's hostname as the FQDN instead of the container ID. It is much like starting natively with `systemd` on the host. After installing the client, you can use the `taos` shell as normal on the host.
|
||||
|
||||
```bash
|
||||
$ taos
|
||||
|
||||
Welcome to the TDengine shell from Linux, Client Version:2.4.0.0
|
||||
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
|
||||
|
||||
taos> show dnodes;
|
||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
||||
======================================================================================================================================
|
||||
1 | host:6030 | 1 | 8 | ready | any | 2022-01-17 22:10:32.619 | |
|
||||
Query OK, 1 row(s) in set (0.003233s)
|
||||
```
|
||||
|
||||
### Start with exposed ports and specified hostname
|
||||
|
||||
Setting the FQDN explicitly helps when other environments or applications need to reach the instance. We provide the environment variable `TAOS_FQDN` or the `fqdn` config option to explicitly set the hostname used by TDengine container instance(s).
|
||||
|
||||
Use `TAOS_FQDN` variable within `docker run` command:
|
||||
|
||||
```bash
|
||||
docker run -d \
|
||||
--name tdengine \
|
||||
-e TAOS_FQDN=tdengine \
|
||||
-p 6030-6049:6030-6049 \
|
||||
-p 6030-6049:6030-6049/udp \
|
||||
tdengine/tdengine
|
||||
```
|
||||
|
||||
This command starts a docker container with the TDengine server running and maps the container's TCP ports 6030-6049 to the host's TCP ports 6030-6049, and the container's UDP ports 6030-6049 to the host's UDP ports 6030-6049. If the host is already running a TDengine server and occupying the same port(s), you need to map the container's ports to a different unused port segment. (Please see the TDengine 2.0 Port Description for details.) To support TDengine clients accessing TDengine server services, both TCP and UDP ports need to be exposed by default (unless `rpcForceTcp` is set to `1`).
|
||||
|
||||
If you want to use taos shell or native connectors([JDBC-JNI](https://www.taosdata.com/cn/documentation/connector/java), or [driver-go](https://github.com/taosdata/driver-go)), you need to make sure the `TAOS_FQDN` is resolvable at `/etc/hosts` or with custom DNS service.
|
||||
|
||||
If you set `TAOS_FQDN` to the host's hostname, it works the same as using the `host` network in the previous use case. Otherwise, as with `-e TAOS_FQDN=tdengine`, you can add a hostname record for `tdengine` to `/etc/hosts` (use `127.0.0.1` on the host itself; if the TDengine client/application runs on another host, set the record to the server host's real IP, e.g. `192.168.10.1`, which you can check with `hostname -i` or `ip route list default`) so the TDengine endpoint is resolvable:
|
||||
|
||||
```bash
|
||||
echo 127.0.0.1 tdengine |sudo tee -a /etc/hosts
|
||||
```
|
||||
|
||||
Then you can use `taos` with the host `tdengine`:
|
||||
|
||||
```bash
|
||||
taos -h tdengine
|
||||
```
|
||||
|
||||
Or develop/test applications with native connectors, as in Python:
|
||||
|
||||
```python
|
||||
import taos;
|
||||
conn = taos.connect(host = "tdengine")
|
||||
res = conn.query("show databases")
|
||||
for row in res.fetch_all_into_dict():
|
||||
print(row)
|
||||
```
|
||||
|
||||
See the results:
|
||||
|
||||
```bash
|
||||
Python 3.8.10 (default, Nov 26 2021, 20:14:08)
|
||||
[GCC 9.3.0] on linux
|
||||
Type "help", "copyright", "credits" or "license" for more information.
|
||||
>>> import taos;
|
||||
>>> conn = taos.connect(host = "tdengine")
|
||||
>>> res = conn.query("show databases")
|
||||
>>> for row in res.fetch_all_into_dict():
|
||||
... print(row)
|
||||
...
|
||||
{'name': 'log', 'created_time': datetime.datetime(2022, 1, 17, 22, 56, 2, 490000), 'ntables': 11, 'vgroups': 1, 'replica': 1, 'quorum': 1, 'days': 10, 'keep': '30', 'cache(MB)': 1, 'blocks': 3, 'minrows': 100, 'maxrows': 4096, 'wallevel': 1, 'fsync': 3000, 'comp': 2, 'cachelast': 0, 'precision': 'us', 'update': 0, 'status': 'ready'}
|
||||
```
|
||||
|
||||
### Start with specific network
|
||||
|
||||
Alternatively, you can use TDengine natively over a user-defined Docker network.
|
||||
|
||||
First, create a network for the TDengine server and the client/application.
|
||||
|
||||
```bash
|
||||
docker network create td-net
|
||||
```
|
||||
|
||||
Start a TDengine instance with the service name as its FQDN (explicitly set with `TAOS_FQDN`):
|
||||
|
||||
```bash
|
||||
docker run -d --name tdengine --network td-net \
|
||||
-e TAOS_FQDN=tdengine \
|
||||
tdengine/tdengine
|
||||
```
|
||||
|
||||
Start TDengine client in another container with the specific network:
|
||||
|
||||
```bash
|
||||
docker run --rm -it --network td-net -e TAOS_FIRST_EP=tdengine tdengine/tdengine taos
|
||||
# or
|
||||
docker run --rm -it --network td-net tdengine/tdengine taos -h tdengine
|
||||
```
|
||||
|
||||
When you build your application with Docker, you should add the TDengine client in the dockerfile. For example, based on the `ubuntu:20.04` image, install the client like this:
|
||||
|
||||
```dockerfile
|
||||
FROM ubuntu:20.04
|
||||
RUN apt-get update && apt-get install -y wget
|
||||
ENV TDENGINE_VERSION=2.4.0.0
|
||||
RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& cd TDengine-client-${TDENGINE_VERSION} \
|
||||
&& ./install_client.sh \
|
||||
&& cd ../ \
|
||||
&& rm -rf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz TDengine-client-${TDENGINE_VERSION}
|
||||
## add your application next, eg. go, build it in builder stage, copy the binary to the runtime
|
||||
#COPY --from=builder /path/to/build/app /usr/bin/
|
||||
#CMD ["app"]
|
||||
```
|
||||
|
||||
Here is a Go example app:
|
||||
|
||||
<!-- code-spell-checker:disable -->
|
||||
<!-- markdownlint-disable MD010 -->
|
||||
|
||||
```go
|
||||
/*
|
||||
* In this test program, we'll create a database and insert 4 records then select out.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"flag"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
_ "github.com/taosdata/driver-go/v2/taosSql"
|
||||
)
|
||||
|
||||
type config struct {
|
||||
hostName string
|
||||
serverPort string
|
||||
user string
|
||||
password string
|
||||
}
|
||||
|
||||
var configPara config
|
||||
var taosDriverName = "taosSql"
|
||||
var url string
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&configPara.hostName, "h", "", "The host to connect to TDengine server.")
|
||||
flag.StringVar(&configPara.serverPort, "p", "", "The TCP/IP port number to use for the connection to TDengine server.")
|
||||
flag.StringVar(&configPara.user, "u", "root", "The TDengine user name to use when connecting to the server.")
|
||||
flag.StringVar(&configPara.password, "P", "taosdata", "The password to use when connecting to the server.")
|
||||
flag.Parse()
|
||||
}
|
||||
|
||||
func printAllArgs() {
|
||||
fmt.Printf("============= args parse result: =============\n")
|
||||
fmt.Printf("hostName: %v\n", configPara.hostName)
|
||||
fmt.Printf("serverPort: %v\n", configPara.serverPort)
|
||||
fmt.Printf("usr: %v\n", configPara.user)
|
||||
fmt.Printf("password: %v\n", configPara.password)
|
||||
fmt.Printf("================================================\n")
|
||||
}
|
||||
|
||||
func main() {
|
||||
printAllArgs()
|
||||
|
||||
url = "root:taosdata@/tcp(" + configPara.hostName + ":" + configPara.serverPort + ")/"
|
||||
|
||||
taos, err := sql.Open(taosDriverName, url)
|
||||
checkErr(err, "open database error")
|
||||
defer taos.Close()
|
||||
|
||||
taos.Exec("create database if not exists test")
|
||||
taos.Exec("use test")
|
||||
taos.Exec("create table if not exists tb1 (ts timestamp, a int)")
|
||||
_, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)")
|
||||
checkErr(err, "failed to insert")
|
||||
rows, err := taos.Query("select * from tb1")
|
||||
checkErr(err, "failed to select")
|
||||
|
||||
defer rows.Close()
|
||||
for rows.Next() {
|
||||
var r struct {
|
||||
ts time.Time
|
||||
a int
|
||||
}
|
||||
err := rows.Scan(&r.ts, &r.a)
|
||||
if err != nil {
|
||||
fmt.Println("scan error:\n", err)
|
||||
return
|
||||
}
|
||||
fmt.Println(r.ts, r.a)
|
||||
}
|
||||
}
|
||||
|
||||
func checkErr(err error, prompt string) {
|
||||
if err != nil {
|
||||
fmt.Println("ERROR: %s\n", prompt)
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
<!-- markdownlint-enable MD010 -->
|
||||
<!-- code-spell-checker:enable -->
|
||||
|
||||
Full version of dockerfile could be:
|
||||
|
||||
```dockerfile
|
||||
FROM golang:1.17.6-buster as builder
|
||||
ENV TDENGINE_VERSION=2.4.0.0
|
||||
RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& cd TDengine-client-${TDENGINE_VERSION} \
|
||||
&& ./install_client.sh \
|
||||
&& cd ../ \
|
||||
&& rm -rf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz TDengine-client-${TDENGINE_VERSION}
|
||||
WORKDIR /usr/src/app/
|
||||
ENV GOPROXY="https://goproxy.io,direct"
|
||||
COPY ./main.go ./go.mod ./go.sum /usr/src/app/
|
||||
RUN go env && go mod tidy && go build
|
||||
|
||||
FROM ubuntu:20.04
|
||||
RUN apt-get update && apt-get install -y wget
|
||||
ENV TDENGINE_VERSION=2.4.0.0
|
||||
RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& cd TDengine-client-${TDENGINE_VERSION} \
|
||||
&& ./install_client.sh \
|
||||
&& cd ../ \
|
||||
&& rm -rf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz TDengine-client-${TDENGINE_VERSION}
|
||||
|
||||
## add your application next, eg. go, build it in builder stage, copy the binary to the runtime
|
||||
COPY --from=builder /usr/src/app/app /usr/bin/
|
||||
CMD ["app"]
|
||||
```
|
||||
|
||||
Suppose you have `main.go`, `go.mod`, `go.sum`, and `app.dockerfile`; build the app and run it on the `td-net` network:
|
||||
|
||||
```bash
|
||||
$ docker build -t app -f app.dockerfile .
|
||||
$ docker run --rm --network td-net app -h tdengine -p 6030
|
||||
============= args parse result: =============
|
||||
hostName: tdengine
|
||||
serverPort: 6030
|
||||
usr: root
|
||||
password: taosdata
|
||||
================================================
|
||||
2022-01-17 15:56:55.48 +0000 UTC 0
|
||||
2022-01-17 15:56:56.48 +0000 UTC 1
|
||||
2022-01-17 15:56:57.48 +0000 UTC 2
|
||||
2022-01-17 15:56:58.48 +0000 UTC 3
|
||||
2022-01-17 15:58:01.842 +0000 UTC 0
|
||||
2022-01-17 15:58:02.842 +0000 UTC 1
|
||||
2022-01-17 15:58:03.842 +0000 UTC 2
|
||||
2022-01-17 15:58:04.842 +0000 UTC 3
|
||||
2022-01-18 01:43:48.029 +0000 UTC 0
|
||||
2022-01-18 01:43:49.029 +0000 UTC 1
|
||||
2022-01-18 01:43:50.029 +0000 UTC 2
|
||||
2022-01-18 01:43:51.029 +0000 UTC 3
|
||||
```
|
||||
|
||||
Now that you are familiar with developing and testing with TDengine, let's look at some more complex cases.
|
||||
|
||||
### Start with docker-compose with multiple nodes(instances)
|
||||
|
||||
Starting a 2-replica, 2-mnode, 2-dnode, 1-arbitrator TDengine cluster with `docker-compose` is quite simple. Save the following file as `docker-compose.yml`:
|
||||
|
||||
```yaml
|
||||
version: "3"
|
||||
services:
|
||||
arbitrator:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
command: tarbitrator
|
||||
td-1:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
environment:
|
||||
TAOS_FQDN: "td-1"
|
||||
TAOS_FIRST_EP: "td-1"
|
||||
TAOS_NUM_OF_MNODES: "2"
|
||||
TAOS_REPLICA: "2"
|
||||
TAOS_ARBITRATOR: arbitrator:6042
|
||||
volumes:
|
||||
- taosdata-td1:/var/lib/taos/
|
||||
- taoslog-td1:/var/log/taos/
|
||||
td-2:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
environment:
|
||||
TAOS_FQDN: "td-2"
|
||||
TAOS_FIRST_EP: "td-1"
|
||||
TAOS_NUM_OF_MNODES: "2"
|
||||
TAOS_REPLICA: "2"
|
||||
TAOS_ARBITRATOR: arbitrator:6042
|
||||
volumes:
|
||||
- taosdata-td2:/var/lib/taos/
|
||||
- taoslog-td2:/var/log/taos/
|
||||
volumes:
|
||||
taosdata-td1:
|
||||
taoslog-td1:
|
||||
taosdata-td2:
|
||||
taoslog-td2:
|
||||
```
|
||||
|
||||
You may notice that:
|
||||
|
||||
- We use the `VERSION` environment variable to set the `tdengine` image tag in one place.
|
||||
- **`TAOS_FIRST_EP`** **MUST** be set so that the newly created instances join an existing TDengine cluster. If you want more instances, also set `TAOS_SECOND_EP` in case of HA (High Availability) concerns.
|
||||
- `TAOS_NUM_OF_MNODES` sets the number of mnodes in the cluster.
|
||||
- `TAOS_REPLICA` sets the default number of database replicas; `2` means one master and one slave copy of the data. The `replica` value must satisfy `1 <= replica <= 3` and must not exceed the number of dnodes.
|
||||
- `TAOS_ARBITRATOR` sets the arbitrator endpoint of the cluster, used for failover and leader election. Using an arbitrator is recommended in a two-node cluster.
|
||||
- Starting an arbitrator service is simple: add the command name `tarbitrator` (the binary name of the arbitrator daemon) to the docker-compose service definition with `command: tarbitrator`, and you are done (see the sketch after this list).
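As a minimal sketch of that last point (assuming the `td-net` network and the `2.4.0.0` tag used earlier in this document), the arbitrator can also be started as a plain container:

```bash
# start a standalone arbitrator; on a user-defined network it is reachable by other containers as "arbitrator:6042"
docker run -d --name arbitrator --network td-net tdengine/tdengine:2.4.0.0 tarbitrator
```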
|
||||
|
||||
Now run `docker-compose up -d` with the version specified:
|
||||
|
||||
```bash
|
||||
$ VERSION=2.4.0.0 docker-compose up -d
|
||||
Creating network "test_default" with the default driver
|
||||
Creating volume "test_taosdata-td1" with default driver
|
||||
Creating volume "test_taoslog-td1" with default driver
|
||||
Creating volume "test_taosdata-td2" with default driver
|
||||
Creating volume "test_taoslog-td2" with default driver
|
||||
Creating test_td-1_1 ... done
|
||||
Creating test_arbitrator_1 ... done
|
||||
Creating test_td-2_1 ... done
|
||||
```
|
||||
|
||||
Check the status:
|
||||
|
||||
```bash
|
||||
$ docker-compose ps
|
||||
Name Command State Ports
|
||||
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
test_arbitrator_1 /usr/bin/entrypoint.sh tar ... Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
|
||||
test_td-1_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
|
||||
test_td-2_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
|
||||
```
|
||||
|
||||
Check dnodes with taos shell:
|
||||
|
||||
```bash
|
||||
$ docker-compose exec td-1 taos -s "show dnodes"
|
||||
|
||||
Welcome to the TDengine shell from Linux, Client Version:2.4.0.0
|
||||
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
|
||||
|
||||
taos> show dnodes
|
||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
||||
======================================================================================================================================
|
||||
1 | td-1:6030 | 1 | 8 | ready | any | 2022-01-18 02:47:42.871 | |
|
||||
2 | td-2:6030 | 0 | 8 | ready | any | 2022-01-18 02:47:43.518 | |
|
||||
0 | arbitrator:6042 | 0 | 0 | ready | arb | 2022-01-18 02:47:43.633 | - |
|
||||
Query OK, 3 row(s) in set (0.000811s)
|
||||
```
|
||||
|
||||
### Start a TDengine cluster with a scaled taosadapter service
|
||||
|
||||
In the previous use case, you saw how to start other services from the TDengine image (`taosd` is the default command). There is another important service you should know about:
|
||||
|
||||
> **taosAdapter** is a companion tool of TDengine and acts as a bridge/adapter between the TDengine cluster and applications. It provides an easy-to-use and efficient way to ingest data directly from data collection agents (such as Telegraf, StatsD, collectd). It also provides InfluxDB/OpenTSDB-compatible data ingestion interfaces that allow InfluxDB/OpenTSDB applications to migrate to TDengine seamlessly.
|
||||
|
||||
`taosadapter` runs inside the `tdengine` image by default; you can disable it by setting `TAOS_DISABLE_ADAPTER=true` (a sketch follows the compose snippet below). Running `taosadapter` in a separate container works the same way as the `arbitrator` service:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
# ...
|
||||
adapter:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
command: taosadapter
|
||||
```
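If you instead want to turn the embedded adapter off, here is a minimal sketch (container name and tag are illustrative) that passes `TAOS_DISABLE_ADAPTER` on the command line:

```bash
# run taosd only; the entrypoint skips launching the embedded taosadapter
docker run -d --name tdengine-no-adapter \
  -e TAOS_DISABLE_ADAPTER=true \
  tdengine/tdengine:2.4.0.0
```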
|
||||
|
||||
`taosadapter` can be scaled with docker-compose, so that you can manage the `taosadapter` nodes easily. Here is an example that runs four `taosadapter` instances in a TDengine cluster (much like the previous use cases):
|
||||
|
||||
```yaml
|
||||
version: "3"
|
||||
|
||||
networks:
|
||||
inter:
|
||||
api:
|
||||
|
||||
services:
|
||||
arbitrator:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
command: tarbitrator
|
||||
networks:
|
||||
- inter
|
||||
td-1:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
networks:
|
||||
- inter
|
||||
environment:
|
||||
TAOS_FQDN: "td-1"
|
||||
TAOS_FIRST_EP: "td-1"
|
||||
TAOS_NUM_OF_MNODES: "2"
|
||||
TAOS_REPLICA: "2"
|
||||
TAOS_ARBITRATOR: arbitrator:6042
|
||||
volumes:
|
||||
- taosdata-td1:/var/lib/taos/
|
||||
- taoslog-td1:/var/log/taos/
|
||||
td-2:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
networks:
|
||||
- inter
|
||||
environment:
|
||||
TAOS_FQDN: "td-2"
|
||||
TAOS_FIRST_EP: "td-1"
|
||||
TAOS_NUM_OF_MNODES: "2"
|
||||
TAOS_REPLICA: "2"
|
||||
TAOS_ARBITRATOR: arbitrator:6042
|
||||
volumes:
|
||||
- taosdata-td2:/var/lib/taos/
|
||||
- taoslog-td2:/var/log/taos/
|
||||
adapter:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
command: taosadapter
|
||||
networks:
|
||||
- inter
|
||||
environment:
|
||||
TAOS_FIRST_EP: "td-1"
|
||||
TAOS_SECOND_EP: "td-2"
|
||||
deploy:
|
||||
replicas: 4
|
||||
nginx:
|
||||
image: nginx
|
||||
depends_on:
|
||||
- adapter
|
||||
networks:
|
||||
- inter
|
||||
- api
|
||||
ports:
|
||||
- 6041:6041
|
||||
- 6044:6044/udp
|
||||
command: [
|
||||
"sh",
|
||||
"-c",
|
||||
"while true;
|
||||
do curl -s http://adapter:6041/-/ping >/dev/null && break;
|
||||
done;
|
||||
printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
|
||||
> /etc/nginx/conf.d/rest.conf;
|
||||
printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
|
||||
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
|
||||
nginx -g 'daemon off;'",
|
||||
]
|
||||
volumes:
|
||||
taosdata-td1:
|
||||
taoslog-td1:
|
||||
taosdata-td2:
|
||||
taoslog-td2:
|
||||
```
|
||||
|
||||
Start the cluster:
|
||||
|
||||
```bash
|
||||
$ VERSION=2.4.0.0 docker-compose up -d
|
||||
Creating network "docker_inter" with the default driver
|
||||
Creating network "docker_api" with the default driver
|
||||
Creating volume "docker_taosdata-td1" with default driver
|
||||
Creating volume "docker_taoslog-td1" with default driver
|
||||
Creating volume "docker_taosdata-td2" with default driver
|
||||
Creating volume "docker_taoslog-td2" with default driver
|
||||
Creating docker_td-2_1 ... done
|
||||
Creating docker_arbitrator_1 ... done
|
||||
Creating docker_td-1_1 ... done
|
||||
Creating docker_adapter_1 ... done
|
||||
Creating docker_adapter_2 ... done
|
||||
Creating docker_adapter_3 ... done
|
||||
```
|
||||
|
||||
This starts a TDengine cluster with two dnodes and four taosadapter instances, exposing ports 6041/tcp and 6044/udp to the host.
|
||||
|
||||
Port `6041` is the RESTful API endpoint. You can verify that the RESTful interface provided by taosAdapter is working with the `curl` command.
|
||||
|
||||
```bash
|
||||
$ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql
|
||||
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2022-01-18 04:37:42.902",16,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1}
|
||||
```
|
||||
|
||||
If you run `curl` in a batch (here we use [hyperfine](https://github.com/sharkdp/hyperfine), a command-line benchmarking tool), the requests are balanced across the 4 adapter instances.
|
||||
|
||||
```bash
|
||||
hyperfine -m10 'curl -u root:taosdata localhost:6041/rest/sql -d "describe log.log"'
|
||||
```
|
||||
|
||||
View the logs with `docker-compose logs`:
|
||||
|
||||
```bash
|
||||
$ docker-compose logs adapter
|
||||
# some logs skipped
|
||||
adapter_2 | 01/18 04:57:44.616529 00000039 TAOS_ADAPTER info "| 200 | 162.185µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18
|
||||
adapter_1 | 01/18 04:57:44.627695 00000039 TAOS_ADAPTER info "| 200 | 145.485µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=17
|
||||
adapter_3 | 01/18 04:57:44.639165 00000040 TAOS_ADAPTER info "| 200 | 146.913µs | 172.21.0.9 | POST | /rest/sql " sessionID=17 model=web
|
||||
adapter_4 | 01/18 04:57:44.650829 00000039 TAOS_ADAPTER info "| 200 | 153.201µs | 172.21.0.9 | POST | /rest/sql " sessionID=17 model=web
|
||||
adapter_2 | 01/18 04:57:44.662422 00000039 TAOS_ADAPTER info "| 200 | 211.393µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=19
|
||||
adapter_1 | 01/18 04:57:44.673426 00000039 TAOS_ADAPTER info "| 200 | 154.714µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18
|
||||
adapter_3 | 01/18 04:57:44.684788 00000040 TAOS_ADAPTER info "| 200 | 131.876µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18
|
||||
adapter_4 | 01/18 04:57:44.696261 00000039 TAOS_ADAPTER info "| 200 | 162.173µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18
|
||||
adapter_2 | 01/18 04:57:44.707414 00000039 TAOS_ADAPTER info "| 200 | 164.419µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20
|
||||
adapter_1 | 01/18 04:57:44.720842 00000039 TAOS_ADAPTER info "| 200 | 179.374µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=19
|
||||
adapter_3 | 01/18 04:57:44.732184 00000040 TAOS_ADAPTER info "| 200 | 141.174µs | 172.21.0.9 | POST | /rest/sql " sessionID=19 model=web
|
||||
adapter_4 | 01/18 04:57:44.744024 00000039 TAOS_ADAPTER info "| 200 | 159.774µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=19
|
||||
adapter_2 | 01/18 04:57:44.773732 00000039 TAOS_ADAPTER info "| 200 | 178.993µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=21
|
||||
adapter_1 | 01/18 04:57:44.796518 00000039 TAOS_ADAPTER info "| 200 | 238.24µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20
|
||||
adapter_3 | 01/18 04:57:44.810744 00000040 TAOS_ADAPTER info "| 200 | 176.133µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20
|
||||
adapter_4 | 01/18 04:57:44.826395 00000039 TAOS_ADAPTER info "| 200 | 149.215µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20
|
||||
```
|
||||
|
||||
Port `6044/udp` is the [StatsD](https://github.com/statsd/statsd)-compatible port. You can verify this feature with the `nc` command (usually provided by the `netcat` package).
|
||||
|
||||
```bash
|
||||
echo "foo:1|c" | nc -u -w0 127.0.0.1 6044
|
||||
```
|
||||
|
||||
Check the result in `taos` shell with `docker-compose exec`:
|
||||
|
||||
```bash
|
||||
$ docker-compose exec td-1 taos
|
||||
|
||||
Welcome to the TDengine shell from Linux, Client Version:2.4.0.0
|
||||
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
|
||||
|
||||
taos> show databases;
|
||||
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
|
||||
====================================================================================================================================================================================================================================================================================
|
||||
log | 2022-01-18 04:37:42.902 | 17 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready |
|
||||
statsd | 2022-01-18 04:45:02.563 | 1 | 1 | 2 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
|
||||
Query OK, 2 row(s) in set (0.001838s)
|
||||
|
||||
taos> select * from statsd.foo;
|
||||
ts | value | metric_type |
|
||||
=======================================================================================
|
||||
2022-01-18 04:45:02.563422822 | 1 | counter |
|
||||
Query OK, 1 row(s) in set (0.003854s)
|
||||
```
|
||||
|
||||
Use `docker-compose up -d --scale adapter=1` to reduce the number of adapter instances to 1, as sketched below.
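A minimal sketch, assuming the compose project above and the same `VERSION`:

```bash
# scale the adapter service down to a single instance
VERSION=2.4.0.0 docker-compose up -d --scale adapter=1
# confirm how many adapter containers remain
docker-compose ps adapter
```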
|
||||
|
||||
### Deploy a TDengine cluster in Docker Swarm with `docker-compose.yml`
|
||||
|
||||
If you use Docker Swarm mode, it will automatically schedule the arbitrator/taosd/taosadapter services onto different hosts. If you have no experience with Kubernetes, this is the most convenient way to scale out a TDengine cluster across multiple hosts/servers.
|
||||
|
||||
Use the `docker-compose.yml` file from the previous use case and deploy it with `docker stack deploy`:
|
||||
|
||||
```bash
|
||||
$ VERSION=2.4.0 docker stack deploy -c docker-compose.yml taos
|
||||
Creating network taos_inter
|
||||
Creating network taos_api
|
||||
Creating service taos_arbitrator
|
||||
Creating service taos_td-1
|
||||
Creating service taos_td-2
|
||||
Creating service taos_adapter
|
||||
Creating service taos_nginx
|
||||
```
|
||||
|
||||
Now you've created a TDengine cluster with multiple host servers.
|
||||
|
||||
Use `docker service` or `docker stack` to manage the cluster:
|
||||
|
||||
<!-- code-spell-checker:disable -->
|
||||
|
||||
```bash
|
||||
$ docker stack ps taos
|
||||
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
|
||||
79ni8temw59n taos_nginx.1 nginx:latest TM1701 Running Running about a minute ago
|
||||
3e94u72msiyg taos_adapter.1 tdengine/tdengine:2.4.0 TM1702 Running Running 56 seconds ago
|
||||
100amjkwzsc6 taos_td-2.1 tdengine/tdengine:2.4.0 TM1703 Running Running about a minute ago
|
||||
pkjehr2vvaaa taos_td-1.1 tdengine/tdengine:2.4.0 TM1704 Running Running 2 minutes ago
|
||||
tpzvgpsr1qkt taos_arbitrator.1 tdengine/tdengine:2.4.0 TM1705 Running Running 2 minutes ago
|
||||
rvss3g5yg6fa taos_adapter.2 tdengine/tdengine:2.4.0 TM1706 Running Running 56 seconds ago
|
||||
i2augxamfllf taos_adapter.3 tdengine/tdengine:2.4.0 TM1707 Running Running 56 seconds ago
|
||||
lmjyhzccpvpg taos_adapter.4 tdengine/tdengine:2.4.0 TM1708 Running Running 56 seconds ago
|
||||
$ docker service ls
|
||||
ID NAME MODE REPLICAS IMAGE PORTS
|
||||
561t4lu6nfw6 taos_adapter replicated 4/4 tdengine/tdengine:2.4.0
|
||||
3hk5ct3q90sm taos_arbitrator replicated 1/1 tdengine/tdengine:2.4.0
|
||||
d8qr52envqzu taos_nginx replicated 1/1 nginx:latest *:6041->6041/tcp, *:6044->6044/udp
|
||||
2isssfvjk747 taos_td-1 replicated 1/1 tdengine/tdengine:2.4.0
|
||||
9pzw7u02ichv taos_td-2 replicated 1/1 tdengine/tdengine:2.4.0
|
||||
```
|
||||
|
||||
<!-- code-spell-checker:enable -->
|
||||
|
||||
It shows that there are two dnodes, one arbitrator, four taosadapter instances and one nginx reverse-proxy service in this cluster.
|
||||
|
||||
You can scale the taosadapter replicas down to `1` with `docker service scale`:
|
||||
|
||||
```bash
|
||||
$ docker service scale taos_adapter=1
|
||||
taos_adapter scaled to 1
|
||||
overall progress: 1 out of 1 tasks
|
||||
1/1: running [==================================================>]
|
||||
verify: Service converged
|
||||
|
||||
$ docker service ls -f name=taos_adapter
|
||||
ID NAME MODE REPLICAS IMAGE PORTS
|
||||
561t4lu6nfw6 taos_adapter replicated 1/1 tdengine/tdengine:2.4.0
|
||||
```
|
||||
|
||||
Now only one taosadapter instance remains in the cluster.
|
||||
|
||||
When you want to remove the cluster, just type:
|
||||
|
||||
```bash
|
||||
docker stack rm taos
|
||||
```
|
||||
|
||||
### Environment Variables
|
||||
|
||||
When you start the `tdengine` image, you can adjust the configuration of TDengine by passing environment variables on the `docker run` command line or in the docker-compose file. You can use any environment variable that maps to a `taosd` or `taosadapter` configuration option.
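For example, here is a minimal sketch (container name, FQDN and tag are illustrative) that passes TDengine options as environment variables to `docker run`:

```bash
# each TAOS_* variable is translated into the corresponding taos.cfg entry by the image entrypoint
docker run -d --name tdengine \
  -e TAOS_FQDN=tdengine \
  -e TAOS_FIRST_EP=tdengine \
  -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp \
  tdengine/tdengine:2.4.0.0
```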
|
|
@ -0,0 +1,83 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
# for TZ awareness
|
||||
if [ "$TZ" != "" ]; then
|
||||
ln -sf /usr/share/zoneinfo/$TZ /etc/localtime
|
||||
echo $TZ >/etc/timezone
|
||||
fi
|
||||
|
||||
# option to disable taosadapter, default is no
|
||||
DISABLE_ADAPTER=${TAOS_DISABLE_ADAPTER:-0}
|
||||
unset TAOS_DISABLE_ADAPTER
|
||||
|
||||
# to get mnodeEpSet from data dir
|
||||
DATA_DIR=${TAOS_DATA_DIR:-/var/lib/taos}
|
||||
|
||||
# append env to custom taos.cfg
|
||||
CFG_DIR=/tmp/taos
|
||||
CFG_FILE=$CFG_DIR/taos.cfg
|
||||
|
||||
mkdir -p $CFG_DIR >/dev/null 2>&1
|
||||
|
||||
[ -f /etc/taos/taos.cfg ] && cat /etc/taos/taos.cfg | grep -E -v "^#|^\s*$" >$CFG_FILE
|
||||
env-to-cfg >>$CFG_FILE
|
||||
|
||||
FQDN=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep fqdn | tail -n1 | sed -E 's/.*fqdn\s+//')
|
||||
|
||||
# ensure the fqdn is resolved as localhost
|
||||
grep "$FQDN" /etc/hosts >/dev/null || echo "127.0.0.1 $FQDN" >>/etc/hosts
|
||||
|
||||
# parse first ep host and port
|
||||
FIRST_EP_HOST=${TAOS_FIRST_EP%:*}
|
||||
FIRST_EP_PORT=${TAOS_FIRST_EP#*:}
|
||||
|
||||
# in case of custom server port
|
||||
SERVER_PORT=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep serverPort | tail -n1 | sed -E 's/.*serverPort\s+//')
|
||||
SERVER_PORT=${SERVER_PORT:-6030}
|
||||
|
||||
# for other binaries like interpreters
|
||||
if echo $1 | grep -E "taosd$" - >/dev/null; then
|
||||
true # will run taosd
|
||||
else
|
||||
cp -f $CFG_FILE /etc/taos/taos.cfg || true
|
||||
$@
|
||||
exit $?
|
||||
fi
|
||||
|
||||
set +e
|
||||
ulimit -c unlimited
|
||||
# set core files pattern, maybe failed
|
||||
sysctl -w kernel.core_pattern=/corefile/core-$FQDN-%e-%p >/dev/null 2>&1
|
||||
set -e
|
||||
|
||||
if [ "$DISABLE_ADAPTER" = "0" ]; then
|
||||
which taosadapter >/dev/null && taosadapter &
|
||||
# wait for 6041 port ready
|
||||
for _ in $(seq 1 20); do
|
||||
nc -z localhost 6041 && break
|
||||
sleep 0.5
|
||||
done
|
||||
fi
|
||||
|
||||
# if the mnode ep set already exists, or this host is the first ep, just start (no need to join a cluster).
|
||||
if [ -f "$DATA_DIR/dnode/mnodeEpSet.json" ] ||
|
||||
[ "$TAOS_FQDN" = "$FIRST_EP_HOST" ]; then
|
||||
$@ -c $CFG_DIR
|
||||
# other nodes wait until the first ep is ready before starting.
|
||||
else
|
||||
if [ "$TAOS_FIRST_EP" = "" ]; then
|
||||
echo "run TDengine with single node."
|
||||
$@ -c $CFG_DIR
|
||||
exit $?
|
||||
fi
|
||||
while true; do
|
||||
es=0
|
||||
taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -n startup >/dev/null || es=$?
|
||||
if [ "$es" -eq 0 ]; then
|
||||
taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -s "create dnode \"$FQDN:$SERVER_PORT\";"
|
||||
break
|
||||
fi
|
||||
sleep 1s
|
||||
done
|
||||
$@ -c $CFG_DIR
|
||||
fi
|
|
@ -0,0 +1,13 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
self=$0
|
||||
|
||||
snake_to_camel_case() {
|
||||
echo $1 | awk -F _ '{printf "%s", $1; for(i=2; i<=NF; i++) printf "%s", toupper(substr($i,1,1)) substr($i,2); print"";}'
|
||||
}
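# e.g. snake_to_camel_case "first_ep" prints "firstEp"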
|
||||
|
||||
if echo $1 | grep -E "^$" - >/dev/null; then
|
||||
export |grep -E 'TAOS_.*' -o| sed 's/TAOS_//' |tr A-Z a-z | awk -F"=" '{print "name=$(""'$self' " $1"); echo $name "$2}' |sh
|
||||
else
|
||||
snake_to_camel_case $1
|
||||
fi
|
|
@ -0,0 +1,77 @@
|
|||
version: "3"
|
||||
|
||||
networks:
|
||||
inter:
|
||||
api:
|
||||
|
||||
services:
|
||||
arbitrator:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
command: tarbitrator
|
||||
networks:
|
||||
- inter
|
||||
td-1:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
networks:
|
||||
- inter
|
||||
environment:
|
||||
TAOS_FQDN: "td-1"
|
||||
TAOS_FIRST_EP: "td-1"
|
||||
TAOS_NUM_OF_MNODES: "2"
|
||||
TAOS_REPLICA: "2"
|
||||
TAOS_ARBITRATOR: arbitrator:6042
|
||||
volumes:
|
||||
- taosdata-td1:/var/lib/taos/
|
||||
- taoslog-td1:/var/log/taos/
|
||||
td-2:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
networks:
|
||||
- inter
|
||||
environment:
|
||||
TAOS_FQDN: "td-2"
|
||||
TAOS_FIRST_EP: "td-1"
|
||||
TAOS_NUM_OF_MNODES: "2"
|
||||
TAOS_REPLICA: "2"
|
||||
TAOS_ARBITRATOR: arbitrator:6042
|
||||
volumes:
|
||||
- taosdata-td2:/var/lib/taos/
|
||||
- taoslog-td2:/var/log/taos/
|
||||
adapter:
|
||||
image: tdengine/tdengine:$VERSION
|
||||
command: taosadapter
|
||||
networks:
|
||||
- inter
|
||||
environment:
|
||||
TAOS_FIRST_EP: "td-1"
|
||||
TAOS_SECOND_EP: "td-2"
|
||||
deploy:
|
||||
replicas: 4
|
||||
update_config:
|
||||
parallelism: 4
|
||||
nginx:
|
||||
image: nginx
|
||||
depends_on:
|
||||
- adapter
|
||||
networks:
|
||||
- inter
|
||||
- api
|
||||
ports:
|
||||
- 6041:6041
|
||||
- 6044:6044/udp
|
||||
command: [
|
||||
"sh",
|
||||
"-c",
|
||||
"while true;
|
||||
do curl -s http://adapter:6041/-/ping >/dev/null && break;
|
||||
done;
|
||||
printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
|
||||
> /etc/nginx/conf.d/rest.conf;
|
||||
printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
|
||||
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
|
||||
nginx -g 'daemon off;'",
|
||||
]
|
||||
volumes:
|
||||
taosdata-td1:
|
||||
taoslog-td1:
|
||||
taosdata-td2:
|
||||
taoslog-td2:
|
|
@ -0,0 +1,82 @@
|
|||
#!/bin/bash
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# dockerbuild.sh
|
||||
# -n [version number]
|
||||
# -p [xxxx]
|
||||
# -V [stable | beta]
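# example invocation (illustrative values): ./dockerbuild.sh -n 2.4.0.0 -p <docker hub password> -V stable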
|
||||
|
||||
# set parameters by default value
|
||||
version=""
|
||||
passWord=""
|
||||
verType=""
|
||||
|
||||
while getopts "hn:p:V:" arg
|
||||
do
|
||||
case $arg in
|
||||
n)
|
||||
#echo "version=$OPTARG"
|
||||
version=$(echo $OPTARG)
|
||||
;;
|
||||
p)
|
||||
#echo "passWord=$OPTARG"
|
||||
passWord=$(echo $OPTARG)
|
||||
;;
|
||||
V)
|
||||
#echo "verType=$OPTARG"
|
||||
verType=$(echo $OPTARG)
|
||||
;;
|
||||
h)
|
||||
echo "Usage: `basename $0` -n [version number] "
|
||||
echo " -p [password for docker hub] "
|
||||
exit 0
|
||||
;;
|
||||
?) #unknown option
|
||||
echo "unknown argument"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
echo "version=${version}"
|
||||
|
||||
#docker manifest rm tdengine/tdengine
|
||||
#docker manifest rm tdengine/tdengine:${version}
|
||||
if [ "$verType" == "beta" ]; then
|
||||
docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
|
||||
docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
|
||||
docker manifest rm tdengine/tdengine-beta:${version}
|
||||
docker manifest rm tdengine/tdengine-beta:latest
|
||||
docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
|
||||
docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
|
||||
docker manifest inspect tdengine/tdengine:latest
|
||||
docker manifest inspect tdengine/tdengine:${version}
|
||||
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
|
||||
docker manifest push tdengine/tdengine-beta:${version}
|
||||
docker manifest push tdengine/tdengine-beta:latest
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
|
||||
docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
|
||||
docker manifest rm tdengine/tdengine:latest
|
||||
docker manifest rm tdengine/tdengine:${version}
|
||||
docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
|
||||
docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
|
||||
docker manifest inspect tdengine/tdengine:latest
|
||||
docker manifest inspect tdengine/tdengine:${version}
|
||||
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
|
||||
docker manifest push tdengine/tdengine:${version}
|
||||
docker manifest push tdengine/tdengine:latest
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# docker manifest create -a tdengine/${dockername}:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
|
||||
# docker manifest create -a tdengine/${dockername}:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
|
||||
|
||||
# docker login -u tdengine -p ${passWord} #replace the docker registry username and password
|
||||
|
||||
# docker manifest push tdengine/tdengine:latest
|
||||
|
||||
# # how set latest version ???
|
|
@ -0,0 +1,174 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# dockerbuild.sh
|
||||
# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
|
||||
# -n [version number]
|
||||
# -p [password for docker hub]
|
||||
# -V [stable | beta]
|
||||
# -f [pkg file]
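# example invocation (illustrative values): ./dockerbuild.sh -c amd64 -n 2.4.0.0 -p <docker hub password> -V stable -f TDengine-server-2.4.0.0-Linux-amd64.tar.gz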
|
||||
|
||||
# set parameters by default value
|
||||
cpuType=""
|
||||
cpuTypeAlias=""
|
||||
version=""
|
||||
passWord=""
|
||||
pkgFile=""
|
||||
verType="stable"
|
||||
dockerLatest="n"
|
||||
|
||||
while getopts "hc:n:p:f:V:a:b:" arg
|
||||
do
|
||||
case $arg in
|
||||
c)
|
||||
#echo "cpuType=$OPTARG"
|
||||
cpuType=$(echo $OPTARG)
|
||||
;;
|
||||
n)
|
||||
#echo "version=$OPTARG"
|
||||
version=$(echo $OPTARG)
|
||||
;;
|
||||
p)
|
||||
#echo "passWord=$OPTARG"
|
||||
passWord=$(echo $OPTARG)
|
||||
;;
|
||||
f)
|
||||
#echo "pkgFile=$OPTARG"
|
||||
pkgFile=$(echo $OPTARG)
|
||||
;;
|
||||
b)
|
||||
#echo "branchName=$OPTARG"
|
||||
branchName=$(echo $OPTARG)
|
||||
;;
|
||||
V)
|
||||
#echo "verType=$OPTARG"
|
||||
verType=$(echo $OPTARG)
|
||||
;;
|
||||
a)
|
||||
#echo "dockerLatest=$OPTARG"
|
||||
dockerLatest=$(echo $OPTARG)
|
||||
;;
|
||||
h)
|
||||
echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
|
||||
echo " -n [version number] "
|
||||
echo " -p [password for docker hub] "
|
||||
echo " -V [stable | beta] "
|
||||
echo " -f [pkg file] "
|
||||
echo " -a [y | n ] "
|
||||
exit 0
|
||||
;;
|
||||
?) #unknown option
|
||||
echo "unknown argument"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
|
||||
# Check_verison()
|
||||
# {
|
||||
# }
|
||||
|
||||
|
||||
if [ "$verType" == "beta" ]; then
|
||||
dockername=${cpuType}-${verType}
|
||||
dirName=${pkgFile%-beta*}
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
dockername=${cpuType}
|
||||
dirName=${pkgFile%-Linux*}
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
echo "cpuType=${cpuType} version=${version} pkgFile=${pkgFile} verType=${verType} "
|
||||
echo "$(pwd)"
|
||||
echo "====NOTES: ${pkgFile} must be in the same directory as dockerbuild.sh===="
|
||||
|
||||
scriptDir=$(dirname $(readlink -f $0))
|
||||
comunityArchiveDir=/nas/TDengine/v$version/community # community version's package directory
|
||||
communityDir=${scriptDir}/../../../community
|
||||
DockerfilePath=${communityDir}/packaging/docker/
|
||||
Dockerfile=${communityDir}/packaging/docker/Dockerfile
|
||||
cd ${scriptDir}
|
||||
cp -f ${comunityArchiveDir}/${pkgFile} .
|
||||
|
||||
echo "dirName=${dirName}"
|
||||
|
||||
if [[ "${cpuType}" == "x64" ]] || [[ "${cpuType}" == "amd64" ]]; then
|
||||
cpuTypeAlias="amd64"
|
||||
elif [[ "${cpuType}" == "aarch64" ]]; then
|
||||
cpuTypeAlias="arm64"
|
||||
elif [[ "${cpuType}" == "aarch32" ]]; then
|
||||
cpuTypeAlias="armhf"
|
||||
else
|
||||
echo "Unknown cpuType: ${cpuType}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
docker build --rm -f "${Dockerfile}" --network=host -t tdengine/tdengine-${dockername}:${version} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName} --build-arg cpuType=${cpuTypeAlias}
|
||||
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
|
||||
docker push tdengine/tdengine-${dockername}:${version}
|
||||
|
||||
if [ -n "$(docker ps -aq)" ] ;then
|
||||
echo "delete docker process"
|
||||
docker stop $(docker ps -aq)
|
||||
docker rm $(docker ps -aq)
|
||||
fi
|
||||
|
||||
if [ -n "$(pidof taosd)" ] ;then
|
||||
echo "kill taosd "
|
||||
kill -9 $(pidof taosd)
|
||||
fi
|
||||
|
||||
if [ -n "$(pidof power)" ] ;then
|
||||
echo "kill power "
|
||||
kill -9 $(pidof power)
|
||||
fi
|
||||
|
||||
|
||||
echo ">>>>>>>>>>>>> check whether tdengine/tdengine-${dockername}:${version} has been published"
|
||||
docker run -d --name doctest -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine-${dockername}:${version}
|
||||
sleep 2
|
||||
curl -u root:taosdata -d 'show variables;' 127.0.0.1:6041/rest/sql > temp1.data
|
||||
data_version=$( cat temp1.data |jq .data| jq '.[]' |grep "version" -A 2 -B 1 | jq ".[1]")
|
||||
echo "${data_version}"
|
||||
if [ "${data_version}" == "\"${version}\"" ] ; then
|
||||
echo "docker version is right "
|
||||
else
|
||||
echo "docker version is wrong "
|
||||
exit 1
|
||||
fi
|
||||
rm -rf temp1.data
|
||||
|
||||
# set this version to latest version
|
||||
if [ ${dockerLatest} == 'y' ] ;then
|
||||
docker tag tdengine/tdengine-${dockername}:${version} tdengine/tdengine-${dockername}:latest
|
||||
docker push tdengine/tdengine-${dockername}:latest
|
||||
echo ">>>>>>>>>>>>> check whether tdengine/tdengine-${dockername}:latest has been published correctly"
|
||||
docker run -d --name doctestla -p 7030-7049:6030-6049 -p 7030-7049:6030-6049/udp tdengine/tdengine-${dockername}:latest
|
||||
sleep 2
|
||||
curl -u root:taosdata -d 'show variables;' 127.0.0.1:7041/rest/sql > temp2.data
|
||||
version_latest=` cat temp2.data |jq .data| jq '.[]' |grep "version" -A 2 -B 1 | jq ".[1]" `
|
||||
echo "${version_latest}"
|
||||
if [ "${version_latest}" == "\"${version}\"" ] ; then
|
||||
echo "docker version is right "
|
||||
else
|
||||
echo "docker version is wrong "
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
rm -rf temp2.data
|
||||
|
||||
if [ -n "$(docker ps -aq)" ] ;then
|
||||
echo "delte docker process"
|
||||
docker stop $(docker ps -aq)
|
||||
docker rm $(docker ps -aq)
|
||||
fi
|
||||
|
||||
cd ${scriptDir}
|
||||
rm -f ${pkgFile}
|
|
@ -0,0 +1,56 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# dockerbuild.sh
|
||||
# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
|
||||
# -n [version number]
|
||||
# -p [password for docker hub]
|
||||
|
||||
# set parameters by default value
|
||||
cpuType=aarch64
|
||||
verNumber=""
|
||||
passWord=""
|
||||
|
||||
while getopts "hc:n:p:f:" arg
|
||||
do
|
||||
case $arg in
|
||||
c)
|
||||
#echo "cpuType=$OPTARG"
|
||||
cpuType=$(echo $OPTARG)
|
||||
;;
|
||||
n)
|
||||
#echo "verNumber=$OPTARG"
|
||||
verNumber=$(echo $OPTARG)
|
||||
;;
|
||||
p)
|
||||
#echo "passWord=$OPTARG"
|
||||
passWord=$(echo $OPTARG)
|
||||
;;
|
||||
h)
|
||||
echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
|
||||
echo " -n [version number] "
|
||||
echo " -p [password for docker hub] "
|
||||
exit 0
|
||||
;;
|
||||
?) #unknown option
|
||||
echo "unknown argument"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
pkgFile=TDengine-server-${verNumber}-Linux-${cpuType}.tar.gz
|
||||
|
||||
echo "cpuType=${cpuType} verNumber=${verNumber} pkgFile=${pkgFile} "
|
||||
|
||||
scriptDir=`pwd`
|
||||
pkgDir=$scriptDir/../../release/
|
||||
|
||||
cp -f ${pkgDir}/${pkgFile} .
|
||||
|
||||
./dockerbuild.sh -c ${cpuType} -f ${pkgFile} -n ${verNumber} -p ${passWord}
|
||||
|
||||
rm -f ${pkgFile}
|
|
@ -0,0 +1,62 @@
|
|||
@echo off
|
||||
|
||||
set internal_dir=%~dp0\..\..\
|
||||
set community_dir=%~dp0\..
|
||||
cd %community_dir%
|
||||
git checkout -- .
|
||||
cd %community_dir%\packaging
|
||||
|
||||
:: %1 name %2 version
|
||||
if !%1==! GOTO USAGE
|
||||
if !%2==! GOTO USAGE
|
||||
if %1 == taos GOTO TAOS
|
||||
if %1 == power GOTO POWER
|
||||
if %1 == tq GOTO TQ
|
||||
if %1 == pro GOTO PRO
|
||||
if %1 == kh GOTO KH
|
||||
if %1 == jh GOTO JH
|
||||
GOTO USAGE
|
||||
|
||||
:TAOS
|
||||
goto RELEASE
|
||||
|
||||
:POWER
|
||||
call sed_power.bat %community_dir%
|
||||
goto RELEASE
|
||||
|
||||
:TQ
|
||||
call sed_tq.bat %community_dir%
|
||||
goto RELEASE
|
||||
|
||||
:PRO
|
||||
call sed_pro.bat %community_dir%
|
||||
goto RELEASE
|
||||
|
||||
:KH
|
||||
call sed_kh.bat %community_dir%
|
||||
goto RELEASE
|
||||
|
||||
:JH
|
||||
call sed_jh.bat %community_dir%
|
||||
goto RELEASE
|
||||
|
||||
:RELEASE
|
||||
echo release windows-client-64 for %1, version: %2
|
||||
if not exist %internal_dir%\debug\ver-%2-64bit-%1 (
|
||||
md %internal_dir%\debug\ver-%2-64bit-%1
|
||||
) else (
|
||||
rd /S /Q %internal_dir%\debug\ver-%2-64bit-%1
|
||||
md %internal_dir%\debug\ver-%2-64bit-%1
|
||||
)
|
||||
cd %internal_dir%\debug\ver-%2-64bit-%1
|
||||
call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64
|
||||
cmake ../../ -G "NMake Makefiles" -DVERNUMBER=%2 -DCPUTYPE=x64
|
||||
set CL=/MP4
|
||||
nmake install
|
||||
goto EXIT0
|
||||
|
||||
:USAGE
|
||||
echo Usage: release.bat $productName $version
|
||||
goto EXIT0
|
||||
|
||||
:EXIT0
|
|
@ -21,17 +21,17 @@ echo "script_dir: ${script_dir}"
|
|||
echo "top_dir: ${top_dir}"
|
||||
|
||||
cd ${top_dir}
|
||||
git checkout -- .
|
||||
git checkout 3.0
|
||||
git pull || :
|
||||
# git checkout -- .
|
||||
# git checkout 3.0
|
||||
# git pull || :
|
||||
|
||||
echo "curr_dir: ${curr_dir}"
|
||||
|
||||
# 2. cmake executable file
|
||||
compile_dir="${top_dir}/debug"
|
||||
if [ -d ${compile_dir} ]; then
|
||||
rm -rf ${compile_dir}
|
||||
fi
|
||||
# if [ -d ${compile_dir} ]; then
|
||||
# rm -rf ${compile_dir}
|
||||
# fi
|
||||
|
||||
mkdir -p ${compile_dir}
|
||||
|
||||
|
@ -56,19 +56,18 @@ mkdir -p ${install_dir}/bin
|
|||
mkdir -p ${install_dir}/lib
|
||||
mkdir -p ${install_dir}/inc
|
||||
|
||||
install_files="${script_dir}/install.sh"
|
||||
chmod a+x ${script_dir}/install.sh || :
|
||||
install_files="${script_dir}/tools/install.sh"
|
||||
chmod a+x ${script_dir}/tools/install.sh || :
|
||||
cp ${install_files} ${install_dir}
|
||||
|
||||
header_files="${top_dir}/include/client/taos.h ${top_dir}/include/util/taoserror.h"
|
||||
cp ${header_files} ${install_dir}/inc
|
||||
|
||||
bin_files="${compile_dir}/source/dnode/mgmt/taosd ${compile_dir}/tools/shell/taos ${compile_dir}/tests/test/c/create_table ${compile_dir}/tests/test/c/tmq_sim ${script_dir}/remove.sh ${compile_dir}/build/bin/taosBenchmark ${compile_dir}/build/bin/taosdump"
|
||||
bin_files="${compile_dir}/build/bin/taosd ${compile_dir}/build/bin/taos ${compile_dir}/build/bin/create_table ${compile_dir}/build/bin/tmq_sim ${script_dir}/tools/remove.sh ${compile_dir}/build/bin/taosBenchmark ${compile_dir}/build/bin/taosdump"
|
||||
cp -rf ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
|
||||
|
||||
cp -rf ${compile_dir}/source/client/libtaos.so ${install_dir}/lib/
|
||||
cp -rf ${compile_dir}/source/libs/tdb/libtdb.so ${install_dir}/lib/
|
||||
cp -rf ${compile_dir}/build/lib/libavro* ${install_dir}/lib/ > /dev/null || echo -e "failed to copy avro libraries"
|
||||
cp ${compile_dir}/build/lib/libtaos.so ${install_dir}/lib/
|
||||
cp ${compile_dir}/build/lib/libavro* ${install_dir}/lib/ > /dev/null || echo -e "failed to copy avro libraries"
|
||||
cp -rf ${compile_dir}/build/lib/pkgconfig ${install_dir}/lib/ > /dev/null || echo -e "failed to copy pkgconfig directory"
|
||||
|
||||
|
||||
|
|
|
@ -0,0 +1,87 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Generate rpm package for centos
|
||||
|
||||
set -e
|
||||
# set -x
|
||||
|
||||
#curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
output_dir=$2
|
||||
tdengine_ver=$3
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
pkg_dir="${top_dir}/rpmworkroom"
|
||||
spec_file="${script_dir}/tdengine.spec"
|
||||
|
||||
#echo "curr_dir: ${curr_dir}"
|
||||
#echo "top_dir: ${top_dir}"
|
||||
#echo "script_dir: ${script_dir}"
|
||||
echo "compile_dir: ${compile_dir}"
|
||||
echo "pkg_dir: ${pkg_dir}"
|
||||
echo "spec_file: ${spec_file}"
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
function cp_rpm_package() {
|
||||
local cur_dir
|
||||
cd $1
|
||||
cur_dir=$(pwd)
|
||||
|
||||
for dirlist in "$(ls ${cur_dir})"; do
|
||||
if test -d ${dirlist}; then
|
||||
cd ${dirlist}
|
||||
cp_rpm_package ${cur_dir}/${dirlist}
|
||||
cd ..
|
||||
fi
|
||||
if test -e ${dirlist}; then
|
||||
cp ${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
if [ -d ${pkg_dir} ]; then
|
||||
${csudo}rm -rf ${pkg_dir}
|
||||
fi
|
||||
${csudo}mkdir -p ${pkg_dir}
|
||||
cd ${pkg_dir}
|
||||
|
||||
${csudo}mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS
|
||||
|
||||
${csudo}rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file}
|
||||
|
||||
# copy rpm package to output_dir, and modify package name, then clean temp dir
|
||||
#${csudo}cp -rf RPMS/* ${output_dir}
|
||||
cp_rpm_package ${pkg_dir}/RPMS
|
||||
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
rpmname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
|
||||
elif [ "$verMode" == "edge" ]; then
|
||||
rpmname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType}
|
||||
else
|
||||
echo "unknow verMode, nor cluster or edge"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$verType" == "beta" ]; then
|
||||
rpmname="TDengine-server-"${tdengine_ver}-${verType}-${osType}-${cpuType}".rpm"
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
rpmname=${rpmname}".rpm"
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/${rpmname}
|
||||
|
||||
cd ..
|
||||
${csudo}rm -rf ${pkg_dir}
|
|
@ -0,0 +1,145 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# taosd This shell script takes care of starting and stopping TDengine.
|
||||
#
|
||||
# chkconfig: 2345 99 01
|
||||
# description: TDengine is a distributed, scalable, high-performance Time Series Database
|
||||
# (TSDB). More than just a pure database, TDengine also provides the ability
|
||||
# to do stream computing, aggregation etc.
|
||||
#
|
||||
#
|
||||
### BEGIN INIT INFO
|
||||
# Provides: taosd
|
||||
# Required-Start: $network $local_fs $remote_fs
|
||||
# Required-Stop: $network $local_fs $remote_fs
|
||||
# Short-Description: start and stop taosd
|
||||
# Description: TDengine is a distributed, scalable, high-performance Time Series Database
|
||||
# (TSDB). More than just a pure database, TDengine also provides the ability
|
||||
# to do stream computing, aggregation etc.
|
||||
### END INIT INFO
|
||||
|
||||
# Source init functions
|
||||
. /etc/init.d/functions
|
||||
|
||||
# Maximum number of open files
|
||||
MAX_OPEN_FILES=65535
|
||||
|
||||
# Default program options
|
||||
NAME=taosd
|
||||
PROG=/usr/local/taos/bin/taosd
|
||||
USER=root
|
||||
GROUP=root
|
||||
|
||||
# Default directories
|
||||
LOCK_DIR=/var/lock/subsys
|
||||
PID_DIR=/var/run/$NAME
|
||||
|
||||
# Set file names
|
||||
LOCK_FILE=$LOCK_DIR/$NAME
|
||||
PID_FILE=$PID_DIR/$NAME.pid
|
||||
|
||||
[ -e $PID_DIR ] || mkdir -p $PID_DIR
|
||||
|
||||
PROG_OPTS=""
|
||||
|
||||
start() {
|
||||
echo -n "Starting ${NAME}: "
|
||||
# check identity
|
||||
curid="`id -u -n`"
|
||||
if [ "$curid" != root ] && [ "$curid" != "$USER" ] ; then
|
||||
echo "Must be run as root or $USER, but was run as $curid"
|
||||
return 1
|
||||
fi
|
||||
# Sets the maximum number of open file descriptors allowed.
|
||||
ulimit -n $MAX_OPEN_FILES
|
||||
curulimit="`ulimit -n`"
|
||||
if [ "$curulimit" -lt $MAX_OPEN_FILES ] ; then
|
||||
echo "'ulimit -n' must be greater than or equal to $MAX_OPEN_FILES, is $curulimit"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ "`id -u -n`" == root ] ; then
|
||||
# Changes the owner of the lock, and the pid files to allow
|
||||
# non-root OpenTSDB daemons to run /usr/share/opentsdb/bin/opentsdb_restart.py.
|
||||
touch $LOCK_FILE && chown $USER:$GROUP $LOCK_FILE
|
||||
touch $PID_FILE && chown $USER:$GROUP $PID_FILE
|
||||
daemon --user $USER --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
|
||||
else
|
||||
# Don't have to change user.
|
||||
daemon --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
|
||||
fi
|
||||
retval=$?
|
||||
sleep 2
|
||||
echo
|
||||
[ $retval -eq 0 ] && (findproc > $PID_FILE && touch $LOCK_FILE)
|
||||
return $retval
|
||||
}
|
||||
|
||||
stop() {
|
||||
echo -n "Stopping ${NAME}: "
|
||||
killproc -p $PID_FILE $NAME
|
||||
retval=$?
|
||||
echo
|
||||
# Non-root users don't have enough permission to remove pid and lock files.
|
||||
# So, the opentsdb_restart.py cannot get rid of the files, and the command
|
||||
# "service opentsdb status" will complain about the existing pid file.
|
||||
# Makes the pid file empty.
|
||||
echo > $PID_FILE
|
||||
[ $retval -eq 0 ] && (rm -f $PID_FILE && rm -f $LOCK_FILE)
|
||||
return $retval
|
||||
}
|
||||
|
||||
restart() {
|
||||
stop
|
||||
start
|
||||
}
|
||||
|
||||
reload() {
|
||||
restart
|
||||
}
|
||||
|
||||
force_reload() {
|
||||
restart
|
||||
}
|
||||
|
||||
rh_status() {
|
||||
# run checks to determine if the service is running or use generic status
|
||||
status -p $PID_FILE -l $LOCK_FILE $NAME
|
||||
}
|
||||
|
||||
rh_status_q() {
|
||||
rh_status >/dev/null 2>&1
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
rh_status_q && exit 0
|
||||
$1
|
||||
;;
|
||||
stop)
|
||||
rh_status_q || exit 0
|
||||
$1
|
||||
;;
|
||||
restart)
|
||||
$1
|
||||
;;
|
||||
reload)
|
||||
rh_status_q || exit 7
|
||||
$1
|
||||
;;
|
||||
force-reload)
|
||||
force_reload
|
||||
;;
|
||||
status)
|
||||
rh_status
|
||||
;;
|
||||
condrestart|try-restart)
|
||||
rh_status_q || exit 0
|
||||
restart
|
||||
;;
|
||||
*)
|
||||
echo "Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
|
||||
exit 2
|
||||
esac
|
||||
|
||||
exit $?
|
|
@ -0,0 +1,141 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# tarbitratord This shell script takes care of starting and stopping tarbitrator.
|
||||
#
|
||||
# chkconfig: 2345 99 01
|
||||
# description: tarbitrator is an arbitrator used in a TDengine cluster.
|
||||
#
|
||||
#
|
||||
### BEGIN INIT INFO
|
||||
# Provides: taoscluster
|
||||
# Required-Start: $network $local_fs $remote_fs
|
||||
# Required-Stop: $network $local_fs $remote_fs
|
||||
# Short-Description: start and stop tarbitrator
|
||||
# Description: tarbitrator is an arbitrator used in a TDengine cluster.
|
||||
### END INIT INFO
|
||||
|
||||
# Source init functions
|
||||
. /etc/init.d/functions
|
||||
|
||||
# Maximum number of open files
|
||||
MAX_OPEN_FILES=65535
|
||||
|
||||
# Default program options
|
||||
NAME=tarbitrator
|
||||
PROG=/usr/local/taos/bin/tarbitrator
|
||||
USER=root
|
||||
GROUP=root
|
||||
|
||||
# Default directories
|
||||
LOCK_DIR=/var/lock/subsys
|
||||
PID_DIR=/var/run/$NAME
|
||||
|
||||
# Set file names
|
||||
LOCK_FILE=$LOCK_DIR/$NAME
|
||||
PID_FILE=$PID_DIR/$NAME.pid
|
||||
|
||||
[ -e $PID_DIR ] || mkdir -p $PID_DIR
|
||||
|
||||
PROG_OPTS=""
|
||||
|
||||
start() {
|
||||
echo -n "Starting ${NAME}: "
|
||||
# check identity
|
||||
curid="`id -u -n`"
|
||||
if [ "$curid" != root ] && [ "$curid" != "$USER" ] ; then
|
||||
echo "Must be run as root or $USER, but was run as $curid"
|
||||
return 1
|
||||
fi
|
||||
# Sets the maximum number of open file descriptors allowed.
|
||||
ulimit -n $MAX_OPEN_FILES
|
||||
curulimit="`ulimit -n`"
|
||||
if [ "$curulimit" -lt $MAX_OPEN_FILES ] ; then
|
||||
echo "'ulimit -n' must be greater than or equal to $MAX_OPEN_FILES, is $curulimit"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ "`id -u -n`" == root ] ; then
|
||||
# Changes the owner of the lock, and the pid files to allow
|
||||
# non-root OpenTSDB daemons to run /usr/share/opentsdb/bin/opentsdb_restart.py.
|
||||
touch $LOCK_FILE && chown $USER:$GROUP $LOCK_FILE
|
||||
touch $PID_FILE && chown $USER:$GROUP $PID_FILE
|
||||
daemon --user $USER --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
|
||||
else
|
||||
# Don't have to change user.
|
||||
daemon --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
|
||||
fi
|
||||
retval=$?
|
||||
sleep 2
|
||||
echo
|
||||
[ $retval -eq 0 ] && (findproc > $PID_FILE && touch $LOCK_FILE)
|
||||
return $retval
|
||||
}
|
||||
|
||||
stop() {
|
||||
echo -n "Stopping ${NAME}: "
|
||||
killproc -p $PID_FILE $NAME
|
||||
retval=$?
|
||||
echo
|
||||
# Non-root users don't have enough permission to remove pid and lock files.
|
||||
# So, the opentsdb_restart.py cannot get rid of the files, and the command
|
||||
# "service opentsdb status" will complain about the existing pid file.
|
||||
# Makes the pid file empty.
|
||||
echo > $PID_FILE
|
||||
[ $retval -eq 0 ] && (rm -f $PID_FILE && rm -f $LOCK_FILE)
|
||||
return $retval
|
||||
}
|
||||
|
||||
restart() {
|
||||
stop
|
||||
start
|
||||
}
|
||||
|
||||
reload() {
|
||||
restart
|
||||
}
|
||||
|
||||
force_reload() {
|
||||
restart
|
||||
}
|
||||
|
||||
rh_status() {
|
||||
# run checks to determine if the service is running or use generic status
|
||||
status -p $PID_FILE -l $LOCK_FILE $NAME
|
||||
}
|
||||
|
||||
rh_status_q() {
|
||||
rh_status >/dev/null 2>&1
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
rh_status_q && exit 0
|
||||
$1
|
||||
;;
|
||||
stop)
|
||||
rh_status_q || exit 0
|
||||
$1
|
||||
;;
|
||||
restart)
|
||||
$1
|
||||
;;
|
||||
reload)
|
||||
rh_status_q || exit 7
|
||||
$1
|
||||
;;
|
||||
force-reload)
|
||||
force_reload
|
||||
;;
|
||||
status)
|
||||
rh_status
|
||||
;;
|
||||
condrestart|try-restart)
|
||||
rh_status_q || exit 0
|
||||
restart
|
||||
;;
|
||||
*)
|
||||
echo "Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
|
||||
exit 2
|
||||
esac
|
||||
|
||||
exit $?
|
|
@ -0,0 +1,236 @@
|
|||
%define homepath /usr/local/taos
|
||||
%define userlocalpath /usr/local
|
||||
%define cfg_install_dir /etc/taos
|
||||
%define __strip /bin/true
|
||||
|
||||
Name: tdengine
|
||||
Version: %{_version}
|
||||
Release: 3%{?dist}
|
||||
Summary: tdengine from taosdata
|
||||
Group: Application/Database
|
||||
License: AGPL
|
||||
URL: www.taosdata.com
|
||||
AutoReqProv: no
|
||||
|
||||
#BuildRoot: %_topdir/BUILDROOT
|
||||
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
|
||||
|
||||
#Prefix: /usr/local/taos
|
||||
|
||||
#BuildRequires:
|
||||
#Requires:
|
||||
|
||||
%description
|
||||
Big Data Platform Designed and Optimized for IoT
|
||||
|
||||
#"prep" Nothing needs to be done
|
||||
#%prep
|
||||
#%setup -q
|
||||
#%setup -T
|
||||
|
||||
#"build" Nothing needs to be done
|
||||
#%build
|
||||
#%configure
|
||||
#make %{?_smp_mflags}
|
||||
|
||||
%install
|
||||
#make install DESTDIR=%{buildroot}
|
||||
rm -rf %{buildroot}
|
||||
|
||||
echo topdir: %{_topdir}
|
||||
echo version: %{_version}
|
||||
echo buildroot: %{buildroot}
|
||||
|
||||
libfile="libtaos.so.%{_version}"
|
||||
|
||||
# create install path, and cp file
|
||||
mkdir -p %{buildroot}%{homepath}/bin
|
||||
mkdir -p %{buildroot}%{homepath}/cfg
|
||||
#mkdir -p %{buildroot}%{homepath}/connector
|
||||
mkdir -p %{buildroot}%{homepath}/driver
|
||||
mkdir -p %{buildroot}%{homepath}/examples
|
||||
mkdir -p %{buildroot}%{homepath}/include
|
||||
#mkdir -p %{buildroot}%{homepath}/init.d
|
||||
mkdir -p %{buildroot}%{homepath}/script
|
||||
|
||||
cp %{_compiledir}/../packaging/cfg/taos.cfg %{buildroot}%{homepath}/cfg
|
||||
if [ -f %{_compiledir}/test/cfg/taosadapter.toml ]; then
|
||||
cp %{_compiledir}/test/cfg/taosadapter.toml %{buildroot}%{homepath}/cfg
|
||||
fi
|
||||
if [ -f %{_compiledir}/test/cfg/taosadapter.service ]; then
|
||||
cp %{_compiledir}/test/cfg/taosadapter.service %{buildroot}%{homepath}/cfg
|
||||
fi
|
||||
#cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d
|
||||
cp %{_compiledir}/../packaging/tools/post.sh %{buildroot}%{homepath}/script
|
||||
cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/script
|
||||
cp %{_compiledir}/../packaging/tools/startPre.sh %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/../packaging/tools/set_core.sh %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
|
||||
#cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin
|
||||
|
||||
if [ -f %{_compiledir}/build/bin/taosadapter ]; then
|
||||
cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||:
|
||||
fi
|
||||
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
|
||||
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
|
||||
cp %{_compiledir}/../src/inc/taosdef.h %{buildroot}%{homepath}/include
|
||||
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
|
||||
#cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
|
||||
#cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
|
||||
#cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
|
||||
#cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||:
|
||||
cp -r %{_compiledir}/../examples/* %{buildroot}%{homepath}/examples
|
||||
|
||||
if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
|
||||
mkdir -p %{buildroot}%{userlocalpath}/bin
|
||||
mkdir -p %{buildroot}%{userlocalpath}/lib
|
||||
mkdir -p %{buildroot}%{userlocalpath}/lib/pkgconfig
|
||||
mkdir -p %{buildroot}%{userlocalpath}/include
|
||||
mkdir -p %{buildroot}%{userlocalpath}/include/jemalloc
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share/doc
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share/doc/jemalloc
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share/man
|
||||
mkdir -p %{buildroot}%{userlocalpath}/share/man/man3
|
||||
|
||||
cp %{_compiledir}/build/bin/jemalloc-config %{buildroot}%{userlocalpath}/bin/
|
||||
if [ -f %{_compiledir}/build/bin/jemalloc.sh ]; then
|
||||
cp %{_compiledir}/build/bin/jemalloc.sh %{buildroot}%{userlocalpath}/bin/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/bin/jeprof ]; then
|
||||
cp %{_compiledir}/build/bin/jeprof %{buildroot}%{userlocalpath}/bin/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/include/jemalloc/jemalloc.h ]; then
|
||||
cp %{_compiledir}/build/include/jemalloc/jemalloc.h %{buildroot}%{userlocalpath}/include/jemalloc/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/lib/libjemalloc.so.2 ]; then
|
||||
cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/
|
||||
ln -sf libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/libjemalloc.so
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
|
||||
cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{userlocalpath}/lib/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
|
||||
cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{userlocalpath}/lib/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then
|
||||
cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{userlocalpath}/lib/pkgconfig/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/share/doc/jemalloc/jemalloc.html ]; then
|
||||
cp %{_compiledir}/build/share/doc/jemalloc/jemalloc.html %{buildroot}%{userlocalpath}/share/doc/jemalloc/
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/share/man/man3/jemalloc.3 ]; then
|
||||
cp %{_compiledir}/build/share/man/man3/jemalloc.3 %{buildroot}%{userlocalpath}/share/man/man3/
|
||||
fi
|
||||
fi
|
||||
|
||||
#Scripts executed before installation
|
||||
%pre
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
# Stop the service if running
|
||||
if pidof taosd &> /dev/null; then
|
||||
if pidof systemd &> /dev/null; then
|
||||
${csudo}systemctl stop taosd || :
|
||||
elif $(which service &> /dev/null); then
|
||||
${csudo}service taosd stop || :
|
||||
else
|
||||
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
fi
|
||||
echo "Stop taosd service success!"
|
||||
sleep 1
|
||||
fi
|
||||
# if taos.cfg already exist, remove it
|
||||
if [ -f %{cfg_install_dir}/taos.cfg ]; then
|
||||
${csudo}rm -f %{cfg_install_dir}/cfg/taos.cfg || :
|
||||
fi
|
||||
|
||||
# if taosadapter.toml already exist, remove it
|
||||
if [ -f %{cfg_install_dir}/taosadapter.toml ]; then
|
||||
${csudo}rm -f %{cfg_install_dir}/cfg/taosadapter.toml || :
|
||||
fi
|
||||
|
||||
# there must not be any libtaos.so* files left, otherwise ln -s will fail
|
||||
${csudo}rm -f %{homepath}/driver/libtaos* || :
|
||||
|
||||
#Scripts executed after installation
|
||||
%post
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
cd %{homepath}/script
|
||||
${csudo}./post.sh
|
||||
|
||||
# Scripts executed before uninstall
|
||||
%preun
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
# only run preun.sh when the package is removed, not on update
|
||||
if [ $1 -eq 0 ];then
|
||||
#cd %{homepath}/script
|
||||
#${csudo}./preun.sh
|
||||
|
||||
if [ -f %{homepath}/script/preun.sh ]; then
|
||||
cd %{homepath}/script
|
||||
${csudo}./preun.sh
|
||||
else
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
inc_link_dir="/usr/include"
|
||||
|
||||
data_link_dir="/usr/local/taos/data"
|
||||
log_link_dir="/usr/local/taos/log"
|
||||
cfg_link_dir="/usr/local/taos/cfg"
|
||||
|
||||
# Remove all links
|
||||
${csudo}rm -f ${bin_link_dir}/taos || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosd || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosadapter || :
|
||||
${csudo}rm -f ${cfg_link_dir}/* || :
|
||||
${csudo}rm -f ${inc_link_dir}/taos.h || :
|
||||
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
|
||||
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
|
||||
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
||||
|
||||
${csudo}rm -f ${log_link_dir} || :
|
||||
${csudo}rm -f ${data_link_dir} || :
|
||||
|
||||
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Scripts executed after uninstall
|
||||
%postun
|
||||
|
||||
# clean build dir
|
||||
%clean
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
${csudo}rm -rf %{buildroot}
|
||||
|
||||
#Specify the files to be packaged
|
||||
%files
|
||||
/*
|
||||
#%doc
|
||||
|
||||
#Setting default permissions
|
||||
%defattr (-,root,root,0755)
|
||||
#%{prefix}
|
||||
|
||||
#%changelog
|
|
@ -0,0 +1,52 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
CSI=$(echo -e "\033[")
|
||||
CRED="${CSI}1;31m"
|
||||
CFAILURE="$CRED"
|
||||
CEND="${CSI}0m"
|
||||
if [ -n "$(grep 'Aliyun Linux release' /etc/issue)" -o -e /etc/redhat-release ]; then
|
||||
OS=CentOS
|
||||
[ -n "$(grep ' 7\.' /etc/redhat-release 2> /dev/null)" ] && CentOS_RHEL_version=7
|
||||
[ -n "$(grep ' 6\.' /etc/redhat-release 2> /dev/null)" -o -n "$(grep 'Aliyun Linux release6 15' /etc/issue)" ] && CentOS_RHEL_version=6
|
||||
[ -n "$(grep ' 5\.' /etc/redhat-release 2> /dev/null)" -o -n "$(grep 'Aliyun Linux release5' /etc/issue)" ] && CentOS_RHEL_version=5
|
||||
elif [ -n "$(grep 'Amazon Linux AMI release' /etc/issue)" -o -e /etc/system-release ]; then
|
||||
OS=CentOS
|
||||
CentOS_RHEL_version=6
|
||||
elif [ -n "$(grep 'bian' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Debian" ]; then
|
||||
OS=Debian
|
||||
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
|
||||
Debian_version=$(lsb_release -sr | awk -F. '{print $1}')
|
||||
elif [ -n "$(grep 'Deepin' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Deepin" ]; then
|
||||
OS=Debian
|
||||
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
|
||||
Debian_version=$(lsb_release -sr | awk -F. '{print $1}')
|
||||
elif [ -n "$(grep 'Kali GNU/Linux Rolling' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Kali" ]; then
|
||||
OS=Debian
|
||||
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
|
||||
if [ -n "$(grep 'VERSION="2016.*"' /etc/os-release)" ]; then
|
||||
Debian_version=8
|
||||
else
|
||||
echo "${CFAILURE}Does not support this OS, Please contact the author! ${CEND}"
|
||||
kill -9 $$
|
||||
fi
|
||||
elif [ -n "$(grep 'Ubuntu' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Ubuntu" -o -n "$(grep 'Linux Mint' /etc/issue)" ]; then
|
||||
OS=Ubuntu
|
||||
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
|
||||
Ubuntu_version=$(lsb_release -sr | awk -F. '{print $1}')
|
||||
[ -n "$(grep 'Linux Mint 18' /etc/issue)" ] && Ubuntu_version=16
|
||||
elif [ -n "$(grep 'elementary' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == 'elementary' ]; then
|
||||
OS=Ubuntu
|
||||
[ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; }
|
||||
Ubuntu_version=16
|
||||
else
|
||||
echo "${CFAILURE}Does not support this OS, Please contact the author! ${CEND}"
|
||||
kill -9 $$
|
||||
fi
|
||||
|
||||
echo "${CFAILURE}${OS}${CEND}"
|
||||
if [ "$OS" == 'CentOS' ]; then
|
||||
echo ${CentOS_RHEL_version}
|
||||
else
|
||||
echo ${Ubuntu_version}
|
||||
fi
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
|
||||
log_dir=$1
|
||||
result_file=$2
|
||||
|
||||
if [ ! -n "$1" ];then
|
||||
echo "Pleas input the director of taosdlog."
|
||||
echo "usage: ./get_client.sh <taosdlog directory> <result file>"
|
||||
exit 1
|
||||
else
|
||||
log_dir=$1
|
||||
fi
|
||||
|
||||
if [ ! -n "$2" ];then
|
||||
result_file=clientInfo.txt
|
||||
else
|
||||
result_file=$2
|
||||
fi
|
||||
|
||||
grep "new TCP connection" ${log_dir}/taosdlog.* | sed -e "s/0x.* from / /"|sed -e "s/,.*$//"|sed -e "s/:[0-9]*$//"|sort -r|uniq -f 2|sort -k 3 -r|uniq -f 2 > ${result_file}
|
|
@ -0,0 +1,14 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# This file extracts the operating system name from /etc/*-release.
|
||||
|
||||
set -e
|
||||
# set -x
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2)
|
||||
len=$(echo ${#OS})
|
||||
len=$((len-2))
|
||||
retval=$(echo -ne ${OS:1:${len}} | cut -d" " -f1)
|
||||
echo -ne $retval
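# Illustrative example: with NAME="CentOS Linux" in /etc/*-release, the script prints "CentOS".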
|
|
@ -0,0 +1,15 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# This file extracts the version string from the file passed as the first argument.
|
||||
|
||||
set -e
|
||||
# set -x
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
verinfo=$(cat $1 | grep " version" | cut -d '"' -f2)
|
||||
verinfo=$(echo $verinfo | tr "\n" " ")
|
||||
len=$(echo ${#verinfo})
|
||||
len=$((len-1))
|
||||
retval=$(echo -ne ${verinfo:0:${len}})
|
||||
echo -ne $retval
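# Illustrative example (file format assumed): for a line such as
#   char version[12] = "3.0.0.0";
# in the file passed as $1, the script prints 3.0.0.0.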
|
|
@ -215,15 +215,9 @@ function install_lib() {
|
|||
${csudo} ln -s ${install_main_dir}/lib/libtaos.* ${lib_link_dir}/libtaos.so.1
|
||||
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
|
||||
|
||||
${csudo} ln -s ${install_main_dir}/lib/libtdb.* ${lib_link_dir}/libtdb.so.1
|
||||
${csudo} ln -s ${lib_link_dir}/libtdb.so.1 ${lib_link_dir}/libtdb.so
|
||||
|
||||
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
|
||||
${csudo} ln -s ${install_main_dir}/lib/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
|
||||
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
|
||||
|
||||
${csudo} ln -s ${install_main_dir}/lib/libtdb.* ${lib64_link_dir}/libtdb.so.1 || :
|
||||
${csudo} ln -s ${lib64_link_dir}/libtdb.so.1 ${lib64_link_dir}/libtdb.so || :
|
||||
fi
|
||||
|
||||
${csudo} ldconfig
|
|
@ -0,0 +1,339 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# This file is used to install database on linux systems. The operating system
|
||||
# is required to use systemd to manage services at boot
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
|
||||
bin_link_dir="/usr/bin"
|
||||
#inc_link_dir="/usr/include"
|
||||
|
||||
#install main path
|
||||
install_main_dir="/usr/local/tarbitrator"
|
||||
|
||||
# old bin dir
|
||||
bin_dir="/usr/local/tarbitrator/bin"
|
||||
|
||||
service_config_dir="/etc/systemd/system"
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
csudo=""
|
||||
if command -v sudo >/dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
update_flag=0
|
||||
|
||||
initd_mod=0
|
||||
service_mod=2
|
||||
if pidof systemd &>/dev/null; then
|
||||
service_mod=0
|
||||
elif $(which service &>/dev/null); then
|
||||
service_mod=1
|
||||
service_config_dir="/etc/init.d"
|
||||
if $(which chkconfig &>/dev/null); then
|
||||
initd_mod=1
|
||||
elif $(which insserv &>/dev/null); then
|
||||
initd_mod=2
|
||||
elif $(which update-rc.d &>/dev/null); then
|
||||
initd_mod=3
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
else
|
||||
service_mod=2
|
||||
fi
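# Summary of the detection above: service_mod=0 -> systemd is available; service_mod=1 ->
# classic SysV init via `service` (initd_mod selects chkconfig/insserv/update-rc.d);
# service_mod=2 -> neither, so the process is managed manually.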
|
||||
|
||||
# get the operating system type for using the corresponding init file
|
||||
# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
|
||||
#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
|
||||
if [[ -e /etc/os-release ]]; then
|
||||
osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) || :
|
||||
else
|
||||
osinfo=""
|
||||
fi
|
||||
#echo "osinfo: ${osinfo}"
|
||||
os_type=0
|
||||
if echo $osinfo | grep -qwi "ubuntu"; then
|
||||
# echo "This is ubuntu system"
|
||||
os_type=1
|
||||
elif echo $osinfo | grep -qwi "debian"; then
|
||||
# echo "This is debian system"
|
||||
os_type=1
|
||||
elif echo $osinfo | grep -qwi "Kylin"; then
|
||||
# echo "This is Kylin system"
|
||||
os_type=1
|
||||
elif echo $osinfo | grep -qwi "centos"; then
|
||||
# echo "This is centos system"
|
||||
os_type=2
|
||||
elif echo $osinfo | grep -qwi "fedora"; then
|
||||
# echo "This is fedora system"
|
||||
os_type=2
|
||||
else
|
||||
echo " osinfo: ${osinfo}"
|
||||
echo " This is an officially unverified linux system,"
|
||||
echo " if there are any problems with the installation and operation, "
|
||||
echo " please feel free to contact taosdata.com for support."
|
||||
os_type=1
|
||||
fi
|
||||
|
||||
function kill_tarbitrator() {
|
||||
pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_main_path() {
|
||||
#create install main dir and all sub dir
|
||||
${csudo}rm -rf ${install_main_dir} || :
|
||||
${csudo}mkdir -p ${install_main_dir}
|
||||
${csudo}mkdir -p ${install_main_dir}/bin
|
||||
#${csudo}mkdir -p ${install_main_dir}/include
|
||||
${csudo}mkdir -p ${install_main_dir}/init.d
|
||||
}
|
||||
|
||||
function install_bin() {
|
||||
# Remove links
|
||||
${csudo}rm -f ${bin_link_dir}/rmtarbitrator || :
|
||||
${csudo}rm -f ${bin_link_dir}/tarbitrator || :
|
||||
${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*
|
||||
|
||||
#Make link
|
||||
[ -x ${install_main_dir}/bin/remove_arbi.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_arbi.sh ${bin_link_dir}/rmtarbitrator || :
|
||||
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
|
||||
}
|
||||
|
||||
function install_header() {
|
||||
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
|
||||
${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
|
||||
${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
|
||||
${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
|
||||
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
|
||||
}
|
||||
|
||||
function install_jemalloc() {
|
||||
jemalloc_dir=${script_dir}/jemalloc
|
||||
|
||||
if [ -d ${jemalloc_dir} ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/bin
|
||||
|
||||
if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/bin/jeprof ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
|
||||
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
|
||||
fi
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/share/man/man3
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
|
||||
fi
|
||||
|
||||
if [ -d /etc/ld.so.conf.d ]; then
|
||||
echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf >/dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
|
||||
${csudo}ldconfig
|
||||
else
|
||||
echo "/etc/ld.so.conf.d not found!"
|
||||
fi
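# The block above registers /usr/local/lib with the dynamic linker (ld.so.conf.d + ldconfig)
# so the freshly installed libjemalloc.so.2 can be resolved at runtime.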
|
||||
fi
|
||||
}
|
||||
|
||||
function clean_service_on_sysvinit() {
|
||||
if pidof tarbitrator &>/dev/null; then
|
||||
${csudo}service tarbitratord stop || :
|
||||
fi
|
||||
|
||||
if ((${initd_mod} == 1)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo}chkconfig --del tarbitratord || :
|
||||
fi
|
||||
elif ((${initd_mod} == 2)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo}insserv -r tarbitratord || :
|
||||
fi
|
||||
elif ((${initd_mod} == 3)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo}update-rc.d -f tarbitratord remove || :
|
||||
fi
|
||||
fi
|
||||
|
||||
${csudo}rm -f ${service_config_dir}/tarbitratord || :
|
||||
|
||||
if $(which init &>/dev/null); then
|
||||
${csudo}init q || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_service_on_sysvinit() {
|
||||
clean_service_on_sysvinit
|
||||
sleep 1
|
||||
|
||||
if ((${os_type} == 1)); then
|
||||
${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
|
||||
${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord
|
||||
elif ((${os_type} == 2)); then
|
||||
${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
|
||||
${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord
|
||||
fi
|
||||
|
||||
if ((${initd_mod} == 1)); then
|
||||
${csudo}chkconfig --add tarbitratord || :
|
||||
${csudo}chkconfig --level 2345 tarbitratord on || :
|
||||
elif ((${initd_mod} == 2)); then
|
||||
${csudo}insserv tarbitratord || :
|
||||
${csudo}insserv -d tarbitratord || :
|
||||
elif ((${initd_mod} == 3)); then
|
||||
${csudo}update-rc.d tarbitratord defaults || :
|
||||
fi
|
||||
}
|
||||
|
||||
function clean_service_on_systemd() {
|
||||
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
|
||||
if systemctl is-active --quiet tarbitratord; then
|
||||
echo "tarbitrator is running, stopping it..."
|
||||
${csudo}systemctl stop tarbitratord &>/dev/null || echo &>/dev/null
|
||||
fi
|
||||
${csudo}systemctl disable tarbitratord &>/dev/null || echo &>/dev/null
|
||||
|
||||
${csudo}rm -f ${tarbitratord_service_config}
|
||||
}
|
||||
|
||||
function install_service_on_systemd() {
|
||||
clean_service_on_systemd
|
||||
|
||||
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
|
||||
|
||||
${csudo}bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
|
||||
${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
|
||||
${csudo}systemctl enable tarbitratord
|
||||
}
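# The unit file written above makes systemd run /usr/bin/tarbitrator as a simple service,
# restart it automatically (Restart=always with StartLimitBurst/StartLimitInterval), and
# start it at boot once `systemctl enable tarbitratord` has been executed.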
|
||||
|
||||
function install_service() {
|
||||
if ((${service_mod} == 0)); then
|
||||
install_service_on_systemd
|
||||
elif ((${service_mod} == 1)); then
|
||||
install_service_on_sysvinit
|
||||
else
|
||||
kill_tarbitrator
|
||||
fi
|
||||
}
|
||||
|
||||
function update_TDengine() {
|
||||
# Start to update
|
||||
echo -e "${GREEN}Start to update TDengine's arbitrator ...${NC}"
|
||||
# Stop the service if running
|
||||
if pidof tarbitrator &>/dev/null; then
|
||||
if ((${service_mod} == 0)); then
|
||||
${csudo}systemctl stop tarbitratord || :
|
||||
elif ((${service_mod} == 1)); then
|
||||
${csudo}service tarbitratord stop || :
|
||||
else
|
||||
kill_tarbitrator
|
||||
fi
|
||||
sleep 1
|
||||
fi
|
||||
|
||||
install_main_path
|
||||
#install_header
|
||||
install_bin
|
||||
install_service
|
||||
install_jemalloc
|
||||
|
||||
echo
|
||||
if ((${service_mod} == 0)); then
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}"
|
||||
elif ((${service_mod} == 1)); then
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}"
|
||||
fi
|
||||
echo
|
||||
echo -e "\033[44;32;1mTDengine's arbitrator is updated successfully!${NC}"
|
||||
}
|
||||
|
||||
function install_TDengine() {
|
||||
# Start to install
|
||||
echo -e "${GREEN}Start to install TDengine's arbitrator ...${NC}"
|
||||
|
||||
install_main_path
|
||||
#install_header
|
||||
install_bin
|
||||
install_service
|
||||
install_jemalloc
|
||||
|
||||
echo
|
||||
if ((${service_mod} == 0)); then
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}"
|
||||
elif ((${service_mod} == 1)); then
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}"
|
||||
fi
|
||||
|
||||
echo -e "\033[44;32;1mTDengine's arbitrator is installed successfully!${NC}"
|
||||
echo
|
||||
}
|
||||
|
||||
## ==============================Main program starts from here============================
|
||||
# Install or update the arbitrator
|
||||
if [ -x ${bin_dir}/tarbitrator ]; then
|
||||
update_flag=1
|
||||
update_TDengine
|
||||
else
|
||||
install_TDengine
|
||||
fi
|
|
@ -0,0 +1,320 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# This file is used to install TDengine client on linux systems. The operating system
|
||||
# is required to use systemd to manage services at boot
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
|
||||
dataDir="/var/lib/taos"
|
||||
logDir="/var/log/taos"
|
||||
productName="TDengine"
|
||||
installDir="/usr/local/taos"
|
||||
configDir="/etc/taos"
|
||||
serverName="taosd"
|
||||
clientName="taos"
|
||||
uninstallScript="rmtaos"
|
||||
configFile="taos.cfg"
|
||||
tarName="taos.tar.gz"
|
||||
|
||||
osType=Linux
|
||||
pagMode=full
|
||||
verMode=edge
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
# Dynamic directory
|
||||
data_dir=${dataDir}
|
||||
log_dir=${logDir}
|
||||
else
|
||||
script_dir=`dirname $0`
|
||||
cd ${script_dir}
|
||||
script_dir="$(pwd)"
|
||||
data_dir=${dataDir}
|
||||
log_dir=~/${productName}/log
|
||||
fi
|
||||
|
||||
log_link_dir="${installDir}/log"
|
||||
|
||||
cfg_install_dir=${configDir}
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
else
|
||||
bin_link_dir="/usr/local/bin"
|
||||
lib_link_dir="/usr/local/lib"
|
||||
inc_link_dir="/usr/local/include"
|
||||
fi
|
||||
|
||||
#install main path
|
||||
install_main_dir="${installDir}"
|
||||
|
||||
# old bin dir
|
||||
bin_dir="${installDir}/bin"
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
update_flag=0
|
||||
|
||||
function kill_client() {
|
||||
pid=$(ps -ef | grep "${clientName}" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_main_path() {
|
||||
#create install main dir and all sub dir
|
||||
${csudo}rm -rf ${install_main_dir} || :
|
||||
${csudo}mkdir -p ${install_main_dir}
|
||||
${csudo}mkdir -p ${install_main_dir}/cfg
|
||||
${csudo}mkdir -p ${install_main_dir}/bin
|
||||
${csudo}mkdir -p ${install_main_dir}/driver
|
||||
if [ $productName == "TDengine" ]; then
|
||||
${csudo}mkdir -p ${install_main_dir}/examples
|
||||
fi
|
||||
${csudo}mkdir -p ${install_main_dir}/include
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
${csudo}mkdir -p ${install_main_dir}/connector
|
||||
fi
|
||||
}
|
||||
|
||||
function install_bin() {
|
||||
# Remove links
|
||||
${csudo}rm -f ${bin_link_dir}/${clientName} || :
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
||||
fi
|
||||
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
|
||||
${csudo}rm -f ${bin_link_dir}/set_core || :
|
||||
|
||||
${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*
|
||||
|
||||
#Make link
|
||||
[ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || :
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
|
||||
fi
|
||||
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/${uninstallScript} || :
|
||||
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
|
||||
}
|
||||
|
||||
function clean_lib() {
|
||||
sudo rm -f /usr/lib/libtaos.* || :
|
||||
sudo rm -rf ${lib_dir} || :
|
||||
}
|
||||
|
||||
function install_lib() {
|
||||
# Remove links
|
||||
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
||||
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
|
||||
#${csudo}rm -rf ${v15_java_app_dir} || :
|
||||
|
||||
${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/*
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
|
||||
${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
|
||||
|
||||
if [ -d "${lib64_link_dir}" ]; then
|
||||
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
|
||||
${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
|
||||
fi
|
||||
else
|
||||
${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
|
||||
${csudo}ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
|
||||
fi
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}ldconfig
|
||||
else
|
||||
${csudo}update_dyld_shared_cache
|
||||
fi
|
||||
}
|
||||
|
||||
function install_header() {
|
||||
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
|
||||
${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
|
||||
${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
|
||||
${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
|
||||
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
|
||||
}
|
||||
|
||||
function install_jemalloc() {
|
||||
jemalloc_dir=${script_dir}/jemalloc
|
||||
|
||||
if [ -d ${jemalloc_dir} ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/bin
|
||||
|
||||
if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/bin/jeprof ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
|
||||
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
|
||||
fi
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/share/man/man3
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
|
||||
fi
|
||||
|
||||
if [ -d /etc/ld.so.conf.d ]; then
|
||||
echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
|
||||
${csudo}ldconfig
|
||||
else
|
||||
echo "/etc/ld.so.conf.d not found!"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function install_config() {
|
||||
if [ ! -f ${cfg_install_dir}/${configFile} ]; then
|
||||
${csudo}mkdir -p ${cfg_install_dir}
|
||||
[ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir}
|
||||
${csudo}chmod 644 ${cfg_install_dir}/*
|
||||
fi
|
||||
|
||||
${csudo}cp -f ${script_dir}/cfg/${configFile} ${install_main_dir}/cfg/${configFile}.org
|
||||
${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg
|
||||
}
|
||||
|
||||
|
||||
function install_log() {
|
||||
${csudo}rm -rf ${log_dir} || :
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
|
||||
else
|
||||
mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
|
||||
fi
|
||||
${csudo}ln -s ${log_dir} ${install_main_dir}/log
|
||||
}
|
||||
|
||||
function install_connector() {
|
||||
${csudo}cp -rf ${script_dir}/connector/ ${install_main_dir}/
|
||||
}
|
||||
|
||||
function install_examples() {
|
||||
if [ -d ${script_dir}/examples ]; then
|
||||
${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
|
||||
fi
|
||||
}
|
||||
|
||||
function update_TDengine() {
|
||||
# Start to update
|
||||
if [ ! -e ${tarName} ]; then
|
||||
echo "File ${tarName} does not exist"
|
||||
exit 1
|
||||
fi
|
||||
tar -zxf ${tarName}
|
||||
|
||||
echo -e "${GREEN}Start to update ${productName} client...${NC}"
|
||||
# Stop the client shell if running
|
||||
if pidof ${clientName} &> /dev/null; then
|
||||
kill_client
|
||||
sleep 1
|
||||
fi
|
||||
|
||||
install_main_path
|
||||
|
||||
install_log
|
||||
install_header
|
||||
install_lib
|
||||
install_jemalloc
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_connector
|
||||
fi
|
||||
install_examples
|
||||
install_bin
|
||||
install_config
|
||||
|
||||
echo
|
||||
echo -e "\033[44;32;1m${productName} client is updated successfully!${NC}"
|
||||
|
||||
rm -rf $(tar -tf ${tarName})
|
||||
}
|
||||
|
||||
function install_TDengine() {
|
||||
# Start to install
|
||||
if [ ! -e ${tarName} ]; then
|
||||
echo "File ${tarName} does not exist"
|
||||
exit 1
|
||||
fi
|
||||
tar -zxf ${tarName}
|
||||
|
||||
echo -e "${GREEN}Start to install ${productName} client...${NC}"
|
||||
|
||||
install_main_path
|
||||
install_log
|
||||
install_header
|
||||
install_lib
|
||||
install_jemalloc
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_connector
|
||||
fi
|
||||
install_examples
|
||||
install_bin
|
||||
install_config
|
||||
|
||||
echo
|
||||
echo -e "\033[44;32;1m${productName} client is installed successfully!${NC}"
|
||||
|
||||
rm -rf $(tar -tf ${tarName})
|
||||
}
|
||||
|
||||
|
||||
## ==============================Main program starts from here============================
|
||||
# Install or update the client
|
||||
# if the server is already installed, don't install the client
|
||||
if [ -e ${bin_dir}/${serverName} ]; then
|
||||
echo -e "\033[44;32;1mThere are already installed ${productName} server, so don't need install client!${NC}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ -x ${bin_dir}/${clientName} ]; then
|
||||
update_flag=1
|
||||
update_TDengine
|
||||
else
|
||||
install_TDengine
|
||||
fi
|
|
@ -169,6 +169,7 @@ function install_bin() {
|
|||
${csudo}rm -f ${bin_link_dir}/${clientName} || :
|
||||
${csudo}rm -f ${bin_link_dir}/${serverName} || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosadapter || :
|
||||
${csudo}rm -f ${bin_link_dir}/udfd || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosdump || :
|
||||
|
||||
|
@ -183,8 +184,9 @@ function install_bin() {
|
|||
[ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo || :
|
||||
[ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || :
|
||||
[ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || :
|
||||
[ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || :
|
||||
${csudo}cp -r ${binary_dir}/build/bin/${serverName} ${install_main_dir}/bin || :
|
||||
${csudo}cp -r ${binary_dir}/build/bin/tarbitrator ${install_main_dir}/bin || :
|
||||
# ${csudo}cp -r ${binary_dir}/build/bin/tarbitrator ${install_main_dir}/bin || :
|
||||
|
||||
${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin || :
|
||||
${csudo}cp -r ${script_dir}/remove.sh ${install_main_dir}/bin || :
|
||||
|
@ -197,6 +199,7 @@ function install_bin() {
|
|||
[ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || :
|
||||
[ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} || :
|
||||
[ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || :
|
||||
[ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd || :
|
||||
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
|
||||
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
|
||||
[ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo}ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
|
||||
|
@ -213,6 +216,7 @@ function install_bin() {
|
|||
[ -x ${install_main_dir}/bin/${clientName} ] || [ -x ${install_main_2_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || ${csudo}ln -s ${install_main_2_dir}/bin/${clientName} || :
|
||||
[ -x ${install_main_dir}/bin/${serverName} ] || [ -x ${install_main_2_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} || ${csudo}ln -s ${install_main_2_dir}/bin/${serverName} || :
|
||||
[ -x ${install_main_dir}/bin/taosadapter ] || [ -x ${install_main_2_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || ${csudo}ln -s ${install_main_2_dir}/bin/taosadapter || :
|
||||
[ -x ${install_main_dir}/bin/udfd ] || [ -x ${install_main_2_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd || ${csudo}ln -s ${install_main_2_dir}/bin/udfd || :
|
||||
[ -x ${install_main_dir}/bin/taosdump ] || [ -x ${install_main_2_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || ln -s ${install_main_2_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
|
||||
[ -x ${install_main_dir}/bin/taosdemo ] || [ -x ${install_main_2_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || ln -s ${install_main_2_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
|
||||
fi
|
||||
|
@ -368,15 +372,15 @@ function install_config() {
|
|||
if [ ! -f ${cfg_install_dir}/${configFile} ]; then
|
||||
${csudo}mkdir -p ${cfg_install_dir}
|
||||
[ -f ${script_dir}/../cfg/${configFile} ] &&
|
||||
${csudo}cp ${script_dir}/../cfg/${configFile} ${cfg_install_dir} || :
|
||||
${csudo}cp ${script_dir}/../cfg/${configFile} ${cfg_install_dir}
|
||||
${csudo}chmod 644 ${cfg_install_dir}/${configFile}
|
||||
${csudo}cp -f ${script_dir}/../cfg/${configFile} \
|
||||
${cfg_install_dir}/${configFile}.${verNumber} || :
|
||||
${cfg_install_dir}/${configFile}.${verNumber}
|
||||
${csudo}ln -s ${cfg_install_dir}/${configFile} \
|
||||
${install_main_dir}/cfg/${configFile}
|
||||
else
|
||||
${csudo}cp -f ${script_dir}/../cfg/${configFile} \
|
||||
${cfg_install_dir}/${configFile}.${verNumber} || :
|
||||
${cfg_install_dir}/${configFile}.${verNumber}
|
||||
fi
|
||||
}
|
||||
|
||||
|
@ -471,10 +475,10 @@ function install_service_on_sysvinit() {
|
|||
|
||||
if ((${os_type} == 1)); then
|
||||
# ${csudo}cp -f ${script_dir}/../deb/${serverName} ${install_main_dir}/init.d
|
||||
${csudo}cp ${script_dir}/../deb/${serverName} ${service_config_dir} && ${csudo}chmod a+x ${service_config_dir}/${serverName} || :
|
||||
${csudo}cp ${script_dir}/../deb/${serverName} ${service_config_dir} && ${csudo}chmod a+x ${service_config_dir}/${serverName}
|
||||
elif ((${os_type} == 2)); then
|
||||
# ${csudo}cp -f ${script_dir}/../rpm/${serverName} ${install_main_dir}/init.d
|
||||
${csudo}cp ${script_dir}/../rpm/${serverName} ${service_config_dir} && ${csudo}chmod a+x ${service_config_dir}/${serverName} || :
|
||||
${csudo}cp ${script_dir}/../rpm/${serverName} ${service_config_dir} && ${csudo}chmod a+x ${service_config_dir}/${serverName}
|
||||
fi
|
||||
|
||||
if ((${initd_mod} == 1)); then
|
|
@ -0,0 +1,71 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Generate the arbitrator's tar.gz setup package for all operating systems
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
version=$2
|
||||
build_time=$3
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
pagMode=$8
|
||||
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
|
||||
productName="TDengine"
|
||||
|
||||
# create compressed install file.
|
||||
build_dir="${compile_dir}/build"
|
||||
code_dir="${top_dir}/src"
|
||||
release_dir="${top_dir}/release"
|
||||
|
||||
#package_name='linux'
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_dir="${release_dir}/${productName}-enterprise-arbitrator-${version}"
|
||||
else
|
||||
install_dir="${release_dir}/${productName}-arbitrator-${version}"
|
||||
fi
|
||||
|
||||
# Directories and files.
|
||||
bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi.sh"
|
||||
install_files="${script_dir}/install_arbi.sh"
|
||||
|
||||
#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
|
||||
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
|
||||
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
|
||||
|
||||
# make directories.
|
||||
mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi.sh || :
|
||||
#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || :
|
||||
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
|
||||
|
||||
cd ${release_dir}
|
||||
|
||||
# install_dir already distinguishes cluster from edge, so this code is commented out
|
||||
pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
|
||||
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
|
||||
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
pkg_name=${pkg_name}
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
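# Illustrative result (version/os/cpu values are examples only): with verMode=edge,
# verType=stable, osType=Linux and cpuType=x64 the tarball produced below is
# TDengine-arbitrator-${version}-Linux-x64.tar.gz under ${release_dir}.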
|
||||
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
|
||||
exitcode=$?
|
||||
if [ "$exitcode" != "0" ]; then
|
||||
echo "tar ${pkg_name}.tar.gz error !!!"
|
||||
exit $exitcode
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
|
@ -0,0 +1,246 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Generate the tar.gz package for the Linux client on all operating systems
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
version=$2
|
||||
build_time=$3
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
pagMode=$8
|
||||
dbName=$9
|
||||
|
||||
productName="TDengine"
|
||||
clientName="taos"
|
||||
configFile="taos.cfg"
|
||||
tarName="taos.tar.gz"
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
else
|
||||
script_dir=$(dirname $0)
|
||||
cd ${script_dir}
|
||||
script_dir="$(pwd)"
|
||||
top_dir=${script_dir}/../..
|
||||
fi
|
||||
|
||||
# create compressed install file.
|
||||
build_dir="${compile_dir}/build"
|
||||
code_dir="${top_dir}/src"
|
||||
release_dir="${top_dir}/release"
|
||||
|
||||
#package_name='linux'
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_dir="${release_dir}/${productName}-enterprise-client-${version}"
|
||||
else
|
||||
install_dir="${release_dir}/${productName}-client-${version}"
|
||||
fi
|
||||
|
||||
# Directories and files.
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
strip ${build_dir}/bin/${clientName}
|
||||
bin_files="${build_dir}/bin/${clientName} \
|
||||
${script_dir}/remove_client.sh"
|
||||
else
|
||||
bin_files="${build_dir}/bin/${clientName} \
|
||||
${script_dir}/remove_client.sh \
|
||||
${script_dir}/set_core.sh \
|
||||
${script_dir}/get_client.sh"
|
||||
fi
|
||||
lib_files="${build_dir}/lib/libtaos.so.${version}"
|
||||
else
|
||||
bin_files="${build_dir}/bin/${clientName} ${script_dir}/remove_client.sh"
|
||||
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
|
||||
fi
|
||||
|
||||
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
|
||||
if [ "$dbName" != "taos" ]; then
|
||||
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
|
||||
else
|
||||
cfg_dir="${top_dir}/packaging/cfg"
|
||||
fi
|
||||
|
||||
install_files="${script_dir}/install_client.sh"
|
||||
|
||||
# make directories.
|
||||
mkdir -p ${install_dir}
|
||||
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
|
||||
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile}
|
||||
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/*
|
||||
|
||||
if [ -f ${build_dir}/bin/jemalloc-config ]; then
|
||||
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
|
||||
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
|
||||
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
|
||||
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
|
||||
fi
|
||||
if [ -f ${build_dir}/bin/jeprof ]; then
|
||||
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
|
||||
fi
|
||||
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
|
||||
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
|
||||
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
|
||||
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
|
||||
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
|
||||
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
|
||||
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
|
||||
fi
|
||||
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
|
||||
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
|
||||
fi
|
||||
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
|
||||
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
|
||||
fi
|
||||
fi
|
||||
|
||||
cd ${install_dir}
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
tar -zcv -f ${tarName} * --remove-files || :
|
||||
else
|
||||
tar -zcv -f ${tarName} * || :
|
||||
mv ${tarName} ..
|
||||
rm -rf ./*
|
||||
mv ../${tarName} .
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
||||
cp ${install_files} ${install_dir}
|
||||
if [ "$osType" == "Darwin" ]; then
|
||||
sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client.sh >>install_client_temp.sh
|
||||
mv install_client_temp.sh ${install_dir}/install_client.sh
|
||||
fi
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_client.sh >>install_client_temp.sh
|
||||
mv install_client_temp.sh ${install_dir}/install_client.sh
|
||||
fi
|
||||
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client.sh >>install_client_temp.sh
|
||||
mv install_client_temp.sh ${install_dir}/install_client.sh
|
||||
fi
|
||||
chmod a+x ${install_dir}/install_client.sh
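# The sed passes above bake the packaging-time defaults (osType, verMode, pagMode) into the
# copy of install_client.sh that ships inside the package, so the installer behaves
# correctly on the target platform without extra flags.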
|
||||
|
||||
if [[ $productName == "TDengine" ]]; then
|
||||
# Copy example code
|
||||
mkdir -p ${install_dir}/examples
|
||||
examples_dir="${top_dir}/examples"
|
||||
cp -r ${examples_dir}/c ${install_dir}/examples
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
cp -r ${examples_dir}/JDBC ${install_dir}/examples
|
||||
cp -r ${examples_dir}/matlab ${install_dir}/examples
|
||||
cp -r ${examples_dir}/python ${install_dir}/examples
|
||||
cp -r ${examples_dir}/R ${install_dir}/examples
|
||||
cp -r ${examples_dir}/go ${install_dir}/examples
|
||||
cp -r ${examples_dir}/nodejs ${install_dir}/examples
|
||||
cp -r ${examples_dir}/C# ${install_dir}/examples
|
||||
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../src/kit/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
|
||||
fi
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
# Copy connector
|
||||
connector_dir="${code_dir}/connector"
|
||||
mkdir -p ${install_dir}/connector
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
|
||||
fi
|
||||
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
|
||||
cp -r ${connector_dir}/go ${install_dir}/connector
|
||||
else
|
||||
echo "WARNING: go connector not found, please check if want to use it!"
|
||||
fi
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
|
||||
rm -rf ${install_dir}/connector/python/.git ||:
|
||||
# cp -r ${connector_dir}/python ${install_dir}/connector
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-node ${install_dir}/connector/nodejs
|
||||
rm -rf ${install_dir}/connector/nodejs/.git ||:
|
||||
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-dotnet ${install_dir}/connector/dotnet
|
||||
rm -rf ${install_dir}/connector/dotnet/.git ||:
|
||||
# cp -r ${connector_dir}/nodejs ${install_dir}/connector
|
||||
git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
|
||||
rm -rf ${install_dir}/connector/rust/.git ||:
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
# Copy driver
|
||||
mkdir -p ${install_dir}/driver
|
||||
cp ${lib_files} ${install_dir}/driver
|
||||
|
||||
# Copy connector
|
||||
connector_dir="${code_dir}/connector"
|
||||
mkdir -p ${install_dir}/connector
|
||||
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
|
||||
fi
|
||||
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
|
||||
cp -r ${connector_dir}/go ${install_dir}/connector
|
||||
else
|
||||
echo "WARNING: go connector not found, please check if want to use it!"
|
||||
fi
|
||||
cp -r ${connector_dir}/python ${install_dir}/connector
|
||||
cp -r ${connector_dir}/nodejs ${install_dir}/connector
|
||||
fi
|
||||
# Copy release note
|
||||
# cp ${script_dir}/release_note ${install_dir}
|
||||
|
||||
# exit 1
|
||||
|
||||
cd ${release_dir}
|
||||
|
||||
# install_dir already distinguishes cluster from edge, so this code is commented out
|
||||
pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
|
||||
# if [ "$verMode" == "cluster" ]; then
|
||||
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
# elif [ "$verMode" == "edge" ]; then
|
||||
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
# else
|
||||
# echo "unknow verMode, nor cluster or edge"
|
||||
# exit 1
|
||||
# fi
|
||||
|
||||
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
|
||||
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
pkg_name=${pkg_name}
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
pkg_name=${pkg_name}-Lite
|
||||
fi
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
|
||||
else
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || :
|
||||
mv "$(basename ${pkg_name}).tar.gz" ..
|
||||
rm -rf ./*
|
||||
mv ../"$(basename ${pkg_name}).tar.gz" .
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
|
@ -0,0 +1,378 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Generate the tar.gz package for all operating systems
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
version=$2
|
||||
build_time=$3
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
pagMode=$8
|
||||
versionComp=$9
|
||||
dbName=${10}
|
||||
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
|
||||
productName="TDengine"
|
||||
serverName="taosd"
|
||||
clientName="taos"
|
||||
configFile="taos.cfg"
|
||||
tarName="taos.tar.gz"
|
||||
dumpName="taosdump"
|
||||
benchmarkName="taosBenchmark"
|
||||
toolsName="taostools"
|
||||
adapterName="taosadapter"
|
||||
defaultPasswd="taosdata"
|
||||
|
||||
# create compressed install file.
|
||||
build_dir="${compile_dir}/build"
|
||||
code_dir="${top_dir}/src"
|
||||
release_dir="${top_dir}/release"
|
||||
|
||||
#package_name='linux'
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_dir="${release_dir}/${productName}-enterprise-server-${version}"
|
||||
else
|
||||
install_dir="${release_dir}/${productName}-server-${version}"
|
||||
fi
|
||||
|
||||
if [ -d ${top_dir}/src/kit/taos-tools/packaging/deb ]; then
|
||||
cd ${top_dir}/src/kit/taos-tools/packaging/deb
|
||||
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
|
||||
|
||||
taostools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}')
|
||||
taostools_install_dir="${release_dir}/${clientName}Tools-${taostools_ver}"
|
||||
|
||||
cd ${curr_dir}
|
||||
else
|
||||
taostools_install_dir="${release_dir}/${clientName}Tools-${version}"
|
||||
fi
|
||||
|
||||
# Directories and files
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
strip ${build_dir}/bin/${serverName}
|
||||
strip ${build_dir}/bin/${clientName}
|
||||
# the lite version does not include taosadapter, so there is no RESTful interface
|
||||
bin_files="${build_dir}/bin/${serverName} ${build_dir}/bin/${clientName} ${script_dir}/remove.sh ${script_dir}/startPre.sh ${build_dir}/bin/taosBenchmark"
|
||||
taostools_bin_files=""
|
||||
else
|
||||
|
||||
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${build_dir}/bin/TDinsight.sh \
|
||||
&& echo "TDinsight.sh downloaded!" \
|
||||
|| echo "failed to download TDinsight.sh"
|
||||
# download TDinsight caches
|
||||
orig_pwd=$(pwd)
|
||||
tdinsight_caches=""
|
||||
cd ${build_dir}/bin/ && \
|
||||
chmod +x TDinsight.sh
|
||||
tdinsight_caches=$(./TDinsight.sh --download-only | xargs -i printf "${build_dir}/bin/{} ")
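# Assumption: TDinsight.sh --download-only prints the names of the files it cached; xargs
# prefixes each name with ${build_dir}/bin/ so they can be bundled with the tools below.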
|
||||
cd $orig_pwd
|
||||
echo "TDinsight caches: $tdinsight_caches"
|
||||
|
||||
taostools_bin_files=" ${build_dir}/bin/taosdump \
|
||||
${build_dir}/bin/taosBenchmark \
|
||||
${build_dir}/bin/TDinsight.sh \
|
||||
$tdinsight_caches"
|
||||
|
||||
bin_files="${build_dir}/bin/${serverName} \
|
||||
${build_dir}/bin/${clientName} \
|
||||
${taostools_bin_files} \
|
||||
${build_dir}/bin/taosadapter \
|
||||
${build_dir}/bin/tarbitrator\
|
||||
${script_dir}/remove.sh \
|
||||
${script_dir}/set_core.sh \
|
||||
${script_dir}/run_taosd_and_taosadapter.sh \
|
||||
${script_dir}/startPre.sh \
|
||||
${script_dir}/taosd-dump-cfg.gdb"
|
||||
fi
|
||||
|
||||
lib_files="${build_dir}/lib/libtaos.so.${version}"
|
||||
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
|
||||
|
||||
if [ "$dbName" != "taos" ]; then
|
||||
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
|
||||
else
|
||||
cfg_dir="${top_dir}/packaging/cfg"
|
||||
fi
|
||||
|
||||
install_files="${script_dir}/install.sh"
|
||||
nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
|
||||
|
||||
init_file_deb=${script_dir}/../deb/taosd
|
||||
init_file_rpm=${script_dir}/../rpm/taosd
|
||||
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
|
||||
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
|
||||
|
||||
# make directories.
|
||||
mkdir -p ${install_dir}
|
||||
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
|
||||
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile}
|
||||
|
||||
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
|
||||
cp ${compile_dir}/test/cfg/taosadapter.toml ${install_dir}/cfg || :
|
||||
fi
|
||||
|
||||
if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
|
||||
cp ${compile_dir}/test/cfg/taosadapter.service ${install_dir}/cfg || :
|
||||
fi
|
||||
|
||||
if [ -f "${cfg_dir}/${serverName}.service" ]; then
|
||||
cp ${cfg_dir}/${serverName}.service ${install_dir}/cfg || :
|
||||
fi
|
||||
|
||||
if [ -f "${top_dir}/packaging/cfg/tarbitratord.service" ]; then
|
||||
cp ${top_dir}/packaging/cfg/tarbitratord.service ${install_dir}/cfg || :
|
||||
fi
|
||||
|
||||
if [ -f "${top_dir}/packaging/cfg/nginxd.service" ]; then
|
||||
cp ${top_dir}/packaging/cfg/nginxd.service ${install_dir}/cfg || :
|
||||
fi
|
||||
|
||||
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/${serverName}.deb
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/${serverName}.rpm
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
|
||||
|
||||
if [ $adapterName != "taosadapter" ]; then
|
||||
mv ${install_dir}/cfg/taosadapter.toml ${install_dir}/cfg/$adapterName.toml
|
||||
sed -i "s/path = \"\/var\/log\/taos\"/path = \"\/var\/log\/${productName}\"/g" ${install_dir}/cfg/$adapterName.toml
|
||||
sed -i "s/password = \"taosdata\"/password = \"${defaultPasswd}\"/g" ${install_dir}/cfg/$adapterName.toml
|
||||
|
||||
mv ${install_dir}/cfg/taosadapter.service ${install_dir}/cfg/$adapterName.service
|
||||
sed -i "s/TDengine/${productName}/g" ${install_dir}/cfg/$adapterName.service
|
||||
sed -i "s/taosAdapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service
|
||||
sed -i "s/taosadapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service
|
||||
|
||||
mv ${install_dir}/bin/taosadapter ${install_dir}/bin/${adapterName}
|
||||
mv ${install_dir}/bin/run_taosd_and_taosadapter.sh ${install_dir}/bin/run_${serverName}_and_${adapterName}.sh
|
||||
mv ${install_dir}/bin/taosd-dump-cfg.gdb ${install_dir}/bin/${serverName}-dump-cfg.gdb
|
||||
fi
|
||||
|
||||
if [ -n "${taostools_bin_files}" ]; then
|
||||
mkdir -p ${taostools_install_dir} || echo -e "failed to create ${taostools_install_dir}"
|
||||
mkdir -p ${taostools_install_dir}/bin \
|
||||
&& cp ${taostools_bin_files} ${taostools_install_dir}/bin \
|
||||
&& chmod a+x ${taostools_install_dir}/bin/* || :
|
||||
|
||||
if [ -f ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh ]; then
|
||||
cp ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh \
|
||||
${taostools_install_dir}/ > /dev/null \
|
||||
&& chmod a+x ${taostools_install_dir}/install-taostools.sh \
|
||||
|| echo -e "failed to copy install-taostools.sh"
|
||||
else
|
||||
echo -e "install-taostools.sh not found"
|
||||
fi
|
||||
|
||||
if [ -f ${top_dir}/src/kit/taos-tools/packaging/tools/uninstall-taostools.sh ]; then
|
||||
cp ${top_dir}/src/kit/taos-tools/packaging/tools/uninstall-taostools.sh \
|
||||
${taostools_install_dir}/ > /dev/null \
|
||||
&& chmod a+x ${taostools_install_dir}/uninstall-taostools.sh \
|
||||
|| echo -e "failed to copy uninstall-taostools.sh"
|
||||
else
|
||||
echo -e "uninstall-taostools.sh not found"
|
||||
fi
|
||||
|
||||
if [ -f ${build_dir}/lib/libavro.so.23.0.0 ]; then
|
||||
mkdir -p ${taostools_install_dir}/avro/{lib,lib/pkgconfig} || echo -e "failed to create ${taostools_install_dir}/avro"
|
||||
cp ${build_dir}/lib/libavro.* ${taostools_install_dir}/avro/lib
|
||||
cp ${build_dir}/lib/pkgconfig/avro-c.pc ${taostools_install_dir}/avro/lib/pkgconfig
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -f ${build_dir}/bin/jemalloc-config ]; then
|
||||
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
|
||||
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
|
||||
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
|
||||
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
|
||||
fi
|
||||
if [ -f ${build_dir}/bin/jeprof ]; then
|
||||
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
|
||||
fi
|
||||
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
|
||||
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
|
||||
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
|
||||
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
|
||||
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
|
||||
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
|
||||
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
|
||||
fi
|
||||
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
|
||||
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
|
||||
fi
|
||||
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
|
||||
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >>remove_temp.sh
|
||||
mv remove_temp.sh ${install_dir}/bin/remove.sh
|
||||
|
||||
mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
|
||||
cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
|
||||
rm -rf ${install_dir}/nginxd/png
|
||||
|
||||
if [ "$cpuType" == "aarch64" ]; then
|
||||
cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
|
||||
elif [ "$cpuType" == "aarch32" ]; then
|
||||
cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
|
||||
fi
|
||||
rm -rf ${install_dir}/nginxd/sbin/arm
|
||||
fi
|
||||
|
||||
cd ${install_dir}
|
||||
tar -zcv -f ${tarName} * --remove-files || :
|
||||
exitcode=$?
|
||||
if [ "$exitcode" != "0" ]; then
|
||||
echo "tar ${tarName} error !!!"
|
||||
exit $exitcode
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
||||
cp ${install_files} ${install_dir}
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install.sh >>install_temp.sh
|
||||
mv install_temp.sh ${install_dir}/install.sh
|
||||
fi
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install.sh >>install_temp.sh
|
||||
mv install_temp.sh ${install_dir}/install.sh
|
||||
fi
|
||||
chmod a+x ${install_dir}/install.sh
|
||||
|
||||
if [[ $dbName == "taos" ]]; then
|
||||
# Copy example code
|
||||
mkdir -p ${install_dir}/examples
|
||||
examples_dir="${top_dir}/examples"
|
||||
cp -r ${examples_dir}/c ${install_dir}/examples
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
if [ -d ${examples_dir}/JDBC/connectionPools/target ]; then
|
||||
rm -rf ${examples_dir}/JDBC/connectionPools/target
|
||||
fi
|
||||
if [ -d ${examples_dir}/JDBC/JDBCDemo/target ]; then
|
||||
rm -rf ${examples_dir}/JDBC/JDBCDemo/target
|
||||
fi
|
||||
if [ -d ${examples_dir}/JDBC/mybatisplus-demo/target ]; then
|
||||
rm -rf ${examples_dir}/JDBC/mybatisplus-demo/target
|
||||
fi
|
||||
if [ -d ${examples_dir}/JDBC/springbootdemo/target ]; then
|
||||
rm -rf ${examples_dir}/JDBC/springbootdemo/target
|
||||
fi
|
||||
if [ -d ${examples_dir}/JDBC/SpringJdbcTemplate/target ]; then
|
||||
rm -rf ${examples_dir}/JDBC/SpringJdbcTemplate/target
|
||||
fi
|
||||
if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then
|
||||
rm -rf ${examples_dir}/JDBC/taosdemo/target
|
||||
fi
|
||||
|
||||
cp -r ${examples_dir}/JDBC ${install_dir}/examples
|
||||
cp -r ${examples_dir}/matlab ${install_dir}/examples
|
||||
cp -r ${examples_dir}/python ${install_dir}/examples
|
||||
cp -r ${examples_dir}/R ${install_dir}/examples
|
||||
cp -r ${examples_dir}/go ${install_dir}/examples
|
||||
cp -r ${examples_dir}/nodejs ${install_dir}/examples
|
||||
cp -r ${examples_dir}/C# ${install_dir}/examples
|
||||
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../src/kit/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
|
||||
fi
|
||||
fi
|
||||
|
||||
# Copy driver
|
||||
mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt
|
||||
|
||||
# Copy connector
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
connector_dir="${code_dir}/connector"
|
||||
mkdir -p ${install_dir}/connector
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
|
||||
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
|
||||
cp -r ${connector_dir}/go ${install_dir}/connector
|
||||
else
|
||||
echo "WARNING: go connector not found, please check if want to use it!"
|
||||
fi
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
|
||||
rm -rf ${install_dir}/connector/python/.git ||:
|
||||
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-node ${install_dir}/connector/nodejs
|
||||
rm -rf ${install_dir}/connector/nodejs/.git ||:
|
||||
|
||||
git clone --depth 1 https://github.com/taosdata/taos-connector-dotnet ${install_dir}/connector/dotnet
|
||||
rm -rf ${install_dir}/connector/dotnet/.git ||:
|
||||
|
||||
git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
|
||||
rm -rf ${install_dir}/connector/rust/.git ||:
|
||||
# cp -r ${connector_dir}/python ${install_dir}/connector
|
||||
# cp -r ${connector_dir}/nodejs ${install_dir}/connector
|
||||
fi
|
||||
fi
|
||||
|
||||
# Copy release note
|
||||
cp ${script_dir}/release_note ${install_dir}
|
||||
|
||||
# exit 1
|
||||
|
||||
cd ${release_dir}
|
||||
|
||||
# install_dir already distinguishes cluster from edge, so this code is commented out
|
||||
pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
|
||||
taostools_pkg_name=${taostools_install_dir}-${osType}-${cpuType}
|
||||
|
||||
# if [ "$verMode" == "cluster" ]; then
|
||||
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
# elif [ "$verMode" == "edge" ]; then
|
||||
# pkg_name=${install_dir}-${osType}-${cpuType}
|
||||
# else
|
||||
# echo "unknow verMode, nor cluster or edge"
|
||||
# exit 1
|
||||
# fi
|
||||
|
||||
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
|
||||
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
|
||||
taostools_pkg_name=${taostools_install_dir}-${verType}-${osType}-${cpuType}
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
pkg_name=${pkg_name}
|
||||
taostools_pkg_name=${taostools_pkg_name}
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
pkg_name=${pkg_name}-Lite
|
||||
fi
|
||||
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" "$(basename ${install_dir})" --remove-files || :
|
||||
exitcode=$?
|
||||
if [ "$exitcode" != "0" ]; then
|
||||
echo "tar ${pkg_name}.tar.gz error !!!"
|
||||
exit $exitcode
|
||||
fi
|
||||
|
||||
if [ -n "${taostools_bin_files}" ]; then
|
||||
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${taostools_install_dir}/bin/TDinsight.sh && echo "TDinsight.sh downloaded!"|| echo "failed to download TDinsight.sh"
|
||||
tar -zcv -f "$(basename ${taostools_pkg_name}).tar.gz" "$(basename ${taostools_install_dir})" --remove-files || :
|
||||
exitcode=$?
|
||||
if [ "$exitcode" != "0" ]; then
|
||||
echo "tar ${taostools_pkg_name}.tar.gz error !!!"
|
||||
exit $exitcode
|
||||
fi
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
|
@ -0,0 +1,539 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# This file is used to install the TDengine RPM package on CentOS systems. The operating system
# is required to use systemd to manage services at boot.
|
||||
# set -x
|
||||
|
||||
iplist=""
|
||||
serverFqdn=""
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
# Dynamic directory
|
||||
data_dir="/var/lib/taos"
|
||||
log_dir="/var/log/taos"
|
||||
data_link_dir="/usr/local/taos/data"
|
||||
log_link_dir="/usr/local/taos/log"
|
||||
install_main_dir="/usr/local/taos"
|
||||
|
||||
# static directory
|
||||
cfg_dir="/usr/local/taos/cfg"
|
||||
bin_dir="/usr/local/taos/bin"
|
||||
lib_dir="/usr/local/taos/driver"
|
||||
init_d_dir="/usr/local/taos/init.d"
|
||||
inc_dir="/usr/local/taos/include"
|
||||
|
||||
cfg_install_dir="/etc/taos"
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
|
||||
service_config_dir="/etc/systemd/system"
|
||||
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
initd_mod=0
|
||||
service_mod=2
|
||||
if pidof systemd &> /dev/null; then
|
||||
service_mod=0
|
||||
elif $(which service &> /dev/null); then
|
||||
service_mod=1
|
||||
service_config_dir="/etc/init.d"
|
||||
if $(which chkconfig &> /dev/null); then
|
||||
initd_mod=1
|
||||
elif $(which insserv &> /dev/null); then
|
||||
initd_mod=2
|
||||
elif $(which update-rc.d &> /dev/null); then
|
||||
initd_mod=3
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
|
||||
function kill_taosadapter() {
|
||||
# ${csudo}pkill -f taosadapter || :
|
||||
pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function kill_taosd() {
|
||||
# ${csudo}pkill -f taosd || :
|
||||
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_include() {
|
||||
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h|| :
|
||||
${csudo}ln -s ${inc_dir}/taos.h ${inc_link_dir}/taos.h
|
||||
${csudo}ln -s ${inc_dir}/taosdef.h ${inc_link_dir}/taosdef.h
|
||||
${csudo}ln -s ${inc_dir}/taoserror.h ${inc_link_dir}/taoserror.h
|
||||
}
|
||||
|
||||
function install_lib() {
|
||||
${csudo}rm -f ${lib_link_dir}/libtaos* || :
|
||||
${csudo}rm -f ${lib64_link_dir}/libtaos* || :
|
||||
|
||||
${csudo}ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.so.1
|
||||
${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
|
||||
|
||||
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
|
||||
${csudo}ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
|
||||
${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
|
||||
fi
|
||||
|
||||
${csudo}ldconfig
|
||||
}
|
||||
|
||||
function install_bin() {
|
||||
# Remove links
|
||||
${csudo}rm -f ${bin_link_dir}/taos || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosd || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosadapter || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosBenchmark || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosdump || :
|
||||
${csudo}rm -f ${bin_link_dir}/rmtaos || :
|
||||
${csudo}rm -f ${bin_link_dir}/set_core || :
|
||||
|
||||
${csudo}chmod 0555 ${bin_dir}/*
|
||||
|
||||
#Make link
|
||||
[ -x ${bin_dir}/taos ] && ${csudo}ln -s ${bin_dir}/taos ${bin_link_dir}/taos || :
|
||||
[ -x ${bin_dir}/taosd ] && ${csudo}ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || :
|
||||
[ -x ${bin_dir}/taosadapter ] && ${csudo}ln -s ${bin_dir}/taosadapter ${bin_link_dir}/taosadapter || :
|
||||
[ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosdemo || :
|
||||
[ -x ${bin_dir}/TDinsight.sh ] && ${csudo}ln -sf ${bin_dir}/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
|
||||
[ -x ${bin_dir}/taosdump ] && ${csudo}ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || :
|
||||
[ -x ${bin_dir}/set_core.sh ] && ${csudo}ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || :
|
||||
}
|
||||
|
||||
function add_newHostname_to_hosts() {
|
||||
localIp="127.0.0.1"
|
||||
OLD_IFS="$IFS"
|
||||
IFS=" "
|
||||
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
|
||||
arr=($iphost)
|
||||
IFS="$OLD_IFS"
|
||||
for s in "${arr[@]}"
|
||||
do
|
||||
if [[ "$s" == "$localIp" ]]; then
|
||||
return
|
||||
fi
|
||||
done
|
||||
${csudo}echo "127.0.0.1 $1" >> /etc/hosts ||:
|
||||
}
|
||||
|
||||
function set_hostname() {
|
||||
echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:"
|
||||
read newHostname
|
||||
while true; do
|
||||
if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then
|
||||
break
|
||||
else
|
||||
read -p "Please enter one hostname(must not be 'localhost'):" newHostname
|
||||
fi
|
||||
done
|
||||
|
||||
${csudo}hostname $newHostname ||:
|
||||
retval=`echo $?`
|
||||
if [[ $retval != 0 ]]; then
|
||||
echo
|
||||
echo "set hostname fail!"
|
||||
return
|
||||
fi
|
||||
#echo -e -n "$(hostnamectl status --static)"
|
||||
#echo -e -n "$(hostnamectl status --transient)"
|
||||
#echo -e -n "$(hostnamectl status --pretty)"
|
||||
|
||||
#ubuntu/centos /etc/hostname
|
||||
if [[ -e /etc/hostname ]]; then
|
||||
${csudo}echo $newHostname > /etc/hostname ||:
|
||||
fi
|
||||
|
||||
#debian: #HOSTNAME=yourname
|
||||
if [[ -e /etc/sysconfig/network ]]; then
|
||||
${csudo}sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||:
|
||||
fi
|
||||
|
||||
${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg
|
||||
serverFqdn=$newHostname
|
||||
|
||||
if [[ -e /etc/hosts ]]; then
|
||||
add_newHostname_to_hosts $newHostname
|
||||
fi
|
||||
}
|
||||
|
||||
function is_correct_ipaddr() {
|
||||
newIp=$1
|
||||
OLD_IFS="$IFS"
|
||||
IFS=" "
|
||||
arr=($iplist)
|
||||
IFS="$OLD_IFS"
|
||||
for s in "${arr[@]}"
|
||||
do
|
||||
if [[ "$s" == "$newIp" ]]; then
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
function set_ipAsFqdn() {
|
||||
iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||:
|
||||
if [ -z "$iplist" ]; then
|
||||
iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||:
|
||||
fi
|
||||
|
||||
if [ -z "$iplist" ]; then
|
||||
echo
|
||||
echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}"
|
||||
localFqdn="127.0.0.1"
|
||||
# Write the local FQDN to configuration file
|
||||
${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
|
||||
serverFqdn=$localFqdn
|
||||
echo
|
||||
return
|
||||
fi
|
||||
|
||||
echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:"
|
||||
echo
|
||||
echo -e -n "${GREEN}$iplist${NC}"
|
||||
echo
|
||||
echo
|
||||
echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:"
|
||||
read localFqdn
|
||||
while true; do
|
||||
if [ ! -z "$localFqdn" ]; then
|
||||
# Check if correct ip address
|
||||
is_correct_ipaddr $localFqdn
|
||||
retval=`echo $?`
|
||||
if [[ $retval != 0 ]]; then
|
||||
read -p "Please choose an IP from local IP list:" localFqdn
|
||||
else
|
||||
# Write the local FQDN to configuration file
|
||||
${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
|
||||
serverFqdn=$localFqdn
|
||||
break
|
||||
fi
|
||||
else
|
||||
read -p "Please choose an IP from local IP list:" localFqdn
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
function local_fqdn_check() {
|
||||
#serverFqdn=$(hostname)
|
||||
echo
|
||||
echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}"
|
||||
echo
|
||||
if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then
|
||||
echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}"
|
||||
echo
|
||||
|
||||
while true
|
||||
do
|
||||
read -r -p "Set hostname now? [Y/n] " input
|
||||
if [ ! -n "$input" ]; then
|
||||
set_hostname
|
||||
break
|
||||
else
|
||||
case $input in
|
||||
[yY][eE][sS]|[yY])
|
||||
set_hostname
|
||||
break
|
||||
;;
|
||||
|
||||
[nN][oO]|[nN])
|
||||
set_ipAsFqdn
|
||||
break
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Invalid input..."
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
function install_taosadapter_config() {
|
||||
if [ ! -f "${cfg_install_dir}/taosadapter.toml" ]; then
|
||||
[ ! -d ${cfg_install_dir} ] &&
${csudo}mkdir -p ${cfg_install_dir}
|
||||
[ -f ${cfg_dir}/taosadapter.toml ] && ${csudo}cp ${cfg_dir}/taosadapter.toml ${cfg_install_dir}
|
||||
[ -f ${cfg_install_dir}/taosadapter.toml ] &&
|
||||
${csudo}chmod 644 ${cfg_install_dir}/taosadapter.toml
|
||||
fi
|
||||
|
||||
[ -f ${cfg_dir}/taosadapter.toml ] &&
|
||||
${csudo}mv ${cfg_dir}/taosadapter.toml ${cfg_dir}/taosadapter.toml.new
|
||||
|
||||
[ -f ${cfg_install_dir}/taosadapter.toml ] &&
|
||||
${csudo}ln -s ${cfg_install_dir}/taosadapter.toml ${cfg_dir}
|
||||
}
|
||||
|
||||
function install_config() {
|
||||
if [ ! -f "${cfg_install_dir}/taos.cfg" ]; then
|
||||
${csudo}mkdir -p ${cfg_install_dir}
|
||||
[ -f ${cfg_dir}/taos.cfg ] && ${csudo}cp ${cfg_dir}/taos.cfg ${cfg_install_dir}
|
||||
${csudo}chmod 644 ${cfg_install_dir}/*
|
||||
fi
|
||||
|
||||
# Save standard input to file descriptor 6 and reopen standard input from /dev/tty
|
||||
exec 6<&0 0</dev/tty
|
||||
|
||||
local_fqdn_check
|
||||
|
||||
# Restore the saved standard input and close file descriptor 6
|
||||
exec 0<&6 6<&-
|
||||
|
||||
${csudo}mv ${cfg_dir}/taos.cfg ${cfg_dir}/taos.cfg.new
|
||||
${csudo}ln -s ${cfg_install_dir}/taos.cfg ${cfg_dir}
|
||||
#FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
|
||||
#FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)"
|
||||
#PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)"
|
||||
#FQDN_PATTERN=":[0-9]{1,5}$"
|
||||
|
||||
# first fully qualified domain name (FQDN) for the TDengine cluster
|
||||
echo
|
||||
echo -e -n "${GREEN}Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join${NC}"
|
||||
echo
|
||||
echo -e -n "${GREEN}OR leave it blank to build one${NC}:"
|
||||
#read firstEp
|
||||
if exec < /dev/tty; then
|
||||
read firstEp;
|
||||
fi
|
||||
while true; do
|
||||
if [ ! -z "$firstEp" ]; then
|
||||
# check the format of the firstEp
|
||||
#if [[ $firstEp == $FQDN_PATTERN ]]; then
|
||||
# Write the first FQDN to configuration file
|
||||
${csudo}sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg
|
||||
break
|
||||
#else
|
||||
# read -p "Please enter the correct FQDN:port: " firstEp
|
||||
#fi
|
||||
else
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
# user email
|
||||
#EMAIL_PATTERN='^[A-Za-z0-9\u4e00-\u9fa5]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$'
|
||||
#EMAIL_PATTERN='^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$'
|
||||
#EMAIL_PATTERN="^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$"
|
||||
echo
|
||||
echo -e -n "${GREEN}Enter your email address for priority support or enter empty to skip${NC}: "
|
||||
read emailAddr
|
||||
while true; do
|
||||
if [ ! -z "$emailAddr" ]; then
|
||||
# check the format of the emailAddr
|
||||
#if [[ "$emailAddr" =~ $EMAIL_PATTERN ]]; then
|
||||
# Write the email address to temp file
|
||||
email_file="${install_main_dir}/email"
|
||||
${csudo}bash -c "echo $emailAddr > ${email_file}"
|
||||
break
|
||||
#else
|
||||
# read -p "Please enter the correct email address: " emailAddr
|
||||
#fi
|
||||
else
|
||||
break
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
function clean_service_on_sysvinit() {
|
||||
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
|
||||
#${csudo}sed -i "\|${restart_config_str}|d" /etc/inittab || :
|
||||
|
||||
if pidof taosd &> /dev/null; then
|
||||
${csudo}service taosd stop || :
|
||||
fi
|
||||
|
||||
if ((${initd_mod}==1)); then
|
||||
${csudo}chkconfig --del taosd || :
|
||||
elif ((${initd_mod}==2)); then
|
||||
${csudo}insserv -r taosd || :
|
||||
elif ((${initd_mod}==3)); then
|
||||
${csudo}update-rc.d -f taosd remove || :
|
||||
fi
|
||||
|
||||
${csudo}rm -f ${service_config_dir}/taosd || :
|
||||
|
||||
if $(which init &> /dev/null); then
|
||||
${csudo}init q || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_service_on_sysvinit() {
|
||||
clean_service_on_sysvinit
|
||||
|
||||
sleep 1
|
||||
|
||||
# Install taosd service
|
||||
${csudo}cp ${init_d_dir}/taosd ${service_config_dir} && ${csudo}chmod a+x ${service_config_dir}/taosd
|
||||
|
||||
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
|
||||
#${csudo}grep -q -F "$restart_config_str" /etc/inittab || ${csudo}bash -c "echo '${restart_config_str}' >> /etc/inittab"
|
||||
|
||||
if ((${initd_mod}==1)); then
|
||||
${csudo}chkconfig --add taosd || :
|
||||
${csudo}chkconfig --level 2345 taosd on || :
|
||||
elif ((${initd_mod}==2)); then
|
||||
${csudo}insserv taosd || :
|
||||
${csudo}insserv -d taosd || :
|
||||
elif ((${initd_mod}==3)); then
|
||||
${csudo}update-rc.d taosd defaults || :
|
||||
fi
|
||||
}
|
||||
|
||||
function clean_service_on_systemd() {
|
||||
taosd_service_config="${service_config_dir}/taosd.service"
|
||||
|
||||
# the taosd service is already stopped before install in the preinst script
|
||||
#if systemctl is-active --quiet taosd; then
|
||||
# echo "TDengine is running, stopping it..."
|
||||
# ${csudo}systemctl stop taosd &> /dev/null || echo &> /dev/null
|
||||
#fi
|
||||
${csudo}systemctl disable taosd &> /dev/null || echo &> /dev/null
|
||||
|
||||
${csudo}rm -f ${taosd_service_config}
|
||||
}
|
||||
|
||||
# taos:2345:respawn:/etc/init.d/taosd start
|
||||
|
||||
function install_service_on_systemd() {
|
||||
clean_service_on_systemd
|
||||
|
||||
taosd_service_config="${service_config_dir}/taosd.service"
|
||||
|
||||
${csudo}bash -c "echo '[Unit]' >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo 'After=network-online.target' >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo 'Wants=network-online.target' >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo '[Service]' >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo 'Type=simple' >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo 'StandardOutput=null' >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo 'Restart=always' >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo 'StartLimitBurst=3' >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo '[Install]' >> ${taosd_service_config}"
|
||||
${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}"
|
||||
${csudo}systemctl enable taosd
|
||||
}
|
||||
|
||||
function install_taosadapter_service() {
|
||||
if ((${service_mod}==0)); then
|
||||
[ -f ${script_dir}/../cfg/taosadapter.service ] &&\
|
||||
${csudo}cp ${script_dir}/../cfg/taosadapter.service \
|
||||
${service_config_dir}/ || :
|
||||
${csudo}systemctl daemon-reload
|
||||
fi
|
||||
}
|
||||
|
||||
function install_service() {
|
||||
if ((${service_mod}==0)); then
|
||||
install_service_on_systemd
|
||||
elif ((${service_mod}==1)); then
|
||||
install_service_on_sysvinit
|
||||
else
|
||||
# taosd must be started manually
|
||||
kill_taosadapter
|
||||
kill_taosd
|
||||
fi
|
||||
}
|
||||
|
||||
function install_TDengine() {
|
||||
echo -e "${GREEN}Start to install TDengine...${NC}"
|
||||
|
||||
# install log and data dir, then link them to /usr/local/taos
|
||||
${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
|
||||
${csudo}mkdir -p ${data_dir}
|
||||
|
||||
${csudo}rm -rf ${log_link_dir} || :
|
||||
${csudo}rm -rf ${data_link_dir} || :
|
||||
|
||||
${csudo}ln -s ${log_dir} ${log_link_dir} || :
|
||||
${csudo}ln -s ${data_dir} ${data_link_dir} || :
|
||||
|
||||
# Install include, lib, binary and service
|
||||
install_include
|
||||
install_lib
|
||||
install_bin
|
||||
install_config
|
||||
install_taosadapter_config
|
||||
install_taosadapter_service
|
||||
install_service
|
||||
|
||||
# Ask whether to start the service
|
||||
#echo
|
||||
#echo -e "\033[44;32;1mTDengine is installed successfully!${NC}"
|
||||
echo
|
||||
echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg"
|
||||
if ((${service_mod}==0)); then
|
||||
echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo}systemctl start taosd${NC}"
|
||||
elif ((${service_mod}==1)); then
|
||||
echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo}update-rc.d taosd default ${RED} for the first time${NC}"
|
||||
echo -e " : ${csudo}service taosd start ${RED} after${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}"
|
||||
fi
|
||||
|
||||
|
||||
|
||||
if [ ! -z "$firstEp" ]; then
|
||||
tmpFqdn=${firstEp%%:*}
|
||||
substr=":"
|
||||
if [[ $firstEp =~ $substr ]];then
|
||||
tmpPort=${firstEp#*:}
|
||||
else
|
||||
tmpPort=""
|
||||
fi
|
||||
if [[ "$tmpPort" != "" ]];then
|
||||
echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}"
|
||||
fi
|
||||
echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
|
||||
echo
|
||||
elif [ ! -z "$serverFqdn" ]; then
|
||||
echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $serverFqdn${GREEN_DARK} to login into TDengine server${NC}"
|
||||
echo
|
||||
fi
|
||||
echo
|
||||
echo -e "\033[44;32;1mTDengine is installed successfully!${NC}"
|
||||
}
|
||||
|
||||
|
||||
## ==============================Main program starts from here============================
|
||||
serverFqdn=$(hostname)
|
||||
install_TDengine
|
|
@ -0,0 +1,142 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Script to stop the service and uninstall TSDB
|
||||
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
|
||||
data_link_dir="/usr/local/taos/data"
|
||||
log_link_dir="/usr/local/taos/log"
|
||||
cfg_link_dir="/usr/local/taos/cfg"
|
||||
|
||||
service_config_dir="/etc/systemd/system"
|
||||
taos_service_name="taosd"
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
initd_mod=0
|
||||
service_mod=2
|
||||
if pidof systemd &> /dev/null; then
|
||||
service_mod=0
|
||||
elif $(which service &> /dev/null); then
|
||||
service_mod=1
|
||||
service_config_dir="/etc/init.d"
|
||||
if $(which chkconfig &> /dev/null); then
|
||||
initd_mod=1
|
||||
elif $(which insserv &> /dev/null); then
|
||||
initd_mod=2
|
||||
elif $(which update-rc.d &> /dev/null); then
|
||||
initd_mod=3
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
|
||||
function kill_taosadapter() {
|
||||
pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function kill_taosd() {
|
||||
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function clean_service_on_systemd() {
|
||||
taosadapter_service_config="${service_config_dir}/taosadapter.service"
|
||||
if systemctl is-active --quiet taosadapter; then
|
||||
echo "taosadapter is running, stopping it..."
|
||||
${csudo}systemctl stop taosadapter &> /dev/null || echo &> /dev/null
|
||||
fi
|
||||
|
||||
taosd_service_config="${service_config_dir}/${taos_service_name}.service"
|
||||
|
||||
if systemctl is-active --quiet ${taos_service_name}; then
|
||||
echo "TDengine taosd is running, stopping it..."
|
||||
${csudo}systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null
|
||||
fi
|
||||
${csudo}systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null
|
||||
|
||||
${csudo}rm -f ${taosd_service_config}
|
||||
|
||||
[ -f ${taosadapter_service_config} ] && ${csudo}rm -f ${taosadapter_service_config}
|
||||
|
||||
}
|
||||
|
||||
function clean_service_on_sysvinit() {
|
||||
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
|
||||
#${csudo}sed -i "\|${restart_config_str}|d" /etc/inittab || :
|
||||
|
||||
if pidof taosd &> /dev/null; then
|
||||
echo "TDengine taosd is running, stopping it..."
|
||||
${csudo}service taosd stop || :
|
||||
fi
|
||||
|
||||
if ((${initd_mod}==1)); then
|
||||
${csudo}chkconfig --del taosd || :
|
||||
elif ((${initd_mod}==2)); then
|
||||
${csudo}insserv -r taosd || :
|
||||
elif ((${initd_mod}==3)); then
|
||||
${csudo}update-rc.d -f taosd remove || :
|
||||
fi
|
||||
|
||||
${csudo}rm -f ${service_config_dir}/taosd || :
|
||||
|
||||
if $(which init &> /dev/null); then
|
||||
${csudo}init q || :
|
||||
fi
|
||||
}
|
||||
|
||||
function clean_service() {
|
||||
if ((${service_mod}==0)); then
|
||||
clean_service_on_systemd
|
||||
elif ((${service_mod}==1)); then
|
||||
clean_service_on_sysvinit
|
||||
else
|
||||
# must manually stop taosd
|
||||
kill_taosadapter
|
||||
kill_taosd
|
||||
fi
|
||||
}
|
||||
|
||||
# Stop service and disable booting start.
|
||||
clean_service
|
||||
|
||||
# Remove all links
|
||||
${csudo}rm -f ${bin_link_dir}/taos || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosd || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosadapter || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosBenchmark || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
||||
${csudo}rm -f ${bin_link_dir}/set_core || :
|
||||
${csudo}rm -f ${cfg_link_dir}/*.new || :
|
||||
${csudo}rm -f ${inc_link_dir}/taos.h || :
|
||||
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
|
||||
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
|
||||
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
||||
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
|
||||
|
||||
${csudo}rm -f ${log_link_dir} || :
|
||||
${csudo}rm -f ${data_link_dir} || :
|
||||
|
||||
if ((${service_mod}==2)); then
|
||||
kill_taosadapter
|
||||
kill_taosd
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}TDengine is removed successfully!${NC}"
|
|
@ -0,0 +1,136 @@
|
|||
taos-1.6.4.0 (Release on 2019-12-01)
|
||||
Bug fixed:
|
||||
1. Look for possible causes of file corruption and fix them
2. Encapsulate memory allocation functions to reduce the possibility of crashes
3. Add Arm64 compilation options
4. Remove most of the warnings in the code
5. Provide a variety of connector usage documents
6. Allow the network connection type to be selected between UDP and TCP
7. Allow the maximum number of tags to be 32
8. Fix bugs reported by users
|
||||
|
||||
taos-1.5.2.6 (Release on 2019-05-13)
|
||||
Bug fixed:
|
||||
- Nchar strings were sometimes wrongly truncated on Windows
|
||||
- Importing data from file throws an error of "invalid SQL"
|
||||
|
||||
taos-1.5.2.5 (Release on 2019-05-13)
|
||||
Bug fixed:
|
||||
- Long timespan data import sometimes affects query result
|
||||
- Synchronization of cluster dnodes worked incorrectly when importing
|
||||
|
||||
taos-1.5.2.4 (Release on 2019-05-10)
|
||||
New Features:
|
||||
- Optimized Windows client installation: now users don't need to copy taos.dll manually
- Changed the priority of taos.cfg and JDBC URL: parameters in the JDBC URL now have a higher priority than parameters in taos.cfg
Bug fixed:
- Expired data files were not deleted correctly
- Occasionally importing returned an "affected rows" count larger than 0, but 0 rows were actually written into the db
- Commit log was occupied by too many import-to-file requests, which blocked further data importing
- Cloud service showed a wrong number of available days with the current balance
|
||||
- Other minor issues
|
||||
|
||||
taos-1.5.1 (Release on 2019-04-09)
|
||||
New Features:
|
||||
- Maximum number of rows returned by "top/bottom" methods increased from 20 to 100
|
||||
- Improved the performance of "first/last" methods
|
||||
- Increased system stability
|
||||
Bug fixed:
|
||||
- Connection failure when querying huge STables through TCP
- The primary timestamp was occasionally returned as NULL in some queries
- Operation failure when updating a tag value to NULL
- Stream calculation couldn't start on certain occasions
|
||||
|
||||
taos-1.5.0 (Release on 2019-03-11)
|
||||
New Features:
|
||||
- New syntax to automatically create tables when inserting values into non-existing tables
|
||||
- New syntax "slimit/soffset" to pagenate groups in a query result set
|
||||
- Support "top/bottom" queries on a supertable
|
||||
- High performance statistic aggregation function "apercentile"
|
||||
- Remove "first_t/last_t" functions; improve the performance of "first/last" function
|
||||
- Add pre-aggregation for bool type values
|
||||
- Supports fixed-length streaming computation, i.e. users may define an end time for a stream
|
||||
- New JAVA API for SQL subscription, supports table/supertable/SQL query subscription
|
||||
Bug fixed:
|
||||
- Data file broken issue when frequently using "import"
|
||||
- Using "spread" on a super table may return negative values
|
||||
- RPC bug where random network packets might cause the RPC module to crash
|
||||
|
||||
taos-1.4.15 (Released on 2019-01-23)
|
||||
New Features:
|
||||
- JDBC Driver now supports configuring timezone, locale, cfgdir in JDBC url
|
||||
- A new API is added to validate if a table creation sql statement is correct in syntax without actually creating that table
|
||||
Bugs Fixed:
|
||||
- "select last(*) from STable" sometimest returned incorrect number of rows
|
||||
- JDBC driver method ResultSetMetaData.getColumnClassName() returned wrong values.
|
||||
- Web shell automatically changed query string to lower case
|
||||
|
||||
taos-1.4.14 (Released on 2018-12-22)
|
||||
New Features:
|
||||
- C Driver support for integration with Python
|
||||
- JDBC Driver support for integration with R and MATLAB
|
||||
|
||||
taos-1.4.13 (Released on 2018-12-14)
|
||||
Bugs Fixed:
|
||||
- Clients failed to connect to the server due to unexpected and invalid packets received by the server.
|
||||
Features Added:
|
||||
- Add support to HikariCP in TSDB JDBC driver.
|
||||
|
||||
taos-1.4.12 (Released on 2018-12-08)
|
||||
Bugs Fixed:
|
||||
- Querying data while inserting into the database might return incomplete resultsets.
|
||||
Features Added:
|
||||
- A new python driver is added.
|
||||
- Increased system stability.
|
||||
- Changed the meaning of database configuration parameter 'ablocks'. 'ablocks' used to refer to the total number of cache blocks in memory; now it refers to the average number of cache blocks for each table in memory.
|
||||
|
||||
taos-1.4.11 (Released on 2018-11-23)
|
||||
Bugs Fixed:
|
||||
- Thread memory leak during high-frequency committing.
|
||||
- Master dnode selection failure caused by accidental network issues.
|
||||
Features Added:
|
||||
- Change keyword "metrics" to "stables", i.e. supertables; the previous query "show metrics" is now changed to "show stables".
|
||||
- Add an error message mechanism in C# driver. An error with message "Failed to connect to server" is thrown when fetching data experienced a network connection interruption during data transmitting.
|
||||
|
||||
taos-1.4.10 (Released on 2018-11-13)
|
||||
Bugs Fixed:
|
||||
- Taosdump failed while exporting extremely large datasets to a .sql file.
|
||||
- Commit status did not change correctly if the last commit was triggered by commit threshold time (ctime) and no more new data was written to DB during the next ctime period.
|
||||
Features Added:
|
||||
- Support importing historical data from Telegraf interface.
|
||||
- Support MyBatis framework in TSDB JDBC Driver.
|
||||
- Changed result set row indexing in JDBC Driver. Result set row indexes now start from 1 instead of 0.
|
||||
|
||||
taos-1.4.9 (Released on 2018-11-02)
|
||||
Bugs Fixed:
|
||||
- Dumping data using UTF-8 format in client shell failed.
|
||||
- Tag query failed using C# Driver.
|
||||
- Committing data to disk failed if DB files were corrupted.
|
||||
- Pressing Ctrl+C in the client shell multiple times in a row produced a segmentation fault.
|
||||
Features Added:
|
||||
- Changed the display pattern in shell for taosdump.
|
||||
- Add a check to the status of an existing resultset before firing a new query in a single JDBC connection. A connection can only have a single open resultset, and the resultset must be closed before one can execute new queries.
|
||||
|
||||
|
||||
taos-1.4.7 (Released on 2018-10-25)
|
||||
Bug Fixed:
|
||||
- UTF-8 encoding in JDBC Driver did not give the correct Chinese characters.
|
||||
- Fix crash error when where clause is too long.
|
||||
Features Added:
|
||||
- Add check on database properties, force ablocks to be at least (4 * tables) in a vnode.
|
||||
- Check if pVgroup is empty in sdb.
|
||||
|
||||
taos-1.4.6 (Released on 2018-10-21)
|
||||
Bug Fixed:
|
||||
- Fix wrong symbol added while exporting a CSV file.
|
||||
Features Added:
|
||||
- Update grafana plugins.
|
||||
- Update python drivers.
|
||||
- Add error code explanation in JDBC Driver.
|
||||
- Prohibit login when the server and client versions do not match.
|
||||
|
||||
taos-1.4.5 (Released on 2018-10-17)
|
||||
Bug Fixed:
|
||||
- Fix HTTP request truncation bug in Telegraf interface.
|
||||
Features Added:
|
||||
- Support nchar and null object in JDBC Driver.
|
|
@ -0,0 +1,130 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Script to stop the service and uninstall TDengine's arbitrator
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
verMode=edge
|
||||
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
#install main path
|
||||
install_main_dir="/usr/local/tarbitrator"
|
||||
bin_link_dir="/usr/bin"
|
||||
#inc_link_dir="/usr/include"
|
||||
|
||||
service_config_dir="/etc/systemd/system"
|
||||
tarbitrator_service_name="tarbitratord"
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
initd_mod=0
|
||||
service_mod=2
|
||||
if pidof systemd &> /dev/null; then
|
||||
service_mod=0
|
||||
elif $(which service &> /dev/null); then
|
||||
service_mod=1
|
||||
service_config_dir="/etc/init.d"
|
||||
if $(which chkconfig &> /dev/null); then
|
||||
initd_mod=1
|
||||
elif $(which insserv &> /dev/null); then
|
||||
initd_mod=2
|
||||
elif $(which update-rc.d &> /dev/null); then
|
||||
initd_mod=3
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
|
||||
function kill_tarbitrator() {
|
||||
pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
function clean_bin() {
|
||||
# Remove link
|
||||
${csudo}rm -f ${bin_link_dir}/tarbitrator || :
|
||||
}
|
||||
|
||||
function clean_header() {
|
||||
# Remove link
|
||||
${csudo}rm -f ${inc_link_dir}/taos.h || :
|
||||
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
|
||||
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
|
||||
}
|
||||
|
||||
function clean_log() {
|
||||
# Remove the arbitrator log file
|
||||
${csudo}rm -rf /arbitrator.log || :
|
||||
}
|
||||
|
||||
function clean_service_on_systemd() {
|
||||
tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
|
||||
|
||||
if systemctl is-active --quiet ${tarbitrator_service_name}; then
|
||||
echo "TDengine tarbitrator is running, stopping it..."
|
||||
${csudo}systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
|
||||
fi
|
||||
${csudo}systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
|
||||
|
||||
${csudo}rm -f ${tarbitratord_service_config}
|
||||
}
|
||||
|
||||
function clean_service_on_sysvinit() {
|
||||
if pidof tarbitrator &> /dev/null; then
|
||||
echo "TDengine's tarbitrator is running, stopping it..."
|
||||
${csudo}service tarbitratord stop || :
|
||||
fi
|
||||
|
||||
if ((${initd_mod}==1)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo}chkconfig --del tarbitratord || :
|
||||
fi
|
||||
elif ((${initd_mod}==2)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo}insserv -r tarbitratord || :
|
||||
fi
|
||||
elif ((${initd_mod}==3)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo}update-rc.d -f tarbitratord remove || :
|
||||
fi
|
||||
fi
|
||||
|
||||
${csudo}rm -f ${service_config_dir}/tarbitratord || :
|
||||
|
||||
if $(which init &> /dev/null); then
|
||||
${csudo}init q || :
|
||||
fi
|
||||
}
|
||||
|
||||
function clean_service() {
|
||||
if ((${service_mod}==0)); then
|
||||
clean_service_on_systemd
|
||||
elif ((${service_mod}==1)); then
|
||||
clean_service_on_sysvinit
|
||||
else
|
||||
# must manually stop the tarbitrator
|
||||
kill_tarbitrator
|
||||
fi
|
||||
}
|
||||
|
||||
# Stop service and disable booting start.
|
||||
clean_service
|
||||
# Remove binary file and links
|
||||
clean_bin
|
||||
# Remove header file.
|
||||
##clean_header
|
||||
# Remove log file
|
||||
clean_log
|
||||
|
||||
${csudo}rm -rf ${install_main_dir}
|
||||
|
||||
echo -e "${GREEN}TDengine's arbitrator is removed successfully!${NC}"
|
|
@ -0,0 +1,85 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Script to stop the client and uninstall database, but retain the config and log files.
|
||||
set -e
|
||||
# set -x
|
||||
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
installDir="/usr/local/taos"
|
||||
clientName="taos"
|
||||
uninstallScript="rmtaos"
|
||||
|
||||
#install main path
|
||||
install_main_dir=${installDir}
|
||||
|
||||
log_link_dir=${installDir}/log
|
||||
cfg_link_dir=${installDir}/cfg
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
function kill_client() {
|
||||
if [ -n "$(pidof ${clientName})" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function clean_bin() {
|
||||
# Remove link
|
||||
${csudo}rm -f ${bin_link_dir}/${clientName} || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosdump || :
|
||||
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
|
||||
${csudo}rm -f ${bin_link_dir}/set_core || :
|
||||
}
|
||||
|
||||
function clean_lib() {
|
||||
# Remove link
|
||||
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
||||
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
|
||||
#${csudo}rm -rf ${v15_java_app_dir} || :
|
||||
}
|
||||
|
||||
function clean_header() {
|
||||
# Remove link
|
||||
${csudo}rm -f ${inc_link_dir}/taos.h || :
|
||||
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
|
||||
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
|
||||
}
|
||||
|
||||
function clean_config() {
|
||||
# Remove link
|
||||
${csudo}rm -f ${cfg_link_dir}/* || :
|
||||
}
|
||||
|
||||
function clean_log() {
|
||||
# Remove link
|
||||
${csudo}rm -rf ${log_link_dir} || :
|
||||
}
|
||||
|
||||
# Stop client.
|
||||
kill_client
|
||||
# Remove binary file and links
|
||||
clean_bin
|
||||
# Remove header file.
|
||||
clean_header
|
||||
# Remove lib file
|
||||
clean_lib
|
||||
# Remove link log directory
|
||||
clean_log
|
||||
# Remove link configuration file
|
||||
clean_config
|
||||
|
||||
${csudo}rm -rf ${install_main_dir}
|
||||
|
||||
echo -e "${GREEN}TDengine client is removed successfully!${NC}"
|
||||
echo
|
|
@ -0,0 +1,39 @@
|
|||
#!/bin/bash
|
||||
|
||||
# This script is used to repair links when you want to move TDengine
# data to another place and still access the data.
|
||||
|
||||
# Read link path
|
||||
read -p "Please enter link directory such as /var/lib/taos/tsdb: " linkDir
|
||||
|
||||
while true; do
|
||||
if [ ! -d $linkDir ]; then
|
||||
read -p "Paht not exists, please enter the correct link path:" linkDir
|
||||
continue
|
||||
fi
|
||||
break
|
||||
done
|
||||
|
||||
declare -A dirHash
|
||||
|
||||
for linkFile in $(find -L $linkDir -xtype l); do
|
||||
targetFile=$(readlink -f $linkFile)
|
||||
echo "targetFile: ${targetFile}"
|
||||
# TODO : Extract directory part and basename part
|
||||
dirName=$(dirname $(dirname ${targetFile}))
|
||||
baseName=$(basename $(dirname ${targetFile}))/$(basename ${targetFile})
|
||||
|
||||
# TODO :
|
||||
newDir="${dirHash["$dirName"]}"
|
||||
if [ -z "${dirHash["$dirName"]}" ]; then
|
||||
read -p "Please enter the directory to replace ${dirName}:" newDir
|
||||
|
||||
read -p "Do you want to replcace all[y/N]?" replcaceAll
|
||||
if [[ ( "${replcaceAll}" == "y") || ( "${replcaceAll}" == "Y") ]]; then
|
||||
dirHash["$dirName"]="$newDir"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Replace the file
|
||||
ln -sf "${newDir}/${baseName}" "${linkFile}"
|
||||
done
|
|
@ -0,0 +1,3 @@
|
|||
#!/bin/bash
|
||||
[[ -x /usr/bin/taosadapter ]] && /usr/bin/taosadapter &
|
||||
taosd
|
|
@ -0,0 +1,40 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# This file is used to set up core dump configuration for when taosd crashes
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
# set -e
|
||||
# set -x
|
||||
corePath=$1
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo "
|
||||
fi
|
||||
|
||||
if [[ ! -n ${corePath} ]]; then
|
||||
echo -e -n "${GREEN}Please enter a file directory to save the coredump file${NC}:"
|
||||
read corePath
|
||||
while true; do
|
||||
if [[ ! -z "$corePath" ]]; then
|
||||
break
|
||||
else
|
||||
read -p "Please enter a file directory to save the coredump file:" corePath
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
ulimit -c unlimited
|
||||
${csudo}sed -i '/ulimit -c unlimited/d' /etc/profile ||:
|
||||
${csudo}sed -i '$a\ulimit -c unlimited' /etc/profile ||:
|
||||
source /etc/profile
|
||||
|
||||
${csudo}mkdir -p ${corePath} ||:
|
||||
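# Note: in kernel.core_pattern, %e expands to the executable name and %p to the process ID.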
${csudo}sysctl -w kernel.core_pattern=${corePath}/core-%e-%p ||:
|
||||
${csudo}echo "${corePath}/core-%e-%p" | ${csudo}tee /proc/sys/kernel/core_pattern ||:
|
|
@ -0,0 +1,51 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# If core dump is enabled, set the start count to 3; if disabled, set it to 20.
|
||||
# set -e
|
||||
# set -x
|
||||
|
||||
serverName="taosd"
|
||||
logDir="/var/log/taos"
|
||||
|
||||
taosd=/etc/systemd/system/${serverName}.service
|
||||
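# Read the configured StartLimitBurst value from the taosd unit file; ${line##*=} keeps the text after the last '='.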
line=$(grep StartLimitBurst ${taosd})
|
||||
num=${line##*=}
|
||||
#echo "burst num: ${num}"
|
||||
|
||||
startSeqFile=${logDir}/.startSeq
|
||||
recordFile=${logDir}/.startRecord
|
||||
|
||||
startSeq=0
|
||||
|
||||
if [[ ! -e ${startSeqFile} ]]; then
|
||||
startSeq=0
|
||||
else
|
||||
startSeq=$(cat ${startSeqFile})
|
||||
fi
|
||||
|
||||
nextSeq=$(expr $startSeq + 1)
|
||||
echo "${nextSeq}" >${startSeqFile}
|
||||
|
||||
curTime=$(date "+%Y-%m-%d %H:%M:%S")
|
||||
echo "startSeq:${startSeq} startPre.sh exec ${curTime}, burstCnt:${num}" >>${recordFile}
|
||||
|
||||
coreFlag=$(ulimit -c)
|
||||
echo "coreFlag: ${coreFlag}" >>${recordFile}
|
||||
|
||||
if [ ${coreFlag} = "0" ]; then
|
||||
#echo "core is 0"
|
||||
if [ ${num} != "20" ]; then
|
||||
sed -i "s/^.*StartLimitBurst.*$/StartLimitBurst=20/" ${taosd}
|
||||
systemctl daemon-reload
|
||||
echo "modify burst count from ${num} to 20" >>${recordFile}
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ${coreFlag} = "unlimited" ]; then
|
||||
#echo "core is unlimited"
|
||||
if [ ${num} != "3" ]; then
|
||||
sed -i "s/^.*StartLimitBurst.*$/StartLimitBurst=3/" ${taosd}
|
||||
systemctl daemon-reload
|
||||
echo "modify burst count from ${num} to 3" >>${recordFile}
|
||||
fi
|
||||
fi
|
|
@ -0,0 +1,144 @@
|
|||
# Usage:
|
||||
# sudo gdb -x ./taosd-dump-cfg.gdb
|
||||
|
||||
define attach_pidof
|
||||
if $argc != 1
|
||||
help attach_pidof
|
||||
else
|
||||
shell echo -e "\
|
||||
set \$PID = "$(echo $(pidof $arg0) 0 | cut -d " " -f 1)"\n\
|
||||
if \$PID > 0\n\
|
||||
attach "$(pidof -s $arg0)"\n\
|
||||
else\n\
|
||||
print \"Process '"$arg0"' not found\"\n\
|
||||
end" > /tmp/gdb.pidof
|
||||
source /tmp/gdb.pidof
|
||||
end
|
||||
end
|
||||
|
||||
document attach_pidof
|
||||
Attach to process by name
|
||||
Usage: attach_pidof PROG_NAME
|
||||
end
|
||||
|
||||
set $TAOS_CFG_VTYPE_INT8 = 0
|
||||
set $TAOS_CFG_VTYPE_INT16 = 1
|
||||
set $TAOS_CFG_VTYPE_INT32 = 2
|
||||
set $TAOS_CFG_VTYPE_FLOAT = 3
|
||||
set $TAOS_CFG_VTYPE_STRING = 4
|
||||
set $TAOS_CFG_VTYPE_IPSTR = 5
|
||||
set $TAOS_CFG_VTYPE_DIRECTORY = 6
|
||||
|
||||
set $TSDB_CFG_CTYPE_B_CONFIG = 1U
|
||||
set $TSDB_CFG_CTYPE_B_SHOW = 2U
|
||||
set $TSDB_CFG_CTYPE_B_LOG = 4U
|
||||
set $TSDB_CFG_CTYPE_B_CLIENT = 8U
|
||||
set $TSDB_CFG_CTYPE_B_OPTION = 16U
|
||||
set $TSDB_CFG_CTYPE_B_NOT_PRINT = 32U
|
||||
|
||||
set $TSDB_CFG_PRINT_LEN = 53
|
||||
|
||||
define print_blank
|
||||
if $argc == 1
|
||||
set $blank_len = $arg0
|
||||
while $blank_len > 0
|
||||
printf "%s", " "
|
||||
set $blank_len = $blank_len - 1
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
define dump_cfg
|
||||
if $argc != 1
|
||||
help dump_cfg
|
||||
else
|
||||
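# Pad the option name with blanks up to TSDB_CFG_PRINT_LEN so the printed values line up in one column.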
set $blen = $TSDB_CFG_PRINT_LEN - (int)strlen($arg0.option)
|
||||
if $blen < 0
|
||||
$blen = 0
|
||||
end
|
||||
#printf "%s: %d\n", "******blen: ", $blen
|
||||
printf "%s: ", $arg0.option
|
||||
print_blank $blen
|
||||
|
||||
if $arg0.valType == $TAOS_CFG_VTYPE_INT8
|
||||
printf "%d\n", *((int8_t *) $arg0.ptr)
|
||||
else
|
||||
if $arg0.valType == $TAOS_CFG_VTYPE_INT16
|
||||
printf "%d\n", *((int16_t *) $arg0.ptr)
|
||||
else
|
||||
if $arg0.valType == $TAOS_CFG_VTYPE_INT32
|
||||
printf "%d\n", *((int32_t *) $arg0.ptr)
|
||||
else
|
||||
if $arg0.valType == $TAOS_CFG_VTYPE_FLOAT
|
||||
printf "%f\n", *((float *) $arg0.ptr)
|
||||
else
|
||||
printf "%s\n", $arg0.ptr
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
document dump_cfg
|
||||
Dump a cfg entry
|
||||
Usage: dump_cfg cfg
|
||||
end
|
||||
|
||||
set pagination off
|
||||
|
||||
attach_pidof taosd
|
||||
|
||||
set $idx=0
|
||||
#print tsGlobalConfigNum
|
||||
#set $end=$1
|
||||
set $end=tsGlobalConfigNum
|
||||
|
||||
p "*=*=*=*=*=*=*=*=*= taos global config:"
|
||||
#while ($idx .lt. $end)
|
||||
while ($idx < $end)
|
||||
# print tsGlobalConfig[$idx].option
|
||||
set $cfg = tsGlobalConfig[$idx]
|
||||
set $tsce = tscEmbedded
|
||||
# p "1"
|
||||
if ($tsce == 0)
|
||||
if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_CLIENT)
|
||||
end
|
||||
else
|
||||
if $cfg.cfgType & $TSDB_CFG_CTYPE_B_NOT_PRINT
|
||||
else
|
||||
if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_SHOW)
|
||||
else
|
||||
dump_cfg $cfg
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
set $idx=$idx+1
|
||||
end
|
||||
|
||||
set $idx=0
|
||||
|
||||
p "*=*=*=*=*=*=*=*=*= taos local config:"
|
||||
while ($idx < $end)
|
||||
set $cfg = tsGlobalConfig[$idx]
|
||||
set $tsce = tscEmbedded
|
||||
if ($tsce == 0)
|
||||
if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_CLIENT)
|
||||
end
|
||||
else
|
||||
if $cfg.cfgType & $TSDB_CFG_CTYPE_B_NOT_PRINT
|
||||
else
|
||||
if ($cfg.cfgType & $TSDB_CFG_CTYPE_B_SHOW)
|
||||
else
|
||||
dump_cfg $cfg
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
set $idx=$idx+1
|
||||
end
|
||||
|
||||
detach
|
||||
|
||||
quit
|
|
@ -63,7 +63,7 @@ typedef struct SStmtBindInfo {
|
|||
int8_t tbType;
|
||||
bool tagsCached;
|
||||
void* boundTags;
|
||||
char tbName[TSDB_TABLE_FNAME_LEN];;
|
||||
char tbName[TSDB_TABLE_FNAME_LEN];
|
||||
char tbFName[TSDB_TABLE_FNAME_LEN];
|
||||
char stbFName[TSDB_TABLE_FNAME_LEN];
|
||||
SName sname;
|
||||
|
@ -71,7 +71,6 @@ typedef struct SStmtBindInfo {
|
|||
|
||||
typedef struct SStmtExecInfo {
|
||||
int32_t affectedRows;
|
||||
bool emptyRes;
|
||||
SRequestObj* pRequest;
|
||||
SHashObj* pVgHash;
|
||||
SHashObj* pBlockHash;
|
||||
|
@ -87,7 +86,6 @@ typedef struct SStmtSQLInfo {
|
|||
char* sqlStr;
|
||||
int32_t sqlLen;
|
||||
SArray* nodeList;
|
||||
SQueryPlan* pQueryPlan;
|
||||
SStmtQueryResInfo queryRes;
|
||||
bool autoCreateTbl;
|
||||
} SStmtSQLInfo;
|
||||
|
|
|
@ -91,7 +91,7 @@ void *openTransporter(const char *user, const char *auth, int32_t numOfThread) {
|
|||
rpcInit.label = "TSC";
|
||||
rpcInit.numOfThreads = numOfThread;
|
||||
rpcInit.cfp = processMsgFromServer;
|
||||
rpcInit.sessions = tsMaxConnections;
|
||||
rpcInit.sessions = 1024;
|
||||
rpcInit.connType = TAOS_CONN_CLIENT;
|
||||
rpcInit.user = (char *)user;
|
||||
rpcInit.idleTime = tsShellActivityTimer * 1000;
|
||||
|
|
|
@ -172,7 +172,8 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
|
|||
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
|
||||
.pTransporter = pTscObj->pAppInfo->pTransporter,
|
||||
.pStmtCb = pStmtCb,
|
||||
.pUser = pTscObj->user};
|
||||
.pUser = pTscObj->user,
|
||||
.isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER))};
|
||||
|
||||
cxt.mgmtEpSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
|
||||
int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &cxt.pCatalog);
|
||||
|
@ -517,8 +518,9 @@ STscObj* taosConnectImpl(const char* user, const char* auth, const char* db, __t
|
|||
if (pRequest->code != TSDB_CODE_SUCCESS) {
|
||||
const char* errorMsg =
|
||||
(pRequest->code == TSDB_CODE_RPC_FQDN_ERROR) ? taos_errstr(pRequest) : tstrerror(pRequest->code);
|
||||
printf("failed to connect to server, reason: %s\n\n", errorMsg);
|
||||
fprintf(stderr,"failed to connect to server, reason: %s\n\n", errorMsg);
|
||||
|
||||
terrno = pRequest->code;
|
||||
destroyRequest(pRequest);
|
||||
taos_close(pTscObj);
|
||||
pTscObj = NULL;
|
||||
|
@ -947,8 +949,7 @@ int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableR
|
|||
|
||||
// TODO handle the compressed case
|
||||
pResultInfo->totalRows += pResultInfo->numOfRows;
|
||||
return setResultDataPtr(pResultInfo, pResultInfo->fields, pResultInfo->numOfCols, pResultInfo->numOfRows,
|
||||
convertUcs4);
|
||||
return setResultDataPtr(pResultInfo, pResultInfo->fields, pResultInfo->numOfCols, pResultInfo->numOfRows, convertUcs4);
|
||||
}
|
||||
|
||||
TSDB_SERVER_STATUS taos_check_server_status(const char* fqdn, int port, char* details, int maxlen) {
|
||||
|
|
|
@ -279,7 +279,6 @@ int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool freeRequest) {
}

pStmt->exec.autoCreateTbl = false;
pStmt->exec.emptyRes = false;

if (keepTable) {
return TSDB_CODE_SUCCESS;

@ -298,7 +297,6 @@ int32_t stmtCleanSQLInfo(STscStmt* pStmt) {
taosMemoryFree(pStmt->sql.queryRes.userFields);
taosMemoryFree(pStmt->sql.sqlStr);
qDestroyQuery(pStmt->sql.pQuery);
qDestroyQueryPlan(pStmt->sql.pQueryPlan);
taosArrayDestroy(pStmt->sql.nodeList);

void* pIter = taosHashIterate(pStmt->sql.pTableCache, NULL);

@ -599,6 +597,8 @@ int32_t stmtFetchColFields(STscStmt* pStmt, int32_t* fieldNum, TAOS_FIELD** fiel
int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
STscStmt* pStmt = (STscStmt*)stmt;

STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_BIND));

if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 &&
STMT_TYPE_MULTI_INSERT != pStmt->sql.type) {
pStmt->bInfo.needParse = false;

@ -617,21 +617,42 @@ int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
STMT_ERR_RET(stmtParseSql(pStmt));
}

STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_BIND));

if (STMT_TYPE_QUERY == pStmt->sql.type) {
if (NULL == pStmt->sql.pQueryPlan) {
STMT_ERR_RET(getQueryPlan(pStmt->exec.pRequest, pStmt->sql.pQuery, &pStmt->sql.nodeList));
pStmt->sql.pQueryPlan = pStmt->exec.pRequest->body.pDag;
pStmt->exec.pRequest->body.pDag = NULL;
STMT_ERR_RET(stmtBackupQueryFields(pStmt));
} else {
STMT_ERR_RET(stmtRestoreQueryFields(pStmt));
}
STMT_ERR_RET(qStmtBindParams(pStmt->sql.pQuery, bind, colIdx, pStmt->exec.pRequest->requestId));

SParseContext ctx = {.requestId = pStmt->exec.pRequest->requestId,
.acctId = pStmt->taos->acctId,
.db = pStmt->exec.pRequest->pDb,
.topicQuery = false,
.pSql = pStmt->sql.sqlStr,
.sqlLen = pStmt->sql.sqlLen,
.pMsg = pStmt->exec.pRequest->msgBuf,
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
.pTransporter = pStmt->taos->pAppInfo->pTransporter,
.pStmtCb = NULL,
.pUser = pStmt->taos->user};
ctx.mgmtEpSet = getEpSet_s(&pStmt->taos->pAppInfo->mgmtEp);
STMT_ERR_RET(catalogGetHandle(pStmt->taos->pAppInfo->clusterId, &ctx.pCatalog));

STMT_ERR_RET(qStmtParseQuerySql(&ctx, pStmt->sql.pQuery));

STMT_RET(qStmtBindParam(pStmt->sql.pQueryPlan, bind, colIdx, pStmt->exec.pRequest->requestId, &pStmt->exec.emptyRes));
}
if (pStmt->sql.pQuery->haveResultSet) {
setResSchemaInfo(&pStmt->exec.pRequest->body.resInfo, pStmt->sql.pQuery->pResSchema, pStmt->sql.pQuery->numOfResCols);
setResPrecision(&pStmt->exec.pRequest->body.resInfo, pStmt->sql.pQuery->precision);
}

TSWAP(pStmt->exec.pRequest->dbList, pStmt->sql.pQuery->pDbList);
TSWAP(pStmt->exec.pRequest->tableList, pStmt->sql.pQuery->pTableList);

//if (STMT_TYPE_QUERY == pStmt->sql.queryRes) {
//  STMT_ERR_RET(stmtRestoreQueryFields(pStmt));
//}

//STMT_ERR_RET(stmtBackupQueryFields(pStmt));

return TSDB_CODE_SUCCESS;
}

STableDataBlocks **pDataBlock = (STableDataBlocks**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
if (NULL == pDataBlock) {
tscError("table %s not found in exec blockHash", pStmt->bInfo.tbFName);
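The reworked stmtBindBatch above binds query parameters through qStmtBindParams and re-parses with qStmtParseQuerySql instead of binding into a cached query plan. A minimal, hedged sketch of the public prepared-statement API that drives this path (connection, SQL, and values are placeholders; result retrieval and error handling are elided):

    /* Hedged sketch of the client-facing stmt API that ends up in stmtBindBatch(). */
    #include "taos.h"

    static void demo_stmt_query(TAOS *conn) {
      TAOS_STMT *stmt = taos_stmt_init(conn);
      taos_stmt_prepare(stmt, "SELECT * FROM st WHERE ts > ?", 0);

      int64_t ts = 1648791213000;          // placeholder timestamp value
      TAOS_MULTI_BIND b = {0};
      b.buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
      b.buffer        = &ts;
      b.buffer_length = sizeof(ts);
      b.num           = 1;

      taos_stmt_bind_param_batch(stmt, &b); // reaches stmtBindBatch() above
      taos_stmt_execute(stmt);
      taos_stmt_close(stmt);
    }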
@ -736,11 +757,7 @@ int stmtExec(TAOS_STMT *stmt) {
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_EXECUTE));

if (STMT_TYPE_QUERY == pStmt->sql.type) {
if (pStmt->exec.emptyRes) {
pStmt->exec.pRequest->type = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
} else {
scheduleQuery(pStmt->exec.pRequest, pStmt->sql.pQueryPlan, pStmt->sql.nodeList, NULL);
}
launchQueryImpl(pStmt->exec.pRequest, pStmt->sql.pQuery, TSDB_CODE_SUCCESS, true, NULL);
} else {
STMT_ERR_RET(qBuildStmtOutput(pStmt->sql.pQuery, pStmt->exec.pVgHash, pStmt->exec.pBlockHash));
launchQueryImpl(pStmt->exec.pRequest, pStmt->sql.pQuery, TSDB_CODE_SUCCESS, true, (autoCreateTbl ? (void**)&pRsp : NULL));

@ -839,16 +856,7 @@ int stmtGetParamNum(TAOS_STMT* stmt, int* nums) {
}

if (STMT_TYPE_QUERY == pStmt->sql.type) {
if (NULL == pStmt->sql.pQueryPlan) {
STMT_ERR_RET(getQueryPlan(pStmt->exec.pRequest, pStmt->sql.pQuery, &pStmt->sql.nodeList));
pStmt->sql.pQueryPlan = pStmt->exec.pRequest->body.pDag;
pStmt->exec.pRequest->body.pDag = NULL;
STMT_ERR_RET(stmtBackupQueryFields(pStmt));
} else {
STMT_ERR_RET(stmtRestoreQueryFields(pStmt));
}

*nums = taosArrayGetSize(pStmt->sql.pQueryPlan->pPlaceholderValues);
*nums = taosArrayGetSize(pStmt->sql.pQuery->pPlaceholderValues);
} else {
STMT_ERR_RET(stmtFetchColFields(stmt, nums, NULL));
}
@ -185,6 +185,7 @@ typedef struct {
int32_t async;
tsem_t rspSem;
tmq_resp_err_t rspErr;
SArray* offsets;
} SMqCommitCbParam;

tmq_conf_t* tmq_conf_new() {

@ -246,10 +247,13 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
if (strcmp(key, "msg.with.table.name") == 0) {
if (strcmp(value, "true") == 0) {
conf->withTbName = 1;
return TMQ_CONF_OK;
} else if (strcmp(value, "false") == 0) {
conf->withTbName = 0;
return TMQ_CONF_OK;
} else if (strcmp(value, "none") == 0) {
conf->withTbName = -1;
return TMQ_CONF_OK;
} else {
return TMQ_CONF_INVALID;
}
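tmq_conf_set above now accepts "true", "false", or "none" for msg.with.table.name; a hedged consumer-configuration sketch follows (the group id is a placeholder):

    /* Hedged usage sketch for the option handled above. */
    #include "taos.h"

    tmq_conf_t *build_conf(void) {
      tmq_conf_t *conf = tmq_conf_new();
      tmq_conf_set(conf, "group.id", "demo_group");
      // maps to conf->withTbName = 1 in tmq_conf_set()
      tmq_conf_set(conf, "msg.with.table.name", "true");
      return conf;
    }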
@ -312,7 +316,7 @@ static int32_t tmqMakeTopicVgKey(char* dst, const char* topicName, int32_t vg) {

void tmqAssignDelayedHbTask(void* param, void* tmrId) {
tmq_t* tmq = (tmq_t*)param;
int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t));
int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
*pTaskType = TMQ_DELAYED_TASK__HB;
taosWriteQitem(tmq->delayedTask, pTaskType);
tsem_post(&tmq->rspSem);

@ -320,7 +324,7 @@ void tmqAssignDelayedHbTask(void* param, void* tmrId) {

void tmqAssignDelayedCommitTask(void* param, void* tmrId) {
tmq_t* tmq = (tmq_t*)param;
int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t));
int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
*pTaskType = TMQ_DELAYED_TASK__COMMIT;
taosWriteQitem(tmq->delayedTask, pTaskType);
tsem_post(&tmq->rspSem);

@ -328,7 +332,7 @@ void tmqAssignDelayedCommitTask(void* param, void* tmrId) {

void tmqAssignDelayedReportTask(void* param, void* tmrId) {
tmq_t* tmq = (tmq_t*)param;
int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t));
int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
*pTaskType = TMQ_DELAYED_TASK__REPORT;
taosWriteQitem(tmq->delayedTask, pTaskType);
tsem_post(&tmq->rspSem);

@ -395,6 +399,9 @@ int32_t tmqCommitCb(void* param, const SDataBuf* pMsg, int32_t code) {
if (!pParam->async)
tsem_post(&pParam->rspSem);
else {
if (pParam->offsets) {
taosArrayDestroy(pParam->offsets);
}
tsem_destroy(&pParam->rspSem);
/*if (pParam->pArray) {*/
/*taosArrayDestroy(pParam->pArray);*/

@ -540,10 +547,10 @@ tmq_resp_err_t tmq_commit(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, in
// build msg
// send to mnode
SMqCMCommitOffsetReq req;
SArray* pArray = NULL;
SArray* pOffsets = NULL;

if (offsets == NULL) {
pArray = taosArrayInit(0, sizeof(SMqOffset));
pOffsets = taosArrayInit(0, sizeof(SMqOffset));
for (int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
for (int j = 0; j < taosArrayGetSize(pTopic->vgs); j++) {

@ -553,11 +560,11 @@ tmq_resp_err_t tmq_commit(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, in
strcpy(offset.cgroup, tmq->groupId);
offset.vgId = pVg->vgId;
offset.offset = pVg->currentOffset;
taosArrayPush(pArray, &offset);
taosArrayPush(pOffsets, &offset);
}
}
req.num = pArray->size;
req.offsets = pArray->pData;
req.num = pOffsets->size;
req.offsets = pOffsets->pData;
} else {
req.num = taosArrayGetSize(&offsets->container);
req.offsets = (SMqOffset*)offsets->container.pData;

@ -591,6 +598,7 @@ tmq_resp_err_t tmq_commit(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, in
pParam->tmq = tmq;
tsem_init(&pParam->rspSem, 0, 0);
pParam->async = async;
pParam->offsets = pOffsets;

pRequest->body.requestMsg = (SDataBuf){
.pData = buf,

@ -613,8 +621,8 @@ tmq_resp_err_t tmq_commit(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, in
tsem_destroy(&pParam->rspSem);
taosMemoryFree(pParam);

if (pArray) {
taosArrayDestroy(pArray);
if (pOffsets) {
taosArrayDestroy(pOffsets);
}
}

@ -840,7 +848,7 @@ int32_t tmqPollCb(void* param, const SDataBuf* pMsg, int32_t code) {
tscWarn("mismatch rsp from vg %d, epoch %d, current epoch %d", pParam->vgId, msgEpoch, tmqEpoch);
}

SMqPollRspWrapper* pRspWrapper = taosAllocateQitem(sizeof(SMqPollRspWrapper));
SMqPollRspWrapper* pRspWrapper = taosAllocateQitem(sizeof(SMqPollRspWrapper), DEF_QITEM);
if (pRspWrapper == NULL) {
tscWarn("msg discard from vg %d, epoch %d since out of memory", pParam->vgId, pParam->epoch);
goto CREATE_MSG_FAIL;

@ -979,7 +987,7 @@ int32_t tmqAskEpCb(void* param, const SDataBuf* pMsg, int32_t code) {
tmqUpdateEp(tmq, head->epoch, &rsp);
tDeleteSMqAskEpRsp(&rsp);
} else {
SMqAskEpRspWrapper* pWrapper = taosAllocateQitem(sizeof(SMqAskEpRspWrapper));
SMqAskEpRspWrapper* pWrapper = taosAllocateQitem(sizeof(SMqAskEpRspWrapper), DEF_QITEM);
if (pWrapper == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
code = -1;

@ -1015,7 +1023,7 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) {
atomic_store_32(&tmq->epSkipCnt, 0);
#endif
int32_t tlen = sizeof(SMqAskEpReq);
SMqAskEpReq* req = taosMemoryMalloc(tlen);
SMqAskEpReq* req = taosMemoryCalloc(1, tlen);
if (req == NULL) {
tscError("failed to malloc get subscribe ep buf");
/*atomic_store_8(&tmq->epStatus, 0);*/

@ -1025,7 +1033,7 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) {
req->epoch = htonl(tmq->epoch);
strcpy(req->cgroup, tmq->groupId);

SMqAskEpCbParam* pParam = taosMemoryMalloc(sizeof(SMqAskEpCbParam));
SMqAskEpCbParam* pParam = taosMemoryCalloc(1, sizeof(SMqAskEpCbParam));
if (pParam == NULL) {
tscError("failed to malloc subscribe param");
taosMemoryFree(req);

@ -1107,7 +1115,7 @@ SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t waitTime, SMqClientTopic*
reqOffset = tmq->resetOffsetCfg;
}

SMqPollReq* pReq = taosMemoryMalloc(sizeof(SMqPollReq));
SMqPollReq* pReq = taosMemoryCalloc(1, sizeof(SMqPollReq));
if (pReq == NULL) {
return NULL;
}
File diff suppressed because it is too large
@ -26,7 +26,7 @@ static const SSysDbTableSchema dnodesSchema[] = {
{.name = "id", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "max_vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "support_vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "note", .bytes = 256 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
@ -33,18 +33,18 @@ int32_t tsStatusInterval = 1;  // second

// common
int32_t tsMaxShellConns = 50000;
int32_t tsMaxConnections = 50000;
int32_t tsShellActivityTimer = 3;  // second
bool tsEnableSlaveQuery = true;
bool tsPrintAuth = false;

// multi process
bool tsMultiProcess = false;
int32_t tsMnodeShmSize = TSDB_MAX_WAL_SIZE * 2;
int32_t tsVnodeShmSize = TSDB_MAX_WAL_SIZE * 10;
int32_t tsQnodeShmSize = TSDB_MAX_WAL_SIZE * 4;
int32_t tsSnodeShmSize = TSDB_MAX_WAL_SIZE * 4;
int32_t tsBnodeShmSize = TSDB_MAX_WAL_SIZE * 4;
int32_t tsMnodeShmSize = TSDB_MAX_WAL_SIZE * 2 + 128;
int32_t tsVnodeShmSize = TSDB_MAX_WAL_SIZE * 10 + 128;
int32_t tsQnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 128;
int32_t tsSnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 128;
int32_t tsBnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 128;
int32_t tsNumOfShmThreads = 1;

// queue & threads
int32_t tsNumOfRpcThreads = 1;

@ -280,6 +280,7 @@ int32_t taosAddClientLogCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "cDebugFlag", cDebugFlag, 0, 255, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "uDebugFlag", uDebugFlag, 0, 255, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "rpcDebugFlag", rpcDebugFlag, 0, 255, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "qDebugFlag", qDebugFlag, 0, 255, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "tmrDebugFlag", tmrDebugFlag, 0, 255, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "jniDebugFlag", jniDebugFlag, 0, 255, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "simDebugFlag", 143, 0, 255, 1) != 0) return -1;

@ -298,6 +299,7 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "tqDebugFlag", tqDebugFlag, 0, 255, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "fsDebugFlag", fsDebugFlag, 0, 255, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "fnDebugFlag", fnDebugFlag, 0, 255, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "smaDebugFlag", smaDebugFlag, 0, 255, 0) != 0) return -1;
return 0;
}

@ -351,15 +353,14 @@ static int32_t taosAddSystemCfg(SConfig *pCfg) {
}

static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "supportVnodes", 256, 0, 4096, 0) != 0) return -1;
if (cfgAddDir(pCfg, "dataDir", tsDataDir, 0) != 0) return -1;
if (cfgAddFloat(pCfg, "minimalDataDirGB", 2.0f, 0.001f, 10000000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "maxConnections", tsMaxConnections, 1, 100000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "supportVnodes", 256, 0, 4096, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "maxShellConns", tsMaxShellConns, 10, 50000000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "statusInterval", tsStatusInterval, 1, 30, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 10, 1000000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "minIntervalTime", tsMinIntervalTime, 1, 1000000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "maxStreamCompDelay", tsMaxStreamComputDelay, 10, 1000000000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "maxFirstStreamCompDelay", tsStreamCompStartDelay, 1000, 1000000000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "retryStreamCompDelay", tsRetryStreamCompDelay, 10, 1000000000, 0) != 0) return -1;

@ -371,11 +372,12 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddBool(pCfg, "deadLockKillQuery", tsDeadLockKillQuery, 0) != 0) return -1;

if (cfgAddBool(pCfg, "multiProcess", tsMultiProcess, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "mnodeShmSize", tsMnodeShmSize, 4096, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "vnodeShmSize", tsVnodeShmSize, 4096, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "qnodeShmSize", tsQnodeShmSize, 4096, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "snodeShmSize", tsSnodeShmSize, 4096, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "bnodeShmSize", tsBnodeShmSize, 4096, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "mnodeShmSize", tsMnodeShmSize, TSDB_MAX_WAL_SIZE + 128, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "vnodeShmSize", tsVnodeShmSize, TSDB_MAX_WAL_SIZE + 128, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "qnodeShmSize", tsQnodeShmSize, TSDB_MAX_WAL_SIZE + 128, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "snodeShmSize", tsSnodeShmSize, TSDB_MAX_WAL_SIZE + 128, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "bnodeShmSize", tsBnodeShmSize, TSDB_MAX_WAL_SIZE + 128, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "mumOfShmThreads", tsNumOfShmThreads, 1, 1024, 0) != 0) return -1;

tsNumOfRpcThreads = tsNumOfCores / 2;
tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, 4);

@ -429,6 +431,10 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
tsNumOfSnodeUniqueThreads = TRANGE(tsNumOfSnodeUniqueThreads, 2, 4);
if (cfgAddInt32(pCfg, "numOfSnodeUniqueThreads", tsNumOfSnodeUniqueThreads, 1, 1024, 0) != 0) return -1;

tsRpcQueueMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1;
tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_WAL_SIZE * 10L, TSDB_MAX_WAL_SIZE * 10000L);
if (cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsRpcQueueMemoryAllowed, 1, INT64_MAX, 0) != 0) return -1;

if (cfgAddBool(pCfg, "monitor", tsEnableMonitor, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "monitorInterval", tsMonitorInterval, 1, 200000, 0) != 0) return -1;
if (cfgAddString(pCfg, "monitorFqdn", tsMonitorFqdn, 0) != 0) return -1;
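The hunk above derives the default rpcQueueMemoryAllowed as 10% of total memory and then clamps it with TRANGE; a short hedged illustration of the arithmetic (the 16 GiB host is an example only, not taken from the diff):

    // Illustration only: a 16 GiB host gives tsTotalMemoryKB of about 16 * 1024 * 1024,
    // so the pre-clamp default is roughly 1.7 GB before TRANGE bounds it.
    int64_t totalKB = 16LL * 1024 * 1024;               // example host memory in KB
    int64_t allowed = (int64_t)(totalKB * 1024 * 0.1);  // 10% of RAM, in bytes
    allowed = TRANGE(allowed, TSDB_MAX_WAL_SIZE * 10L, TSDB_MAX_WAL_SIZE * 10000L);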
@ -458,6 +464,7 @@ static void taosSetClientLogCfg(SConfig *pCfg) {
tsLogKeepDays = cfgGetItem(pCfg, "logKeepDays")->i32;
cDebugFlag = cfgGetItem(pCfg, "cDebugFlag")->i32;
uDebugFlag = cfgGetItem(pCfg, "uDebugFlag")->i32;
qDebugFlag = cfgGetItem(pCfg, "qDebugFlag")->i32;
rpcDebugFlag = cfgGetItem(pCfg, "rpcDebugFlag")->i32;
tmrDebugFlag = cfgGetItem(pCfg, "tmrDebugFlag")->i32;
jniDebugFlag = cfgGetItem(pCfg, "jniDebugFlag")->i32;

@ -474,6 +481,7 @@ static void taosSetServerLogCfg(SConfig *pCfg) {
tqDebugFlag = cfgGetItem(pCfg, "tqDebugFlag")->i32;
fsDebugFlag = cfgGetItem(pCfg, "fsDebugFlag")->i32;
fnDebugFlag = cfgGetItem(pCfg, "fnDebugFlag")->i32;
smaDebugFlag = cfgGetItem(pCfg, "smaDebugFlag")->i32;
}

static int32_t taosSetClientCfg(SConfig *pCfg) {

@ -533,12 +541,11 @@ static void taosSetSystemCfg(SConfig *pCfg) {

static int32_t taosSetServerCfg(SConfig *pCfg) {
tsDataSpace.reserved = cfgGetItem(pCfg, "minimalDataDirGB")->fval;
tsMaxNumOfDistinctResults = cfgGetItem(pCfg, "maxNumOfDistinctRes")->i32;
tsMaxConnections = cfgGetItem(pCfg, "maxConnections")->i32;
tsMaxShellConns = cfgGetItem(pCfg, "maxShellConns")->i32;
tsStatusInterval = cfgGetItem(pCfg, "statusInterval")->i32;
tsMinSlidingTime = cfgGetItem(pCfg, "minSlidingTime")->i32;
tsMinIntervalTime = cfgGetItem(pCfg, "minIntervalTime")->i32;
tsMaxNumOfDistinctResults = cfgGetItem(pCfg, "maxNumOfDistinctRes")->i32;
tsMaxStreamComputDelay = cfgGetItem(pCfg, "maxStreamCompDelay")->i32;
tsStreamCompStartDelay = cfgGetItem(pCfg, "maxFirstStreamCompDelay")->i32;
tsRetryStreamCompDelay = cfgGetItem(pCfg, "retryStreamCompDelay")->i32;

@ -569,6 +576,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32;
tsNumOfSnodeSharedThreads = cfgGetItem(pCfg, "numOfSnodeSharedThreads")->i32;
tsNumOfSnodeUniqueThreads = cfgGetItem(pCfg, "numOfSnodeUniqueThreads")->i32;
tsRpcQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64;

tsEnableMonitor = cfgGetItem(pCfg, "monitor")->bval;
tsMonitorInterval = cfgGetItem(pCfg, "monitorInterval")->i32;
@ -3562,39 +3562,92 @@ int tDecodeSVCreateTbBatchRsp(SDecoder *pCoder, SVCreateTbBatchRsp *pRsp) {
return 0;
}

int32_t tSerializeSVCreateTSmaReq(void **buf, SVCreateTSmaReq *pReq) {
int32_t tlen = 0;

tlen += taosEncodeFixedI64(buf, pReq->ver);
tlen += tEncodeTSma(buf, &pReq->tSma);

return tlen;
}

void *tDeserializeSVCreateTSmaReq(void *buf, SVCreateTSmaReq *pReq) {
buf = taosDecodeFixedI64(buf, &(pReq->ver));

if ((buf = tDecodeTSma(buf, &pReq->tSma)) == NULL) {
tdDestroyTSma(&pReq->tSma);
int32_t tEncodeTSma(SEncoder *pCoder, const STSma *pSma) {
if (tEncodeI8(pCoder, pSma->version) < 0) return -1;
if (tEncodeI8(pCoder, pSma->intervalUnit) < 0) return -1;
if (tEncodeI8(pCoder, pSma->slidingUnit) < 0) return -1;
if (tEncodeI8(pCoder, pSma->timezoneInt) < 0) return -1;
if (tEncodeCStr(pCoder, pSma->indexName) < 0) return -1;
if (tEncodeI32(pCoder, pSma->exprLen) < 0) return -1;
if (tEncodeI32(pCoder, pSma->tagsFilterLen) < 0) return -1;
if (tEncodeI64(pCoder, pSma->indexUid) < 0) return -1;
if (tEncodeI64(pCoder, pSma->tableUid) < 0) return -1;
if (tEncodeI64(pCoder, pSma->interval) < 0) return -1;
if (tEncodeI64(pCoder, pSma->offset) < 0) return -1;
if (tEncodeI64(pCoder, pSma->sliding) < 0) return -1;
if (pSma->exprLen > 0) {
if (tEncodeCStr(pCoder, pSma->expr) < 0) return -1;
}
return buf;
if (pSma->tagsFilterLen > 0) {
if (tEncodeCStr(pCoder, pSma->tagsFilter) < 0) return -1;
}

return 0;
}

int32_t tSerializeSVDropTSmaReq(void **buf, SVDropTSmaReq *pReq) {
int32_t tlen = 0;
int32_t tDecodeTSma(SDecoder *pCoder, STSma *pSma) {
if (tDecodeI8(pCoder, &pSma->version) < 0) return -1;
if (tDecodeI8(pCoder, &pSma->intervalUnit) < 0) return -1;
if (tDecodeI8(pCoder, &pSma->slidingUnit) < 0) return -1;
if (tDecodeI8(pCoder, &pSma->timezoneInt) < 0) return -1;
if (tDecodeCStrTo(pCoder, pSma->indexName) < 0) return -1;
if (tDecodeI32(pCoder, &pSma->exprLen) < 0) return -1;
if (tDecodeI32(pCoder, &pSma->tagsFilterLen) < 0) return -1;
if (tDecodeI64(pCoder, &pSma->indexUid) < 0) return -1;
if (tDecodeI64(pCoder, &pSma->tableUid) < 0) return -1;
if (tDecodeI64(pCoder, &pSma->interval) < 0) return -1;
if (tDecodeI64(pCoder, &pSma->offset) < 0) return -1;
if (tDecodeI64(pCoder, &pSma->sliding) < 0) return -1;
if (pSma->exprLen > 0) {
if (tDecodeCStr(pCoder, &pSma->expr) < 0) return -1;
} else {
pSma->expr = NULL;
}
if (pSma->tagsFilterLen > 0) {
if (tDecodeCStr(pCoder, &pSma->tagsFilter) < 0) return -1;
} else {
pSma->tagsFilter = NULL;
}

tlen += taosEncodeFixedI64(buf, pReq->ver);
tlen += taosEncodeFixedI64(buf, pReq->indexUid);
tlen += taosEncodeString(buf, pReq->indexName);

return tlen;
return 0;
}
void *tDeserializeSVDropTSmaReq(void *buf, SVDropTSmaReq *pReq) {
buf = taosDecodeFixedI64(buf, &(pReq->ver));
buf = taosDecodeFixedI64(buf, &(pReq->indexUid));
buf = taosDecodeStringTo(buf, pReq->indexName);

return buf;
int32_t tEncodeSVCreateTSmaReq(SEncoder *pCoder, const SVCreateTSmaReq *pReq) {
if (tStartEncode(pCoder) < 0) return -1;

tEncodeTSma(pCoder, pReq);

tEndEncode(pCoder);
return 0;
}

int32_t tDecodeSVCreateTSmaReq(SDecoder *pCoder, SVCreateTSmaReq *pReq) {
if (tStartDecode(pCoder) < 0) return -1;

tDecodeTSma(pCoder, pReq);

tEndDecode(pCoder);
return 0;
}

int32_t tEncodeSVDropTSmaReq(SEncoder *pCoder, const SVDropTSmaReq *pReq) {
if (tStartEncode(pCoder) < 0) return -1;

if (tEncodeI64(pCoder, pReq->indexUid) < 0) return -1;
if (tEncodeCStr(pCoder, pReq->indexName) < 0) return -1;

tEndEncode(pCoder);
return 0;
}

int32_t tDecodeSVDropTSmaReq(SDecoder *pCoder, SVDropTSmaReq *pReq) {
if (tStartDecode(pCoder) < 0) return -1;

if (tDecodeI64(pCoder, &pReq->indexUid) < 0) return -1;
if (tDecodeCStrTo(pCoder, pReq->indexName) < 0) return -1;

tEndDecode(pCoder);
return 0;
}

int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateStreamReq *pReq) {
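The hunk above replaces the old tSerialize/tDeserialize pair for time-range SMA requests with tEncodeTSma/tDecodeTSma on the SEncoder/SDecoder convention. A minimal, hedged round-trip sketch follows; the tEncoderInit/tEncoderClear/tDecoderInit/tDecoderClear helpers from tencode.h and all field values are assumptions for illustration, and error checks are elided.

    // Hedged round-trip sketch; helpers and values are assumptions, not from this commit.
    uint8_t  buf[1024] = {0};
    SEncoder enc = {0};
    SDecoder dec = {0};

    STSma src = {.version = 1, .intervalUnit = 'm', .slidingUnit = 'm',
                 .indexUid = 1001, .tableUid = 2002, .interval = 10, .sliding = 10};
    strcpy(src.indexName, "idx_demo");

    tEncoderInit(&enc, buf, sizeof(buf));   // assumed helper from tencode.h
    tEncodeTSma(&enc, &src);                // symmetric with tDecodeTSma below
    tEncoderClear(&enc);

    STSma dst = {0};
    tDecoderInit(&dec, buf, sizeof(buf));   // assumed helper from tencode.h
    tDecodeTSma(&dec, &dst);
    tDecoderClear(&dec);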
@ -4110,3 +4163,113 @@ void tFreeSSubmitRsp(SSubmitRsp *pRsp) {

taosMemoryFree(pRsp);
}

int32_t tEncodeSVAlterTbReq(SEncoder *pEncoder, const SVAlterTbReq *pReq) {
if (tStartEncode(pEncoder) < 0) return -1;

if (tEncodeCStr(pEncoder, pReq->tbName) < 0) return -1;
if (tEncodeI8(pEncoder, pReq->action) < 0) return -1;
switch (pReq->action) {
case TSDB_ALTER_TABLE_ADD_COLUMN:
if (tEncodeCStr(pEncoder, pReq->colName) < 0) return -1;
if (tEncodeI8(pEncoder, pReq->type) < 0) return -1;
if (tEncodeI8(pEncoder, pReq->flags) < 0) return -1;
if (tEncodeI32v(pEncoder, pReq->bytes) < 0) return -1;
break;
case TSDB_ALTER_TABLE_DROP_COLUMN:
if (tEncodeCStr(pEncoder, pReq->colName) < 0) return -1;
break;
case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES:
if (tEncodeCStr(pEncoder, pReq->colName) < 0) return -1;
if (tEncodeI32v(pEncoder, pReq->colModBytes) < 0) return -1;
break;
case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME:
if (tEncodeCStr(pEncoder, pReq->colName) < 0) return -1;
if (tEncodeCStr(pEncoder, pReq->colNewName) < 0) return -1;
break;
case TSDB_ALTER_TABLE_UPDATE_TAG_VAL:
if (tEncodeCStr(pEncoder, pReq->tagName) < 0) return -1;
if (tEncodeI8(pEncoder, pReq->isNull) < 0) return -1;
if (!pReq->isNull) {
if (tEncodeBinary(pEncoder, pReq->pTagVal, pReq->nTagVal) < 0) return -1;
}
break;
case TSDB_ALTER_TABLE_UPDATE_OPTIONS:
if (tEncodeI8(pEncoder, pReq->updateTTL) < 0) return -1;
if (pReq->updateTTL) {
if (tEncodeI32v(pEncoder, pReq->newTTL) < 0) return -1;
}
if (tEncodeI8(pEncoder, pReq->updateComment) < 0) return -1;
if (pReq->updateComment) {
if (tEncodeCStr(pEncoder, pReq->newComment) < 0) return -1;
}
break;
default:
break;
}

tEndEncode(pEncoder);
return 0;
}

int32_t tDecodeSVAlterTbReq(SDecoder *pDecoder, SVAlterTbReq *pReq) {
if (tStartDecode(pDecoder) < 0) return -1;

if (tDecodeCStr(pDecoder, &pReq->tbName) < 0) return -1;
if (tDecodeI8(pDecoder, &pReq->action) < 0) return -1;
switch (pReq->action) {
case TSDB_ALTER_TABLE_ADD_COLUMN:
if (tDecodeCStr(pDecoder, &pReq->colName) < 0) return -1;
if (tDecodeI8(pDecoder, &pReq->type) < 0) return -1;
if (tDecodeI8(pDecoder, &pReq->flags) < 0) return -1;
if (tDecodeI32v(pDecoder, &pReq->bytes) < 0) return -1;
break;
case TSDB_ALTER_TABLE_DROP_COLUMN:
if (tDecodeCStr(pDecoder, &pReq->colName) < 0) return -1;
break;
case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES:
if (tDecodeCStr(pDecoder, &pReq->colName) < 0) return -1;
if (tDecodeI32v(pDecoder, &pReq->colModBytes) < 0) return -1;
break;
case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME:
if (tDecodeCStr(pDecoder, &pReq->colName) < 0) return -1;
if (tDecodeCStr(pDecoder, &pReq->colNewName) < 0) return -1;
break;
case TSDB_ALTER_TABLE_UPDATE_TAG_VAL:
if (tDecodeCStr(pDecoder, &pReq->tagName) < 0) return -1;
if (tDecodeI8(pDecoder, &pReq->isNull) < 0) return -1;
if (!pReq->isNull) {
if (tDecodeBinary(pDecoder, &pReq->pTagVal, &pReq->nTagVal) < 0) return -1;
}
break;
case TSDB_ALTER_TABLE_UPDATE_OPTIONS:
if (tDecodeI8(pDecoder, &pReq->updateTTL) < 0) return -1;
if (pReq->updateTTL) {
if (tDecodeI32v(pDecoder, &pReq->newTTL) < 0) return -1;
}
if (tDecodeI8(pDecoder, &pReq->updateComment) < 0) return -1;
if (pReq->updateComment) {
if (tDecodeCStr(pDecoder, &pReq->newComment) < 0) return -1;
}
break;
default:
break;
}

tEndDecode(pDecoder);
return 0;
}

int32_t tEncodeSVAlterTbRsp(SEncoder *pEncoder, const SVAlterTbRsp *pRsp) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->code) < 0) return -1;
tEndEncode(pEncoder);
return 0;
}

int32_t tDecodeSVAlterTbRsp(SDecoder *pDecoder, SVAlterTbRsp *pRsp) {
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeI32(pDecoder, &pRsp->code) < 0) return -1;
tEndDecode(pDecoder);
return 0;
}
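tEncodeSVAlterTbReq above encodes only the fields relevant to the chosen action, and tDecodeSVAlterTbReq mirrors it case by case. A hedged sketch of encoding a drop-column request follows; tEncoderInit/tEncoderClear are assumed helpers from tencode.h, the table and column names are placeholders, and error handling is elided.

    // Hedged sketch only; field names follow the codec above.
    uint8_t      buf[256] = {0};
    SEncoder     enc = {0};
    SVAlterTbReq req = {0};

    req.tbName  = "d0";                         // child table to alter (placeholder)
    req.action  = TSDB_ALTER_TABLE_DROP_COLUMN;
    req.colName = "c1";                         // only colName is encoded for this action

    tEncoderInit(&enc, buf, sizeof(buf));       // assumed helper from tencode.h
    tEncodeSVAlterTbReq(&enc, &req);
    tEncoderClear(&enc);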
|
@ -19,12 +19,16 @@
|
|||
|
||||
static SMsgCb tsDefaultMsgCb;
|
||||
|
||||
void tmsgSetDefaultMsgCb(const SMsgCb* pMsgCb) { tsDefaultMsgCb = *pMsgCb; }
|
||||
void tmsgSetDefaultMsgCb(const SMsgCb* pMsgCb) {
|
||||
// if (tsDefaultMsgCb.pWrapper == NULL) {
|
||||
tsDefaultMsgCb = *pMsgCb;
|
||||
//}
|
||||
}
|
||||
|
||||
int32_t tmsgPutToQueue(const SMsgCb* pMsgCb, EQueueType qtype, SRpcMsg* pReq) {
|
||||
PutToQueueFp fp = pMsgCb->queueFps[qtype];
|
||||
if (fp != NULL) {
|
||||
return (*fp)(pMsgCb->pWrapper, pReq);
|
||||
return (*fp)(pMsgCb->pMgmt, pReq);
|
||||
} else {
|
||||
terrno = TSDB_CODE_INVALID_PTR;
|
||||
return -1;
|
||||
|
@ -34,7 +38,7 @@ int32_t tmsgPutToQueue(const SMsgCb* pMsgCb, EQueueType qtype, SRpcMsg* pReq) {
|
|||
int32_t tmsgGetQueueSize(const SMsgCb* pMsgCb, int32_t vgId, EQueueType qtype) {
|
||||
GetQueueSizeFp fp = pMsgCb->qsizeFp;
|
||||
if (fp != NULL) {
|
||||
return (*fp)(pMsgCb->pWrapper, vgId, qtype);
|
||||
return (*fp)(pMsgCb->pMgmt, vgId, qtype);
|
||||
} else {
|
||||
terrno = TSDB_CODE_INVALID_PTR;
|
||||
return -1;
|
||||
|
@ -51,7 +55,7 @@ int32_t tmsgSendReq(const SMsgCb* pMsgCb, const SEpSet* epSet, SRpcMsg* pReq) {
|
|||
}
|
||||
}
|
||||
|
||||
void tmsgSendRsp(const SRpcMsg* pRsp) {
|
||||
void tmsgSendRsp(SRpcMsg* pRsp) {
|
||||
SendRspFp fp = tsDefaultMsgCb.sendRspFp;
|
||||
if (fp != NULL) {
|
||||
return (*fp)(tsDefaultMsgCb.pWrapper, pRsp);
|
||||
|
@ -60,7 +64,7 @@ void tmsgSendRsp(const SRpcMsg* pRsp) {
|
|||
}
|
||||
}
|
||||
|
||||
void tmsgSendRedirectRsp(const SRpcMsg* pRsp, const SEpSet* pNewEpSet) {
|
||||
void tmsgSendRedirectRsp(SRpcMsg* pRsp, const SEpSet* pNewEpSet) {
|
||||
SendRedirectRspFp fp = tsDefaultMsgCb.sendRedirectRspFp;
|
||||
if (fp != NULL) {
|
||||
(*fp)(tsDefaultMsgCb.pWrapper, pRsp, pNewEpSet);
|
||||
|
@ -69,6 +73,15 @@ void tmsgSendRedirectRsp(const SRpcMsg* pRsp, const SEpSet* pNewEpSet) {
|
|||
}
|
||||
}
|
||||
|
||||
void tmsgSendMnodeRecv(SRpcMsg* pReq, SRpcMsg* pRsp) {
|
||||
SendMnodeRecvFp fp = tsDefaultMsgCb.sendMnodeRecvFp;
|
||||
if (fp != NULL) {
|
||||
(*fp)(tsDefaultMsgCb.pWrapper, pReq, pRsp);
|
||||
} else {
|
||||
terrno = TSDB_CODE_INVALID_PTR;
|
||||
}
|
||||
}
|
||||
|
||||
void tmsgRegisterBrokenLinkArg(const SMsgCb* pMsgCb, SRpcMsg* pMsg) {
|
||||
RegisterBrokenLinkArgFp fp = pMsgCb->registerBrokenLinkArgFp;
|
||||
if (fp != NULL) {
|
||||
|
|
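With the change above, the queue and queue-size callbacks are invoked with pMgmt rather than pWrapper. A hedged sketch of how a node module might fill the callback table and register it is shown below; myMgmt, myPutToWriteQueue, and the WRITE_QUEUE slot are placeholders and assumptions, not code from this commit.

    // Hedged wiring sketch for an SMsgCb dispatch table.
    static int32_t myPutToWriteQueue(void *pMgmt, SRpcMsg *pMsg) {
      // enqueue pMsg onto the module's own write queue here
      return 0;
    }

    static void registerMsgCb(void *myMgmt) {
      SMsgCb msgCb = {0};
      msgCb.pMgmt = myMgmt;                          // new: callbacks receive pMgmt
      msgCb.queueFps[WRITE_QUEUE] = myPutToWriteQueue;  // WRITE_QUEUE assumed to be an EQueueType value
      tmsgSetDefaultMsgCb(&msgCb);
    }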
|
@ -317,7 +317,11 @@ void buildChildTableName(RandTableName* rName) {
|
|||
for (int j = 0; j < size; ++j) {
|
||||
SSmlKv* tagKv = taosArrayGetP(rName->tags, j);
|
||||
taosStringBuilderAppendStringLen(&sb, tagKv->key, tagKv->keyLen);
|
||||
taosStringBuilderAppendStringLen(&sb, tagKv->value, tagKv->valueLen);
|
||||
if(IS_VAR_DATA_TYPE(tagKv->type)){
|
||||
taosStringBuilderAppendStringLen(&sb, tagKv->value, tagKv->length);
|
||||
}else{
|
||||
taosStringBuilderAppendStringLen(&sb, (char*)(&(tagKv->value)), tagKv->length);
|
||||
}
|
||||
}
|
||||
size_t len = 0;
|
||||
char* keyJoined = taosStringBuilderGetResult(&sb, &len);
|
||||
|
|
|
@ -402,7 +402,8 @@ tDataTypeDescriptor tDataTypes[TSDB_DATA_TYPE_MAX] = {
{TSDB_DATA_TYPE_UINT, 12, INT_BYTES, "INT UNSIGNED", 0, UINT32_MAX, tsCompressInt, tsDecompressInt, getStatics_u32},
{TSDB_DATA_TYPE_UBIGINT, 15, LONG_BYTES, "BIGINT UNSIGNED", 0, UINT64_MAX, tsCompressBigint, tsDecompressBigint,
getStatics_u64},
{TSDB_DATA_TYPE_JSON, 4, TSDB_MAX_JSON_TAG_LEN, "JSON", 0, 0, tsCompressString, tsDecompressString, getStatics_nchr},
{TSDB_DATA_TYPE_JSON, 4, TSDB_MAX_JSON_TAG_LEN, "JSON", 0, 0, tsCompressString, tsDecompressString,
getStatics_nchr},
};

char tTokenTypeSwitcher[13] = {
@ -1,16 +1,17 @@
add_subdirectory(interface)
add_subdirectory(implement)
add_subdirectory(node_mgmt)
add_subdirectory(node_util)
add_subdirectory(mgmt_bnode)
add_subdirectory(mgmt_mnode)
add_subdirectory(mgmt_qnode)
add_subdirectory(mgmt_snode)
add_subdirectory(mgmt_vnode)
add_subdirectory(mgmt_dnode)
add_subdirectory(test)

aux_source_directory(exe EXEC_SRC)
add_executable(taosd ${EXEC_SRC})
target_include_directories(
taosd
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/implement/inc"
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/node_mgmt/inc"
)
target_link_libraries(taosd dnode)
@ -14,7 +14,7 @@
*/

#define _DEFAULT_SOURCE
#include "dmImp.h"
#include "dmMgmt.h"
#include "tconfig.h"

#define DM_APOLLO_URL "The apollo string to use when configuring the server, such as: -a 'jsonFile:./tests/cfg.json', cfg.json text can be '{\"fqdn\":\"td1\"}'."

@ -163,14 +163,14 @@ static SDnodeOpt dmGetOpt() {

static int32_t dmInitLog() {
char logName[12] = {0};
snprintf(logName, sizeof(logName), "%slog", dmLogName(global.ntype));
snprintf(logName, sizeof(logName), "%slog", dmNodeLogName(global.ntype));
return taosCreateLog(logName, 1, configDir, global.envCmd, global.envFile, global.apolloUrl, global.pArgs, 0);
}

static void dmSetProcInfo(int32_t argc, char **argv) {
taosSetProcPath(argc, argv);
if (global.ntype != DNODE && global.ntype != NODE_END) {
const char *name = dmProcName(global.ntype);
const char *name = dmNodeProcName(global.ntype);
taosSetProcName(argc, argv, name);
}
}
@ -1,87 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef _TD_DND_IMP_H_
|
||||
#define _TD_DND_IMP_H_
|
||||
|
||||
#include "dmInt.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
int32_t dmOpenNode(SMgmtWrapper *pWrapper);
|
||||
void dmCloseNode(SMgmtWrapper *pWrapper);
|
||||
|
||||
// dmTransport.c
|
||||
int32_t dmInitServer(SDnode *pDnode);
|
||||
void dmCleanupServer(SDnode *pDnode);
|
||||
int32_t dmInitClient(SDnode *pDnode);
|
||||
void dmCleanupClient(SDnode *pDnode);
|
||||
SProcCfg dmGenProcCfg(SMgmtWrapper *pWrapper);
|
||||
SMsgCb dmGetMsgcb(SMgmtWrapper *pWrapper);
|
||||
int32_t dmInitMsgHandle(SDnode *pDnode);
|
||||
void dmSendRecv(SDnode *pDnode, SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp);
|
||||
void dmSendToMnodeRecv(SDnode *pDnode, SRpcMsg *pReq, SRpcMsg *pRsp);
|
||||
|
||||
// dmEps.c
|
||||
int32_t dmReadEps(SDnode *pDnode);
|
||||
int32_t dmWriteEps(SDnode *pDnode);
|
||||
void dmUpdateEps(SDnode *pDnode, SArray *pDnodeEps);
|
||||
|
||||
// dmHandle.c
|
||||
void dmSendStatusReq(SDnode *pDnode);
|
||||
int32_t dmProcessConfigReq(SDnode *pDnode, SNodeMsg *pMsg);
|
||||
int32_t dmProcessAuthRsp(SDnode *pDnode, SNodeMsg *pMsg);
|
||||
int32_t dmProcessGrantRsp(SDnode *pDnode, SNodeMsg *pMsg);
|
||||
int32_t dmProcessCreateNodeReq(SDnode *pDnode, EDndNodeType ntype, SNodeMsg *pMsg);
|
||||
int32_t dmProcessDropNodeReq(SDnode *pDnode, EDndNodeType ntype, SNodeMsg *pMsg);
|
||||
|
||||
// dmMonitor.c
|
||||
void dmGetVnodeLoads(SDnode *pDnode, SMonVloadInfo *pInfo);
|
||||
void dmGetMnodeLoads(SDnode *pDnode, SMonMloadInfo *pInfo);
|
||||
void dmSendMonitorReport(SDnode *pDnode);
|
||||
|
||||
// dmWorker.c
|
||||
int32_t dmStartStatusThread(SDnode *pDnode);
|
||||
void dmStopStatusThread(SDnode *pDnode);
|
||||
int32_t dmStartMonitorThread(SDnode *pDnode);
|
||||
void dmStopMonitorThread(SDnode *pDnode);
|
||||
int32_t dmStartWorker(SDnode *pDnode);
|
||||
void dmStopWorker(SDnode *pDnode);
|
||||
int32_t dmProcessMgmtMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg);
|
||||
int32_t dmProcessStatusMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg);
|
||||
|
||||
// mgmt nodes
|
||||
void dmSetMgmtFp(SMgmtWrapper *pWrapper);
|
||||
void bmSetMgmtFp(SMgmtWrapper *pWrapper);
|
||||
void qmSetMgmtFp(SMgmtWrapper *pWrapper);
|
||||
void smSetMgmtFp(SMgmtWrapper *pWrapper);
|
||||
void vmSetMgmtFp(SMgmtWrapper *pWrapper);
|
||||
void mmSetMgmtFp(SMgmtWrapper *pWrapper);
|
||||
|
||||
void vmGetVnodeLoads(SMgmtWrapper *pWrapper, SMonVloadInfo *pInfo);
|
||||
void mmGetMnodeLoads(SMgmtWrapper *pWrapper, SMonMloadInfo *pInfo);
|
||||
void mmGetMonitorInfo(SMgmtWrapper *pWrapper, SMonMmInfo *mmInfo);
|
||||
void vmGetMonitorInfo(SMgmtWrapper *pWrapper, SMonVmInfo *vmInfo);
|
||||
void qmGetMonitorInfo(SMgmtWrapper *pWrapper, SMonQmInfo *qmInfo);
|
||||
void smGetMonitorInfo(SMgmtWrapper *pWrapper, SMonSmInfo *smInfo);
|
||||
void bmGetMonitorInfo(SMgmtWrapper *pWrapper, SMonBmInfo *bmInfo);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /*_TD_DND_IMP_H_*/
|
|
@ -1,299 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http:www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#define _DEFAULT_SOURCE
|
||||
#include "dmImp.h"
|
||||
|
||||
static void dmUpdateDnodeCfg(SDnode *pDnode, SDnodeCfg *pCfg) {
|
||||
if (pDnode->data.dnodeId == 0 || pDnode->data.clusterId == 0) {
|
||||
dInfo("set dnodeId:%d clusterId:%" PRId64, pCfg->dnodeId, pCfg->clusterId);
|
||||
taosWLockLatch(&pDnode->data.latch);
|
||||
pDnode->data.dnodeId = pCfg->dnodeId;
|
||||
pDnode->data.clusterId = pCfg->clusterId;
|
||||
dmWriteEps(pDnode);
|
||||
taosWUnLockLatch(&pDnode->data.latch);
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t dmProcessStatusRsp(SDnode *pDnode, SRpcMsg *pRsp) {
|
||||
if (pRsp->code != TSDB_CODE_SUCCESS) {
|
||||
if (pRsp->code == TSDB_CODE_MND_DNODE_NOT_EXIST && !pDnode->data.dropped && pDnode->data.dnodeId > 0) {
|
||||
dInfo("dnode:%d, set to dropped since not exist in mnode", pDnode->data.dnodeId);
|
||||
pDnode->data.dropped = 1;
|
||||
dmWriteEps(pDnode);
|
||||
}
|
||||
} else {
|
||||
SStatusRsp statusRsp = {0};
|
||||
if (pRsp->pCont != NULL && pRsp->contLen > 0 &&
|
||||
tDeserializeSStatusRsp(pRsp->pCont, pRsp->contLen, &statusRsp) == 0) {
|
||||
pDnode->data.dnodeVer = statusRsp.dnodeVer;
|
||||
dmUpdateDnodeCfg(pDnode, &statusRsp.dnodeCfg);
|
||||
dmUpdateEps(pDnode, statusRsp.pDnodeEps);
|
||||
}
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
tFreeSStatusRsp(&statusRsp);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void dmSendStatusReq(SDnode *pDnode) {
|
||||
SStatusReq req = {0};
|
||||
|
||||
taosRLockLatch(&pDnode->data.latch);
|
||||
req.sver = tsVersion;
|
||||
req.dnodeVer = pDnode->data.dnodeVer;
|
||||
req.dnodeId = pDnode->data.dnodeId;
|
||||
req.clusterId = pDnode->data.clusterId;
|
||||
if (req.clusterId == 0) req.dnodeId = 0;
|
||||
req.rebootTime = pDnode->data.rebootTime;
|
||||
req.updateTime = pDnode->data.updateTime;
|
||||
req.numOfCores = tsNumOfCores;
|
||||
req.numOfSupportVnodes = pDnode->data.supportVnodes;
|
||||
tstrncpy(req.dnodeEp, pDnode->data.localEp, TSDB_EP_LEN);
|
||||
|
||||
req.clusterCfg.statusInterval = tsStatusInterval;
|
||||
req.clusterCfg.checkTime = 0;
|
||||
char timestr[32] = "1970-01-01 00:00:00.00";
|
||||
(void)taosParseTime(timestr, &req.clusterCfg.checkTime, (int32_t)strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0);
|
||||
memcpy(req.clusterCfg.timezone, tsTimezoneStr, TD_TIMEZONE_LEN);
|
||||
memcpy(req.clusterCfg.locale, tsLocale, TD_LOCALE_LEN);
|
||||
memcpy(req.clusterCfg.charset, tsCharset, TD_LOCALE_LEN);
|
||||
taosRUnLockLatch(&pDnode->data.latch);
|
||||
|
||||
SMonVloadInfo vinfo = {0};
|
||||
dmGetVnodeLoads(pDnode, &vinfo);
|
||||
req.pVloads = vinfo.pVloads;
|
||||
pDnode->data.unsyncedVgId = 0;
|
||||
pDnode->data.vndState = TAOS_SYNC_STATE_LEADER;
|
||||
for (int32_t i = 0; i < taosArrayGetSize(req.pVloads); ++i) {
|
||||
SVnodeLoad *pLoad = taosArrayGet(req.pVloads, i);
|
||||
if (pLoad->syncState != TAOS_SYNC_STATE_LEADER && pLoad->syncState != TAOS_SYNC_STATE_FOLLOWER) {
|
||||
pDnode->data.unsyncedVgId = pLoad->vgId;
|
||||
pDnode->data.vndState = pLoad->syncState;
|
||||
}
|
||||
}
|
||||
|
||||
SMonMloadInfo minfo = {0};
|
||||
dmGetMnodeLoads(pDnode, &minfo);
|
||||
pDnode->data.isMnode = minfo.isMnode;
|
||||
pDnode->data.mndState = minfo.load.syncState;
|
||||
|
||||
int32_t contLen = tSerializeSStatusReq(NULL, 0, &req);
|
||||
void *pHead = rpcMallocCont(contLen);
|
||||
tSerializeSStatusReq(pHead, contLen, &req);
|
||||
tFreeSStatusReq(&req);
|
||||
|
||||
SRpcMsg rpcMsg = {.pCont = pHead, .contLen = contLen, .msgType = TDMT_MND_STATUS, .ahandle = (void *)0x9527};
|
||||
SRpcMsg rpcRsp = {0};
|
||||
|
||||
dTrace("send req:%s to mnode, app:%p", TMSG_INFO(rpcMsg.msgType), rpcMsg.ahandle);
|
||||
dmSendToMnodeRecv(pDnode, &rpcMsg, &rpcRsp);
|
||||
dmProcessStatusRsp(pDnode, &rpcRsp);
|
||||
}
|
||||
|
||||
int32_t dmProcessAuthRsp(SDnode *pDnode, SNodeMsg *pMsg) {
|
||||
SRpcMsg *pRsp = &pMsg->rpcMsg;
|
||||
dError("auth rsp is received, but not supported yet");
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t dmProcessGrantRsp(SDnode *pDnode, SNodeMsg *pMsg) {
|
||||
SRpcMsg *pRsp = &pMsg->rpcMsg;
|
||||
dError("grant rsp is received, but not supported yet");
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t dmProcessConfigReq(SDnode *pDnode, SNodeMsg *pMsg) {
|
||||
SRpcMsg *pReq = &pMsg->rpcMsg;
|
||||
SDCfgDnodeReq *pCfg = pReq->pCont;
|
||||
dError("config req is received, but not supported yet");
|
||||
return TSDB_CODE_OPS_NOT_SUPPORT;
|
||||
}
|
||||
|
||||
int32_t dmProcessCreateNodeReq(SDnode *pDnode, EDndNodeType ntype, SNodeMsg *pMsg) {
|
||||
SMgmtWrapper *pWrapper = dmAcquireWrapper(pDnode, ntype);
|
||||
if (pWrapper != NULL) {
|
||||
dmReleaseWrapper(pWrapper);
|
||||
terrno = TSDB_CODE_NODE_ALREADY_DEPLOYED;
|
||||
dError("failed to create node since %s", terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
||||
taosThreadMutexLock(&pDnode->mutex);
|
||||
pWrapper = &pDnode->wrappers[ntype];
|
||||
|
||||
if (taosMkDir(pWrapper->path) != 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
dError("failed to create dir:%s since %s", pWrapper->path, terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t code = (*pWrapper->fp.createFp)(pWrapper, pMsg);
|
||||
if (code != 0) {
|
||||
dError("node:%s, failed to create since %s", pWrapper->name, terrstr());
|
||||
} else {
|
||||
dDebug("node:%s, has been created", pWrapper->name);
|
||||
(void)dmOpenNode(pWrapper);
|
||||
pWrapper->required = true;
|
||||
pWrapper->deployed = true;
|
||||
pWrapper->procType = pDnode->ptype;
|
||||
}
|
||||
|
||||
taosThreadMutexUnlock(&pDnode->mutex);
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t dmProcessDropNodeReq(SDnode *pDnode, EDndNodeType ntype, SNodeMsg *pMsg) {
|
||||
SMgmtWrapper *pWrapper = dmAcquireWrapper(pDnode, ntype);
|
||||
if (pWrapper == NULL) {
|
||||
terrno = TSDB_CODE_NODE_NOT_DEPLOYED;
|
||||
dError("failed to drop node since %s", terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
||||
taosThreadMutexLock(&pDnode->mutex);
|
||||
|
||||
int32_t code = (*pWrapper->fp.dropFp)(pWrapper, pMsg);
|
||||
if (code != 0) {
|
||||
dError("node:%s, failed to drop since %s", pWrapper->name, terrstr());
|
||||
} else {
|
||||
dDebug("node:%s, has been dropped", pWrapper->name);
|
||||
pWrapper->required = false;
|
||||
pWrapper->deployed = false;
|
||||
}
|
||||
|
||||
dmReleaseWrapper(pWrapper);
|
||||
|
||||
if (code == 0) {
|
||||
dmCloseNode(pWrapper);
|
||||
taosRemoveDir(pWrapper->path);
|
||||
}
|
||||
taosThreadMutexUnlock(&pDnode->mutex);
|
||||
return code;
|
||||
}
|
||||
|
||||
static void dmSetMgmtMsgHandle(SMgmtWrapper *pWrapper) {
|
||||
// Requests handled by DNODE
|
||||
dmSetMsgHandle(pWrapper, TDMT_DND_CREATE_MNODE, dmProcessMgmtMsg, DEFAULT_HANDLE);
|
||||
dmSetMsgHandle(pWrapper, TDMT_DND_DROP_MNODE, dmProcessMgmtMsg, DEFAULT_HANDLE);
|
||||
dmSetMsgHandle(pWrapper, TDMT_DND_CREATE_QNODE, dmProcessMgmtMsg, DEFAULT_HANDLE);
|
||||
dmSetMsgHandle(pWrapper, TDMT_DND_DROP_QNODE, dmProcessMgmtMsg, DEFAULT_HANDLE);
|
||||
dmSetMsgHandle(pWrapper, TDMT_DND_CREATE_SNODE, dmProcessMgmtMsg, DEFAULT_HANDLE);
|
||||
dmSetMsgHandle(pWrapper, TDMT_DND_DROP_SNODE, dmProcessMgmtMsg, DEFAULT_HANDLE);
|
||||
dmSetMsgHandle(pWrapper, TDMT_DND_CREATE_BNODE, dmProcessMgmtMsg, DEFAULT_HANDLE);
|
||||
dmSetMsgHandle(pWrapper, TDMT_DND_DROP_BNODE, dmProcessMgmtMsg, DEFAULT_HANDLE);
|
||||
dmSetMsgHandle(pWrapper, TDMT_DND_CONFIG_DNODE, dmProcessMgmtMsg, DEFAULT_HANDLE);
|
||||
|
||||
// Requests handled by MNODE
|
||||
dmSetMsgHandle(pWrapper, TDMT_MND_GRANT_RSP, dmProcessMgmtMsg, DEFAULT_HANDLE);
|
||||
dmSetMsgHandle(pWrapper, TDMT_MND_AUTH_RSP, dmProcessMgmtMsg, DEFAULT_HANDLE);
|
||||
}
|
||||
|
||||
static int32_t dmStartMgmt(SMgmtWrapper *pWrapper) {
|
||||
if (dmStartStatusThread(pWrapper->pDnode) != 0) {
|
||||
return -1;
|
||||
}
|
||||
if (dmStartMonitorThread(pWrapper->pDnode) != 0) {
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dmStopMgmt(SMgmtWrapper *pWrapper) {
|
||||
dmStopMonitorThread(pWrapper->pDnode);
|
||||
dmStopStatusThread(pWrapper->pDnode);
|
||||
}
|
||||
|
||||
static int32_t dmInitMgmt(SMgmtWrapper *pWrapper) {
|
||||
dInfo("dnode-mgmt start to init");
|
||||
SDnode *pDnode = pWrapper->pDnode;
|
||||
|
||||
pDnode->data.dnodeHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
|
||||
if (pDnode->data.dnodeHash == NULL) {
|
||||
dError("failed to init dnode hash");
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (dmReadEps(pDnode) != 0) {
|
||||
dError("failed to read file since %s", terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (pDnode->data.dropped) {
|
||||
dError("dnode will not start since its already dropped");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (dmStartWorker(pDnode) != 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (dmInitServer(pDnode) != 0) {
|
||||
dError("failed to init transport since %s", terrstr());
|
||||
return -1;
|
||||
}
|
||||
dmReportStartup(pDnode, "dnode-transport", "initialized");
|
||||
|
||||
if (udfStartUdfd(pDnode->data.dnodeId) != 0) {
|
||||
dError("failed to start udfd");
|
||||
}
|
||||
|
||||
dInfo("dnode-mgmt is initialized");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dmCleanupMgmt(SMgmtWrapper *pWrapper) {
|
||||
dInfo("dnode-mgmt start to clean up");
|
||||
SDnode *pDnode = pWrapper->pDnode;
|
||||
|
||||
udfStopUdfd();
|
||||
|
||||
dmStopWorker(pDnode);
|
||||
|
||||
taosWLockLatch(&pDnode->data.latch);
|
||||
if (pDnode->data.dnodeEps != NULL) {
|
||||
taosArrayDestroy(pDnode->data.dnodeEps);
|
||||
pDnode->data.dnodeEps = NULL;
|
||||
}
|
||||
if (pDnode->data.dnodeHash != NULL) {
|
||||
taosHashCleanup(pDnode->data.dnodeHash);
|
||||
pDnode->data.dnodeHash = NULL;
|
||||
}
|
||||
taosWUnLockLatch(&pDnode->data.latch);
|
||||
|
||||
dmCleanupClient(pDnode);
|
||||
dmCleanupServer(pDnode);
|
||||
dInfo("dnode-mgmt is cleaned up");
|
||||
}
|
||||
|
||||
static int32_t dmRequireMgmt(SMgmtWrapper *pWrapper, bool *required) {
|
||||
*required = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void dmSetMgmtFp(SMgmtWrapper *pWrapper) {
|
||||
SMgmtFp mgmtFp = {0};
|
||||
mgmtFp.openFp = dmInitMgmt;
|
||||
mgmtFp.closeFp = dmCleanupMgmt;
|
||||
mgmtFp.startFp = dmStartMgmt;
|
||||
mgmtFp.stopFp = dmStopMgmt;
|
||||
mgmtFp.requiredFp = dmRequireMgmt;
|
||||
|
||||
dmSetMgmtMsgHandle(pWrapper);
|
||||
pWrapper->name = "dnode";
|
||||
pWrapper->fp = mgmtFp;
|
||||
}
|
|
@ -1,211 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#define _DEFAULT_SOURCE
|
||||
#include "dmImp.h"
|
||||
|
||||
static void dmGetMonitorBasicInfo(SDnode *pDnode, SMonBasicInfo *pInfo) {
|
||||
pInfo->protocol = 1;
|
||||
pInfo->dnode_id = pDnode->data.dnodeId;
|
||||
pInfo->cluster_id = pDnode->data.clusterId;
|
||||
tstrncpy(pInfo->dnode_ep, tsLocalEp, TSDB_EP_LEN);
|
||||
}
|
||||
|
||||
static void dmGetMonitorDnodeInfo(SDnode *pDnode, SMonDnodeInfo *pInfo) {
|
||||
pInfo->uptime = (taosGetTimestampMs() - pDnode->data.rebootTime) / (86400000.0f);
|
||||
pInfo->has_mnode = pDnode->wrappers[MNODE].required;
|
||||
pInfo->has_qnode = pDnode->wrappers[QNODE].required;
|
||||
pInfo->has_snode = pDnode->wrappers[SNODE].required;
|
||||
pInfo->has_bnode = pDnode->wrappers[BNODE].required;
|
||||
tstrncpy(pInfo->logdir.name, tsLogDir, sizeof(pInfo->logdir.name));
|
||||
pInfo->logdir.size = tsLogSpace.size;
|
||||
tstrncpy(pInfo->tempdir.name, tsTempDir, sizeof(pInfo->tempdir.name));
|
||||
pInfo->tempdir.size = tsTempSpace.size;
|
||||
}
|
||||
|
||||
static void dmGetMonitorInfo(SDnode *pDnode, SMonDmInfo *pInfo) {
|
||||
dmGetMonitorBasicInfo(pDnode, &pInfo->basic);
|
||||
dmGetMonitorSysInfo(&pInfo->sys);
|
||||
dmGetMonitorDnodeInfo(pDnode, &pInfo->dnode);
|
||||
}
|
||||
|
||||
void dmSendMonitorReport(SDnode *pDnode) {
|
||||
if (!tsEnableMonitor || tsMonitorFqdn[0] == 0 || tsMonitorPort == 0) return;
|
||||
dTrace("send monitor report to %s:%u", tsMonitorFqdn, tsMonitorPort);
|
||||
|
||||
SMonDmInfo dmInfo = {0};
|
||||
SMonMmInfo mmInfo = {0};
|
||||
SMonVmInfo vmInfo = {0};
|
||||
SMonQmInfo qmInfo = {0};
|
||||
SMonSmInfo smInfo = {0};
|
||||
SMonBmInfo bmInfo = {0};
|
||||
|
||||
SRpcMsg req = {0};
|
||||
SRpcMsg rsp;
|
||||
SEpSet epset = {.inUse = 0, .numOfEps = 1};
|
||||
tstrncpy(epset.eps[0].fqdn, pDnode->data.localFqdn, TSDB_FQDN_LEN);
|
||||
epset.eps[0].port = tsServerPort;
|
||||
|
||||
SMgmtWrapper *pWrapper = NULL;
|
||||
dmGetMonitorInfo(pDnode, &dmInfo);
|
||||
|
||||
bool getFromAPI = !tsMultiProcess;
|
||||
pWrapper = &pDnode->wrappers[MNODE];
|
||||
if (getFromAPI) {
|
||||
if (dmMarkWrapper(pWrapper) == 0) {
|
||||
mmGetMonitorInfo(pWrapper, &mmInfo);
|
||||
dmReleaseWrapper(pWrapper);
|
||||
}
|
||||
} else {
|
||||
if (pWrapper->required) {
|
||||
req.msgType = TDMT_MON_MM_INFO;
|
||||
dmSendRecv(pDnode, &epset, &req, &rsp);
|
||||
if (rsp.code == 0 && rsp.contLen > 0) {
|
||||
tDeserializeSMonMmInfo(rsp.pCont, rsp.contLen, &mmInfo);
|
||||
}
|
||||
rpcFreeCont(rsp.pCont);
|
||||
}
|
||||
}
|
||||
|
||||
pWrapper = &pDnode->wrappers[VNODE];
|
||||
if (getFromAPI) {
|
||||
if (dmMarkWrapper(pWrapper) == 0) {
|
||||
vmGetMonitorInfo(pWrapper, &vmInfo);
|
||||
dmReleaseWrapper(pWrapper);
|
||||
}
|
||||
} else {
|
||||
if (pWrapper->required) {
|
||||
req.msgType = TDMT_MON_VM_INFO;
|
||||
dmSendRecv(pDnode, &epset, &req, &rsp);
|
||||
if (rsp.code == 0 && rsp.contLen > 0) {
|
||||
tDeserializeSMonVmInfo(rsp.pCont, rsp.contLen, &vmInfo);
|
||||
}
|
||||
rpcFreeCont(rsp.pCont);
|
||||
}
|
||||
}
|
||||
|
||||
pWrapper = &pDnode->wrappers[QNODE];
|
||||
if (getFromAPI) {
|
||||
if (dmMarkWrapper(pWrapper) == 0) {
|
||||
qmGetMonitorInfo(pWrapper, &qmInfo);
|
||||
dmReleaseWrapper(pWrapper);
|
||||
}
|
||||
} else {
|
||||
if (pWrapper->required) {
|
||||
req.msgType = TDMT_MON_QM_INFO;
|
||||
dmSendRecv(pDnode, &epset, &req, &rsp);
|
||||
if (rsp.code == 0 && rsp.contLen > 0) {
|
||||
tDeserializeSMonQmInfo(rsp.pCont, rsp.contLen, &qmInfo);
|
||||
}
|
||||
rpcFreeCont(rsp.pCont);
|
||||
}
|
||||
}
|
||||
|
||||
pWrapper = &pDnode->wrappers[SNODE];
|
||||
if (getFromAPI) {
|
||||
if (dmMarkWrapper(pWrapper) == 0) {
|
||||
smGetMonitorInfo(pWrapper, &smInfo);
|
||||
dmReleaseWrapper(pWrapper);
|
||||
}
|
||||
} else {
|
||||
if (pWrapper->required) {
|
||||
req.msgType = TDMT_MON_SM_INFO;
|
||||
dmSendRecv(pDnode, &epset, &req, &rsp);
|
||||
if (rsp.code == 0 && rsp.contLen > 0) {
|
||||
tDeserializeSMonSmInfo(rsp.pCont, rsp.contLen, &smInfo);
|
||||
}
|
||||
rpcFreeCont(rsp.pCont);
|
||||
}
|
||||
}
|
||||
|
||||
pWrapper = &pDnode->wrappers[BNODE];
|
||||
if (getFromAPI) {
|
||||
if (dmMarkWrapper(pWrapper) == 0) {
|
||||
bmGetMonitorInfo(pWrapper, &bmInfo);
|
||||
dmReleaseWrapper(pWrapper);
|
||||
}
|
||||
} else {
|
||||
if (pWrapper->required) {
|
||||
req.msgType = TDMT_MON_BM_INFO;
|
||||
dmSendRecv(pDnode, &epset, &req, &rsp);
|
||||
if (rsp.code == 0 && rsp.contLen > 0) {
|
||||
tDeserializeSMonBmInfo(rsp.pCont, rsp.contLen, &bmInfo);
|
||||
}
|
||||
rpcFreeCont(rsp.pCont);
|
||||
}
|
||||
}
|
||||
|
||||
monSetDmInfo(&dmInfo);
|
||||
monSetMmInfo(&mmInfo);
|
||||
monSetVmInfo(&vmInfo);
|
||||
monSetQmInfo(&qmInfo);
|
||||
monSetSmInfo(&smInfo);
|
||||
monSetBmInfo(&bmInfo);
|
||||
tFreeSMonMmInfo(&mmInfo);
|
||||
tFreeSMonVmInfo(&vmInfo);
|
||||
tFreeSMonQmInfo(&qmInfo);
|
||||
tFreeSMonSmInfo(&smInfo);
|
||||
tFreeSMonBmInfo(&bmInfo);
|
||||
monSendReport();
|
||||
}
|
||||
|
||||
void dmGetVnodeLoads(SDnode *pDnode, SMonVloadInfo *pInfo) {
|
||||
SMgmtWrapper *pWrapper = dmAcquireWrapper(pDnode, VNODE);
|
||||
if (pWrapper == NULL) return;
|
||||
|
||||
bool getFromAPI = !tsMultiProcess;
|
||||
if (getFromAPI) {
|
||||
vmGetVnodeLoads(pWrapper, pInfo);
|
||||
} else {
|
||||
SRpcMsg req = {.msgType = TDMT_MON_VM_LOAD};
|
||||
SRpcMsg rsp = {0};
|
||||
SEpSet epset = {.inUse = 0, .numOfEps = 1};
|
||||
tstrncpy(epset.eps[0].fqdn, pDnode->data.localFqdn, TSDB_FQDN_LEN);
|
||||
epset.eps[0].port = tsServerPort;
|
||||
|
||||
dmSendRecv(pDnode, &epset, &req, &rsp);
|
||||
if (rsp.code == 0 && rsp.contLen > 0) {
|
||||
tDeserializeSMonVloadInfo(rsp.pCont, rsp.contLen, pInfo);
|
||||
}
|
||||
rpcFreeCont(rsp.pCont);
|
||||
}
|
||||
dmReleaseWrapper(pWrapper);
|
||||
}
|
||||
|
||||
void dmGetMnodeLoads(SDnode *pDnode, SMonMloadInfo *pInfo) {
|
||||
SMgmtWrapper *pWrapper = dmAcquireWrapper(pDnode, MNODE);
|
||||
if (pWrapper == NULL) {
|
||||
pInfo->isMnode = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
bool getFromAPI = !tsMultiProcess;
|
||||
if (getFromAPI) {
|
||||
mmGetMnodeLoads(pWrapper, pInfo);
|
||||
} else {
|
||||
SRpcMsg req = {.msgType = TDMT_MON_MM_LOAD};
|
||||
SRpcMsg rsp = {0};
|
||||
SEpSet epset = {.inUse = 0, .numOfEps = 1};
|
||||
tstrncpy(epset.eps[0].fqdn, pDnode->data.localFqdn, TSDB_FQDN_LEN);
|
||||
epset.eps[0].port = tsServerPort;
|
||||
|
||||
dmSendRecv(pDnode, &epset, &req, &rsp);
|
||||
if (rsp.code == 0 && rsp.contLen > 0) {
|
||||
tDeserializeSMonMloadInfo(rsp.pCont, rsp.contLen, pInfo);
|
||||
}
|
||||
rpcFreeCont(rsp.pCont);
|
||||
}
|
||||
dmReleaseWrapper(pWrapper);
|
||||
}
|
|
@ -1,144 +0,0 @@
|
|||
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define _DEFAULT_SOURCE
#include "dmImp.h"

static int32_t dmInitVars(SDnode *pDnode, const SDnodeOpt *pOption) {
  pDnode->data.dnodeId = 0;
  pDnode->data.clusterId = 0;
  pDnode->data.dnodeVer = 0;
  pDnode->data.updateTime = 0;
  pDnode->data.rebootTime = taosGetTimestampMs();
  pDnode->data.dropped = 0;
  pDnode->data.localEp = strdup(pOption->localEp);
  pDnode->data.localFqdn = strdup(pOption->localFqdn);
  pDnode->data.firstEp = strdup(pOption->firstEp);
  pDnode->data.secondEp = strdup(pOption->secondEp);
  pDnode->data.dataDir = strdup(pOption->dataDir);
  pDnode->data.disks = pOption->disks;
  pDnode->data.numOfDisks = pOption->numOfDisks;
  pDnode->data.supportVnodes = pOption->numOfSupportVnodes;
  pDnode->data.serverPort = pOption->serverPort;
  pDnode->ntype = pOption->ntype;

  if (pDnode->data.dataDir == NULL || pDnode->data.localEp == NULL || pDnode->data.localFqdn == NULL ||
      pDnode->data.firstEp == NULL || pDnode->data.secondEp == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    return -1;
  }

  if (!tsMultiProcess || pDnode->ntype == DNODE || pDnode->ntype == NODE_END) {
    pDnode->data.lockfile = dmCheckRunning(pDnode->data.dataDir);
    if (pDnode->data.lockfile == NULL) {
      return -1;
    }
  }

  taosInitRWLatch(&pDnode->data.latch);
  taosThreadMutexInit(&pDnode->mutex, NULL);
  return 0;
}

static void dmClearVars(SDnode *pDnode) {
  for (EDndNodeType n = DNODE; n < NODE_END; ++n) {
    SMgmtWrapper *pMgmt = &pDnode->wrappers[n];
    taosMemoryFreeClear(pMgmt->path);
  }
  if (pDnode->data.lockfile != NULL) {
    taosUnLockFile(pDnode->data.lockfile);
    taosCloseFile(&pDnode->data.lockfile);
    pDnode->data.lockfile = NULL;
  }
  taosMemoryFreeClear(pDnode->data.localEp);
  taosMemoryFreeClear(pDnode->data.localFqdn);
  taosMemoryFreeClear(pDnode->data.firstEp);
  taosMemoryFreeClear(pDnode->data.secondEp);
  taosMemoryFreeClear(pDnode->data.dataDir);
  taosThreadMutexDestroy(&pDnode->mutex);
  memset(&pDnode->mutex, 0, sizeof(pDnode->mutex));
  taosMemoryFree(pDnode);
  dDebug("dnode memory is cleared, data:%p", pDnode);
}

SDnode *dmCreate(const SDnodeOpt *pOption) {
  dDebug("start to create dnode");
  int32_t code = -1;
  char    path[PATH_MAX] = {0};
  SDnode *pDnode = NULL;

  pDnode = taosMemoryCalloc(1, sizeof(SDnode));
  if (pDnode == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    goto _OVER;
  }

  if (dmInitVars(pDnode, pOption) != 0) {
    dError("failed to init variables since %s", terrstr());
    goto _OVER;
  }

  dmSetStatus(pDnode, DND_STAT_INIT);
  dmSetMgmtFp(&pDnode->wrappers[DNODE]);
  mmSetMgmtFp(&pDnode->wrappers[MNODE]);
  vmSetMgmtFp(&pDnode->wrappers[VNODE]);
  qmSetMgmtFp(&pDnode->wrappers[QNODE]);
  smSetMgmtFp(&pDnode->wrappers[SNODE]);
  bmSetMgmtFp(&pDnode->wrappers[BNODE]);

  for (EDndNodeType n = DNODE; n < NODE_END; ++n) {
    SMgmtWrapper *pWrapper = &pDnode->wrappers[n];
    snprintf(path, sizeof(path), "%s%s%s", pDnode->data.dataDir, TD_DIRSEP, pWrapper->name);
    pWrapper->path = strdup(path);
    pWrapper->procShm.id = -1;
    pWrapper->pDnode = pDnode;
    pWrapper->ntype = n;
    pWrapper->procType = DND_PROC_SINGLE;
    taosInitRWLatch(&pWrapper->latch);

    if (pWrapper->path == NULL) {
      terrno = TSDB_CODE_OUT_OF_MEMORY;
      goto _OVER;
    }

    if (n != DNODE && dmReadShmFile(pWrapper) != 0) {
      dError("node:%s, failed to read shm file since %s", pWrapper->name, terrstr());
      goto _OVER;
    }
  }

  if (dmInitMsgHandle(pDnode) != 0) {
    dError("failed to init msg handles since %s", terrstr());
    goto _OVER;
  }

  dInfo("dnode is created, data:%p", pDnode);
  code = 0;

_OVER:
  if (code != 0 && pDnode) {
    dmClearVars(pDnode);
    pDnode = NULL;
    dError("failed to create dnode since %s", terrstr());
  }

  return pDnode;
}

void dmClose(SDnode *pDnode) {
  if (pDnode == NULL) return;
  dmClearVars(pDnode);
  dInfo("dnode is closed, data:%p", pDnode);
}
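dmCreate() and dmClose() bracket the dnode lifetime: dmCreate() copies the options, prepares one wrapper per node type and reads their shm files, and dmClose() releases everything through dmClearVars(). Below is a hedged lifecycle sketch, not part of this diff; the SDnodeOpt fields named in the comment are the ones dmInitVars() reads, but their exact types are not visible here, so their initialization is left elided.

#include "dmImp.h"

int32_t dmRunOnce(void) {
  SDnodeOpt opt = {0};
  /* fill opt.localEp, opt.localFqdn, opt.firstEp, opt.secondEp, opt.dataDir,
   * opt.serverPort, opt.numOfSupportVnodes, opt.disks, opt.numOfDisks, opt.ntype
   * here -- these are the fields dmInitVars() consumes. */

  SDnode *pDnode = dmCreate(&opt);
  if (pDnode == NULL) return -1;   /* terrno is set inside dmCreate() on failure */

  /* ... start workers and background threads, run the node ... */

  dmClose(pDnode);                 /* frees everything allocated by dmCreate() */
  return 0;
}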
@@ -1,195 +0,0 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define _DEFAULT_SOURCE
#include "dmImp.h"

static void *dmStatusThreadFp(void *param) {
  SDnode *pDnode = param;
  int64_t lastTime = taosGetTimestampMs();

  setThreadName("dnode-status");

  while (1) {
    taosThreadTestCancel();
    taosMsleep(200);

    if (pDnode->status != DND_STAT_RUNNING || pDnode->data.dropped) {
      continue;
    }

    int64_t curTime = taosGetTimestampMs();
    float   interval = (curTime - lastTime) / 1000.0f;
    if (interval >= tsStatusInterval) {
      dmSendStatusReq(pDnode);
      lastTime = curTime;
    }
  }

  return NULL;
}

static void *dmMonitorThreadFp(void *param) {
  SDnode *pDnode = param;
  int64_t lastTime = taosGetTimestampMs();

  setThreadName("dnode-monitor");

  while (1) {
    taosThreadTestCancel();
    taosMsleep(200);

    if (pDnode->status != DND_STAT_RUNNING || pDnode->data.dropped) {
      continue;
    }

    int64_t curTime = taosGetTimestampMs();
    float   interval = (curTime - lastTime) / 1000.0f;
    if (interval >= tsMonitorInterval) {
      dmSendMonitorReport(pDnode);
      lastTime = curTime;
    }
  }

  return NULL;
}

int32_t dmStartStatusThread(SDnode *pDnode) {
  pDnode->data.statusThreadId = taosCreateThread(dmStatusThreadFp, pDnode);
  if (pDnode->data.statusThreadId == NULL) {
    dError("failed to init dnode status thread");
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    return -1;
  }

  dmReportStartup(pDnode, "dnode-status", "initialized");
  return 0;
}

void dmStopStatusThread(SDnode *pDnode) {
  if (pDnode->data.statusThreadId != NULL) {
    taosDestoryThread(pDnode->data.statusThreadId);
    pDnode->data.statusThreadId = NULL;
  }
}

int32_t dmStartMonitorThread(SDnode *pDnode) {
  pDnode->data.monitorThreadId = taosCreateThread(dmMonitorThreadFp, pDnode);
  if (pDnode->data.monitorThreadId == NULL) {
    dError("failed to init dnode monitor thread");
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    return -1;
  }

  dmReportStartup(pDnode, "dnode-monitor", "initialized");
  return 0;
}

void dmStopMonitorThread(SDnode *pDnode) {
  if (pDnode->data.monitorThreadId != NULL) {
    taosDestoryThread(pDnode->data.monitorThreadId);
    pDnode->data.monitorThreadId = NULL;
  }
}
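Both thread bodies above follow the same polling pattern: wake every 200 ms, skip work unless the dnode is DND_STAT_RUNNING and not dropped, and fire only once the configured interval (tsStatusInterval or tsMonitorInterval, in seconds) has elapsed; the taosThreadTestCancel() call keeps them promptly cancellable. A hedged sketch, not part of this diff, of how the start/stop pairs might be wired into dnode startup and shutdown, using only symbols visible here:

#include "dmImp.h"

static int32_t dmStartBgThreads(SDnode *pDnode) {
  if (dmStartStatusThread(pDnode) != 0) return -1;  /* heartbeat every tsStatusInterval seconds */
  if (dmStartMonitorThread(pDnode) != 0) {          /* metrics every tsMonitorInterval seconds */
    dmStopStatusThread(pDnode);                     /* roll back the one that did start */
    return -1;
  }
  return 0;
}

static void dmStopBgThreads(SDnode *pDnode) {
  dmStopStatusThread(pDnode);
  dmStopMonitorThread(pDnode);
}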
static void dmProcessMgmtQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) {
  SDnode *pDnode = pInfo->ahandle;

  int32_t code = -1;
  tmsg_t  msgType = pMsg->rpcMsg.msgType;
  dTrace("msg:%p, will be processed in dnode-mgmt queue", pMsg);

  switch (msgType) {
    case TDMT_DND_CONFIG_DNODE:
      code = dmProcessConfigReq(pDnode, pMsg);
      break;
    case TDMT_MND_AUTH_RSP:
      code = dmProcessAuthRsp(pDnode, pMsg);
      break;
    case TDMT_MND_GRANT_RSP:
      code = dmProcessGrantRsp(pDnode, pMsg);
      break;
    case TDMT_DND_CREATE_MNODE:
      code = dmProcessCreateNodeReq(pDnode, MNODE, pMsg);
      break;
    case TDMT_DND_DROP_MNODE:
      code = dmProcessDropNodeReq(pDnode, MNODE, pMsg);
      break;
    case TDMT_DND_CREATE_QNODE:
      code = dmProcessCreateNodeReq(pDnode, QNODE, pMsg);
      break;
    case TDMT_DND_DROP_QNODE:
      code = dmProcessDropNodeReq(pDnode, QNODE, pMsg);
      break;
    case TDMT_DND_CREATE_SNODE:
      code = dmProcessCreateNodeReq(pDnode, SNODE, pMsg);
      break;
    case TDMT_DND_DROP_SNODE:
      code = dmProcessDropNodeReq(pDnode, SNODE, pMsg);
      break;
    case TDMT_DND_CREATE_BNODE:
      code = dmProcessCreateNodeReq(pDnode, BNODE, pMsg);
      break;
    case TDMT_DND_DROP_BNODE:
      code = dmProcessDropNodeReq(pDnode, BNODE, pMsg);
      break;
    default:
      break;
  }

  if (msgType & 1u) {
    if (code != 0 && terrno != 0) code = terrno;
    SRpcMsg rsp = {
        .handle = pMsg->rpcMsg.handle,
        .ahandle = pMsg->rpcMsg.ahandle,
        .code = code,
        .refId = pMsg->rpcMsg.refId,
    };
    rpcSendResponse(&rsp);
  }

  dTrace("msg:%p, is freed, result:0x%04x:%s", pMsg, code & 0XFFFF, tstrerror(code));
  rpcFreeCont(pMsg->rpcMsg.pCont);
  taosFreeQitem(pMsg);
}

int32_t dmStartWorker(SDnode *pDnode) {
  SSingleWorkerCfg cfg = {
      .min = 1,
      .max = 1,
      .name = "dnode-mgmt",
      .fp = (FItem)dmProcessMgmtQueue,
      .param = pDnode,
  };
  if (tSingleWorkerInit(&pDnode->data.mgmtWorker, &cfg) != 0) {
    dError("failed to start dnode-mgmt worker since %s", terrstr());
    return -1;
  }

  dDebug("dnode workers are initialized");
  return 0;
}

void dmStopWorker(SDnode *pDnode) {
  tSingleWorkerCleanup(&pDnode->data.mgmtWorker);
  dDebug("dnode workers are closed");
}

int32_t dmProcessMgmtMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
  SSingleWorker *pWorker = &pWrapper->pDnode->data.mgmtWorker;
  dTrace("msg:%p, put into worker %s", pMsg, pWorker->name);
  taosWriteQitem(pWorker->queue, pMsg);
  return 0;
}
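dmStartWorker() registers dmProcessMgmtQueue() as the consumer of a single-threaded "dnode-mgmt" queue (min = max = 1), so the admin requests it handles (node create/drop, dnode config, auth and grant responses) are processed serially; dmProcessMgmtMsg() is the producer side that enqueues incoming messages. A hedged pairing sketch, not part of this diff; the step that routes RPC message types to dmProcessMgmtMsg() is left as a comment because that registration API is not shown here.

#include "dmImp.h"

static int32_t dmInitMgmtPipeline(SDnode *pDnode) {
  if (dmStartWorker(pDnode) != 0) return -1;  /* creates the "dnode-mgmt" single worker */
  /* ... register dmProcessMgmtMsg() as the handler for the TDMT_DND_* requests
   *     and TDMT_MND_*_RSP responses listed in dmProcessMgmtQueue() ... */
  return 0;
}

static void dmCleanupMgmtPipeline(SDnode *pDnode) {
  dmStopWorker(pDnode);  /* tears down the queue via tSingleWorkerCleanup() */
}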
@@ -1,10 +0,0 @@
aux_source_directory(src DNODE_INTERFACE)
add_library(dnode_interface STATIC ${DNODE_INTERFACE})
target_include_directories(
    dnode_interface
    PUBLIC "${TD_SOURCE_DIR}/include/dnode/mgmt"
    PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
target_link_libraries(
    dnode_interface cjson mnode vnode qnode snode bnode wal sync taos_static tfs monitor
)